/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2019-03-29     quanzhao     the first version
 */
#include <rthw.h>
#include <rtdef.h>
  12. rt_inline rt_uint32_t rt_cpu_icache_line_size(void)
  13. {
  14. rt_uint32_t ctr;
  15. asm volatile ("mrc p15, 0, %0, c0, c0, 1" : "=r"(ctr));
  16. return 4 << (ctr & 0xF);
  17. }
  18. rt_inline rt_uint32_t rt_cpu_dcache_line_size(void)
  19. {
  20. rt_uint32_t ctr;
  21. asm volatile ("mrc p15, 0, %0, c0, c0, 1" : "=r"(ctr));
  22. return 4 << ((ctr >> 16) & 0xF);
  23. }
  24. void rt_hw_cpu_icache_invalidate(void *addr, int size)
  25. {
  26. rt_uint32_t line_size = rt_cpu_icache_line_size();
  27. rt_uint32_t start_addr = (rt_uint32_t)addr;
  28. rt_uint32_t end_addr = (rt_uint32_t) addr + size + line_size - 1;
  29. asm volatile ("dmb":::"memory");
  30. start_addr &= ~(line_size - 1);
  31. end_addr &= ~(line_size - 1);
  32. while (start_addr < end_addr)
  33. {
  34. asm volatile ("mcr p15, 0, %0, c7, c5, 1" :: "r"(start_addr)); /* icimvau */
  35. start_addr += line_size;
  36. }
  37. asm volatile ("dsb\n\tisb":::"memory");
  38. }
  39. void rt_hw_cpu_dcache_invalidate(void *addr, int size)
  40. {
  41. rt_uint32_t line_size = rt_cpu_dcache_line_size();
  42. rt_uint32_t start_addr = (rt_uint32_t)addr;
  43. rt_uint32_t end_addr = (rt_uint32_t) addr + size + line_size - 1;
  44. asm volatile ("dmb":::"memory");
  45. start_addr &= ~(line_size - 1);
  46. end_addr &= ~(line_size - 1);
  47. while (start_addr < end_addr)
  48. {
  49. asm volatile ("mcr p15, 0, %0, c7, c6, 1" :: "r"(start_addr)); /* dcimvac */
  50. start_addr += line_size;
  51. }
  52. asm volatile ("dsb":::"memory");
  53. }
  54. void rt_hw_cpu_dcache_clean(void *addr, int size)
  55. {
  56. rt_uint32_t line_size = rt_cpu_dcache_line_size();
  57. rt_uint32_t start_addr = (rt_uint32_t)addr;
  58. rt_uint32_t end_addr = (rt_uint32_t) addr + size + line_size - 1;
  59. asm volatile ("dmb":::"memory");
  60. start_addr &= ~(line_size - 1);
  61. end_addr &= ~(line_size - 1);
  62. while (start_addr < end_addr)
  63. {
  64. asm volatile ("mcr p15, 0, %0, c7, c10, 1" :: "r"(start_addr)); /* dccmvac */
  65. start_addr += line_size;
  66. }
  67. asm volatile ("dsb":::"memory");
  68. }
  69. void rt_hw_cpu_dcache_clean_and_invalidate(void *addr, int size)
  70. {
  71. rt_uint32_t line_size = rt_cpu_dcache_line_size();
  72. rt_uint32_t start_addr = (rt_uint32_t)addr;
  73. rt_uint32_t end_addr = (rt_uint32_t) addr + size + line_size - 1;
  74. asm volatile ("dmb":::"memory");
  75. start_addr &= ~(line_size - 1);
  76. end_addr &= ~(line_size - 1);
  77. while (start_addr < end_addr)
  78. {
  79. asm volatile ("mcr p15, 0, %0, c7, c10, 1" :: "r"(start_addr)); /* dccmvac */
  80. asm volatile ("mcr p15, 0, %0, c7, c6, 1" :: "r"(start_addr)); /* dcimvac */
  81. start_addr += line_size;
  82. }
  83. asm volatile ("dsb":::"memory");
  84. }
  85. void rt_hw_cpu_icache_ops(int ops, void *addr, int size)
  86. {
  87. if (ops == RT_HW_CACHE_INVALIDATE)
  88. {
  89. rt_hw_cpu_icache_invalidate(addr, size);
  90. }
  91. }
  92. void rt_hw_cpu_dcache_ops(int ops, void *addr, int size)
  93. {
  94. if (ops == RT_HW_CACHE_FLUSH)
  95. {
  96. rt_hw_cpu_dcache_clean(addr, size);
  97. }
  98. else if (ops == RT_HW_CACHE_INVALIDATE)
  99. {
  100. rt_hw_cpu_dcache_invalidate(addr, size);
  101. }
  102. }
/*
 * Report i-cache status.
 *
 * Stub on this port: always returns 0 — presumably meaning "not
 * enabled / not reported"; confirm against the rt_hw cache API contract.
 */
rt_base_t rt_hw_cpu_icache_status(void)
{
    return 0;
}
/*
 * Report d-cache status.
 *
 * Stub on this port: always returns 0 — presumably meaning "not
 * enabled / not reported"; confirm against the rt_hw cache API contract.
 */
rt_base_t rt_hw_cpu_dcache_status(void)
{
    return 0;
}
  111. #ifdef RT_USING_LWP
  112. #define ICACHE (1<<0)
  113. #define DCACHE (1<<1)
  114. #define BCACHE (ICACHE|DCACHE)
  115. int sys_cacheflush(void *addr, int size, int cache)
  116. {
  117. if ((size_t)addr < KERNEL_VADDR_START && (size_t)addr + size <= KERNEL_VADDR_START)
  118. {
  119. if ((cache & DCACHE) != 0)
  120. {
  121. rt_hw_cpu_dcache_clean_and_invalidate(addr, size);
  122. }
  123. if ((cache & ICACHE) != 0)
  124. {
  125. rt_hw_cpu_icache_invalidate(addr, size);
  126. }
  127. return 0;
  128. }
  129. return -1;
  130. }
  131. #endif