mips_cache.h 6.2 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216
  1. /*
  2. * Copyright (c) 2006-2021, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2016-09-07 Urey the first version
  9. */
  10. #ifndef _MIPS_CACHE_H_
  11. #define _MIPS_CACHE_H_
  12. #ifndef __ASSEMBLER__
  13. #include <rtdef.h>
  14. #include <mips_cfg.h>
/*
 * Cache operations available on all MIPS processors with R4000-style
 * caches. These values are the 5-bit op field of the MIPS "cache"
 * instruction (see cache_op() below).
 */
#define INDEX_INVALIDATE_I 0x00
#define INDEX_WRITEBACK_INV_D 0x01
#define INDEX_LOAD_TAG_I 0x04
#define INDEX_LOAD_TAG_D 0x05
#define INDEX_STORE_TAG_I 0x08
#define INDEX_STORE_TAG_D 0x09
#define HIT_INVALIDATE_I 0x10
#define HIT_INVALIDATE_D 0x11
#define HIT_WRITEBACK_INV_D 0x15

/*
 * Line-locking cacheops.
 *
 * The lock state is cleared by executing an Index Invalidate, Index
 * Writeback Invalidate, Hit Invalidate, or Hit Writeback Invalidate
 * operation on the locked line, or via an Index Store Tag operation
 * with the lock bit reset in the TagLo register.
 */
#define FETCH_AND_LOCK_I 0x1c
#define FETCH_AND_LOCK_D 0x1d
/*
 * DMA transfer direction, passed to r4k_dma_cache_sync() so it can pick
 * the appropriate cache maintenance (same names/values as the Linux DMA
 * API).
 */
enum dma_data_direction
{
    DMA_BIDIRECTIONAL = 0, /* device may both read and write the buffer */
    DMA_TO_DEVICE = 1,     /* memory -> device: CPU wrote, device will read */
    DMA_FROM_DEVICE = 2,   /* device -> memory: device wrote, CPU will read */
    DMA_NONE = 3,          /* no transfer; used for sanity checks */
};
/*
 * R4000-specific cacheops
 */
#define CREATE_DIRTY_EXCL_D 0x0d
#define FILL 0x14
#define HIT_WRITEBACK_I 0x18
#define HIT_WRITEBACK_D 0x19

/*
 * R4000SC and R4400SC-specific cacheops (S* = secondary cache)
 */
#define INDEX_INVALIDATE_SI 0x02
#define INDEX_WRITEBACK_INV_SD 0x03
#define INDEX_LOAD_TAG_SI 0x06
#define INDEX_LOAD_TAG_SD 0x07
#define INDEX_STORE_TAG_SI 0x0A
#define INDEX_STORE_TAG_SD 0x0B
#define CREATE_DIRTY_EXCL_SD 0x0f
#define HIT_INVALIDATE_SI 0x12
#define HIT_INVALIDATE_SD 0x13
#define HIT_WRITEBACK_INV_SD 0x17
#define HIT_WRITEBACK_SD 0x1b
#define HIT_SET_VIRTUAL_SI 0x1e
#define HIT_SET_VIRTUAL_SD 0x1f

/*
 * R5000-specific cacheops
 */
#define R5K_PAGE_INVALIDATE_S 0x17

/*
 * RM7000-specific cacheops
 */
#define PAGE_INVALIDATE_T 0x16

/*
 * R10000-specific cacheops
 *
 * Cacheops 0x02, 0x06, 0x0a, 0x0c-0x0e, 0x16, 0x1a and 0x1e are unused.
 * Most of the _S cacheops are identical to the R4000SC _SD cacheops.
 * NOTE: several of these reuse op codes from the sections above; only
 * one interpretation is valid for a given core.
 */
#define INDEX_WRITEBACK_INV_S 0x03
#define INDEX_LOAD_TAG_S 0x07
#define INDEX_STORE_TAG_S 0x0B
#define HIT_INVALIDATE_S 0x13
#define CACHE_BARRIER 0x14
#define HIT_WRITEBACK_INV_S 0x17
#define INDEX_LOAD_DATA_I 0x18
#define INDEX_LOAD_DATA_D 0x19
#define INDEX_LOAD_DATA_S 0x1b
#define INDEX_STORE_DATA_I 0x1c
#define INDEX_STORE_DATA_D 0x1d
#define INDEX_STORE_DATA_S 0x1f
/*
 * Issue one MIPS "cache" instruction of type @op on the line containing
 * @addr.
 *
 * @op   must be a compile-time constant (the "i" constraint), one of the
 *       cacheop values defined above.
 * @addr the "R" constraint makes the compiler build an offset(base)
 *       memory operand for this address without actually loading from it.
 */
#define cache_op(op, addr) \
    __asm__ __volatile__( \
        ".set push\n" \
        ".set noreorder\n" \
        ".set mips3\n" \
        "cache %0, %1\n" \
        ".set pop\n" \
        : \
        : "i" (op), "R" (*(unsigned char *)(addr)))
  102. #define cache16_unroll32(base, op) \
  103. __asm__ __volatile__( \
  104. " .set noreorder \n" \
  105. " .set mips3 \n" \
  106. " cache %1, 0x000(%0); cache %1, 0x010(%0) \n" \
  107. " cache %1, 0x020(%0); cache %1, 0x030(%0) \n" \
  108. " cache %1, 0x040(%0); cache %1, 0x050(%0) \n" \
  109. " cache %1, 0x060(%0); cache %1, 0x070(%0) \n" \
  110. " cache %1, 0x080(%0); cache %1, 0x090(%0) \n" \
  111. " cache %1, 0x0a0(%0); cache %1, 0x0b0(%0) \n" \
  112. " cache %1, 0x0c0(%0); cache %1, 0x0d0(%0) \n" \
  113. " cache %1, 0x0e0(%0); cache %1, 0x0f0(%0) \n" \
  114. " cache %1, 0x100(%0); cache %1, 0x110(%0) \n" \
  115. " cache %1, 0x120(%0); cache %1, 0x130(%0) \n" \
  116. " cache %1, 0x140(%0); cache %1, 0x150(%0) \n" \
  117. " cache %1, 0x160(%0); cache %1, 0x170(%0) \n" \
  118. " cache %1, 0x180(%0); cache %1, 0x190(%0) \n" \
  119. " cache %1, 0x1a0(%0); cache %1, 0x1b0(%0) \n" \
  120. " cache %1, 0x1c0(%0); cache %1, 0x1d0(%0) \n" \
  121. " cache %1, 0x1e0(%0); cache %1, 0x1f0(%0) \n" \
  122. " .set mips0 \n" \
  123. " .set reorder \n" \
  124. : \
  125. : "r" (base), \
  126. "i" (op));
/* Invalidate one icache line selected by index (addr supplies the index bits). */
static inline void flush_icache_line_indexed(rt_ubase_t addr)
{
    cache_op(INDEX_INVALIDATE_I, addr);
}
/* Write back (if dirty) and invalidate one dcache line selected by index. */
static inline void flush_dcache_line_indexed(rt_ubase_t addr)
{
    cache_op(INDEX_WRITEBACK_INV_D, addr);
}
/* Invalidate the icache line containing addr, if it is currently cached (hit op). */
static inline void flush_icache_line(rt_ubase_t addr)
{
    cache_op(HIT_INVALIDATE_I, addr);
}
/* Fetch the line containing addr into the icache and lock it there
 * (see the lock-state note above for how the lock is released). */
static inline void lock_icache_line(rt_ubase_t addr)
{
    cache_op(FETCH_AND_LOCK_I, addr);
}
/* Fetch the line containing addr into the dcache and lock it there. */
static inline void lock_dcache_line(rt_ubase_t addr)
{
    cache_op(FETCH_AND_LOCK_D, addr);
}
/* Write back (if dirty) and invalidate the dcache line containing addr (hit op). */
static inline void flush_dcache_line(rt_ubase_t addr)
{
    cache_op(HIT_WRITEBACK_INV_D, addr);
}
/* Invalidate the dcache line containing addr WITHOUT writing it back --
 * any dirty data in the line is discarded (hit op). */
static inline void invalidate_dcache_line(rt_ubase_t addr)
{
    cache_op(HIT_INVALIDATE_D, addr);
}
  155. static inline void blast_dcache16(void)
  156. {
  157. rt_ubase_t start = KSEG0BASE;
  158. rt_ubase_t end = start + g_mips_core.dcache_size;
  159. rt_ubase_t addr;
  160. for (addr = start; addr < end; addr += g_mips_core.dcache_line_size)
  161. cache16_unroll32(addr, INDEX_WRITEBACK_INV_D);
  162. }
  163. static inline void inv_dcache16(void)
  164. {
  165. rt_ubase_t start = KSEG0BASE;
  166. rt_ubase_t end = start + g_mips_core.dcache_size;
  167. rt_ubase_t addr;
  168. for (addr = start; addr < end; addr += g_mips_core.dcache_line_size)
  169. cache16_unroll32(addr, HIT_INVALIDATE_D);
  170. }
  171. static inline void blast_icache16(void)
  172. {
  173. rt_ubase_t start = KSEG0BASE;
  174. rt_ubase_t end = start + g_mips_core.icache_size;
  175. rt_ubase_t addr;
  176. for (addr = start; addr < end; addr += g_mips_core.icache_line_size)
  177. cache16_unroll32(addr, INDEX_INVALIDATE_I);
  178. }
/*
 * Public R4000-style cache maintenance API, implemented in the
 * corresponding C file (not visible here -- semantics below are inferred
 * from the names; confirm against the implementation).
 */
void r4k_cache_init(void);                                    /* probe/initialise cache parameters */
void r4k_cache_flush_all(void);                               /* flush both i- and d-cache */
void r4k_icache_flush_all(void);                              /* invalidate the whole icache */
void r4k_icache_flush_range(rt_ubase_t addr, rt_ubase_t size);/* invalidate icache over [addr, addr+size) */
void r4k_icache_lock_range(rt_ubase_t addr, rt_ubase_t size); /* fetch-and-lock icache over [addr, addr+size) */
void r4k_dcache_inv(rt_ubase_t addr, rt_ubase_t size);        /* invalidate dcache over [addr, addr+size) */
void r4k_dcache_wback_inv(rt_ubase_t addr, rt_ubase_t size);  /* write back + invalidate dcache over range */
void r4k_dma_cache_sync(rt_ubase_t addr, rt_size_t size, enum dma_data_direction direction);
                                                              /* cache maintenance for a DMA transfer */
  187. #endif
  188. #endif /* _MIPS_CACHE_H_ */