cache.S

/*
 * Copyright (c) 2006-2020, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2020-03-17     bigmagic     first version
 */

/*
 * void __asm_dcache_level(level)
 *
 * flush or invalidate one level of the data cache by set/way.
 *
 * x0: cache level
 * x1: 0 clean & invalidate, 1 invalidate only
 * x2~x9, x12: clobbered
 */
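/*
 * For reference (a sketch of the architectural encoding, per the Arm ARM),
 * the set/way operand assembled into x9 below packs three fields, where
 * A = 32 - clz(#ways - 1), L = log2(line length in bytes), and
 * S = log2 (rounded up) of the number of sets:
 *
 *   bits [31 : 32-A]   way number
 *   bits [L+S-1 : L]   set number
 *   bits [3 : 1]       cache level (0-based, as shifted into x12)
 */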
.globl __asm_dcache_level
__asm_dcache_level:
    lsl     x12, x0, #1
    msr     csselr_el1, x12         /* select cache level */
    isb                             /* sync change of csselr_el1 */
    mrs     x6, ccsidr_el1          /* read the new ccsidr_el1 */
    and     x2, x6, #7              /* x2 <- log2(cache line size) - 4 */
    add     x2, x2, #4              /* x2 <- log2(cache line size) */
    mov     x3, #0x3ff
    and     x3, x3, x6, lsr #3      /* x3 <- max number of #ways */
    clz     w5, w3                  /* bit position of #ways */
    mov     x4, #0x7fff
    and     x4, x4, x6, lsr #13     /* x4 <- max number of #sets */
    /* x12 <- cache level << 1 */
    /* x2  <- line length offset */
    /* x3  <- number of cache ways - 1 */
    /* x4  <- number of cache sets - 1 */
    /* x5  <- bit position of #ways */
loop_set:
    mov     x6, x3                  /* x6 <- working copy of #ways */
loop_way:
    lsl     x7, x6, x5
    orr     x9, x12, x7             /* map way and level to cisw value */
    lsl     x7, x4, x2
    orr     x9, x9, x7              /* map set number to cisw value */
    tbz     w1, #0, 1f
    dc      isw, x9                 /* invalidate by set/way */
    b       2f
1:  dc      cisw, x9                /* clean & invalidate by set/way */
2:  subs    x6, x6, #1              /* decrement the way */
    b.ge    loop_way
    subs    x4, x4, #1              /* decrement the set */
    b.ge    loop_set
    ret

/*
 * void __asm_dcache_all(int invalidate_only)
 *
 * x0: 0 clean & invalidate, 1 invalidate only
 *
 * flush or invalidate all data cache by SET/WAY.
 */
.globl __asm_dcache_all
__asm_dcache_all:
    mov     x1, x0
    dsb     sy
    mrs     x10, clidr_el1          /* read clidr_el1 */
    lsr     x11, x10, #24
    and     x11, x11, #0x7          /* x11 <- loc */
    cbz     x11, finished           /* if loc is 0, exit */
    mov     x15, lr
    mov     x0, #0                  /* start flush at cache level 0 */
    /* x0  <- cache level */
    /* x10 <- clidr_el1 */
    /* x11 <- loc */
    /* x15 <- return address */
loop_level:
    lsl     x12, x0, #1
    add     x12, x12, x0            /* x12 <- tripled cache level */
    lsr     x12, x10, x12
    and     x12, x12, #7            /* x12 <- cache type */
    cmp     x12, #2
    b.lt    skip                    /* skip if no cache or icache only */
    bl      __asm_dcache_level      /* x1 = 0 flush, 1 invalidate */
skip:
    add     x0, x0, #1              /* increment cache level */
    cmp     x11, x0
    b.gt    loop_level
    mov     x0, #0
    msr     csselr_el1, x0          /* restore csselr_el1 */
    dsb     sy
    isb
    mov     lr, x15
finished:
    ret

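/*
 * Note (architectural background from the Arm ARM): each 3-bit cache-type
 * field in clidr_el1 reads 0 for no cache, 1 for instruction cache only,
 * 2 for data only, 3 for separate i- and d-caches, and 4 for unified, so
 * the "cmp x12, #2; b.lt skip" above passes over levels that hold no data
 * cache to maintain.
 */
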
.globl __asm_flush_dcache_all
__asm_flush_dcache_all:
    mov     x0, #0                  /* clean & invalidate */
    b       __asm_dcache_all

.globl __asm_invalidate_dcache_all
__asm_invalidate_dcache_all:
    mov     x0, #0x1                /* invalidate only */
    b       __asm_dcache_all

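/*
 * Assumed C-side declarations for the two wrappers above (hypothetical;
 * the real prototypes belong in the port's header, not in this file):
 *
 *   void __asm_flush_dcache_all(void);       // clean & invalidate all levels
 *   void __asm_invalidate_dcache_all(void);  // invalidate only
 */
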
/*
 * void __asm_flush_dcache_range(start, end)
 *
 * clean & invalidate data cache in the range
 *
 * x0: start address
 * x1: end address
 */
.globl __asm_flush_dcache_range
__asm_flush_dcache_range:
    mrs     x3, ctr_el0
    lsr     x3, x3, #16
    and     x3, x3, #0xf            /* x3 <- ctr_el0.DminLine */
    mov     x2, #4
    lsl     x2, x2, x3              /* x2 <- 4 << DminLine */
    /* x2 <- minimal data cache line size, in bytes */
    sub     x3, x2, #1
    bic     x0, x0, x3              /* align start down to a line boundary */
1:  dc      civac, x0               /* clean & invalidate data or unified cache */
    add     x0, x0, x2
    cmp     x0, x1
    b.lo    1b
    dsb     sy
    ret

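/*
 * Sketch of a typical caller (hypothetical C fragment; buf and len are
 * assumptions, not part of this file):
 *
 *   extern void __asm_flush_dcache_range(unsigned long start,
 *                                        unsigned long end);
 *
 *   // push a freshly written DMA buffer out to main memory
 *   __asm_flush_dcache_range((unsigned long)buf, (unsigned long)buf + len);
 */
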
/*
 * void __asm_invalidate_dcache_range(start, end)
 *
 * invalidate data cache in the range
 *
 * x0: start address
 * x1: end address
 */
.globl __asm_invalidate_dcache_range
__asm_invalidate_dcache_range:
    mrs     x3, ctr_el0
    lsr     x3, x3, #16
    and     x3, x3, #0xf            /* x3 <- ctr_el0.DminLine */
    mov     x2, #4
    lsl     x2, x2, x3              /* x2 <- 4 << DminLine */
    /* x2 <- minimal data cache line size, in bytes */
    sub     x3, x2, #1
    bic     x0, x0, x3              /* align start down to a line boundary */
1:  dc      ivac, x0                /* invalidate data or unified cache */
    add     x0, x0, x2
    cmp     x0, x1
    b.lo    1b
    dsb     sy
    ret

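/*
 * Caution: the start address is rounded down to a line boundary above, and
 * dc ivac discards whole lines, so invalidating a range whose endpoints are
 * not cache-line aligned can throw away dirty data that shares a line with
 * the range. Callers should pass line-aligned bounds.
 */
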
/*
 * void __asm_invalidate_icache_range(start, end)
 *
 * invalidate icache in the range
 *
 * x0: start address
 * x1: end address
 */
.globl __asm_invalidate_icache_range
__asm_invalidate_icache_range:
    mrs     x3, ctr_el0
    and     x3, x3, #0xf            /* x3 <- ctr_el0.IminLine */
    mov     x2, #4
    lsl     x2, x2, x3              /* x2 <- 4 << IminLine */
    /* x2 <- minimal icache line size, in bytes */
    sub     x3, x2, #1
    bic     x0, x0, x3              /* align start down to a line boundary */
1:  ic      ivau, x0                /* invalidate instruction or unified cache */
    add     x0, x0, x2
    cmp     x0, x1
    b.lo    1b
    dsb     sy
    ret

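/*
 * Note: this routine ends with a dsb but no isb; a caller that is about to
 * execute from the invalidated range still needs an isb (or an exception
 * return) before the newly fetched instructions are guaranteed to be seen.
 */
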
/*
 * void __asm_invalidate_icache_all(void)
 *
 * invalidate all instruction cache entries.
 */
.globl __asm_invalidate_icache_all
__asm_invalidate_icache_all:
    dsb     sy
    ic      ialluis
    isb     sy
    ret

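/*
 * int __asm_flush_l3_cache(void)
 *
 * stub for platforms with no software-maintained outer (L3) cache;
 * always returns 0 (success).
 */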
.globl __asm_flush_l3_cache
__asm_flush_l3_cache:
    mov     x0, #0                  /* return status as success */
    ret