/*
 * Copyright (c) 2010-2012, Freescale Semiconductor, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * o Redistributions of source code must retain the above copyright notice, this list
 *   of conditions and the following disclaimer.
 *
 * o Redistributions in binary form must reproduce the above copyright notice, this
 *   list of conditions and the following disclaimer in the documentation and/or
 *   other materials provided with the distribution.
 *
 * o Neither the name of Freescale Semiconductor, Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived from this
 *   software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*!
 * @file cortexA9_gcc.S
 * @brief This file contains Cortex-A9 support functions.
 */
    .code 32
    .section ".text", "ax"
/*
 * bool arm_set_interrupt_state(bool enable)
 *
 * Enables or disables IRQ and FIQ; returns true if interrupts were
 * previously enabled.
 */
    .global arm_set_interrupt_state
    .func arm_set_interrupt_state
arm_set_interrupt_state:
    mrs     r2, CPSR                @ read CPSR (Current Program Status Register)
    teq     r0, #0
    bicne   r1, r2, #0xc0           @ enable: clear the IRQ and FIQ mask bits
    orreq   r1, r2, #0xc0           @ disable: set the IRQ and FIQ mask bits
    msr     CPSR_c, r1
    tst     r2, #0x80               @ was the IRQ mask bit set before?
    movne   r0, #0                  @ yes: interrupts were disabled
    moveq   r0, #1                  @ no: interrupts were enabled
    bx      lr
    .endfunc
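
@ Usage sketch (hypothetical caller, not part of the original file): bracket
@ a critical section by saving and restoring the previous interrupt state.
@
@     mov     r0, #0
@     bl      arm_set_interrupt_state @ mask IRQ/FIQ; old state returned in r0
@     mov     r4, r0                  @ keep old state in a callee-saved register
@     @ ... critical section ...
@     mov     r0, r4
@     bl      arm_set_interrupt_state @ restore the saved state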
    .global cpu_get_current
@ int cpu_get_current(void)@
@ get current CPU ID
    .func cpu_get_current
cpu_get_current:
    mrc     p15, 0, r0, c0, c0, 5   @ read MPIDR (Multiprocessor Affinity Register)
    and     r0, r0, #3              @ CPU ID is in the low two bits
    bx      lr
    .endfunc @cpu_get_current()@
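
@ Usage sketch (hypothetical labels, not part of the original file): route
@ cores to their own entry points during startup.
@
@     bl      cpu_get_current
@     cmp     r0, #0
@     beq     primary_core_init      @ core 0 continues the boot
@     b       secondary_core_wait    @ other cores park until released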
    .global enable_neon_fpu
    .func enable_neon_fpu
enable_neon_fpu:
    /* Set NSACR: both Secure and Non-secure access are allowed to NEON. */
    mrc     p15, 0, r0, c1, c1, 2
    orr     r0, r0, #(0x3 << 10)    @ enable fpu/neon
    mcr     p15, 0, r0, c1, c1, 2
    /* Set the CPACR for access to CP10 and CP11. */
    ldr     r0, =0xF00000
    mcr     p15, 0, r0, c1, c0, 2
    /* Set the FPEXC EN bit to enable the FPU. */
    mov     r3, #0x40000000
    @ VMSR FPEXC, r3
    mcr     p10, 7, r3, c8, c0, 0   @ coprocessor encoding of VMSR FPEXC, r3
    bx      lr
    .endfunc
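
@ Usage sketch (hypothetical startup order, not part of the original file):
@ call this before any code that touches VFP/NEON registers, e.g. before C
@ runtime init when the compiler may emit NEON instructions.
@
@     bl      enable_neon_fpu
@     vmov    s0, r0                 @ VFP instructions are now safe to execute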
    .global disable_strict_align_check
    .func disable_strict_align_check
disable_strict_align_check:
    /* Ray's note: disable strict alignment fault checking.
       Without disabling it, a data abort occurs when accessing the BPB
       structure of the file system, since that structure is packed. */
    push    {r0, lr}
    mrc     p15, 0, r0, c1, c0, 0
    bic     r0, r0, #(0x1 << 1)     @ clear the A bit of SCTLR
    mcr     p15, 0, r0, c1, c0, 0
    pop     {r0, pc}
    .endfunc
    .global disable_L1_cache
    .func disable_L1_cache
disable_L1_cache:
    push    {r0-r6, lr}
    mrc     p15, 0, r0, c1, c0, 0
    bic     r0, r0, #(0x1 << 12)    @ clear I bit: instruction cache off
    bic     r0, r0, #(0x1 << 11)    @ clear Z bit: branch prediction off
    bic     r0, r0, #(0x1 << 2)     @ clear C bit: data cache off
    bic     r0, r0, #(0x1 << 0)     @ clear M bit: MMU off
    mcr     p15, 0, r0, c1, c0, 0
    pop     {r0-r6, pc}
    .endfunc
    .global get_arm_private_peripheral_base
@ uint32_t get_arm_private_peripheral_base(void)@
    .func get_arm_private_peripheral_base
get_arm_private_peripheral_base:
    @ Get base address of the private peripheral space
    mrc     p15, 4, r0, c15, c0, 0  @ read periph base address (CBAR)
    bx      lr
    .endfunc @get_arm_private_peripheral_base()@
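
@ Usage sketch (hypothetical, not part of the original file): on Cortex-A9
@ MPCore the SCU, GIC, and private timers sit at fixed offsets from this base.
@
@     bl      get_arm_private_peripheral_base
@     add     r1, r0, #0x100         @ GIC CPU interface
@     add     r2, r0, #0x600         @ private timer/watchdog
@     @ SCU registers start at offset 0x000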
@ ------------------------------------------------------------
@ TLB
@ ------------------------------------------------------------
    .global arm_unified_tlb_invalidate
@ void arm_unified_tlb_invalidate(void)@
    .func arm_unified_tlb_invalidate
arm_unified_tlb_invalidate:
    mov     r0, #1
    mcr     p15, 0, r0, c8, c7, 0   @ TLBIALL - invalidate entire unified TLB
    dsb
    bx      lr
    .endfunc
    .global arm_unified_tlb_invalidate_is
@ void arm_unified_tlb_invalidate_is(void)@
    .func arm_unified_tlb_invalidate_is
arm_unified_tlb_invalidate_is:
    mov     r0, #1
    mcr     p15, 0, r0, c8, c3, 0   @ TLBIALLIS - invalidate entire unified TLB Inner Shareable
    dsb
    bx      lr
    .endfunc
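
@ Usage sketch (hypothetical, not part of the original file): after rewriting
@ a page-table entry, make the new mapping visible before relying on it.
@
@     str     r1, [r0]               @ r0 = address of the page-table entry
@     dsb                            @ entry visible to the table walker
@     bl      arm_unified_tlb_invalidate
@     isb                            @ discard fetches made under the old mapping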
@ ------------------------------------------------------------
@ Branch Prediction
@ ------------------------------------------------------------
    .global arm_branch_prediction_enable
@ void arm_branch_prediction_enable(void)
    .func arm_branch_prediction_enable
arm_branch_prediction_enable:
    mrc     p15, 0, r0, c1, c0, 0   @ read SCTLR
    orr     r0, r0, #(1 << 11)      @ set the Z bit (bit 11)
    mcr     p15, 0, r0, c1, c0, 0   @ write SCTLR
    bx      lr
    .endfunc

    .global arm_branch_prediction_disable
@ void arm_branch_prediction_disable(void)
    .func arm_branch_prediction_disable
arm_branch_prediction_disable:
    mrc     p15, 0, r0, c1, c0, 0   @ read SCTLR
    bic     r0, r0, #(1 << 11)      @ clear the Z bit (bit 11)
    mcr     p15, 0, r0, c1, c0, 0   @ write SCTLR
    bx      lr
    .endfunc
    .global arm_branch_target_cache_invalidate
@ void arm_branch_target_cache_invalidate(void)
    .func arm_branch_target_cache_invalidate
arm_branch_target_cache_invalidate:
    mov     r0, #0
    mcr     p15, 0, r0, c7, c5, 6   @ BPIALL - invalidate entire branch predictor array
    bx      lr
    .endfunc

    .global arm_branch_target_cache_invalidate_is
@ void arm_branch_target_cache_invalidate_is(void)
    .func arm_branch_target_cache_invalidate_is
arm_branch_target_cache_invalidate_is:
    mov     r0, #0
    mcr     p15, 0, r0, c7, c1, 6   @ BPIALLIS - invalidate entire branch predictor array Inner Shareable
    bx      lr
    .endfunc
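
@ Usage sketch (hypothetical, not part of the original file): the BPIALL
@ functions above issue no barriers, so a caller that needs the invalidation
@ complete before the next branch should add them.
@
@     bl      arm_branch_target_cache_invalidate
@     dsb                            @ wait for the invalidation to complete
@     isb                            @ flush the pipeline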
@ ------------------------------------------------------------
@ SCU
@ ------------------------------------------------------------
@ SCU offset from base of private peripheral space --> 0x000
    .global scu_enable
@ void scu_enable(void)
@ Enables the SCU
    .func scu_enable
scu_enable:
    mrc     p15, 4, r0, c15, c0, 0  @ read periph base address
    ldr     r1, [r0, #0x0]          @ read the SCU Control Register
    orr     r1, r1, #0x1            @ set bit 0 (the Enable bit)
    str     r1, [r0, #0x0]          @ write back modified value
    bx      lr
    .endfunc
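
@ Usage sketch (hypothetical boot order, not part of the original file): the
@ SCU must be enabled before any core sets its SMP bit.
@
@     bl      scu_enable             @ once, from the primary core
@     bl      scu_join_smp           @ then each core joins coherency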
@ ------------------------------------------------------------
    .global scu_join_smp
@ void scu_join_smp(void)
@ Set this CPU as participating in SMP
    .func scu_join_smp
scu_join_smp:
    @ SMP status is controlled by bit 6 of the CP15 Aux Ctrl Reg
    mrc     p15, 0, r0, c1, c0, 1   @ read ACTLR
    orr     r0, r0, #0x040          @ set bit 6
    mcr     p15, 0, r0, c1, c0, 1   @ write ACTLR
    bx      lr
    .endfunc
@ ------------------------------------------------------------
    .global scu_leave_smp
@ void scu_leave_smp(void)
@ Set this CPU as NOT participating in SMP
    .func scu_leave_smp
scu_leave_smp:
    @ SMP status is controlled by bit 6 of the CP15 Aux Ctrl Reg
    mrc     p15, 0, r0, c1, c0, 1   @ read ACTLR
    bic     r0, r0, #0x040          @ clear bit 6
    mcr     p15, 0, r0, c1, c0, 1   @ write ACTLR
    bx      lr
    .endfunc
@ ------------------------------------------------------------
    .global scu_get_cpus_in_smp
@ unsigned int scu_get_cpus_in_smp(void)
@ The return value is 1 bit per core:
@ bit 0 - CPU 0
@ bit 1 - CPU 1
@ etc...
    .func scu_get_cpus_in_smp
scu_get_cpus_in_smp:
    mrc     p15, 4, r0, c15, c0, 0  @ read periph base address
    ldr     r0, [r0, #0x004]        @ read SCU Configuration Register
    mov     r0, r0, lsr #4          @ bits 7:4 give the cores in SMP mode; shift, then mask
    and     r0, r0, #0x0F
    bx      lr
    .endfunc
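
@ Usage sketch (hypothetical label, assumes a quad-core part; not part of the
@ original file): the primary core can poll until every core has joined SMP.
@
@ wait_for_cores:
@     bl      scu_get_cpus_in_smp
@     cmp     r0, #0x0F              @ all four cores participating?
@     bne     wait_for_cores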
@ ------------------------------------------------------------
    .global scu_enable_maintenance_broadcast
@ void scu_enable_maintenance_broadcast(void)
@ Enable the broadcasting of cache & TLB maintenance operations.
@ When enabled AND in SMP, all "inner shareable" cache and TLB
@ maintenance operations are broadcast to the other SMP cores.
    .func scu_enable_maintenance_broadcast
scu_enable_maintenance_broadcast:
    mrc     p15, 0, r0, c1, c0, 1   @ read Aux Ctrl register
    orr     r0, r0, #0x01           @ set the FW bit (bit 0)
    mcr     p15, 0, r0, c1, c0, 1   @ write Aux Ctrl register
    bx      lr
    .endfunc
@ ------------------------------------------------------------
    .global scu_disable_maintenance_broadcast
@ void scu_disable_maintenance_broadcast(void)
@ Disable the broadcasting of cache & TLB maintenance operations
    .func scu_disable_maintenance_broadcast
scu_disable_maintenance_broadcast:
    mrc     p15, 0, r0, c1, c0, 1   @ read Aux Ctrl register
    bic     r0, r0, #0x01           @ clear the FW bit (bit 0)
    mcr     p15, 0, r0, c1, c0, 1   @ write Aux Ctrl register
    bx      lr
    .endfunc
@ ------------------------------------------------------------
    .global scu_secure_invalidate
@ void scu_secure_invalidate(unsigned int cpu, unsigned int ways)
@ cpu: 0x0=CPU 0, 0x1=CPU 1, etc...
@ This function invalidates the SCU copy of the tag RAMs for the
@ specified core. Typically this is only done at start-up.
@ Possible flow (see the sketch after this function):
@ - Invalidate L1 caches
@ - Invalidate SCU copy of TAG RAMs
@ - Join SMP
    .func scu_secure_invalidate
scu_secure_invalidate:
    and     r0, r0, #0x03           @ mask off unused bits of CPU ID
    mov     r0, r0, lsl #2          @ convert into bit offset (four bits per core)
    and     r1, r1, #0x0F           @ mask off unused bits of ways
    mov     r1, r1, lsl r0          @ shift ways into the correct CPU field
    mrc     p15, 4, r2, c15, c0, 0  @ read periph base address
    str     r1, [r2, #0x0C]         @ write to SCU Invalidate All in Secure State
    bx      lr
    .endfunc
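
@ Startup sketch (hypothetical, not part of the original file) following the
@ possible flow above, for a core whose ID is held in r4:
@
@     bl      disable_L1_cache       @ caches off while still incoherent
@     mov     r0, r4                 @ this core's ID
@     mov     r1, #0x0F              @ invalidate all four ways
@     bl      scu_secure_invalidate
@     bl      scu_join_smp           @ now participate in coherency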
@ ------------------------------------------------------------
@ End of cortexA9_gcc.S
@ ------------------------------------------------------------
    .end