gicv3.c 29 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966
  1. /*
  2. * Copyright (c) 2006-2022, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2013-07-20 Bernard first version
  9. * 2014-04-03 Grissiom many enhancements
  10. * 2018-11-22 Jesven add rt_hw_ipi_send()
  11. * add rt_hw_ipi_handler_install()
  12. * 2022-03-08 GuEe-GUI add BSP bind SPI CPU self support
  13. * add GICv3 AArch64 system register interface
  14. * modify arm_gic_redist_init() args
  15. * modify arm_gic_cpu_init() args
  16. * modify arm_gic_send_affinity_sgi() args
  17. * remove arm_gic_redist_address_set()
  18. * remove arm_gic_cpu_interface_address_set()
  19. * remove arm_gic_secondary_cpu_init()
  20. * remove get_main_cpu_affval()
  21. * remove arm_gic_cpumask_to_affval()
  22. */
  23. #include <rthw.h>
  24. #include <rtthread.h>
  25. #if defined(BSP_USING_GIC) && defined(BSP_USING_GICV3)
  26. #include <gicv3.h>
  27. #include <cp15.h>
  28. #include <board.h>
  29. #ifndef ARM_SPI_BIND_CPU_ID
  30. #define ARM_SPI_BIND_CPU_ID 0
  31. #endif
  32. #if !defined(RT_USING_SMP) && !defined(RT_USING_AMP)
  33. #define RT_CPUS_NR 1
  34. #else
  35. extern rt_uint64_t rt_cpu_mpidr_early[];
  36. #endif /* RT_USING_SMP */
/* Per-instance bookkeeping for one GICv3 controller. */
struct arm_gic
{
    rt_uint64_t offset;                          /* the first interrupt index in the vector table */
    rt_uint64_t redist_hw_base[ARM_GIC_CPU_NUM]; /* per-CPU base address of the GIC redistributor */
    rt_uint64_t dist_hw_base;                    /* the base address of the GIC distributor */
    rt_uint64_t cpu_hw_base[ARM_GIC_CPU_NUM];    /* per-CPU base address of the (legacy) GIC CPU interface */
};

/* 'ARM_GIC_MAX_NR' is the number of GIC controller instances (it bounds the
 * 'index' argument of every API below), not the number of cores. */
static struct arm_gic _gic_table[ARM_GIC_MAX_NR];

/* Highest supported interrupt count, discovered from GICD_TYPER in arm_gic_dist_init(). */
static unsigned int _gic_max_irq;
  47. #define GET_GICV3_REG(reg, out) __asm__ volatile ("mrs %0, " reg:"=r"(out)::"memory");
  48. #define SET_GICV3_REG(reg, in) __asm__ volatile ("msr " reg ", %0"::"r"(in):"memory");
  49. /* AArch64 System register interface to GICv3 */
  50. #define ICC_IAR0_EL1 "S3_0_C12_C8_0"
  51. #define ICC_IAR1_EL1 "S3_0_C12_C12_0"
  52. #define ICC_EOIR0_EL1 "S3_0_C12_C8_1"
  53. #define ICC_EOIR1_EL1 "S3_0_C12_C12_1"
  54. #define ICC_HPPIR0_EL1 "S3_0_C12_C8_2"
  55. #define ICC_HPPIR1_EL1 "S3_0_C12_C12_2"
  56. #define ICC_BPR0_EL1 "S3_0_C12_C8_3"
  57. #define ICC_BPR1_EL1 "S3_0_C12_C12_3"
  58. #define ICC_DIR_EL1 "S3_0_C12_C11_1"
  59. #define ICC_PMR_EL1 "S3_0_C4_C6_0"
  60. #define ICC_RPR_EL1 "S3_0_C12_C11_3"
  61. #define ICC_CTLR_EL1 "S3_0_C12_C12_4"
  62. #define ICC_CTLR_EL3 "S3_6_C12_C12_4"
  63. #define ICC_SRE_EL1 "S3_0_C12_C12_5"
  64. #define ICC_SRE_EL2 "S3_4_C12_C9_5"
  65. #define ICC_SRE_EL3 "S3_6_C12_C12_5"
  66. #define ICC_IGRPEN0_EL1 "S3_0_C12_C12_6"
  67. #define ICC_IGRPEN1_EL1 "S3_0_C12_C12_7"
  68. #define ICC_IGRPEN1_EL3 "S3_6_C12_C12_7"
  69. #define ICC_SGI0R_EL1 "S3_0_C12_C11_7"
  70. #define ICC_SGI1R_EL1 "S3_0_C12_C11_5"
  71. #define ICC_ASGI1R_EL1 "S3_0_C12_C11_6"
  72. /* Macro to access the Distributor Control Register (GICD_CTLR) */
  73. #define GICD_CTLR_RWP (1U << 31)
  74. #define GICD_CTLR_E1NWF (1U << 7)
  75. #define GICD_CTLR_DS (1U << 6)
  76. #define GICD_CTLR_ARE_NS (1U << 5)
  77. #define GICD_CTLR_ARE_S (1U << 4)
  78. #define GICD_CTLR_ENGRP1S (1U << 2)
  79. #define GICD_CTLR_ENGRP1NS (1U << 1)
  80. #define GICD_CTLR_ENGRP0 (1U << 0)
  81. /* Macro to access the Redistributor Control Register (GICR_CTLR) */
  82. #define GICR_CTLR_UWP (1U << 31)
  83. #define GICR_CTLR_DPG1S (1U << 26)
  84. #define GICR_CTLR_DPG1NS (1U << 25)
  85. #define GICR_CTLR_DPG0 (1U << 24)
  86. #define GICR_CTLR_RWP (1U << 3)
  87. #define GICR_CTLR_IR (1U << 2)
  88. #define GICR_CTLR_CES (1U << 1)
  89. #define GICR_CTLR_EnableLPI (1U << 0)
  90. /* Macro to access the Generic Interrupt Controller Interface (GICC) */
  91. #define GIC_CPU_CTRL(hw_base) HWREG32((hw_base) + 0x00U)
  92. #define GIC_CPU_PRIMASK(hw_base) HWREG32((hw_base) + 0x04U)
  93. #define GIC_CPU_BINPOINT(hw_base) HWREG32((hw_base) + 0x08U)
  94. #define GIC_CPU_INTACK(hw_base) HWREG32((hw_base) + 0x0cU)
  95. #define GIC_CPU_EOI(hw_base) HWREG32((hw_base) + 0x10U)
  96. #define GIC_CPU_RUNNINGPRI(hw_base) HWREG32((hw_base) + 0x14U)
  97. #define GIC_CPU_HIGHPRI(hw_base) HWREG32((hw_base) + 0x18U)
  98. #define GIC_CPU_IIDR(hw_base) HWREG32((hw_base) + 0xFCU)
  99. /* Macro to access the Generic Interrupt Controller Distributor (GICD) */
  100. #define GIC_DIST_CTRL(hw_base) HWREG32((hw_base) + 0x000U)
  101. #define GIC_DIST_TYPE(hw_base) HWREG32((hw_base) + 0x004U)
  102. #define GIC_DIST_IIDR(hw_base) HWREG32((hw_base) + 0x008U)
  103. #define GIC_DIST_IGROUP(hw_base, n) HWREG32((hw_base) + 0x080U + ((n) / 32U) * 4U)
  104. #define GIC_DIST_ENABLE_SET(hw_base, n) HWREG32((hw_base) + 0x100U + ((n) / 32U) * 4U)
  105. #define GIC_DIST_ENABLE_CLEAR(hw_base, n) HWREG32((hw_base) + 0x180U + ((n) / 32U) * 4U)
  106. #define GIC_DIST_PENDING_SET(hw_base, n) HWREG32((hw_base) + 0x200U + ((n) / 32U) * 4U)
  107. #define GIC_DIST_PENDING_CLEAR(hw_base, n) HWREG32((hw_base) + 0x280U + ((n) / 32U) * 4U)
  108. #define GIC_DIST_ACTIVE_SET(hw_base, n) HWREG32((hw_base) + 0x300U + ((n) / 32U) * 4U)
  109. #define GIC_DIST_ACTIVE_CLEAR(hw_base, n) HWREG32((hw_base) + 0x380U + ((n) / 32U) * 4U)
  110. #define GIC_DIST_PRI(hw_base, n) HWREG32((hw_base) + 0x400U + ((n) / 4U) * 4U)
  111. #define GIC_DIST_TARGET(hw_base, n) HWREG32((hw_base) + 0x800U + ((n) / 4U) * 4U)
  112. #define GIC_DIST_CONFIG(hw_base, n) HWREG32((hw_base) + 0xc00U + ((n) / 16U) * 4U)
  113. #define GIC_DIST_SOFTINT(hw_base) HWREG32((hw_base) + 0xf00U)
  114. #define GIC_DIST_CPENDSGI(hw_base, n) HWREG32((hw_base) + 0xf10U + ((n) / 4U) * 4U)
  115. #define GIC_DIST_SPENDSGI(hw_base, n) HWREG32((hw_base) + 0xf20U + ((n) / 4U) * 4U)
  116. #define GIC_DIST_ICPIDR2(hw_base) HWREG32((hw_base) + 0xfe8U)
  117. #define GIC_DIST_IROUTER(hw_base, n) HWREG64((hw_base) + 0x6000U + (n) * 8U)
  118. /* SGI base address is at 64K offset from Redistributor base address */
  119. #define GIC_RSGI_OFFSET 0x10000
  120. /* Macro to access the Generic Interrupt Controller Redistributor (GICR) */
  121. #define GIC_RDIST_CTRL(hw_base) HWREG32((hw_base) + 0x000U)
  122. #define GIC_RDIST_IIDR(hw_base) HWREG32((hw_base) + 0x004U)
  123. #define GIC_RDIST_TYPER(hw_base) HWREG64((hw_base) + 0x008U)
  124. #define GIC_RDIST_TSTATUSR(hw_base) HWREG32((hw_base) + 0x010U)
  125. #define GIC_RDIST_WAKER(hw_base) HWREG32((hw_base) + 0x014U)
  126. #define GIC_RDIST_SETLPIR(hw_base) HWREG32((hw_base) + 0x040U)
  127. #define GIC_RDIST_CLRLPIR(hw_base) HWREG32((hw_base) + 0x048U)
  128. #define GIC_RDIST_PROPBASER(hw_base) HWREG32((hw_base) + 0x070U)
  129. #define GIC_RDIST_PENDBASER(hw_base) HWREG32((hw_base) + 0x078U)
  130. #define GIC_RDIST_INVLPIR(hw_base) HWREG32((hw_base) + 0x0A0U)
  131. #define GIC_RDIST_INVALLR(hw_base) HWREG32((hw_base) + 0x0B0U)
  132. #define GIC_RDIST_SYNCR(hw_base) HWREG32((hw_base) + 0x0C0U)
  133. #define GIC_RDISTSGI_IGROUPR0(hw_base, n) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0x080U + (n) * 4U)
  134. #define GIC_RDISTSGI_ISENABLER0(hw_base) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0x100U)
  135. #define GIC_RDISTSGI_ICENABLER0(hw_base) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0x180U)
  136. #define GIC_RDISTSGI_ISPENDR0(hw_base) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0x200U)
  137. #define GIC_RDISTSGI_ICPENDR0(hw_base) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0x280U)
  138. #define GIC_RDISTSGI_ISACTIVER0(hw_base) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0x300U)
  139. #define GIC_RDISTSGI_ICACTIVER0(hw_base) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0x380U)
  140. #define GIC_RDISTSGI_IPRIORITYR(hw_base, n) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0x400U + ((n) / 4U) * 4U)
  141. #define GIC_RDISTSGI_ICFGR0(hw_base) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0xC00U)
  142. #define GIC_RDISTSGI_ICFGR1(hw_base) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0xC04U)
  143. #define GIC_RDISTSGI_IGRPMODR0(hw_base, n) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0xD00U + (n) * 4)
  144. #define GIC_RDISTSGI_NSACR(hw_base) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0xE00U)
/**
 * Acknowledge and return the highest-priority pending Group 1 interrupt.
 *
 * Reading ICC_IAR1_EL1 activates the interrupt; the hardware INTID is then
 * converted to a vector-table index by adding the controller's offset.
 *
 * NOTE(review): the 0x1ffffff mask keeps 25 bits while the IAR INTID field
 * is architecturally 24 bits — harmless only if the extra bit reads as
 * zero; confirm against the GICv3 specification.
 */
int arm_gic_get_active_irq(rt_uint64_t index)
{
    rt_base_t irq;

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    GET_GICV3_REG(ICC_IAR1_EL1, irq);
    irq = (irq & 0x1ffffff) + _gic_table[index].offset;

    return irq;
}
/**
 * Signal end-of-interrupt for a previously acknowledged Group 1 interrupt.
 *
 * The DSB ensures all memory accesses made by the handler complete before
 * the EOI write drops the running priority.
 *
 * NOTE(review): 'irq' is written to ICC_EOIR1_EL1 without subtracting the
 * controller offset, while arm_gic_get_active_irq() adds it — consistent
 * only when offset is 0; confirm with callers.
 */
void arm_gic_ack(rt_uint64_t index, int irq)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    RT_ASSERT(irq >= 0);

    __DSB();
    SET_GICV3_REG(ICC_EOIR1_EL1, (rt_base_t)irq);
}
  160. void arm_gic_mask(rt_uint64_t index, int irq)
  161. {
  162. rt_uint64_t mask = 1 << (irq % 32);
  163. RT_ASSERT(index < ARM_GIC_MAX_NR);
  164. irq = irq - _gic_table[index].offset;
  165. RT_ASSERT(irq >= 0);
  166. if (irq < 32)
  167. {
  168. rt_int32_t cpu_id = rt_hw_cpu_id();
  169. GIC_RDISTSGI_ICENABLER0(_gic_table[index].redist_hw_base[cpu_id]) = mask;
  170. }
  171. else
  172. {
  173. GIC_DIST_ENABLE_CLEAR(_gic_table[index].dist_hw_base, irq) = mask;
  174. }
  175. }
  176. void arm_gic_umask(rt_uint64_t index, int irq)
  177. {
  178. rt_uint64_t mask = 1 << (irq % 32);
  179. RT_ASSERT(index < ARM_GIC_MAX_NR);
  180. irq = irq - _gic_table[index].offset;
  181. RT_ASSERT(irq >= 0);
  182. if (irq < 32)
  183. {
  184. rt_int32_t cpu_id = rt_hw_cpu_id();
  185. GIC_RDISTSGI_ISENABLER0(_gic_table[index].redist_hw_base[cpu_id]) = mask;
  186. }
  187. else
  188. {
  189. GIC_DIST_ENABLE_SET(_gic_table[index].dist_hw_base, irq) = mask;
  190. }
  191. }
  192. rt_uint64_t arm_gic_get_pending_irq(rt_uint64_t index, int irq)
  193. {
  194. rt_uint64_t pend;
  195. RT_ASSERT(index < ARM_GIC_MAX_NR);
  196. irq = irq - _gic_table[index].offset;
  197. RT_ASSERT(irq >= 0);
  198. if (irq >= 16)
  199. {
  200. pend = (GIC_DIST_PENDING_SET(_gic_table[index].dist_hw_base, irq) >> (irq % 32)) & 0x1;
  201. }
  202. else
  203. {
  204. /* INTID 0-15 Software Generated Interrupt */
  205. pend = (GIC_DIST_SPENDSGI(_gic_table[index].dist_hw_base, irq) >> ((irq % 4) * 8)) & 0xff;
  206. /* No CPU identification offered */
  207. if (pend != 0)
  208. {
  209. pend = 1;
  210. }
  211. else
  212. {
  213. pend = 0;
  214. }
  215. }
  216. return pend;
  217. }
/**
 * Force an interrupt into the pending state.
 *
 * SPIs/PPIs (INTID >= 16) are set through GICD_ISPENDR. SGIs are raised by
 * writing GICD_SGIR (SOFTINT) with value 0x02000000 ORed in, which per the
 * original comment forwards the SGI to the requesting CPU only.
 */
void arm_gic_set_pending_irq(rt_uint64_t index, int irq)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0);

    if (irq >= 16)
    {
        GIC_DIST_PENDING_SET(_gic_table[index].dist_hw_base, irq) = 1 << (irq % 32);
    }
    else
    {
        /* INTID 0-15 Software Generated Interrupt */
        /* Forward the interrupt to the CPU interface that requested it */
        GIC_DIST_SOFTINT(_gic_table[index].dist_hw_base) = (irq | 0x02000000);
    }
}
  234. void arm_gic_clear_pending_irq(rt_uint64_t index, int irq)
  235. {
  236. rt_uint64_t mask;
  237. RT_ASSERT(index < ARM_GIC_MAX_NR);
  238. irq = irq - _gic_table[index].offset;
  239. RT_ASSERT(irq >= 0);
  240. if (irq >= 16)
  241. {
  242. mask = 1 << (irq % 32);
  243. GIC_DIST_PENDING_CLEAR(_gic_table[index].dist_hw_base, irq) = mask;
  244. }
  245. else
  246. {
  247. mask = 1 << ((irq % 4) * 8);
  248. GIC_DIST_CPENDSGI(_gic_table[index].dist_hw_base, irq) = mask;
  249. }
  250. }
/**
 * Program the trigger configuration (GICD_ICFGR) of an interrupt.
 *
 * Each interrupt owns a 2-bit field at bit position (irq % 16) * 2; the
 * whole field is cleared, then 'config' is written at (shift + 1), i.e.
 * into the upper bit of the field.
 *
 * NOTE(review): because of the '+ 1', callers are expected to pass 0/1
 * (level/edge select for the field's upper bit) rather than a full 2-bit
 * value — confirm against callers before changing this.
 */
void arm_gic_set_configuration(rt_uint64_t index, int irq, rt_uint32_t config)
{
    rt_uint64_t icfgr;
    rt_uint64_t shift;

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0);

    icfgr = GIC_DIST_CONFIG(_gic_table[index].dist_hw_base, irq);
    shift = (irq % 16) << 1;

    icfgr &= (~(3 << shift));
    icfgr |= (config << (shift + 1));

    GIC_DIST_CONFIG(_gic_table[index].dist_hw_base, irq) = icfgr;
}
  264. rt_uint64_t arm_gic_get_configuration(rt_uint64_t index, int irq)
  265. {
  266. RT_ASSERT(index < ARM_GIC_MAX_NR);
  267. irq = irq - _gic_table[index].offset;
  268. RT_ASSERT(irq >= 0);
  269. return (GIC_DIST_CONFIG(_gic_table[index].dist_hw_base, irq) >> ((irq % 16) >> 1));
  270. }
  271. void arm_gic_clear_active(rt_uint64_t index, int irq)
  272. {
  273. rt_uint64_t mask = 1 << (irq % 32);
  274. RT_ASSERT(index < ARM_GIC_MAX_NR);
  275. irq = irq - _gic_table[index].offset;
  276. RT_ASSERT(irq >= 0);
  277. GIC_DIST_ACTIVE_CLEAR(_gic_table[index].dist_hw_base, irq) = mask;
  278. }
/**
 * Route an SPI to the CPU with the given affinity value via GICD_IROUTER.
 *
 * Only valid for SPIs (asserted INTID >= 32). The mask keeps the affinity
 * layout aff3[39:32] | aff2[23:16] | aff1[15:8] | aff0[7:0].
 */
void arm_gic_set_router_cpu(rt_uint64_t index, int irq, rt_uint64_t aff)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 32);

    GIC_DIST_IROUTER(_gic_table[index].dist_hw_base, irq) = aff & 0xff00ffffffULL;
}
/**
 * Read the routing affinity (GICD_IROUTER) of an SPI.
 * Only valid for SPIs (asserted INTID >= 32).
 */
rt_uint64_t arm_gic_get_router_cpu(rt_uint64_t index, int irq)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 32);

    return GIC_DIST_IROUTER(_gic_table[index].dist_hw_base, irq);
}
  293. /* Set up the cpu mask for the specific interrupt */
  294. void arm_gic_set_cpu(rt_uint64_t index, int irq, unsigned int cpumask)
  295. {
  296. rt_uint64_t old_tgt;
  297. RT_ASSERT(index < ARM_GIC_MAX_NR);
  298. irq = irq - _gic_table[index].offset;
  299. RT_ASSERT(irq >= 0);
  300. old_tgt = GIC_DIST_TARGET(_gic_table[index].dist_hw_base, irq);
  301. old_tgt &= ~(0x0ff << ((irq % 4) * 8));
  302. old_tgt |= cpumask << ((irq % 4) * 8);
  303. GIC_DIST_TARGET(_gic_table[index].dist_hw_base, irq) = old_tgt;
  304. }
/**
 * Read the legacy target-CPU byte (GICD_ITARGETSR) of an interrupt.
 * Returns the 8-bit CPU mask stored in this INTID's byte lane.
 */
rt_uint64_t arm_gic_get_target_cpu(rt_uint64_t index, int irq)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0);

    return (GIC_DIST_TARGET(_gic_table[index].dist_hw_base, irq) >> ((irq % 4) * 8)) & 0xff;
}
  312. void arm_gic_set_priority(rt_uint64_t index, int irq, rt_uint64_t priority)
  313. {
  314. rt_uint64_t mask;
  315. RT_ASSERT(index < ARM_GIC_MAX_NR);
  316. irq = irq - _gic_table[index].offset;
  317. RT_ASSERT(irq >= 0);
  318. if (irq < 32)
  319. {
  320. rt_int32_t cpu_id = rt_hw_cpu_id();
  321. mask = GIC_RDISTSGI_IPRIORITYR(_gic_table[index].redist_hw_base[cpu_id], irq);
  322. mask &= ~(0xffUL << ((irq % 4) * 8));
  323. mask |= ((priority & 0xff) << ((irq % 4) * 8));
  324. GIC_RDISTSGI_IPRIORITYR(_gic_table[index].redist_hw_base[cpu_id], irq) = mask;
  325. }
  326. else
  327. {
  328. mask = GIC_DIST_PRI(_gic_table[index].dist_hw_base, irq);
  329. mask &= ~(0xff << ((irq % 4) * 8));
  330. mask |= ((priority & 0xff) << ((irq % 4) * 8));
  331. GIC_DIST_PRI(_gic_table[index].dist_hw_base, irq) = mask;
  332. }
  333. }
  334. rt_uint64_t arm_gic_get_priority(rt_uint64_t index, int irq)
  335. {
  336. RT_ASSERT(index < ARM_GIC_MAX_NR);
  337. irq = irq - _gic_table[index].offset;
  338. RT_ASSERT(irq >= 0);
  339. if (irq < 32)
  340. {
  341. rt_int32_t cpu_id = rt_hw_cpu_id();
  342. return (GIC_RDISTSGI_IPRIORITYR(_gic_table[index].redist_hw_base[cpu_id], irq) >> ((irq % 4) * 8)) & 0xff;
  343. }
  344. else
  345. {
  346. return (GIC_DIST_PRI(_gic_table[index].dist_hw_base, irq) >> ((irq % 4) * 8)) & 0xff;
  347. }
  348. }
/**
 * Write ICC_SRE_EL1 to control the GIC system-register interface.
 *
 * Only the low 8 bits are written; the ISB makes the new setting take
 * effect before any subsequent ICC_* system-register access.
 */
void arm_gic_set_system_register_enable_mask(rt_uint64_t index, rt_uint64_t value)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);

    value &= 0xff;
    /* Enable/configure the system-register interface (ICC_SRE_EL1),
     * not the priority mask as the original comment claimed. */
    SET_GICV3_REG(ICC_SRE_EL1, value);
    __ISB();
}
/**
 * Read the current ICC_SRE_EL1 value (system-register interface control).
 * 'index' is asserted but unused; the register is per-CPU.
 */
rt_uint64_t arm_gic_get_system_register_enable_mask(rt_uint64_t index)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    rt_uint64_t value;

    GET_GICV3_REG(ICC_SRE_EL1, value);

    return value;
}
/**
 * Set the CPU interface priority mask (ICC_PMR_EL1).
 * Only interrupts with higher priority (numerically lower value) than this
 * 8-bit threshold are signalled to the PE.
 */
void arm_gic_set_interface_prior_mask(rt_uint64_t index, rt_uint64_t priority)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);

    priority &= 0xff;
    /* set priority mask */
    SET_GICV3_REG(ICC_PMR_EL1, priority);
}
/**
 * Read the CPU interface priority mask (ICC_PMR_EL1).
 * 'index' is asserted but unused; the register is per-CPU.
 */
rt_uint64_t arm_gic_get_interface_prior_mask(rt_uint64_t index)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);
    rt_uint64_t priority;

    GET_GICV3_REG(ICC_PMR_EL1, priority);

    return priority;
}
/**
 * Set the Group 1 binary point (ICC_BPR1_EL1), which splits priority into
 * group/subpriority fields for preemption. Only the low 3 bits are used;
 * 'index' is unused since the register is per-CPU.
 */
void arm_gic_set_binary_point(rt_uint64_t index, rt_uint64_t binary_point)
{
    RT_UNUSED(index);
    binary_point &= 0x7;

    SET_GICV3_REG(ICC_BPR1_EL1, binary_point);
}
/**
 * Read the Group 1 binary point (ICC_BPR1_EL1).
 * 'index' is unused; the register is per-CPU.
 */
rt_uint64_t arm_gic_get_binary_point(rt_uint64_t index)
{
    rt_uint64_t binary_point;

    RT_UNUSED(index);
    GET_GICV3_REG(ICC_BPR1_EL1, binary_point);

    return binary_point;
}
  391. rt_uint64_t arm_gic_get_irq_status(rt_uint64_t index, int irq)
  392. {
  393. rt_uint64_t pending, active;
  394. RT_ASSERT(index < ARM_GIC_MAX_NR);
  395. irq = irq - _gic_table[index].offset;
  396. RT_ASSERT(irq >= 0);
  397. active = (GIC_DIST_ACTIVE_SET(_gic_table[index].dist_hw_base, irq) >> (irq % 32)) & 0x1;
  398. pending = (GIC_DIST_PENDING_SET(_gic_table[index].dist_hw_base, irq) >> (irq % 32)) & 0x1;
  399. return ((active << 1) | pending);
  400. }
  401. #if defined(RT_USING_SMP) || defined(RT_USING_AMP)
/* One entry per distinct affinity group of CPUs for targeted SGI delivery. */
struct gicv3_sgi_aff
{
    rt_uint64_t aff;                              /* pre-packed ICC_SGI1R_EL1 affinity/RS fields */
    rt_uint32_t cpu_mask[(RT_CPUS_NR + 31) >> 5]; /* member CPUs, one bit per CPU index */
    rt_uint16_t target_list;                      /* scratch: Aff0 target bits for the next send */
};

/* Built lazily by gicv3_sgi_init(); at most one entry per CPU. */
static struct gicv3_sgi_aff sgi_aff_table[RT_CPUS_NR];
static rt_uint64_t sgi_aff_table_num;
  410. static void sgi_aff_add_table(rt_uint64_t aff, rt_uint64_t cpu_index)
  411. {
  412. rt_uint64_t i;
  413. for (i = 0; i < sgi_aff_table_num; i++)
  414. {
  415. if (sgi_aff_table[i].aff == aff)
  416. {
  417. sgi_aff_table[i].cpu_mask[cpu_index >> 5] |= (1 << (cpu_index & 0x1F));
  418. return;
  419. }
  420. }
  421. sgi_aff_table[sgi_aff_table_num].aff = aff;
  422. sgi_aff_table[sgi_aff_table_num].cpu_mask[cpu_index >> 5] |= (1 << (cpu_index & 0x1F));
  423. sgi_aff_table_num++;
  424. }
/**
 * Build the affinity-group table used for targeted SGIs.
 *
 * For each CPU, the MPIDR fields are packed into the ICC_SGI1R_EL1 layout:
 * Aff1 -> bits[23:16], Aff2 -> bits[39:32], Aff3 -> bits[55:48], and
 * Aff0[7:4] -> bits[47:44] (the range selector); CPUs sharing the same
 * packed value are grouped together.
 *
 * Returns the number of 32-bit words a cpu_masks[] array needs to cover
 * RT_CPUS_NR CPUs.
 */
static rt_uint64_t gicv3_sgi_init(void)
{
    rt_uint64_t i, icc_sgi1r_value;

    for (i = 0; i < RT_CPUS_NR; i++)
    {
        icc_sgi1r_value = (rt_uint64_t)((rt_cpu_mpidr_early[i] >> 8) & 0xFF) << 16;   /* Aff1 */
        icc_sgi1r_value |= (rt_uint64_t)((rt_cpu_mpidr_early[i] >> 16) & 0xFF) << 32; /* Aff2 */
        icc_sgi1r_value |= (rt_uint64_t)((rt_cpu_mpidr_early[i] >> 32) & 0xFF) << 48; /* Aff3 */
        icc_sgi1r_value |= (rt_uint64_t)((rt_cpu_mpidr_early[i] >> 4) & 0xF) << 44;   /* RS   */
        sgi_aff_add_table(icc_sgi1r_value, i);
    }

    return (RT_CPUS_NR + 31) >> 5;
}
/**
 * Fire the SGI recorded in each affinity group's target_list.
 *
 * For every group with a non-empty target list, ICC_SGI1R_EL1 is written
 * with the group's pre-packed affinity fields, the SGI INTID ('int_id',
 * already shifted into bits [27:24] by the caller) and the target list;
 * the list is then cleared for the next send. DSB/ISB bracket the write so
 * prior stores are visible to the targets and the SGI is issued promptly.
 */
rt_inline void gicv3_sgi_send(rt_uint64_t int_id)
{
    rt_uint64_t i;

    for (i = 0; i < sgi_aff_table_num; i++)
    {
        if (sgi_aff_table[i].target_list)
        {
            __DSB();
            /* Interrupts routed to the PEs specified by Aff3.Aff2.Aff1.<target list>. */
            SET_GICV3_REG(ICC_SGI1R_EL1, sgi_aff_table[i].aff | int_id | sgi_aff_table[i].target_list);
            __ISB();
            sgi_aff_table[i].target_list = 0;
        }
    }
}
  453. rt_inline void gicv3_sgi_target_list_set(rt_uint64_t array, rt_uint32_t cpu_mask)
  454. {
  455. rt_uint64_t i, value;
  456. for (i = 0; i < sgi_aff_table_num; i++)
  457. {
  458. if (sgi_aff_table[i].cpu_mask[array] & cpu_mask)
  459. {
  460. while (cpu_mask)
  461. {
  462. value = __builtin_ctzl(cpu_mask);
  463. cpu_mask &= ~(1 << value);
  464. sgi_aff_table[i].target_list |= 1 << (rt_cpu_mpidr_early[(array << 5) | value] & 0xF);
  465. }
  466. }
  467. }
  468. }
/**
 * Send SGI 'irq' (0-15) to other cores.
 *
 * routing_mode == GICV3_ROUTED_TO_SPEC: deliver to the CPUs named in
 * cpu_masks[] (one bit per CPU, 32 CPUs per array word). Any other value:
 * broadcast to all PEs in the system excluding "self" (the IRM bit,
 * 0x10000000000ULL, in ICC_SGI1R_EL1).
 *
 * NOTE(review): masks_nrs / sgi_aff_table are initialized lazily on first
 * use without any lock — confirm callers serialize the first SGI send.
 */
void arm_gic_send_affinity_sgi(rt_uint64_t index, int irq, rt_uint32_t cpu_masks[], rt_uint64_t routing_mode)
{
    rt_uint64_t i;
    rt_uint64_t int_id = (irq & 0xf) << 24; /* SGI INTID field of ICC_SGI1R_EL1 */
    static rt_uint64_t masks_nrs = 0;

    if (routing_mode == GICV3_ROUTED_TO_SPEC)
    {
        if (!masks_nrs)
        {
            masks_nrs = gicv3_sgi_init();
        }

        for (i = 0; i < masks_nrs; i++)
        {
            if (cpu_masks[i] == 0)
            {
                continue;
            }

            gicv3_sgi_target_list_set(i, cpu_masks[i]);
        }

        gicv3_sgi_send(int_id);
    }
    else
    {
        __DSB();
        /* Interrupts routed to all PEs in the system, excluding "self". */
        SET_GICV3_REG(ICC_SGI1R_EL1, (0x10000000000ULL) | int_id);
        __ISB();
    }
}
  498. #endif /* defined(RT_USING_SMP) || defined(RT_USING_AMP) */
/**
 * Read ICC_HPPIR1_EL1: the INTID of the highest-priority pending Group 1
 * interrupt, without acknowledging it. 'index' is asserted but unused —
 * the register is per-CPU.
 */
rt_uint64_t arm_gic_get_high_pending_irq(rt_uint64_t index)
{
    rt_uint64_t irq;

    RT_ASSERT(index < ARM_GIC_MAX_NR);
    RT_UNUSED(index);

    GET_GICV3_REG(ICC_HPPIR1_EL1, irq);

    return irq;
}
/**
 * Read the CPU Interface Identification Register (GICC_IIDR) of the
 * legacy memory-mapped CPU interface, or 0 if no base was registered for
 * the calling CPU.
 *
 * Local interrupts are disabled around the access so the CPU id and the
 * table lookup refer to the same core.
 */
rt_uint64_t arm_gic_get_interface_id(rt_uint64_t index)
{
    rt_uint64_t ret = 0;
    rt_base_t level;
    int cpuid;

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    level = rt_hw_local_irq_disable();
    cpuid = rt_hw_cpu_id();
    if (_gic_table[index].cpu_hw_base[cpuid] != RT_NULL)
    {
        ret = GIC_CPU_IIDR(_gic_table[index].cpu_hw_base[cpuid]);
    }
    rt_hw_local_irq_enable(level);

    return ret;
}
  522. void arm_gic_set_group(rt_uint64_t index, int irq, rt_uint64_t group)
  523. {
  524. rt_uint64_t igroupr;
  525. rt_uint64_t shift;
  526. RT_ASSERT(index < ARM_GIC_MAX_NR);
  527. RT_ASSERT(group <= 1);
  528. irq = irq - _gic_table[index].offset;
  529. RT_ASSERT(irq >= 0);
  530. igroupr = GIC_DIST_IGROUP(_gic_table[index].dist_hw_base, irq);
  531. shift = (irq % 32);
  532. igroupr &= (~(1U << shift));
  533. igroupr |= ((group & 0x1U) << shift);
  534. GIC_DIST_IGROUP(_gic_table[index].dist_hw_base, irq) = igroupr;
  535. }
  536. rt_uint64_t arm_gic_get_group(rt_uint64_t index, int irq)
  537. {
  538. RT_ASSERT(index < ARM_GIC_MAX_NR);
  539. irq = irq - _gic_table[index].offset;
  540. RT_ASSERT(irq >= 0);
  541. return (GIC_DIST_IGROUP(_gic_table[index].dist_hw_base, irq) >> (irq % 32)) & 0x1UL;
  542. }
/**
 * Busy-wait until a pending GIC register write completes.
 *
 * irq < 32 polls the calling CPU's redistributor (GICR_CTLR.RWP);
 * otherwise the distributor (GICD_CTLR.RWP). In both cases the CTLR at
 * offset 0 of the chosen base is polled.
 *
 * Always returns 0 — no timeout is implemented, so this spins forever if
 * the bit never clears.
 */
static int arm_gicv3_wait_rwp(rt_uint64_t index, rt_uint64_t irq)
{
    rt_uint64_t rwp_bit;
    rt_uint64_t base;

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    if (irq < 32)
    {
        rt_int32_t cpu_id = rt_hw_cpu_id();

        base = _gic_table[index].redist_hw_base[cpu_id];
        rwp_bit = GICR_CTLR_RWP;
    }
    else
    {
        base = _gic_table[index].dist_hw_base;
        rwp_bit = GICD_CTLR_RWP;
    }

    while (HWREG32(base) & rwp_bit)
    {
    }

    return 0;
}
/**
 * One-time initialization of the GICv3 distributor (GICD).
 *
 * Records the distributor base and vector-table offset, sizes _gic_max_irq
 * from GICD_TYPER, then (unless built as an AMP slave) disables the
 * distributor, configures every SPI (trigger, routing, priority, disabled,
 * Group 1) and finally re-enables it with affinity routing. The register
 * write order and the RWP waits between phases are deliberate — do not
 * reorder.
 *
 * @param index     GIC instance (asserted < ARM_GIC_MAX_NR)
 * @param dist_base distributor MMIO base address
 * @param irq_start first interrupt index in the vector table
 * @return always 0
 */
int arm_gic_dist_init(rt_uint64_t index, rt_uint64_t dist_base, int irq_start)
{
    int i;
    unsigned int gic_type;
    rt_uint64_t main_cpu_affinity_val;

    /* Silence unused warnings in the RT_AMP_SLAVE build, where the
     * configuration section below is compiled out. */
    RT_UNUSED(i);
    RT_UNUSED(main_cpu_affinity_val);

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    _gic_table[index].dist_hw_base = dist_base;
    _gic_table[index].offset = irq_start;

    /* Find out how many interrupts are supported. */
    gic_type = GIC_DIST_TYPE(dist_base);
    _gic_max_irq = ((gic_type & 0x1f) + 1) * 32;

    /*
     * The GIC only supports up to 1020 interrupt sources.
     * Limit this to either the architected maximum, or the
     * platform maximum.
     */
    if (_gic_max_irq > 1020)
    {
        _gic_max_irq = 1020;
    }
    if (_gic_max_irq > ARM_GIC_NR_IRQS) /* the platform maximum interrupts */
    {
        _gic_max_irq = ARM_GIC_NR_IRQS;
    }

#ifndef RT_AMP_SLAVE
    /* Disable the distributor while it is being configured. */
    GIC_DIST_CTRL(dist_base) = 0;

    /* Wait for register write pending */
    arm_gicv3_wait_rwp(0, 32);

    /* Set all global interrupts to be level triggered, active low. */
    for (i = 32; i < _gic_max_irq; i += 16)
    {
        GIC_DIST_CONFIG(dist_base, i) = 0;
    }

    arm_gicv3_wait_rwp(0, 32);

#ifdef RT_USING_SMP
    main_cpu_affinity_val = rt_cpu_mpidr_early[ARM_SPI_BIND_CPU_ID];
#else
    /* Single-core build: read our own MPIDR directly. */
    __asm__ volatile ("mrs %0, mpidr_el1":"=r"(main_cpu_affinity_val));
#endif

    /* aff3[39:32], aff2[23:16], aff1[15:8], aff0[7:0] */
    main_cpu_affinity_val &= 0xff00ffffffULL;

    /* Set all global interrupts to this CPU only. */
    for (i = 32; i < _gic_max_irq; i++)
    {
        GIC_DIST_IROUTER(dist_base, i) = main_cpu_affinity_val | (GICV3_ROUTED_TO_SPEC << 31);
    }

    arm_gicv3_wait_rwp(0, 32);

    /* Set priority on spi interrupts. */
    for (i = 32; i < _gic_max_irq; i += 4)
    {
        GIC_DIST_PRI(dist_base, i) = 0xa0a0a0a0;
    }

    arm_gicv3_wait_rwp(0, 32);

    /* Disable all interrupts. */
    for (i = 0; i < _gic_max_irq; i += 32)
    {
        GIC_DIST_PENDING_CLEAR(dist_base, i) = 0xffffffff;
        GIC_DIST_ENABLE_CLEAR(dist_base, i) = 0xffffffff;
    }

    arm_gicv3_wait_rwp(0, 32);

    /* All interrupts defaults to IGROUP1(IRQ). */
    for (i = 0; i < _gic_max_irq; i += 32)
    {
        GIC_DIST_IGROUP(dist_base, i) = 0xffffffff;
    }

    arm_gicv3_wait_rwp(0, 32);

    /*
     * The Distributor control register (GICD_CTLR) must be configured to enable the interrupt groups and to set the routing mode.
     * Enable Affinity routing (ARE bits) The ARE bits in GICD_CTLR control whether affinity routing is enabled.
     * If affinity routing is not enabled, GICv3 can be configured for legacy operation.
     * Whether affinity routing is enabled or not can be controlled separately for Secure and Non-secure state.
     * Enables GICD_CTLR contains separate enable bits for Group 0, Secure Group 1 and Non-secure Group 1:
     * GICD_CTLR.EnableGrp1S enables distribution of Secure Group 1 interrupts.
     * GICD_CTLR.EnableGrp1NS enables distribution of Non-secure Group 1 interrupts.
     * GICD_CTLR.EnableGrp0 enables distribution of Group 0 interrupts.
     */
    GIC_DIST_CTRL(dist_base) = GICD_CTLR_ARE_NS | GICD_CTLR_ENGRP1NS;
#endif /* RT_AMP_SLAVE */

    return 0;
}
/**
 * Per-CPU initialization of the GICv3 redistributor (GICR).
 *
 * The first caller registers 'redist_base' as the master base; each CPU's
 * own frame is located at master_base + cpu_id * 128KB (two 64KB pages:
 * RD_base and SGI_base). Wakes the redistributor, then disables and
 * resets all SGI/PPI state to Group 1 with default priorities.
 *
 * @return always 0
 */
int arm_gic_redist_init(rt_uint64_t index, rt_uint64_t redist_base)
{
    int i;
    int cpu_id = rt_hw_cpu_id();
    static int master_cpu_id = -1;

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    if (master_cpu_id < 0)
    {
        /* First caller becomes CPU 0's reference; flush so secondary cores
         * (possibly running with caches off during boot) see the update. */
        master_cpu_id = 0;
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, &master_cpu_id, sizeof(master_cpu_id));
    }

    if (!_gic_table[index].redist_hw_base[master_cpu_id])
    {
        _gic_table[index].redist_hw_base[master_cpu_id] = redist_base;
    }
    redist_base = _gic_table[index].redist_hw_base[master_cpu_id];

    /* Each redistributor occupies 2 x 64KB frames (RD_base + SGI_base). */
    redist_base += cpu_id * (2 << 16);
    _gic_table[index].redist_hw_base[cpu_id] = redist_base;

    /* redistributor enable: clear GICR_WAKER bit 1 (ProcessorSleep) and
     * wait for bit 2 (ChildrenAsleep) to clear */
    GIC_RDIST_WAKER(redist_base) &= ~(1 << 1);
    while (GIC_RDIST_WAKER(redist_base) & (1 << 2))
    {
    }

    /* Disable all sgi and ppi interrupt */
    GIC_RDISTSGI_ICENABLER0(redist_base) = 0xffffffff;
    /* Wait for register write pending */
    arm_gicv3_wait_rwp(0, 0);

    /* Clear all interrupt pending */
    GIC_RDISTSGI_ICPENDR0(redist_base) = 0xffffffff;

    /* the corresponding interrupt is Group 1 or Non-secure Group 1. */
    GIC_RDISTSGI_IGROUPR0(redist_base, 0) = 0xffffffff;
    GIC_RDISTSGI_IGRPMODR0(redist_base, 0) = 0xffffffff;

    /* Configure default priorities for SGI 0:15 and PPI 16:31. */
    for (i = 0; i < 32; i += 4)
    {
        GIC_RDISTSGI_IPRIORITYR(redist_base, i) = 0xa0a0a0a0U;
    }

    /* Trigger level for PPI interrupts */
    GIC_RDISTSGI_ICFGR1(redist_base) = 0;

    return 0;
}
/**
 * Per-CPU initialization of the GICv3 CPU interface (system registers).
 *
 * Enables the system-register interface (ICC_SRE_EL1.SRE), opens the
 * priority mask fully (0xff), enables Group 1 interrupts, zeroes the
 * binary point and finally programs ICC_CTLR_EL1.
 *
 * @return always 0
 */
int arm_gic_cpu_init(rt_uint64_t index, rt_uint64_t cpu_base)
{
    rt_uint64_t value;
    int cpu_id = rt_hw_cpu_id();

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    _gic_table[index].cpu_hw_base[cpu_id] = cpu_base;

    /* Enable the system-register interface (bit 0 = SRE). */
    value = arm_gic_get_system_register_enable_mask(index);
    value |= (1 << 0);
    arm_gic_set_system_register_enable_mask(index, value);

    SET_GICV3_REG(ICC_CTLR_EL1, 0l);

    /* Priority mask 0xff: do not filter any interrupt by priority. */
    arm_gic_set_interface_prior_mask(index, 0xff);

    /* Enable group1 interrupt */
    value = 1;
    SET_GICV3_REG(ICC_IGRPEN1_EL1, value);

    arm_gic_set_binary_point(0, 0);

    /* ICC_BPR0_EL1 determines the preemption group for both Group 0 and Group 1 interrupts. */
    value = 1; /* ICC_BPR0_EL1 determines the preemption group for both Group 0 and Group 1 interrupts.*/
    value |= 1 << 18; /* Targeted SGIs with affinity level 0 values of 0 - 255 are supported. */
    /* NOTE(review): per the GICv3 spec ICC_CTLR_EL1 bit 18 (RSS) is
     * read-only — confirm the write of bit 18 is intentional. */
    SET_GICV3_REG(ICC_CTLR_EL1, value);

    return 0;
}
  707. void arm_gic_dump_type(rt_uint64_t index)
  708. {
  709. unsigned int gic_type;
  710. unsigned int gic_version;
  711. unsigned int gic_rp;
  712. gic_version = (GIC_DIST_IIDR(_gic_table[index].dist_hw_base) >> 24) & 0xfUL;
  713. gic_rp = (GIC_DIST_IIDR(_gic_table[index].dist_hw_base) >> 12) & 0xfUL;
  714. gic_type = GIC_DIST_TYPE(_gic_table[index].dist_hw_base);
  715. rt_kprintf("GICv3-%d r%dp%d on %p, max IRQs: %d, %s security extension(%08x)\n",
  716. (gic_version == 0) ? 500 : (gic_version == 2) ? 600 : 0,
  717. (gic_rp >> 4) & 0xF,
  718. gic_rp & 0xF,
  719. _gic_table[index].dist_hw_base,
  720. _gic_max_irq,
  721. gic_type & (1U << 10U) ? "has" : "no",
  722. gic_type);
  723. }
/**
 * Dump the enable / pending / active bitmaps of all interrupts on the
 * distributor, plus the highest pending INTID, to the console.
 * (The "\b\b" sequences erase the trailing ", " of each row.)
 */
void arm_gic_dump(rt_uint64_t index)
{
    int i;
    unsigned int val;

    val = arm_gic_get_high_pending_irq(0);
    rt_kprintf("--- high pending priority: %d(%08x)\n", val, val);
    rt_kprintf("--- hw mask ---\n");
    for (i = 0; i < _gic_max_irq / 32; ++i)
    {
        rt_kprintf("0x%08x, ", GIC_DIST_ENABLE_SET(_gic_table[index].dist_hw_base, i * 32));
    }
    rt_kprintf("\b\b\n--- hw pending ---\n");
    for (i = 0; i < _gic_max_irq / 32; ++i)
    {
        rt_kprintf("0x%08x, ", GIC_DIST_PENDING_SET(_gic_table[index].dist_hw_base, i * 32));
    }
    rt_kprintf("\b\b\n--- hw active ---\n");
    for (i = 0; i < _gic_max_irq / 32; ++i)
    {
        rt_kprintf("0x%08x, ", GIC_DIST_ACTIVE_SET(_gic_table[index].dist_hw_base, i * 32));
    }
    rt_kprintf("\b\b\n");
}
/* Print the affinity-routing target (GICD_IROUTER) of every SPI on GIC 0. */
static void arm_gic_bind_dump(void)
{
#ifdef BSP_USING_GICV3
    int i;

    for (i = 32; i < _gic_max_irq; i++)
    {
        rt_kprintf("irq(%d) -> 0x%X\n", i, arm_gic_get_router_cpu(0, i));
    }
#endif /* BSP_USING_GICV3 */
}
/* Print the calling CPU's redistributor SGI/PPI enable, pending and active bitmaps. */
static void arm_gic_sgi_dump(rt_uint64_t index)
{
    rt_int32_t cpu_id = rt_hw_cpu_id();

    rt_kprintf("redist_hw_base = 0x%X\n", _gic_table[index].redist_hw_base[cpu_id]);
    rt_kprintf("--- sgi mask ---\n");
    rt_kprintf("0x%08x\n", GIC_RDISTSGI_ISENABLER0(_gic_table[index].redist_hw_base[cpu_id]));
    rt_kprintf("--- sgi pending ---\n");
    rt_kprintf("0x%08x\n", GIC_RDISTSGI_ISPENDR0(_gic_table[index].redist_hw_base[cpu_id]));
    rt_kprintf("--- sgi active ---\n");
    rt_kprintf("0x%08x\n", GIC_RDISTSGI_ISACTIVER0(_gic_table[index].redist_hw_base[cpu_id]));
}
/* msh shell command: dump the full state of GIC 0 (type, distributor
 * bitmaps, SPI routing and this CPU's SGI state). */
long gic_dump(void)
{
    arm_gic_dump_type(0);
    arm_gic_dump(0);
    arm_gic_bind_dump();
    arm_gic_sgi_dump(0);

    return 0;
}
MSH_CMD_EXPORT(gic_dump, show gic status);
  777. #endif /* defined(BSP_USING_GIC) && defined(BSP_USING_GICV3) */