pic-gicv3.c

/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2013-07-20     Bernard      first version
 * 2014-04-03     Grissiom     many enhancements
 * 2018-11-22     Jesven       add rt_hw_ipi_send()
 *                             add rt_hw_ipi_handler_install()
 * 2022-08-24     GuEe-GUI     add pic support
 * 2022-11-07     GuEe-GUI     add v2m support
 * 2023-01-30     GuEe-GUI     add its and espi, eppi, lpi support
 */

#include <rthw.h>
#include <rtthread.h>
#include <rtdevice.h>

#define DBG_TAG "pic.gicv3"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <cpu.h>
#include <ioremap.h>
#include <hashmap.h>

#include "pic-gicv3.h"
#include "pic-gic-common.h"

#define FLAGS_WORKAROUND_GICR_WAKER_MSM8996 (1ULL << 0)

static int _init_cpu_id;
static struct gicv3 _gic;
static rt_bool_t _gicv3_eoi_mode_ns = RT_FALSE;
static rt_bool_t _gicv3_arm64_2941627_erratum = RT_FALSE;

enum
{
    SGI_TYPE,
    PPI_TYPE,
    SPI_TYPE,
    EPPI_TYPE,
    ESPI_TYPE,
    LPI_TYPE,
    UNKNOW_TYPE,
};

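/*
 * Rough mapping of these types to GICv3 INTID ranges (see gicv3_hwirq_type()
 * below): SGI 0-15, PPI 16-31, SPI 32-1019, extended PPI starting at
 * GIC_EPPI_BASE_INTID, extended SPI starting at GIC_ESPI_BASE_INTID, and
 * LPIs from 8192 upwards.
 */
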
rt_inline void *gicv3_percpu_redist_base(void)
{
    return _gic.redist_percpu_base[rt_hw_cpu_id()];
}

rt_inline void *gicv3_percpu_redist_sgi_base(void)
{
    return gicv3_percpu_redist_base() + GICR_SGI_OFFSET;
}

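/*
 * The extended SPI (ESPI) register banks live at their own GICD_*nE offsets.
 * Instead of a switch per register, the classic offset (e.g. GICD_ISENABLER)
 * is hashed into a small lookup table whose entries are filled with the
 * matching "nE" offsets by gicv3_init() below.
 */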
static rt_uint16_t *gicv3_dist_espi_reg(rt_uint32_t offset)
{
#define __reg_map_bits 5
#define __reg_map_size (1 << __reg_map_bits)
    static rt_uint16_t reg_map[__reg_map_size] = {};
    int idx = rt_hashmap_32(offset, __reg_map_bits);

    LOG_D("%s ESPI Map<0x%04x> = %2d", "Distributor", offset, idx);

    return &reg_map[idx];
#undef __reg_map_bits
#undef __reg_map_size
}

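/*
 * GICD_CTLR.RWP / GICR_CTLR.RWP stay set while a register write is still
 * propagating; configuration changes are only guaranteed to have taken
 * effect once the bit reads back as zero, so it is polled with a bounded
 * spin below.
 */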
static void gicv3_wait_for_rwp(void *base, rt_uint32_t rwp_bit)
{
    rt_uint32_t count = 1000000;

    while ((HWREG32(base + GICD_CTLR) & rwp_bit))
    {
        count--;
        if (!count)
        {
            LOG_W("RWP timeout");
            break;
        }

        rt_hw_cpu_relax();
    }
}

rt_inline void gicv3_dist_wait_for_rwp(void)
{
    gicv3_wait_for_rwp(_gic.dist_base, GICD_CTLR_RWP);
}

rt_inline void gicv3_redist_wait_for_rwp(void)
{
    gicv3_wait_for_rwp(_gic.redist_percpu_base[rt_hw_cpu_id()], GICR_CTLR_RWP);
}

static typeof(UNKNOW_TYPE) gicv3_hwirq_type(int hwirq)
{
    typeof(UNKNOW_TYPE) ret;

    switch (hwirq)
    {
    case 0 ... 15:
        ret = SGI_TYPE;
        break;
    case 16 ... 31:
        ret = PPI_TYPE;
        break;
    case 32 ... 1019:
        ret = SPI_TYPE;
        break;
    case GIC_EPPI_BASE_INTID ... (GIC_EPPI_BASE_INTID + 63):
        ret = EPPI_TYPE;
        break;
    case GIC_ESPI_BASE_INTID ... (GIC_ESPI_BASE_INTID + 1023):
        ret = ESPI_TYPE;
        break;
    case 8192 ... RT_GENMASK(23, 0):
        ret = LPI_TYPE;
        break;
    default:
        ret = UNKNOW_TYPE;
        break;
    }

    return ret;
}

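/*
 * Translate a hwirq plus a classic distributor register offset into the
 * offset that should actually be used (swapped to the GICD_*nE alias for
 * extended SPIs) and the index of the interrupt within that register bank.
 */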
static rt_uint32_t gicv3_hwirq_convert_offset_index(int hwirq, rt_uint32_t offset, rt_uint32_t *index)
{
    switch (gicv3_hwirq_type(hwirq))
    {
    case SGI_TYPE:
    case PPI_TYPE:
    case SPI_TYPE:
        *index = hwirq;
        break;
    case EPPI_TYPE:
        /* The EPPI range (GICR_IPRIORITYR<n>E) is contiguous to the PPI (GICR_IPRIORITYR<n>) range in the registers */
        *index = hwirq - GIC_EPPI_BASE_INTID + 32;
        break;
    case ESPI_TYPE:
        *index = hwirq - GIC_ESPI_BASE_INTID;
        offset = *gicv3_dist_espi_reg(offset);
        break;
    default:
        *index = hwirq;
        break;
    }

    return offset;
}

rt_inline rt_bool_t gicv3_hwirq_in_redist(int hwirq)
{
    switch (gicv3_hwirq_type(hwirq))
    {
    case SGI_TYPE:
    case PPI_TYPE:
    case EPPI_TYPE:
        return RT_TRUE;
    default:
        return RT_FALSE;
    }
}

static void *gicv3_hwirq_reg_base(int hwirq, rt_uint32_t offset, rt_uint32_t *index)
{
    void *base;

    if (gicv3_hwirq_in_redist(hwirq))
    {
        base = gicv3_percpu_redist_sgi_base();
    }
    else
    {
        base = _gic.dist_base;
    }

    return base + gicv3_hwirq_convert_offset_index(hwirq, offset, index);
}

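/*
 * The GICD_I{S,C}ENABLER / I{S,C}PENDR / I{S,C}ACTIVER banks carry one bit
 * per interrupt, 32 interrupts per 32-bit register: "index / 32" selects
 * the register word and "1 << (index % 32)" selects the bit inside it.
 */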
static rt_bool_t gicv3_hwirq_peek(int hwirq, rt_uint32_t offset)
{
    rt_uint32_t index;
    void *base = gicv3_hwirq_reg_base(hwirq, offset, &index);

    return !!(HWREG32(base + (index / 32) * 4) & (1 << (index % 32)));
}

static void gicv3_hwirq_poke(int hwirq, rt_uint32_t offset)
{
    rt_uint32_t index;
    void *base = gicv3_hwirq_reg_base(hwirq, offset, &index);

    HWREG32(base + (index / 32) * 4) = 1 << (index % 32);
}

static void gicv3_dist_init(void)
{
    rt_uint32_t i;
    rt_uint64_t affinity;
    void *base = _gic.dist_base;
    rt_ubase_t mpidr = rt_cpu_mpidr_table[_init_cpu_id = rt_hw_cpu_id()];

    _gic.line_nr = rt_min(GICD_TYPER_SPIS(_gic.gicd_typer), 1020U);
    _gic.espi_nr = GICD_TYPER_ESPIS(_gic.gicd_typer);

    LOG_D("%d SPIs implemented", _gic.line_nr - 32);
    LOG_D("%d Extended SPIs implemented", _gic.espi_nr);

    if (_gic.skip_init)
    {
        goto _get_max_irq;
    }

    /* Disable the distributor */
    HWREG32(base + GICD_CTLR) = 0;
    gicv3_dist_wait_for_rwp();

    /* Non-secure Group-1 */
    for (i = 32; i < _gic.line_nr; i += 32)
    {
        HWREG32(base + GICD_IGROUPR + i / 8) = RT_UINT32_MAX;
    }

    /* Disable, clear, group */
    for (i = 0; i < _gic.espi_nr; i += 4)
    {
        HWREG32(base + GICD_IPRIORITYRnE + i) = GICD_INT_DEF_PRI_X4;

        if (!(i % 16))
        {
            HWREG32(base + GICD_ICFGRnE + i / 4) = 0;

            if (!(i % 32))
            {
                HWREG32(base + GICD_ICENABLERnE + i / 8) = RT_UINT32_MAX;
                HWREG32(base + GICD_ICACTIVERnE + i / 8) = RT_UINT32_MAX;
                HWREG32(base + GICD_IGROUPRnE + i / 8) = RT_UINT32_MAX;
            }
        }
    }

    gic_common_dist_config(base, _gic.line_nr, RT_NULL, RT_NULL);

    /* Enable the distributor */
    HWREG32(base + GICD_CTLR) = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1;
    gicv3_dist_wait_for_rwp();

    affinity = ((rt_uint64_t)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
            MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
            MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
            MPIDR_AFFINITY_LEVEL(mpidr, 0));

    /* Set all global interrupts to this CPU only. */
    for (i = 32; i < _gic.line_nr; ++i)
    {
        HWREG64(base + GICD_IROUTER + i * 8) = affinity;
    }

    for (i = 0; i < _gic.espi_nr; ++i)
    {
        HWREG64(base + GICD_IROUTERnE + i * 8) = affinity;
    }

_get_max_irq:
    if (GICD_TYPER_NUM_LPIS(_gic.gicd_typer) > 1)
    {
        /* Max LPI = 8192 + 2^(num_LPIs + 1) - 1 */
        rt_size_t num_lpis = 1UL << (GICD_TYPER_NUM_LPIS(_gic.gicd_typer) + 1);

        _gic.lpi_nr = rt_min_t(int, num_lpis, 1UL << GICD_TYPER_ID_BITS(_gic.gicd_typer));
    }
    else
    {
        _gic.lpi_nr = 1UL << GICD_TYPER_ID_BITS(_gic.gicd_typer);
    }

    /* SPI + eSPI + LPIs */
    _gic.irq_nr = _gic.line_nr - 32 + _gic.espi_nr;

#ifdef RT_PIC_ARM_GIC_V3_ITS
    /* ITS will allocate the same number of lpi PIRQs */
    _gic.lpi_nr = rt_min_t(rt_size_t, RT_PIC_ARM_GIC_V3_ITS_IRQ_MAX, _gic.lpi_nr);
    _gic.irq_nr += _gic.lpi_nr;
#endif
}

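/*
 * Redistributor wake-up handshake: clearing GICR_WAKER.ProcessorSleep asks
 * the redistributor to wake the connected PE, after which
 * GICR_WAKER.ChildrenAsleep is polled until the interface reports itself
 * awake. The MSM8996 quirk skips the handshake entirely because of broken
 * firmware (see _gicv3_quirks below).
 */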
static void gicv3_redist_enable(rt_bool_t enable)
{
    void *base;
    rt_uint32_t count = 1000000, waker;

    do {
        if (_gic.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996)
        {
            break;
        }

        base = gicv3_percpu_redist_base();

        waker = HWREG32(base + GICR_WAKER);
        if (enable)
        {
            waker &= ~GICR_WAKER_ProcessorSleep;
        }
        else
        {
            waker |= GICR_WAKER_ProcessorSleep;
        }
        HWREG32(base + GICR_WAKER) = waker;

        if (!enable && !(HWREG32(base + GICR_WAKER) & GICR_WAKER_ProcessorSleep))
        {
            break;
        }

        while ((HWREG32(base + GICR_WAKER) & GICR_WAKER_ChildrenAsleep) != 0)
        {
            if (count-- == 0)
            {
                LOG_E("%s failed to %s", "Redistributor", enable ? "wakeup" : "sleep");
                break;
            }
        }
    } while (0);
}

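/*
 * Locate this CPU's redistributor: the upper 32 bits of GICR_TYPER hold the
 * PE's packed affinity (Aff3.Aff2.Aff1.Aff0), so each frame in every
 * redistributor region is compared against the current CPU's MPIDR until a
 * match is found or GICR_TYPER.Last marks the end of the region.
 */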
static void gicv3_redist_init(void)
{
    void *base;
    rt_uint32_t affinity;
    int cpu_id = rt_hw_cpu_id();
    rt_bool_t find_ok = RT_FALSE;
    rt_uint64_t mpidr = rt_cpu_mpidr_table[cpu_id], gicr_typer;

    affinity = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
            MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
            MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
            MPIDR_AFFINITY_LEVEL(mpidr, 0));

    for (int i = 0; i < _gic.redist_regions_nr; ++i)
    {
        base = _gic.redist_regions[i].base;

        do {
            gicr_typer = HWREG64(base + GICR_TYPER);

            if ((gicr_typer >> 32) == affinity)
            {
                rt_size_t ppi_nr = _gic.percpu_ppi_nr[cpu_id];
                rt_size_t typer_nr_ppis = GICR_TYPER_NR_PPIS(gicr_typer);

                _gic.percpu_ppi_nr[cpu_id] = rt_min(typer_nr_ppis, ppi_nr);
                _gic.redist_percpu_base[cpu_id] = base;

                find_ok = RT_TRUE;
                break;
            }

            if (_gic.redist_stride)
            {
                base += _gic.redist_stride;
            }
            else
            {
                base += GICR_RD_BASE_SIZE + GICR_SGI_BASE_SIZE;

                if (gicr_typer & GICR_TYPER_VLPIS)
                {
                    base += GICR_VLPI_BASE_SIZE + GICR_RESERVED_SIZE;
                }
            }
        } while (!(gicr_typer & GICR_TYPER_LAST));

        if (find_ok)
        {
            break;
        }
    }

    if (find_ok)
    {
        gicv3_redist_enable(RT_TRUE);
    }
}

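/*
 * Per-CPU interface setup: enable the system-register interface
 * (ICC_SRE_EL1.SRE), open the priority mask (ICC_PMR = 0xFF accepts every
 * priority), enable Group 1 interrupts, and select EOImode. With
 * EOImode == 1 a write to ICC_EOIR1 only drops the running priority and a
 * separate ICC_DIR write deactivates the interrupt; with EOImode == 0 the
 * ICC_EOIR1 write does both.
 */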
static void gicv3_cpu_init(void)
{
    void *base;
    rt_size_t ppi_nr;
    rt_uint64_t value;
    int cpu_id = rt_hw_cpu_id();
#ifdef ARCH_SUPPORT_HYP
    _gicv3_eoi_mode_ns = RT_TRUE;
#else
    _gicv3_eoi_mode_ns = !!rt_ofw_bootargs_select("pic.gicv3_eoimode", 0);
#endif

    base = gicv3_percpu_redist_sgi_base();
    ppi_nr = _gic.percpu_ppi_nr[cpu_id] + 16;

    for (rt_uint32_t i = 0; i < ppi_nr; i += 32)
    {
        HWREG32(base + GICR_IGROUPR0 + i / 8) = RT_UINT32_MAX;
    }

    gic_common_cpu_config(base, ppi_nr, (void *)gicv3_redist_wait_for_rwp, &_gic.parent);

    read_gicreg(ICC_SRE_SYS, value);
    value |= (1 << 0);
    write_gicreg(ICC_SRE_SYS, value);
    rt_hw_isb();

    write_gicreg(ICC_PMR_SYS, 0xff);

    /* Enable group1 interrupt */
    write_gicreg(ICC_IGRPEN1_SYS, 1);

    write_gicreg(ICC_BPR1_SYS, 0);

    /*
     * ICC_BPR0_EL1 determines the preemption group for both Group 0 and Group 1
     * interrupts.
     * Targeted SGIs with affinity level 0 values of 0 - 255 are supported.
     */
    value = ICC_CTLR_EL1_RSS | ICC_CTLR_EL1_CBPR_MASK;

    if (_gicv3_eoi_mode_ns)
    {
        value |= ICC_CTLR_EL1_EOImode_drop;
    }
    write_gicreg(ICC_CTLR_SYS, value);
}

static rt_err_t gicv3_irq_init(struct rt_pic *pic)
{
    gicv3_redist_init();
    gicv3_cpu_init();

    return RT_EOK;
}

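/*
 * Ack/EOI pairing: with EOImode == 0 the ICC_EOIR1 write is issued in the
 * ack path and both drops the running priority and deactivates the
 * interrupt; with EOImode == 1 the eoi path writes ICC_EOIR1 and then
 * ICC_DIR, the latter being skipped on parts affected by Arm erratum
 * 2941627 (see _gicv3_quirks below).
 */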
static void gicv3_irq_ack(struct rt_pic_irq *pirq)
{
    if (!_gicv3_eoi_mode_ns)
    {
        write_gicreg(ICC_EOIR1_SYS, pirq->hwirq);
        rt_hw_isb();
    }
}

static void gicv3_irq_mask(struct rt_pic_irq *pirq)
{
    int hwirq = pirq->hwirq;

    gicv3_hwirq_poke(hwirq, GICD_ICENABLER);

    if (gicv3_hwirq_in_redist(hwirq))
    {
        gicv3_redist_wait_for_rwp();
    }
    else
    {
        gicv3_dist_wait_for_rwp();
    }
}

static void gicv3_irq_unmask(struct rt_pic_irq *pirq)
{
    int hwirq = pirq->hwirq;

    gicv3_hwirq_poke(hwirq, GICD_ISENABLER);
}

static void gicv3_irq_eoi(struct rt_pic_irq *pirq)
{
    if (_gicv3_eoi_mode_ns)
    {
        int hwirq = pirq->hwirq;

        if (hwirq < 8192)
        {
            write_gicreg(ICC_EOIR1_SYS, hwirq);
            rt_hw_isb();

            if (!_gicv3_arm64_2941627_erratum)
            {
                write_gicreg(ICC_DIR_SYS, hwirq);
                rt_hw_isb();
            }
        }
    }
}

static rt_err_t gicv3_irq_set_priority(struct rt_pic_irq *pirq, rt_uint32_t priority)
{
    void *base;
    int hwirq = pirq->hwirq;
    rt_uint32_t index, offset;

    if (gicv3_hwirq_in_redist(hwirq))
    {
        base = gicv3_percpu_redist_sgi_base();
    }
    else
    {
        base = _gic.dist_base;
    }

    offset = gicv3_hwirq_convert_offset_index(hwirq, GICD_IPRIORITYR, &index);
    HWREG8(base + offset + index) = priority;

    return RT_EOK;
}

static rt_err_t gicv3_irq_set_affinity(struct rt_pic_irq *pirq, rt_bitmap_t *affinity)
{
    rt_err_t ret = RT_EOK;
    rt_uint64_t val;
    rt_ubase_t mpidr;
    rt_uint32_t offset, index;
    int hwirq = pirq->hwirq, cpu_id = rt_bitmap_next_set_bit(affinity, 0, RT_CPUS_NR);

    mpidr = rt_cpu_mpidr_table[cpu_id];

    offset = gicv3_hwirq_convert_offset_index(hwirq, GICD_IROUTER, &index);
    val = ((rt_uint64_t)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
            MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
            MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
            MPIDR_AFFINITY_LEVEL(mpidr, 0));

    HWREG64(_gic.dist_base + offset + (index * 8)) = val;

    return ret;
}

static rt_err_t gicv3_irq_set_triger_mode(struct rt_pic_irq *pirq, rt_uint32_t mode)
{
    void *base;
    rt_err_t ret = RT_EOK;
    int hwirq = pirq->hwirq;
    rt_uint32_t index, offset;

    if (hwirq > 15)
    {
        if (gicv3_hwirq_in_redist(hwirq))
        {
            base = gicv3_percpu_redist_sgi_base();
        }
        else
        {
            base = _gic.dist_base;
        }

        offset = gicv3_hwirq_convert_offset_index(hwirq, GICD_ICFGR, &index);

        ret = gic_common_configure_irq(base + offset, hwirq, mode, RT_NULL, RT_NULL);
    }
    else
    {
        ret = -RT_ENOSYS;
    }

    return ret;
}

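/*
 * SGI generation via ICC_SGI1R_EL1: one register write can target up to
 * ICC_SGI1R_TARGET_LIST_MAX PEs that share the same Aff3.Aff2.Aff1 cluster,
 * each selected by a bit in the TargetList field; the RangeSelector (RS)
 * field covers PEs whose Aff0 lies above the first target-list window. The
 * loop below groups the requested CPUs by cluster and issues one write per
 * group.
 */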
static void gicv3_irq_send_ipi(struct rt_pic_irq *pirq, rt_bitmap_t *cpumask)
{
#define __mpidr_to_sgi_affinity(cluster_id, level) \
    (MPIDR_AFFINITY_LEVEL(cluster_id, level) << ICC_SGI1R_AFFINITY_##level##_SHIFT)
    int cpu_id, last_cpu_id, limit;
    rt_uint64_t initid, range_sel, target_list, cluster_id;

    range_sel = 0;
    initid = ((pirq->hwirq) << ICC_SGI1R_SGI_ID_SHIFT);

    rt_bitmap_for_each_set_bit(cpumask, cpu_id, RT_CPUS_NR)
    {
        rt_uint64_t mpidr = rt_cpu_mpidr_table[cpu_id];

        cluster_id = mpidr & (~MPIDR_LEVEL_MASK);
        target_list = 1 << ((mpidr & MPIDR_LEVEL_MASK) % ICC_SGI1R_TARGET_LIST_MAX);
        limit = rt_min(cpu_id + ICC_SGI1R_TARGET_LIST_MAX, RT_CPUS_NR);

        last_cpu_id = cpu_id;
        rt_bitmap_for_each_set_bit_from(cpumask, cpu_id, cpu_id, limit)
        {
            rt_uint64_t mpidr = rt_cpu_mpidr_table[cpu_id];

            if (cluster_id != (mpidr & (~MPIDR_LEVEL_MASK)))
            {
                range_sel = 0;
                /* Rewind to the last CPU of this cluster so the outer loop does not skip the next one */
                cpu_id = last_cpu_id;
                break;
            }

            last_cpu_id = cpu_id;
            target_list |= 1 << ((mpidr & MPIDR_LEVEL_MASK) % ICC_SGI1R_TARGET_LIST_MAX);
        }

        rt_hw_dsb();
        write_gicreg(ICC_SGI1R_SYS,
                __mpidr_to_sgi_affinity(cluster_id, 3) |
                (range_sel << ICC_SGI1R_RS_SHIFT) |
                __mpidr_to_sgi_affinity(cluster_id, 2) |
                initid |
                __mpidr_to_sgi_affinity(cluster_id, 1) |
                target_list);
        rt_hw_isb();

        ++range_sel;
    }
#undef __mpidr_to_sgi_affinity
}

static rt_err_t gicv3_irq_set_state(struct rt_pic *pic, int hwirq, int type, rt_bool_t state)
{
    rt_err_t err = RT_EOK;
    rt_uint32_t offset = 0;

    if (hwirq >= 8192)
    {
        type = -1;
    }

    switch (type)
    {
    case RT_IRQ_STATE_PENDING:
        offset = state ? GICD_ISPENDR : GICD_ICPENDR;
        break;
    case RT_IRQ_STATE_ACTIVE:
        offset = state ? GICD_ISACTIVER : GICD_ICACTIVER;
        break;
    case RT_IRQ_STATE_MASKED:
        if (state)
        {
            struct rt_pic_irq pirq = {};

            pirq.hwirq = hwirq;
            gicv3_irq_mask(&pirq);
        }
        else
        {
            offset = GICD_ISENABLER;
        }
        break;
    default:
        err = -RT_EINVAL;
        break;
    }

    if (!err && offset)
    {
        gicv3_hwirq_poke(hwirq, offset);
    }

    return err;
}

static rt_err_t gicv3_irq_get_state(struct rt_pic *pic, int hwirq, int type, rt_bool_t *out_state)
{
    rt_err_t err = RT_EOK;
    rt_uint32_t offset = 0;

    switch (type)
    {
    case RT_IRQ_STATE_PENDING:
        offset = GICD_ISPENDR;
        break;
    case RT_IRQ_STATE_ACTIVE:
        offset = GICD_ISACTIVER;
        break;
    case RT_IRQ_STATE_MASKED:
        offset = GICD_ISENABLER;
        break;
    default:
        err = -RT_EINVAL;
        break;
    }

    if (!err)
    {
        *out_state = gicv3_hwirq_peek(hwirq, offset);
    }

    return err;
}

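/*
 * Linear PIRQ index layout used by this driver: SGI/PPI/SPI/ESPI-class
 * interrupts use index = hwirq - GIC_SGI_NR, while LPIs occupy the final
 * lpi_nr slots (hwirq 8192 + n maps to index irq_nr - lpi_nr + n). The same
 * mapping is used again by gicv3_handler() below.
 */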
static int gicv3_irq_map(struct rt_pic *pic, int hwirq, rt_uint32_t mode)
{
    struct rt_pic_irq *pirq;
    int irq, hwirq_type, irq_index;

    hwirq_type = gicv3_hwirq_type(hwirq);

    if (hwirq_type != LPI_TYPE)
    {
        irq_index = hwirq - GIC_SGI_NR;
    }
    else
    {
        irq_index = _gic.irq_nr - _gic.lpi_nr + hwirq - 8192;
    }

    pirq = rt_pic_find_irq(pic, irq_index);

    if (pirq && hwirq >= GIC_SGI_NR)
    {
        pirq->mode = mode;
        pirq->priority = GICD_INT_DEF_PRI;

        switch (gicv3_hwirq_type(hwirq))
        {
        case PPI_TYPE:
            gic_fill_ppi_affinity(pirq->affinity);
            break;
        case SPI_TYPE:
        case ESPI_TYPE:
            RT_IRQ_AFFINITY_SET(pirq->affinity, _init_cpu_id);
        default:
            break;
        }

        irq = rt_pic_config_irq(pic, irq_index, hwirq);

        if (irq >= 0 && mode != RT_IRQ_MODE_LEVEL_HIGH)
        {
            gicv3_irq_set_triger_mode(pirq, mode);
        }
    }
    else
    {
        irq = -1;
    }

    return irq;
}

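/*
 * Device tree "interrupts" cells as parsed below: cell 0 selects the class
 * (0 = SPI, 1 = PPI, 2 = ESPI, 3 = EPPI, plus the GIC_IRQ_TYPE_LPI and
 * GIC_IRQ_TYPE_PARTITION specifiers), cell 1 is the class-relative number
 * and cell 2 carries the trigger flags.
 */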
static rt_err_t gicv3_irq_parse(struct rt_pic *pic, struct rt_ofw_cell_args *args, struct rt_pic_irq *out_pirq)
{
    rt_err_t err = RT_EOK;

    if (args->args_count == 3)
    {
        out_pirq->mode = args->args[2] & RT_IRQ_MODE_MASK;

        switch (args->args[0])
        {
        case 0:
            /* SPI */
            out_pirq->hwirq = args->args[1] + 32;
            break;
        case 1:
            /* PPI */
            out_pirq->hwirq = args->args[1] + 16;
            break;
        case 2:
            /* ESPI */
            out_pirq->hwirq = args->args[1] + GIC_ESPI_BASE_INTID;
            break;
        case 3:
            /* EPPI */
            out_pirq->hwirq = args->args[1] + GIC_EPPI_BASE_INTID;
            break;
        case GIC_IRQ_TYPE_LPI:
            /* LPI */
            out_pirq->hwirq = args->args[1];
            break;
        case GIC_IRQ_TYPE_PARTITION:
            out_pirq->hwirq = args->args[1];

            if (args->args[1] >= 16)
            {
                out_pirq->hwirq += GIC_EPPI_BASE_INTID - 16;
            }
            else
            {
                out_pirq->hwirq += 16;
            }
            break;
        default:
            err = -RT_ENOSYS;
            break;
        }
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}

const static struct rt_pic_ops gicv3_ops =
{
    .name = "GICv3",
    .irq_init = gicv3_irq_init,
    .irq_ack = gicv3_irq_ack,
    .irq_mask = gicv3_irq_mask,
    .irq_unmask = gicv3_irq_unmask,
    .irq_eoi = gicv3_irq_eoi,
    .irq_set_priority = gicv3_irq_set_priority,
    .irq_set_affinity = gicv3_irq_set_affinity,
    .irq_set_triger_mode = gicv3_irq_set_triger_mode,
    .irq_send_ipi = gicv3_irq_send_ipi,
    .irq_set_state = gicv3_irq_set_state,
    .irq_get_state = gicv3_irq_get_state,
    .irq_map = gicv3_irq_map,
    .irq_parse = gicv3_irq_parse,
};

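/*
 * Top-level interrupt entry: reading ICC_IAR1_EL1 acknowledges the highest
 * priority pending Group 1 interrupt and returns its INTID. INTIDs
 * 1020-1023 are special/spurious values and are ignored; everything else is
 * routed to the matching PIRQ (IPIs for INTIDs below GIC_SGI_NR, LPIs at
 * the tail of the linear range).
 */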
static rt_bool_t gicv3_handler(void *data)
{
    rt_bool_t res = RT_FALSE;
    int hwirq;
    struct gicv3 *gic = data;

    read_gicreg(ICC_IAR1_SYS, hwirq);

    if (!(hwirq >= 1020 && hwirq <= 1023))
    {
        struct rt_pic_irq *pirq;

        if (hwirq < GIC_SGI_NR)
        {
            rt_hw_rmb();

            pirq = rt_pic_find_ipi(&gic->parent, hwirq);
        }
        else
        {
            int irq_index;

            if (hwirq < 8192)
            {
                irq_index = hwirq - GIC_SGI_NR;
            }
            else
            {
                irq_index = gic->irq_nr - gic->lpi_nr + hwirq - 8192;
            }

            pirq = rt_pic_find_irq(&gic->parent, irq_index);
        }

        gicv3_irq_ack(pirq);

        rt_pic_handle_isr(pirq);

        gicv3_irq_eoi(pirq);

        res = RT_TRUE;
    }

    return res;
}

static rt_err_t gicv3_enable_quirk_msm8996(void *data)
{
    struct gicv3 *gic = data;

    gic->flags |= FLAGS_WORKAROUND_GICR_WAKER_MSM8996;

    return RT_EOK;
}

static rt_err_t gicv3_enable_quirk_arm64_2941627(void *data)
{
    _gicv3_arm64_2941627_erratum = RT_TRUE;

    return RT_EOK;
}

static const struct gic_quirk _gicv3_quirks[] =
{
    {
        .desc = "GICv3: Qualcomm MSM8996 broken firmware",
        .compatible = "qcom,msm8996-gic-v3",
        .init = gicv3_enable_quirk_msm8996,
    },
    {
        /* GIC-700: 2941627 workaround - IP variant [0,1] */
        .desc = "GICv3: ARM64 erratum 2941627",
        .iidr = 0x0400043b,
        .iidr_mask = 0xff0e0fff,
        .init = gicv3_enable_quirk_arm64_2941627,
    },
    {
        /* GIC-700: 2941627 workaround - IP variant [2] */
        .desc = "GICv3: ARM64 erratum 2941627",
        .iidr = 0x0402043b,
        .iidr_mask = 0xff0f0fff,
        .init = gicv3_enable_quirk_arm64_2941627,
    },
    { /* sentinel */ }
};

static rt_err_t gicv3_iomap_init(rt_uint64_t *regs)
{
    rt_err_t ret = RT_EOK;
    int idx;
    char *name;

    do {
        /* GICD->GICR */
        _gic.dist_size = regs[1];
        _gic.dist_base = rt_ioremap((void *)regs[0], _gic.dist_size);
        if (!_gic.dist_base)
        {
            name = "Distributor";
            idx = 0;
            ret = -RT_ERROR;
            break;
        }

        name = "Redistributor";
        _gic.redist_regions = rt_malloc(sizeof(_gic.redist_regions[0]) * _gic.redist_regions_nr);
        if (!_gic.redist_regions)
        {
            idx = -1;
            ret = -RT_ENOMEM;
            LOG_E("No memory to save %s", name);
            break;
        }

        for (int i = 0, off = 2; i < _gic.redist_regions_nr; ++i)
        {
            void *base = (void *)regs[off++];
            rt_size_t size = regs[off++];

            _gic.redist_regions[i].size = size;
            _gic.redist_regions[i].base = rt_ioremap(base, size);
            _gic.redist_regions[i].base_phy = base;

            if (!_gic.redist_regions[i].base)
            {
                idx = 1;
                ret = -RT_ERROR;
                break;
            }
        }

        if (ret)
        {
            break;
        }

        /* ArchRev[7:4] */
        _gic.version = HWREG32(_gic.dist_base + GICD_PIDR2) >> 4;
    } while (0);

    if (ret && idx >= 0)
    {
        RT_UNUSED(name);

        LOG_E("%s IO[%p, %p] map fail", name, regs[idx * 2], regs[idx * 2 + 1]);
    }

    return ret;
}

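/*
 * gicv3_init() first populates the ESPI offset lookup table: one expansion
 * of the macro stores the matching GICD_*nE offset for each classic
 * distributor register via gicv3_dist_espi_reg(), and a second expansion
 * with RT_ASSERT verifies that no two offsets hashed to the same slot.
 */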
static void gicv3_init(void)
{
#define __dist_espi_regs_do(func, expr, ...)                        \
    __VA_ARGS__(*func(GICD_IGROUPR) expr GICD_IGROUPRnE);           \
    __VA_ARGS__(*func(GICD_ISENABLER) expr GICD_ISENABLERnE);       \
    __VA_ARGS__(*func(GICD_ICENABLER) expr GICD_ICENABLERnE);       \
    __VA_ARGS__(*func(GICD_ISPENDR) expr GICD_ISPENDRnE);           \
    __VA_ARGS__(*func(GICD_ICPENDR) expr GICD_ICPENDRnE);           \
    __VA_ARGS__(*func(GICD_ISACTIVER) expr GICD_ISACTIVERnE);       \
    __VA_ARGS__(*func(GICD_ICACTIVER) expr GICD_ICACTIVERnE);       \
    __VA_ARGS__(*func(GICD_IPRIORITYR) expr GICD_IPRIORITYRnE);     \
    __VA_ARGS__(*func(GICD_ICFGR) expr GICD_ICFGRnE);               \
    __VA_ARGS__(*func(GICD_IROUTER) expr GICD_IROUTERnE);

    /* Map registers for ESPI */
    __dist_espi_regs_do(gicv3_dist_espi_reg, =);
    __dist_espi_regs_do(gicv3_dist_espi_reg, ==, RT_ASSERT);
#undef __dist_espi_regs_do

    _gic.gicd_typer = HWREG32(_gic.dist_base + GICD_TYPER);

    gic_common_init_quirk_hw(HWREG32(_gic.dist_base + GICD_IIDR), _gicv3_quirks, &_gic.parent);
    gicv3_dist_init();

    _gic.parent.priv_data = &_gic;
    _gic.parent.ops = &gicv3_ops;

    rt_pic_linear_irq(&_gic.parent, _gic.irq_nr - GIC_SGI_NR);
    gic_common_sgi_config(_gic.dist_base, &_gic.parent, 0);

    rt_pic_add_traps(gicv3_handler, &_gic);

    rt_pic_user_extends(&_gic.parent);
}

static void gicv3_init_fail(void)
{
    if (_gic.dist_base)
    {
        rt_iounmap(_gic.dist_base);
    }

    if (_gic.redist_regions)
    {
        for (int i = 0; i < _gic.redist_regions_nr; ++i)
        {
            if (_gic.redist_regions[i].base)
            {
                rt_iounmap(_gic.redist_regions[i].base);
            }
        }

        rt_free(_gic.redist_regions);
    }

    rt_memset(&_gic, 0, sizeof(_gic));
}

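/*
 * Device tree probe: the optional "#redistributor-regions" and
 * "redistributor-stride" properties describe how many GICR register frames
 * exist and how far apart they sit, "skip-init" leaves the distributor
 * configuration untouched, the "reg" property supplies one address/size
 * pair for the distributor followed by one per redistributor region, and
 * ITS/V2M MSI support is probed afterwards when enabled.
 */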
static rt_err_t gicv3_ofw_init(struct rt_ofw_node *np, const struct rt_ofw_node_id *id)
{
    rt_err_t err = RT_EOK;

    do {
        rt_size_t reg_nr_max;
        rt_err_t msi_init = -RT_ENOSYS;
        rt_uint32_t redist_regions_nr;
        rt_uint64_t *regs, redist_stride;

        if (rt_ofw_prop_read_u32(np, "#redistributor-regions", &redist_regions_nr))
        {
            redist_regions_nr = 1;
        }

        /* GICD + n * GICR */
        reg_nr_max = 2 + (2 * redist_regions_nr);
        regs = rt_calloc(1, sizeof(rt_uint64_t) * reg_nr_max);

        if (!regs)
        {
            err = -RT_ENOMEM;
            break;
        }

        rt_ofw_get_address_array(np, reg_nr_max, regs);

        _gic.redist_regions_nr = redist_regions_nr;

        err = gicv3_iomap_init(regs);
        rt_free(regs);

        if (err)
        {
            break;
        }

        if (_gic.version != 3 && _gic.version != 4)
        {
            LOG_E("Version = %d is not supported", _gic.version);

            err = -RT_EINVAL;
            break;
        }

        if (rt_ofw_prop_read_u64(np, "redistributor-stride", &redist_stride))
        {
            redist_stride = 0;
        }
        _gic.redist_stride = redist_stride;

        _gic.skip_init = rt_ofw_prop_read_bool(np, "skip-init");

        gic_common_init_quirk_ofw(np, _gicv3_quirks, &_gic.parent);
        gicv3_init();

        rt_ofw_data(np) = &_gic.parent;

#ifdef RT_PIC_ARM_GIC_V3_ITS
        msi_init = gicv3_its_ofw_probe(np, id);
#endif

        /* V2M or ITS only */
        if (msi_init)
        {
#ifdef RT_PIC_ARM_GIC_V2M
            gicv2m_ofw_probe(np, id);
#endif
        }
    } while (0);

    if (err)
    {
        gicv3_init_fail();
    }

    return err;
}

static const struct rt_ofw_node_id gicv3_ofw_ids[] =
{
    { .compatible = "arm,gic-v3" },
    { /* sentinel */ }
};
RT_PIC_OFW_DECLARE(gicv3, gicv3_ofw_ids, gicv3_ofw_init);