pic-gicv3.c 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098
  1. /*
  2. * Copyright (c) 2006-2023, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2013-07-20 Bernard first version
  9. * 2014-04-03 Grissiom many enhancements
  10. * 2018-11-22 Jesven add rt_hw_ipi_send()
  11. * add rt_hw_ipi_handler_install()
  12. * 2022-08-24 GuEe-GUI add pic support
  13. * 2022-11-07 GuEe-GUI add v2m support
  14. * 2023-01-30 GuEe-GUI add its and espi, eppi, lpi support
  15. */
  16. #include <rthw.h>
  17. #include <rtthread.h>
  18. #include <rtdevice.h>
  19. #define DBG_TAG "pic.gicv3"
  20. #define DBG_LVL DBG_INFO
  21. #include <rtdbg.h>
  22. #include <cpu.h>
  23. #include <ioremap.h>
  24. #include <hashmap.h>
  25. #include "pic-gicv3.h"
  26. #include "pic-gic-common.h"
  27. #define FLAGS_WORKAROUND_GICR_WAKER_MSM8996 (1ULL << 0)
  28. static int _init_cpu_id;
  29. static struct gicv3 _gic;
  30. static rt_bool_t _gicv3_eoi_mode_ns = RT_FALSE;
  31. static rt_bool_t _gicv3_arm64_2941627_erratum = RT_FALSE;
/* Classification of a hardware INTID range; see gicv3_hwirq_type() */
enum
{
    SGI_TYPE,    /* INTID 0-15: software-generated (IPIs), banked per CPU */
    PPI_TYPE,    /* INTID 16-31: private peripheral, banked per CPU */
    SPI_TYPE,    /* INTID 32-1019: shared peripheral */
    EPPI_TYPE,   /* extended PPI range (GIC_EPPI_BASE_INTID ..) */
    ESPI_TYPE,   /* extended SPI range (GIC_ESPI_BASE_INTID ..) */
    LPI_TYPE,    /* INTID >= 8192: locality-specific (message-based) */
    UNKNOW_TYPE,
};
  42. rt_inline void *gicv3_percpu_redist_base(void)
  43. {
  44. return _gic.redist_percpu_base[rt_hw_cpu_id()];
  45. }
  46. rt_inline void *gicv3_percpu_redist_sgi_base(void)
  47. {
  48. return gicv3_percpu_redist_base() + GICR_SGI_OFFSET;
  49. }
/*
 * Return the storage slot that maps a classic Distributor register offset
 * (e.g. GICD_ISENABLER) to its extended-SPI counterpart (GICD_ISENABLERnE).
 * Slots are filled once by gicv3_init() via the __dist_espi_regs_do() macro
 * and read afterwards by gicv3_hwirq_convert_offset_index().
 */
static rt_uint16_t *gicv3_dist_espi_reg(rt_uint32_t offset)
{
#define __reg_map_bits 5
#define __reg_map_size (1 << __reg_map_bits)
    /* 32-slot table keyed by a 5-bit hash of the offset; gicv3_init()
     * asserts the ten mapped registers do not collide */
    static rt_uint16_t reg_map[__reg_map_size] = {};

    int idx = rt_hashmap_32(offset, __reg_map_bits);

    LOG_D("%s ESPI Map<0x%04x> = %2d", "Distributor", offset, idx);

    return &reg_map[idx];
#undef __reg_map_bits
#undef __reg_map_size
}
  61. static void gicv3_wait_for_rwp(void *base, rt_uint32_t rwp_bit)
  62. {
  63. rt_uint32_t count = 1000000;
  64. while ((HWREG32(base + GICD_CTLR) & rwp_bit))
  65. {
  66. count--;
  67. if (!count)
  68. {
  69. LOG_W("RWP timeout");
  70. break;
  71. }
  72. rt_hw_cpu_relax();
  73. }
  74. }
  75. rt_inline void gicv3_dist_wait_for_rwp(void)
  76. {
  77. gicv3_wait_for_rwp(_gic.dist_base, GICD_CTLR_RWP);
  78. }
  79. rt_inline void gicv3_redist_wait_for_rwp(void)
  80. {
  81. gicv3_wait_for_rwp(_gic.redist_percpu_base[rt_hw_cpu_id()], GICR_CTLR_RWP);
  82. }
  83. static typeof(UNKNOW_TYPE) gicv3_hwirq_type(int hwirq)
  84. {
  85. typeof(UNKNOW_TYPE) ret;
  86. switch (hwirq)
  87. {
  88. case 0 ... 15:
  89. ret = SGI_TYPE;
  90. break;
  91. case 16 ... 31:
  92. ret = PPI_TYPE;
  93. break;
  94. case 32 ... 1019:
  95. ret = SPI_TYPE;
  96. break;
  97. case GIC_EPPI_BASE_INTID ... (GIC_EPPI_BASE_INTID + 63):
  98. ret = EPPI_TYPE;
  99. break;
  100. case GIC_ESPI_BASE_INTID ... (GIC_ESPI_BASE_INTID + 1023):
  101. ret = ESPI_TYPE;
  102. break;
  103. case 8192 ... RT_GENMASK(23, 0):
  104. ret = LPI_TYPE;
  105. break;
  106. default:
  107. ret = UNKNOW_TYPE;
  108. break;
  109. }
  110. return ret;
  111. }
/*
 * Translate (hwirq, classic register offset) into the register offset and
 * per-register bit/byte index to actually use.
 * For SGI/PPI/SPI the offset is unchanged and the index is the INTID itself;
 * EPPIs reuse the PPI registers with a shifted index; ESPIs switch to the
 * extended (GICD_xxxnE) register via the map built by gicv3_dist_espi_reg().
 */
static rt_uint32_t gicv3_hwirq_convert_offset_index(int hwirq, rt_uint32_t offset, rt_uint32_t *index)
{
    switch (gicv3_hwirq_type(hwirq))
    {
    case SGI_TYPE:
    case PPI_TYPE:
    case SPI_TYPE:
        *index = hwirq;
        break;
    case EPPI_TYPE:
        /* EPPI range (GICR_IPRIORITYR<n>E) is contiguous to the PPI (GICR_IPRIORITYR<n>) range in the registers */
        *index = hwirq - GIC_EPPI_BASE_INTID + 32;
        break;
    case ESPI_TYPE:
        *index = hwirq - GIC_ESPI_BASE_INTID;
        /* Swap the classic GICD_xxx offset for its GICD_xxxnE counterpart */
        offset = *gicv3_dist_espi_reg(offset);
        break;
    default:
        /* LPI/unknown: pass through; callers never use these with bitmaps */
        *index = hwirq;
        break;
    }

    return offset;
}
  135. rt_inline rt_bool_t gicv3_hwirq_in_redist(int hwirq)
  136. {
  137. switch (gicv3_hwirq_type(hwirq))
  138. {
  139. case SGI_TYPE:
  140. case PPI_TYPE:
  141. case EPPI_TYPE:
  142. return RT_TRUE;
  143. default:
  144. return RT_FALSE;
  145. }
  146. }
  147. static void *gicv3_hwirq_reg_base(int hwirq, rt_uint32_t offset, rt_uint32_t *index)
  148. {
  149. void *base;
  150. if (gicv3_hwirq_in_redist(hwirq))
  151. {
  152. base = gicv3_percpu_redist_sgi_base();
  153. }
  154. else
  155. {
  156. base = _gic.dist_base;
  157. }
  158. return base + gicv3_hwirq_convert_offset_index(hwirq, offset, index);
  159. }
  160. static rt_bool_t gicv3_hwirq_peek(int hwirq, rt_uint32_t offset)
  161. {
  162. rt_uint32_t index;
  163. void *base = gicv3_hwirq_reg_base(hwirq, offset, &index);
  164. return !!HWREG32(base + (index / 32) * 4);
  165. }
  166. static void gicv3_hwirq_poke(int hwirq, rt_uint32_t offset)
  167. {
  168. rt_uint32_t index;
  169. void *base = gicv3_hwirq_reg_base(hwirq, offset, &index);
  170. HWREG32(base + (index / 32) * 4) = 1 << (index % 32);
  171. }
/*
 * One-time Distributor setup, run on the boot CPU (recorded in _init_cpu_id
 * so SPIs are routed to it by default): discover line counts from
 * GICD_TYPER, configure group/priority/routing for all SPIs and eSPIs,
 * then compute the driver's total IRQ space (_gic.irq_nr).
 */
static void gicv3_dist_init(void)
{
    rt_uint32_t i;
    rt_uint64_t affinity;
    void *base = _gic.dist_base;
    rt_ubase_t mpidr = rt_cpu_mpidr_table[_init_cpu_id = rt_hw_cpu_id()];

    /* SPI line count from GICD_TYPER, architectural maximum 1020 */
    _gic.line_nr = rt_min(GICD_TYPER_SPIS(_gic.gicd_typer), 1020U);
    _gic.espi_nr = GICD_TYPER_ESPIS(_gic.gicd_typer);

    LOG_D("%d SPIs implemented", _gic.line_nr - 32);
    LOG_D("%d Extended SPIs implemented", _gic.espi_nr);

    /* Disable the distributor */
    HWREG32(base + GICD_CTLR) = 0;
    gicv3_dist_wait_for_rwp();

    /* Non-secure Group-1 */
    for (i = 32; i < _gic.line_nr; i += 32)
    {
        HWREG32(base + GICD_IGROUPR + i / 8) = RT_UINT32_MAX;
    }

    /* Disable, clear, group the eSPIs. Stride differs per register width:
     * priority is byte-per-irq (step 4), config is 2 bits (every 16),
     * enable/active/group bitmaps are 1 bit (every 32). */
    for (i = 0; i < _gic.espi_nr; i += 4)
    {
        HWREG32(base + GICD_IPRIORITYRnE + i) = GICD_INT_DEF_PRI_X4;

        if (!(i % 16))
        {
            HWREG32(base + GICD_ICFGRnE + i / 4) = 0;

            if (!(i % 32))
            {
                HWREG32(base + GICD_ICENABLERnE + i / 8) = RT_UINT32_MAX;
                HWREG32(base + GICD_ICACTIVERnE + i / 8) = RT_UINT32_MAX;
                HWREG32(base + GICD_IGROUPRnE + i / 8) = RT_UINT32_MAX;
            }
        }
    }

    gic_common_dist_config(base, _gic.line_nr, RT_NULL, RT_NULL);

    /* Enable the distributor */
    HWREG32(base + GICD_CTLR) = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1;
    gicv3_dist_wait_for_rwp();

    /* Pack this CPU's MPIDR into GICD_IROUTER layout:
     * Aff3[39:32] Aff2[23:16] Aff1[15:8] Aff0[7:0] */
    affinity = ((rt_uint64_t)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
            MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
            MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
            MPIDR_AFFINITY_LEVEL(mpidr, 0));

    /* Set all global interrupts to this CPU only. */
    for (i = 32; i < _gic.line_nr; ++i)
    {
        HWREG64(base + GICD_IROUTER + i * 8) = affinity;
    }

    for (i = 0; i < _gic.espi_nr; ++i)
    {
        HWREG64(base + GICD_IROUTERnE + i * 8) = affinity;
    }

    if (GICD_TYPER_NUM_LPIS(_gic.gicd_typer) > 1)
    {
        /* Max LPI = 8192 + Math.pow(2, num_LPIs + 1) - 1 */
        rt_size_t num_lpis = 1UL << (GICD_TYPER_NUM_LPIS(_gic.gicd_typer) + 1);

        _gic.lpi_nr = rt_min_t(int, num_lpis, 1UL << GICD_TYPER_ID_BITS(_gic.gicd_typer));
    }
    else
    {
        _gic.lpi_nr = 1UL << GICD_TYPER_ID_BITS(_gic.gicd_typer);
    }

    /* SPI + eSPI + LPIs */
    _gic.irq_nr = _gic.line_nr - 32 + _gic.espi_nr;

#ifdef RT_PIC_ARM_GIC_V3_ITS
    /* ITS will allocate the same number of lpi PIRQs */
    _gic.lpi_nr = rt_min_t(rt_size_t, RT_PIC_ARM_GIC_V3_ITS_IRQ_MAX, _gic.lpi_nr);
    _gic.irq_nr += _gic.lpi_nr;
#endif
}
/*
 * Wake (enable) or put to sleep (disable) this CPU's Redistributor through
 * GICR_WAKER, then poll until the state change completes (bounded spin).
 * Skipped entirely on MSM8996, whose firmware breaks GICR_WAKER access.
 */
static void gicv3_redist_enable(rt_bool_t enable)
{
    void *base;
    rt_uint32_t count = 1000000, waker;

    do {
        if (_gic.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996)
        {
            break;
        }

        base = gicv3_percpu_redist_base();

        waker = HWREG32(base + GICR_WAKER);
        if (enable)
        {
            waker &= ~GICR_WAKER_ProcessorSleep;
        }
        else
        {
            waker |= GICR_WAKER_ProcessorSleep;
        }
        HWREG32(base + GICR_WAKER) = waker;

        /* ProcessorSleep reads back clear right after we set it: the
         * Redistributor has no power management, nothing to wait for */
        if (!enable && !(HWREG32(base + GICR_WAKER) & GICR_WAKER_ProcessorSleep))
        {
            break;
        }

        /* NOTE(review): this waits for ChildrenAsleep to CLEAR on both the
         * wake and sleep paths; for the sleep path the architecture expects
         * waiting for it to become set — confirm intended behavior. */
        while ((HWREG32(base + GICR_WAKER) & GICR_WAKER_ChildrenAsleep) != 0)
        {
            if (count-- == 0)
            {
                LOG_E("%s failed to %s", "Redistributor", enable ? "wakeup" : "sleep");
                break;
            }
        }
    } while (0);
}
  274. static void gicv3_redist_init(void)
  275. {
  276. void *base;
  277. rt_uint32_t affinity;
  278. int cpu_id = rt_hw_cpu_id();
  279. rt_bool_t find_ok = RT_TRUE;
  280. rt_uint64_t mpidr = rt_cpu_mpidr_table[cpu_id], gicr_typer;
  281. affinity = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
  282. MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
  283. MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
  284. MPIDR_AFFINITY_LEVEL(mpidr, 0));
  285. for (int i = 0; i < _gic.redist_regions_nr; ++i)
  286. {
  287. base = _gic.redist_regions[i].base;
  288. do {
  289. gicr_typer = HWREG64(base + GICR_TYPER);
  290. if ((gicr_typer >> 32) == affinity)
  291. {
  292. rt_size_t ppi_nr = _gic.percpu_ppi_nr[cpu_id];
  293. rt_size_t typer_nr_ppis = GICR_TYPER_NR_PPIS(gicr_typer);
  294. _gic.percpu_ppi_nr[cpu_id] = rt_min(typer_nr_ppis, ppi_nr);
  295. _gic.redist_percpu_base[cpu_id] = base;
  296. find_ok = RT_TRUE;
  297. break;
  298. }
  299. if (_gic.redist_stride)
  300. {
  301. base += _gic.redist_stride;
  302. }
  303. else
  304. {
  305. base += GICR_RD_BASE_SIZE + GICR_SGI_BASE_SIZE;
  306. if (gicr_typer & GICR_TYPER_VLPIS)
  307. {
  308. base += GICR_VLPI_BASE_SIZE + GICR_RESERVED_SIZE;
  309. }
  310. }
  311. } while (!(gicr_typer & GICR_TYPER_LAST));
  312. if (find_ok)
  313. {
  314. break;
  315. }
  316. }
  317. if (find_ok)
  318. {
  319. gicv3_redist_enable(RT_TRUE);
  320. }
  321. }
/*
 * Per-CPU interface bring-up: place SGIs/PPIs/EPPIs in Non-secure Group-1,
 * enable the system-register interface (ICC_SRE), unmask priorities and
 * enable Group-1 delivery. Also selects EOImode for this image.
 */
static void gicv3_cpu_init(void)
{
    void *base;
    rt_size_t ppi_nr;
    rt_uint64_t value;
    int cpu_id = rt_hw_cpu_id();

#ifdef ARCH_SUPPORT_HYP
    /* Running with a hypervisor: use EOImode=1 (split priority-drop/deactivate) */
    _gicv3_eoi_mode_ns = RT_TRUE;
#else
    /* Allow selecting EOImode=1 via bootargs ("pic.gicv3_eoimode") */
    _gicv3_eoi_mode_ns = !!rt_ofw_bootargs_select("pic.gicv3_eoimode", 0);
#endif

    base = gicv3_percpu_redist_sgi_base();
    /* 16 SGIs plus this CPU's PPIs, all managed through the SGI frame */
    ppi_nr = _gic.percpu_ppi_nr[cpu_id] + 16;

    for (rt_uint32_t i = 0; i < ppi_nr; i += 32)
    {
        HWREG32(base + GICR_IGROUPR0 + i / 8) = RT_UINT32_MAX;
    }

    gic_common_cpu_config(base, ppi_nr, (void *)gicv3_redist_wait_for_rwp, &_gic.parent);

    /* Enable the system-register interface (ICC_SRE.SRE, bit 0) */
    read_gicreg(ICC_SRE_SYS, value);
    value |= (1 << 0);
    write_gicreg(ICC_SRE_SYS, value);
    rt_hw_isb();

    /* Priority mask 0xff: accept all interrupt priorities */
    write_gicreg(ICC_PMR_SYS, 0xff);

    /* Enable group1 interrupt */
    write_gicreg(ICC_IGRPEN1_SYS, 1);

    write_gicreg(ICC_BPR1_SYS, 0);

    /*
     * ICC_BPR0_EL1 determines the preemption group for both Group 0 and Group 1
     * interrupts.
     * Targeted SGIs with affinity level 0 values of 0 - 255 are supported.
     */
    value = ICC_CTLR_EL1_RSS | ICC_CTLR_EL1_CBPR_MASK;
    if (_gicv3_eoi_mode_ns)
    {
        value |= ICC_CTLR_EL1_EOImode_drop;
    }
    write_gicreg(ICC_CTLR_SYS, value);
}
  360. static rt_err_t gicv3_irq_init(struct rt_pic *pic)
  361. {
  362. gicv3_redist_init();
  363. gicv3_cpu_init();
  364. return RT_EOK;
  365. }
  366. static void gicv3_irq_ack(struct rt_pic_irq *pirq)
  367. {
  368. if (!_gicv3_eoi_mode_ns)
  369. {
  370. write_gicreg(ICC_EOIR1_SYS, pirq->hwirq);
  371. rt_hw_isb();
  372. }
  373. }
  374. static void gicv3_irq_mask(struct rt_pic_irq *pirq)
  375. {
  376. int hwirq = pirq->hwirq;
  377. gicv3_hwirq_poke(hwirq, GICD_ICENABLER);
  378. if (gicv3_hwirq_in_redist(hwirq))
  379. {
  380. gicv3_redist_wait_for_rwp();
  381. }
  382. else
  383. {
  384. gicv3_dist_wait_for_rwp();
  385. }
  386. }
  387. static void gicv3_irq_unmask(struct rt_pic_irq *pirq)
  388. {
  389. int hwirq = pirq->hwirq;
  390. gicv3_hwirq_poke(hwirq, GICD_ISENABLER);
  391. }
/*
 * End-of-interrupt for EOImode=1: priority drop via EOIR1, then explicit
 * deactivate via DIR — unless the GIC-700 erratum 2941627 workaround is
 * active, in which case the DIR write is skipped.
 * NOTE(review): with EOImode=1, LPIs (hwirq >= 8192) get neither EOIR nor
 * DIR here — confirm their priority drop is handled elsewhere.
 */
static void gicv3_irq_eoi(struct rt_pic_irq *pirq)
{
    if (_gicv3_eoi_mode_ns)
    {
        int hwirq = pirq->hwirq;

        if (hwirq < 8192)
        {
            write_gicreg(ICC_EOIR1_SYS, hwirq);
            rt_hw_isb();

            if (!_gicv3_arm64_2941627_erratum)
            {
                write_gicreg(ICC_DIR_SYS, hwirq);
                rt_hw_isb();
            }
        }
    }
}
  409. static rt_err_t gicv3_irq_set_priority(struct rt_pic_irq *pirq, rt_uint32_t priority)
  410. {
  411. void *base;
  412. int hwirq = pirq->hwirq;
  413. rt_uint32_t index, offset;
  414. if (gicv3_hwirq_in_redist(hwirq))
  415. {
  416. base = gicv3_percpu_redist_sgi_base();
  417. }
  418. else
  419. {
  420. base = _gic.dist_base;
  421. }
  422. offset = gicv3_hwirq_convert_offset_index(hwirq, GICD_IPRIORITYR, &index);
  423. HWREG8(base + offset + index) = priority;
  424. return RT_EOK;
  425. }
/*
 * Route a SPI/eSPI to a single CPU by writing that CPU's MPIDR affinity
 * fields into GICD_IROUTER(nE). Only one target is supported: the first
 * CPU set in the affinity bitmap is used.
 */
static rt_err_t gicv3_irq_set_affinity(struct rt_pic_irq *pirq, rt_bitmap_t *affinity)
{
    rt_err_t ret = RT_EOK;
    rt_uint64_t val;
    rt_ubase_t mpidr;
    rt_uint32_t offset, index;
    int hwirq = pirq->hwirq, cpu_id = rt_bitmap_next_set_bit(affinity, 0, RT_CPUS_NR);

    mpidr = rt_cpu_mpidr_table[cpu_id];
    offset = gicv3_hwirq_convert_offset_index(hwirq, GICD_IROUTER, &index);

    /* IROUTER layout: Aff3[39:32] Aff2[23:16] Aff1[15:8] Aff0[7:0] */
    val = ((rt_uint64_t)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
            MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
            MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
            MPIDR_AFFINITY_LEVEL(mpidr, 0));

    /* One 64-bit router register per interrupt */
    HWREG64(_gic.dist_base + offset + (index * 8)) = val;

    return ret;
}
  442. static rt_err_t gicv3_irq_set_triger_mode(struct rt_pic_irq *pirq, rt_uint32_t mode)
  443. {
  444. void *base;
  445. rt_err_t ret = RT_EOK;
  446. int hwirq = pirq->hwirq;
  447. rt_uint32_t index, offset;
  448. if (hwirq > 15)
  449. {
  450. if (gicv3_hwirq_in_redist(hwirq))
  451. {
  452. base = gicv3_percpu_redist_sgi_base();
  453. }
  454. else
  455. {
  456. base = _gic.dist_base;
  457. }
  458. offset = gicv3_hwirq_convert_offset_index(hwirq, GICD_ICFGR, &index);
  459. ret = gic_common_configure_irq(base + offset, hwirq, mode, RT_NULL, RT_NULL);
  460. }
  461. else
  462. {
  463. ret = -RT_ENOSYS;
  464. }
  465. return ret;
  466. }
/*
 * Send an SGI (IPI) to every CPU in 'cpumask' via ICC_SGI1R.
 * One SGI1R write addresses a single cluster (affinity levels 3..1) with a
 * 16-bit target list of Aff0 values, so the mask is walked cluster by
 * cluster: the outer loop picks a starting CPU, the inner loop collects all
 * following CPUs that share its cluster into one target_list.
 */
static void gicv3_irq_send_ipi(struct rt_pic_irq *pirq, rt_bitmap_t *cpumask)
{
#define __mpidr_to_sgi_affinity(cluster_id, level) \
    (MPIDR_AFFINITY_LEVEL(cluster_id, level) << ICC_SGI1R_AFFINITY_##level##_SHIFT)
    int cpu_id, last_cpu_id, limit;
    rt_uint64_t initid, range_sel, target_list, cluster_id;

    range_sel = 0;
    initid = ((pirq->hwirq) << ICC_SGI1R_SGI_ID_SHIFT);

    rt_bitmap_for_each_set_bit(cpumask, cpu_id, RT_CPUS_NR)
    {
        rt_uint64_t mpidr = rt_cpu_mpidr_table[cpu_id];

        /* Cluster = all affinity bits above Aff0 */
        cluster_id = mpidr & (~MPIDR_LEVEL_MASK);
        target_list = 1 << ((mpidr & MPIDR_LEVEL_MASK) % ICC_SGI1R_TARGET_LIST_MAX);
        limit = rt_min(cpu_id + ICC_SGI1R_TARGET_LIST_MAX, RT_CPUS_NR);

        last_cpu_id = cpu_id;
        /* Fold same-cluster CPUs into this write's target list */
        rt_bitmap_for_each_set_bit_from(cpumask, cpu_id, cpu_id, limit)
        {
            rt_uint64_t mpidr = rt_cpu_mpidr_table[cpu_id];

            if (cluster_id != (mpidr & (~MPIDR_LEVEL_MASK)))
            {
                range_sel = 0;
                /* Don't break next cpuid */
                cpu_id = last_cpu_id;
                break;
            }

            last_cpu_id = cpu_id;
            target_list |= 1 << ((mpidr & MPIDR_LEVEL_MASK) % ICC_SGI1R_TARGET_LIST_MAX);
        }

        /* Make prior memory writes visible before raising the IPI */
        rt_hw_dsb();
        write_gicreg(ICC_SGI1R_SYS,
                __mpidr_to_sgi_affinity(cluster_id, 3) |
                (range_sel << ICC_SGI1R_RS_SHIFT) |
                __mpidr_to_sgi_affinity(cluster_id, 2) |
                initid |
                __mpidr_to_sgi_affinity(cluster_id, 1) |
                target_list);
        rt_hw_isb();

        ++range_sel;
    }
#undef __mpidr_to_sgi_affinity
}
/*
 * Force an interrupt's pending/active/masked state through the set/clear
 * bitmap registers. LPIs (hwirq >= 8192) have no such bitmaps and are
 * rejected with -RT_EINVAL.
 */
static rt_err_t gicv3_irq_set_state(struct rt_pic *pic, int hwirq, int type, rt_bool_t state)
{
    rt_err_t err = RT_EOK;
    rt_uint32_t offset = 0;

    /* Funnel LPIs into the default (-RT_EINVAL) branch */
    if (hwirq >= 8192)
    {
        type = -1;
    }

    switch (type)
    {
    case RT_IRQ_STATE_PENDING:
        offset = state ? GICD_ISPENDR : GICD_ICPENDR;
        break;
    case RT_IRQ_STATE_ACTIVE:
        offset = state ? GICD_ISACTIVER : GICD_ICACTIVER;
        break;
    case RT_IRQ_STATE_MASKED:
        if (state)
        {
            /* Masking also requires the RWP wait: reuse gicv3_irq_mask()
             * through a stack-local pirq (offset stays 0, no poke below) */
            struct rt_pic_irq pirq = {};

            pirq.hwirq = hwirq;
            gicv3_irq_mask(&pirq);
        }
        else
        {
            offset = GICD_ISENABLER;
        }
        break;
    default:
        err = -RT_EINVAL;
        break;
    }

    if (!err && offset)
    {
        gicv3_hwirq_poke(hwirq, offset);
    }

    return err;
}
  546. static rt_err_t gicv3_irq_get_state(struct rt_pic *pic, int hwirq, int type, rt_bool_t *out_state)
  547. {
  548. rt_err_t err = RT_EOK;
  549. rt_uint32_t offset = 0;
  550. switch (type)
  551. {
  552. case RT_IRQ_STATE_PENDING:
  553. offset = GICD_ISPENDR;
  554. break;
  555. case RT_IRQ_STATE_ACTIVE:
  556. offset = GICD_ISACTIVER;
  557. break;
  558. case RT_IRQ_STATE_MASKED:
  559. offset = GICD_ISENABLER;
  560. break;
  561. default:
  562. err = -RT_EINVAL;
  563. break;
  564. }
  565. if (!err)
  566. {
  567. *out_state = gicv3_hwirq_peek(hwirq, offset);
  568. }
  569. return err;
  570. }
/*
 * Map a hardware INTID into the PIC's linear IRQ space and configure it.
 * Layout of the linear space: [SPI/eSPI...][LPIs at the tail]; SGIs are
 * handled separately and never mapped here (hwirq < GIC_SGI_NR fails).
 * Returns the allocated IRQ number, or -1 on failure.
 */
static int gicv3_irq_map(struct rt_pic *pic, int hwirq, rt_uint32_t mode)
{
    struct rt_pic_irq *pirq;
    int irq, hwirq_type, irq_index;

    hwirq_type = gicv3_hwirq_type(hwirq);
    if (hwirq_type != LPI_TYPE)
    {
        irq_index = hwirq - GIC_SGI_NR;
    }
    else
    {
        /* LPIs occupy the tail of the linear space */
        irq_index = _gic.irq_nr - _gic.lpi_nr + hwirq - 8192;
    }

    pirq = rt_pic_find_irq(pic, irq_index);

    if (pirq && hwirq >= GIC_SGI_NR)
    {
        pirq->mode = mode;
        pirq->priority = GICD_INT_DEF_PRI;

        switch (gicv3_hwirq_type(hwirq))
        {
        case PPI_TYPE:
            /* Per-CPU interrupt: affinity covers all CPUs */
            gic_fill_ppi_affinity(pirq->affinity);
            break;
        case SPI_TYPE:
        case ESPI_TYPE:
            RT_IRQ_AFFINITY_SET(pirq->affinity, _init_cpu_id);
            /* fallthrough */
        default:
            break;
        }

        irq = rt_pic_config_irq(pic, irq_index, hwirq);

        if (irq >= 0 && mode != RT_IRQ_MODE_LEVEL_HIGH)
        {
            gicv3_irq_set_triger_mode(pirq, mode);
        }
    }
    else
    {
        irq = -1;
    }

    return irq;
}
/*
 * Decode a 3-cell devicetree interrupt specifier (<type number flags>, per
 * the arm,gic-v3 binding) into a hardware INTID and trigger mode.
 */
static rt_err_t gicv3_irq_parse(struct rt_pic *pic, struct rt_ofw_cell_args *args, struct rt_pic_irq *out_pirq)
{
    rt_err_t err = RT_EOK;

    if (args->args_count == 3)
    {
        out_pirq->mode = args->args[2] & RT_IRQ_MODE_MASK;

        switch (args->args[0])
        {
        case 0:
            /* SPI */
            out_pirq->hwirq = args->args[1] + 32;
            break;
        case 1:
            /* PPI */
            out_pirq->hwirq = args->args[1] + 16;
            break;
        case 2:
            /* ESPI */
            out_pirq->hwirq = args->args[1] + GIC_ESPI_BASE_INTID;
            break;
        case 3:
            /* EPPI */
            out_pirq->hwirq = args->args[1] + GIC_EPPI_BASE_INTID;
            break;
        case GIC_IRQ_TYPE_LPI:
            /* LPI */
            out_pirq->hwirq = args->args[1];
            break;
        case GIC_IRQ_TYPE_PARTITION:
            /* Partitioned PPI: 0-15 map to PPIs, 16+ to EPPIs */
            out_pirq->hwirq = args->args[1];

            if (args->args[1] >= 16)
            {
                out_pirq->hwirq += GIC_EPPI_BASE_INTID - 16;
            }
            else
            {
                out_pirq->hwirq += 16;
            }
            break;
        default:
            err = -RT_ENOSYS;
            break;
        }
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}
/* Operations exported to the RT-Thread PIC framework */
const static struct rt_pic_ops gicv3_ops =
{
    .name = "GICv3",
    .irq_init = gicv3_irq_init,
    .irq_ack = gicv3_irq_ack,
    .irq_mask = gicv3_irq_mask,
    .irq_unmask = gicv3_irq_unmask,
    .irq_eoi = gicv3_irq_eoi,
    .irq_set_priority = gicv3_irq_set_priority,
    .irq_set_affinity = gicv3_irq_set_affinity,
    .irq_set_triger_mode = gicv3_irq_set_triger_mode,
    .irq_send_ipi = gicv3_irq_send_ipi,
    .irq_set_state = gicv3_irq_set_state,
    .irq_get_state = gicv3_irq_get_state,
    .irq_map = gicv3_irq_map,
    .irq_parse = gicv3_irq_parse,
};
/*
 * Top-level interrupt trap: acknowledge via ICC_IAR1, locate the matching
 * PIC irq descriptor (IPI, SPI/eSPI or LPI) and dispatch it.
 * Returns RT_TRUE if an interrupt was claimed and handled.
 */
static rt_bool_t gicv3_handler(void *data)
{
    rt_bool_t res = RT_FALSE;
    int hwirq;
    struct gicv3 *gic = data;

    read_gicreg(ICC_IAR1_SYS, hwirq);

    /* 1020-1023 are special INTIDs (spurious etc.) — nothing to handle */
    if (!(hwirq >= 1020 && hwirq <= 1023))
    {
        struct rt_pic_irq *pirq;

        if (hwirq < GIC_SGI_NR)
        {
            /* IPI: make the sender's prior writes visible before handling */
            rt_hw_rmb();

            pirq = rt_pic_find_ipi(&gic->parent, hwirq);
        }
        else
        {
            int irq_index;

            if (hwirq < 8192)
            {
                irq_index = hwirq - GIC_SGI_NR;
            }
            else
            {
                /* LPIs sit at the tail of the linear IRQ space */
                irq_index = gic->irq_nr - gic->lpi_nr + hwirq - 8192;
            }

            pirq = rt_pic_find_irq(&gic->parent, irq_index);
        }

        gicv3_irq_ack(pirq);
        rt_pic_handle_isr(pirq);
        gicv3_irq_eoi(pirq);

        res = RT_TRUE;
    }

    return res;
}
  713. static rt_err_t gicv3_enable_quirk_msm8996(void *data)
  714. {
  715. struct gicv3 *gic = data;
  716. gic->flags |= FLAGS_WORKAROUND_GICR_WAKER_MSM8996;
  717. return RT_EOK;
  718. }
  719. static rt_err_t gicv3_enable_quirk_arm64_2941627(void *data)
  720. {
  721. _gicv3_arm64_2941627_erratum = RT_TRUE;
  722. return RT_EOK;
  723. }
/* Hardware/firmware quirk table, matched by OFW compatible or GICD_IIDR */
static const struct gic_quirk _gicv3_quirks[] =
{
    {
        .desc = "GICv3: Qualcomm MSM8996 broken firmware",
        .compatible = "qcom,msm8996-gic-v3",
        .init = gicv3_enable_quirk_msm8996,
    },
    {
        /* GIC-700: 2941627 workaround - IP variant [0,1] */
        .desc = "GICv3: ARM64 erratum 2941627",
        .iidr = 0x0400043b,
        .iidr_mask = 0xff0e0fff,
        .init = gicv3_enable_quirk_arm64_2941627,
    },
    {
        /* GIC-700: 2941627 workaround - IP variant [2] */
        .desc = "GICv3: ARM64 erratum 2941627",
        .iidr = 0x0402043b,
        .iidr_mask = 0xff0f0fff,
        .init = gicv3_enable_quirk_arm64_2941627,
    },
    { /* sentinel */ }
};
  747. static rt_err_t gicv3_iomap_init(rt_uint64_t *regs)
  748. {
  749. rt_err_t ret = RT_EOK;
  750. int idx;
  751. char *name;
  752. do {
  753. /* GICD->GICR */
  754. _gic.dist_size = regs[1];
  755. _gic.dist_base = rt_ioremap((void *)regs[0], _gic.dist_size);
  756. if (!_gic.dist_base)
  757. {
  758. name = "Distributor";
  759. idx = 0;
  760. ret = -RT_ERROR;
  761. break;
  762. }
  763. name = "Redistributor";
  764. _gic.redist_regions = rt_malloc(sizeof(_gic.redist_regions[0]) * _gic.redist_regions_nr);
  765. if (!_gic.redist_regions)
  766. {
  767. idx = -1;
  768. ret = -RT_ENOMEM;
  769. LOG_E("No memory to save %s", name);
  770. break;
  771. }
  772. for (int i = 0, off = 2; i < _gic.redist_regions_nr; ++i)
  773. {
  774. void *base = (void *)regs[off++];
  775. rt_size_t size = regs[off++];
  776. _gic.redist_regions[i].size = size;
  777. _gic.redist_regions[i].base = rt_ioremap(base, size);
  778. _gic.redist_regions[i].base_phy = base;
  779. if (!base)
  780. {
  781. idx = 1;
  782. ret = -RT_ERROR;
  783. break;
  784. }
  785. }
  786. if (ret)
  787. {
  788. break;
  789. }
  790. /* ArchRev[4:7] */
  791. _gic.version = HWREG32(_gic.dist_base + GICD_PIDR2) >> 4;
  792. } while (0);
  793. if (ret && idx >= 0)
  794. {
  795. RT_UNUSED(name);
  796. LOG_E("%s IO[%p, %p] map fail", name[idx], regs[idx * 2], regs[idx * 2 + 1]);
  797. }
  798. return ret;
  799. }
/*
 * Common controller init (shared by probe paths): build the classic->eSPI
 * register offset map, read GICD_TYPER, apply IIDR-matched quirks,
 * initialize the Distributor and register the PIC with the framework.
 */
static void gicv3_init(void)
{
/* Expands to one statement per eSPI-capable register: with "=" it stores the
 * GICD_xxxnE offset in the slot hashed from GICD_xxx; with "==" + RT_ASSERT
 * it re-reads every slot to verify the hash produced no collisions. */
#define __dist_espi_regs_do(func, expr, ...)                        \
    __VA_ARGS__(*func(GICD_IGROUPR) expr GICD_IGROUPRnE);           \
    __VA_ARGS__(*func(GICD_ISENABLER) expr GICD_ISENABLERnE);       \
    __VA_ARGS__(*func(GICD_ICENABLER) expr GICD_ICENABLERnE);       \
    __VA_ARGS__(*func(GICD_ISPENDR) expr GICD_ISPENDRnE);           \
    __VA_ARGS__(*func(GICD_ICPENDR) expr GICD_ICPENDRnE);           \
    __VA_ARGS__(*func(GICD_ISACTIVER) expr GICD_ISACTIVERnE);       \
    __VA_ARGS__(*func(GICD_ICACTIVER) expr GICD_ICACTIVERnE);       \
    __VA_ARGS__(*func(GICD_IPRIORITYR) expr GICD_IPRIORITYRnE);     \
    __VA_ARGS__(*func(GICD_ICFGR) expr GICD_ICFGRnE);               \
    __VA_ARGS__(*func(GICD_IROUTER) expr GICD_IROUTERnE);

    /* Map registers for ESPI */
    __dist_espi_regs_do(gicv3_dist_espi_reg, =);
    __dist_espi_regs_do(gicv3_dist_espi_reg, ==, RT_ASSERT);
#undef __dist_espi_regs_do

    _gic.gicd_typer = HWREG32(_gic.dist_base + GICD_TYPER);

    gic_common_init_quirk_hw(HWREG32(_gic.dist_base + GICD_IIDR), _gicv3_quirks, &_gic.parent);
    gicv3_dist_init();

    _gic.parent.priv_data = &_gic;
    _gic.parent.ops = &gicv3_ops;

    /* Linear space excludes the 16 SGIs (registered as IPIs below) */
    rt_pic_linear_irq(&_gic.parent, _gic.irq_nr - GIC_SGI_NR);
    gic_common_sgi_config(_gic.dist_base, &_gic.parent, 0);

    rt_pic_add_traps(gicv3_handler, &_gic);

    rt_pic_user_extends(&_gic.parent);
}
  827. static void gicv3_init_fail(void)
  828. {
  829. if (_gic.dist_base)
  830. {
  831. rt_iounmap(_gic.dist_base);
  832. }
  833. if (_gic.redist_regions)
  834. {
  835. for (int i = 0; i < _gic.redist_regions_nr; ++i)
  836. {
  837. if (_gic.redist_regions[i].base)
  838. {
  839. rt_iounmap(_gic.redist_regions[i].base);
  840. }
  841. }
  842. rt_free(_gic.redist_regions);
  843. }
  844. rt_memset(&_gic, 0, sizeof(_gic));
  845. }
/*
 * OFW probe: read the devicetree properties (#redistributor-regions,
 * redistributor-stride, reg), map the hardware, run the common init and
 * probe the optional MSI frontends (ITS, else V2M).
 * On any failure all partial state is torn down via gicv3_init_fail().
 */
static rt_err_t gicv3_ofw_init(struct rt_ofw_node *np, const struct rt_ofw_node_id *id)
{
    rt_err_t err = RT_EOK;

    do {
        rt_size_t reg_nr_max;
        rt_err_t msi_init = -RT_ENOSYS;
        rt_uint32_t redist_regions_nr;
        rt_uint64_t *regs, redist_stride;

        /* Default: a single Redistributor region */
        if (rt_ofw_prop_read_u32(np, "#redistributor-regions", &redist_regions_nr))
        {
            redist_regions_nr = 1;
        }

        /* GICD + n * GICR */
        reg_nr_max = 2 + (2 * redist_regions_nr);
        regs = rt_calloc(1, sizeof(rt_uint64_t) * reg_nr_max);

        if (!regs)
        {
            err = -RT_ENOMEM;
            break;
        }

        rt_ofw_get_address_array(np, reg_nr_max, regs);
        _gic.redist_regions_nr = redist_regions_nr;

        err = gicv3_iomap_init(regs);
        rt_free(regs);

        if (err)
        {
            break;
        }

        /* Version read from GICD_PIDR2 by gicv3_iomap_init() */
        if (_gic.version != 3 && _gic.version != 4)
        {
            LOG_E("Version = %d is not support", _gic.version);
            err = -RT_EINVAL;
            break;
        }

        /* Stride 0 means "use the architectural frame sizes" */
        if (rt_ofw_prop_read_u64(np, "redistributor-stride", &redist_stride))
        {
            redist_stride = 0;
        }
        _gic.redist_stride = redist_stride;

        gic_common_init_quirk_ofw(np, _gicv3_quirks, &_gic.parent);
        gicv3_init();

        rt_ofw_data(np) = &_gic.parent;

#ifdef RT_PIC_ARM_GIC_V3_ITS
        msi_init = gicv3_its_ofw_probe(np, id);
#endif

        /* V2M or ITS only */
        if (msi_init)
        {
#ifdef RT_PIC_ARM_GIC_V2M
            gicv2m_ofw_probe(np, id);
#endif
        }
    } while (0);

    if (err)
    {
        gicv3_init_fail();
    }

    return err;
}
/* Devicetree match table: standard "arm,gic-v3" compatible */
static const struct rt_ofw_node_id gicv3_ofw_ids[] =
{
    { .compatible = "arm,gic-v3" },
    { /* sentinel */ }
};
RT_PIC_OFW_DECLARE(gicv3, gicv3_ofw_ids, gicv3_ofw_init);