
pic-gicv3-its.c

/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2023-01-30     GuEe-GUI     first version
 */
#include <rthw.h>
#include <rtthread.h>
#include <rtdevice.h>

#define DBG_TAG "pic.gicv3-its"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <mmu.h>
#include <mm_page.h>
#include <cpuport.h>
#include <dt-bindings/size.h>

#include "pic-gicv3.h"
#include "pic-gic-common.h"

#define ITS_CMD_QUEUE_SIZE                  (64 * SIZE_KB)
#define ITS_CMD_QUEUE_ALIGN                 (64 * SIZE_KB)
#define ITS_CMD_QUEUE_NR                    (ITS_CMD_QUEUE_SIZE / sizeof(struct its_command))

#define ITS_ITT_ALIGN                       (256 * SIZE_KB)

#define ITS_LPI_CONFIG_TABLE_ALIGN          (64 * SIZE_KB)
#define ITS_LPI_CONFIG_PROP_DEFAULT_PRIO    GICD_INT_DEF_PRI
#define ITS_LPI_CONFIG_PROP_SHIFT           2
#define ITS_LPI_CONFIG_PROP_MASK            RT_GENMASK(7, ITS_LPI_CONFIG_PROP_SHIFT)
#define ITS_LPI_PENDING_TABLE_ALIGN         (64 * SIZE_KB)

#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING RT_BIT(0)
#define RDIST_FLAGS_RD_TABLES_PREALLOCATED  RT_BIT(1)
#define RDIST_FLAGS_FORCE_NON_SHAREABLE     RT_BIT(2)

#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING       RT_BIT(0)
#define ITS_FLAGS_WORKAROUND_CAVIUM_22375   RT_BIT(1)
#define ITS_FLAGS_FORCE_NON_SHAREABLE       RT_BIT(2)

#define RD_LOCAL_LPI_ENABLED                RT_BIT(0)
#define RD_LOCAL_PENDTABLE_PREALLOCATED     RT_BIT(1)
#define RD_LOCAL_MEMRESERVE_DONE            RT_BIT(2)
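
/*
 * An ITS command is four 64-bit doublewords (32 bytes). The ITS reads
 * the queue in little-endian layout ('code_raw'); the encode helpers
 * below build the command in the native-endian 'code' view.
 */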
struct its_command
{
    union
    {
        rt_le64_t code_raw[4];
        rt_uint64_t code[4];
    };
};

struct its_table
{
    void *base;
    rt_uint64_t val;
    rt_uint32_t size_bits;
    rt_uint32_t page_size;
    union
    {
        struct
        {
            rt_uint32_t itt_entries;
            rt_uint32_t lvl2_bits;
        };
    };
};

struct its_collection
{
    rt_uint64_t target_address;
    rt_uint16_t id;
};

struct gicv3_its;

struct its_map
{
    rt_list_t list;
    struct rt_ref ref;
    struct gicv3_its *its;

    int device_id;
    int lpi_base;
    int cpu_id;

    void *itt;
    void *lvl2_dte;
};

struct gicv3_its
{
    struct rt_pic parent;
    rt_list_t list;

    void *base;
    void *base_phy;

    void *cmd_base;
    rt_ubase_t cmd_idx;
    rt_uint32_t flags;
    struct rt_spinlock cmd_lock;

    struct its_table tbls[GITS_BASER_NR_REGS];
    struct its_collection collections[RT_CPUS_NR];

    struct gicv3 *gic;
    struct rt_ofw_node *np;
};

#define raw_to_gicv3_its(raw) rt_container_of(raw, struct gicv3_its, parent)

static rt_size_t lpi_nr;
static rt_uint32_t lpi_id_bits;
static void *lpi_table;
static void *lpi_pending_table;
static rt_bitmap_t *lpis_vectors = RT_NULL;
static RT_DEFINE_SPINLOCK(lpis_lock);
static RT_DEFINE_SPINLOCK(map_lock);
static rt_list_t its_nodes = RT_LIST_OBJECT_INIT(its_nodes);
static rt_list_t map_nodes = RT_LIST_OBJECT_INIT(map_nodes);
rt_inline rt_uint64_t its_readq(struct gicv3_its *its, int off)
{
    return HWREG32(its->base + off) |
            (rt_uint64_t)HWREG32(its->base + off + 4) << 32;
}

rt_inline void its_writeq(struct gicv3_its *its, int off, rt_uint64_t value)
{
    HWREG32(its->base + off) = (rt_uint32_t)value;
    HWREG32(its->base + off + 4) = (rt_uint32_t)(value >> 32);
}

rt_inline rt_uint32_t its_readl(struct gicv3_its *its, int off)
{
    return HWREG32(its->base + off);
}

rt_inline void its_writel(struct gicv3_its *its, int off, rt_uint32_t value)
{
    HWREG32(its->base + off) = value;
}
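
/*
 * LPI INTIDs start at 8192 (the architectural LPI base in GICv3), so
 * the per-device ITS event ID is simply the hwirq offset from that base.
 */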
rt_inline rt_uint32_t its_pirq_event_id(struct gicv3_its *its, struct rt_pic_irq *pirq)
{
    return pirq->hwirq - 8192;
}

rt_inline rt_uint32_t its_pirq_device_id(struct gicv3_its *its, struct rt_pic_irq *pirq)
{
    struct its_map *map = pirq->msi_desc->priv;

    return map->device_id;
}

rt_inline rt_size_t its_device_id_bits(struct gicv3_its *its)
{
    return RT_FIELD_GET(GITS_TYPER_DEVBITS, HWREG64(its->base + GITS_TYPER)) + 1;
}

rt_inline void *lpi_base_config(int index)
{
    return &((rt_uint8_t *)lpi_table)[index - 8192];
}
static void its_mask_encode(rt_uint64_t *raw_code, rt_uint64_t val, int h, int l)
{
    rt_uint64_t mask = RT_GENMASK_ULL(h, l);

    *raw_code &= ~mask;
    *raw_code |= (val << l) & mask;
}

rt_inline void its_encode_cmd(struct its_command *cmd, rt_uint8_t cmd_nr)
{
    its_mask_encode(&cmd->code[0], cmd_nr, 7, 0);
}

rt_inline void its_encode_valid(struct its_command *cmd, rt_bool_t valid)
{
    its_mask_encode(&cmd->code[2], !!valid, 63, 63);
}

rt_inline void its_encode_phys_id(struct its_command *cmd, rt_uint32_t phys_id)
{
    its_mask_encode(&cmd->code[1], phys_id, 63, 32);
}

rt_inline void its_encode_size(struct its_command *cmd, rt_uint8_t size)
{
    its_mask_encode(&cmd->code[1], size, 4, 0);
}

rt_inline void its_encode_itt(struct its_command *cmd, rt_uint64_t itt_addr)
{
    its_mask_encode(&cmd->code[2], itt_addr >> 8, 51, 8);
}

rt_inline void its_encode_target(struct its_command *cmd, rt_uint64_t target_addr)
{
    its_mask_encode(&cmd->code[2], target_addr >> 16, 51, 16);
}

rt_inline void its_encode_device_id(struct its_command *cmd, rt_uint32_t device_id)
{
    its_mask_encode(&cmd->code[0], device_id, 63, 32);
}

rt_inline void its_encode_event_id(struct its_command *cmd, rt_uint32_t event_id)
{
    its_mask_encode(&cmd->code[1], event_id, 31, 0);
}

rt_inline void its_encode_collection(struct its_command *cmd, rt_uint16_t collection_id)
{
    its_mask_encode(&cmd->code[2], collection_id, 15, 0);
}
static struct its_table *its_baser_type(struct gicv3_its *its, int type)
{
    for (int i = 0; i < RT_ARRAY_SIZE(its->tbls); ++i)
    {
        if (GITS_BASER_TYPE(its->tbls[i].val) == type)
        {
            return &its->tbls[i];
        }
    }

    return RT_NULL;
}
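
/*
 * The command queue is a ring buffer: the driver advances GITS_CWRITER
 * and the ITS advances GITS_CREADR as it consumes commands. A slot is
 * free as long as the next write index would not catch up with the
 * read index.
 */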
static struct its_command *its_cmd_alloc(struct gicv3_its *its)
{
    struct its_command *cmd = RT_NULL;

    for (rt_uint32_t count = 0; count <= 10000; ++count)
    {
        if ((its->cmd_idx + 1) % ITS_CMD_QUEUE_NR != its_readl(its, GITS_CREADR) / sizeof(*cmd))
        {
            struct its_command *cmds = its->cmd_base;

            cmd = &cmds[its->cmd_idx++];
            its->cmd_idx %= ITS_CMD_QUEUE_NR;

            rt_memset(cmd, 0, sizeof(*cmd));
            break;
        }

        rt_hw_us_delay(10);
    }

    return cmd;
}
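
/*
 * Publish a command: make it observable by the ITS (cache flush or
 * barrier), bump GITS_CWRITER past it, then poll GITS_CREADR until the
 * ITS has consumed it. Bit 0 of GITS_CREADR reports a stalled queue,
 * which we retry once before giving up.
 */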
static rt_err_t its_cmd_submit_raw(struct gicv3_its *its, struct its_command *cmd)
{
    rt_uint64_t cwriter;
    rt_bool_t retry = RT_FALSE;

    cwriter = (void *)(cmd + 1) - its->cmd_base;
    rt_hw_rmb();

#ifdef ARCH_CPU_BIG_ENDIAN
    cmd->code_raw[0] = rt_cpu_to_le64(cmd->code[0]);
    cmd->code_raw[1] = rt_cpu_to_le64(cmd->code[1]);
    cmd->code_raw[2] = rt_cpu_to_le64(cmd->code[2]);
    cmd->code_raw[3] = rt_cpu_to_le64(cmd->code[3]);
#endif /* ARCH_CPU_BIG_ENDIAN */

    /* Make sure the commands written to memory are observable by the ITS */
    if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
    {
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cmd, sizeof(*cmd));
    }
    else
    {
        rt_hw_wmb();
    }

    its_writel(its, GITS_CWRITER, cwriter);

    for (rt_uint32_t count = 0; count < 10000; ++count)
    {
        if (its_readl(its, GITS_CREADR) == cwriter)
        {
            return RT_EOK;
        }

        /* Stalled */
        if (!retry && its_readl(its, GITS_CREADR) & 1)
        {
            /* Retry */
            its_writel(its, GITS_CWRITER, cwriter);
            retry = RT_TRUE;
        }
        else if (retry)
        {
            LOG_E("Retry command 0x%02x failed", cmd->code[0] & 0xff);
            return -RT_EIO;
        }

        rt_hw_us_delay(10);
    }

    return -RT_ETIMEOUT;
}
static rt_err_t its_cmd_submit_nomap(struct gicv3_its *its, struct its_command *cmd,
        int cpu_id, rt_bool_t sync)
{
    rt_err_t err;
    struct its_command *hw_cmd;

    rt_hw_spin_lock(&its->cmd_lock.lock);

    if (!(hw_cmd = its_cmd_alloc(its)))
    {
        err = -RT_EBUSY;
        goto _out_lock;
    }

    rt_memcpy(hw_cmd, cmd, sizeof(*hw_cmd));

    if ((err = its_cmd_submit_raw(its, hw_cmd)))
    {
        goto _out_lock;
    }

    if (sync)
    {
        if (!(hw_cmd = its_cmd_alloc(its)))
        {
            err = -RT_EBUSY;
            goto _out_lock;
        }

        its_encode_cmd(hw_cmd, GITS_CMD_SYNC);
        its_encode_target(hw_cmd, its->collections[cpu_id].target_address);

        err = its_cmd_submit_raw(its, hw_cmd);
    }

_out_lock:
    rt_hw_spin_unlock(&its->cmd_lock.lock);

    return err;
}

static rt_err_t its_cmd_submit(struct gicv3_its *its, struct its_command *cmd,
        struct its_map *map, rt_bool_t sync)
{
    return its_cmd_submit_nomap(its, cmd, map->cpu_id, sync);
}
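
/*
 * After touching a byte in the LPI configuration table, make the change
 * visible to the redistributor (flush or barrier) and send an INV so
 * the ITS reloads any cached configuration for this event.
 */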
static rt_err_t lpi_flush_config(struct gicv3_its *its, rt_uint8_t *conf,
        struct rt_pic_irq *pirq)
{
    struct its_command cmd;
    struct its_map *map = pirq->msi_desc->priv;

    if (its->gic->redist_flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
    {
        /* Clean D-cache under command */
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, conf, sizeof(*conf));
    }
    else
    {
        /* DSB inner shareable, store */
        rt_hw_wmb();
    }

    rt_memset(&cmd, 0, sizeof(cmd));
    its_encode_cmd(&cmd, GITS_CMD_INV);
    its_encode_device_id(&cmd, its_pirq_device_id(its, pirq));
    its_encode_event_id(&cmd, its_pirq_event_id(its, pirq));

    return its_cmd_submit(its, &cmd, map, RT_FALSE);
}
rt_inline void *gicr_rd_base_percpu(struct gicv3 *gic)
{
    return gic->redist_regions[rt_hw_cpu_id()].base;
}

rt_inline void *gicr_rd_base(struct gicv3_its *its)
{
    return its->gic->redist_percpu_base[rt_hw_cpu_id()];
}

rt_inline rt_uint64_t *gicr_rd_flags(struct gicv3_its *its)
{
    return &its->gic->redist_percpu_flags[rt_hw_cpu_id()];
}

static rt_bool_t gicr_supports_plpis(struct gicv3_its *its)
{
    return !!(HWREG64(gicr_rd_base(its) + GICR_TYPER) & GICR_TYPER_PLPIS);
}
static rt_err_t redist_disable_lpis(struct gicv3_its *its)
{
    void *gicr = gicr_rd_base(its);
    rt_uint64_t timeout = 1000000L, val;

    if (!gicr_supports_plpis(its))
    {
        LOG_E("CPU#%d: LPIs not supported", rt_hw_cpu_id());
        return -RT_ENOSYS;
    }

    val = HWREG32(gicr + GICR_CTLR);
    if (!(val & GICR_CTLR_ENABLE_LPIS))
    {
        return RT_EOK;
    }

    /*
     * If coming via a CPU hotplug event, we don't need to disable
     * LPIs before trying to re-enable them. They are already
     * configured and all is well in the world.
     *
     * If running with preallocated tables, there is nothing to do.
     */
    if ((*gicr_rd_flags(its) & RD_LOCAL_LPI_ENABLED) ||
        (its->gic->redist_flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED))
    {
        return RT_EOK;
    }

    /* From that point on, we only try to do some damage control */
    LOG_W("CPU%d: Booted with LPIs enabled, memory probably corrupted", rt_hw_cpu_id());

    /* Disable LPIs */
    val &= ~GICR_CTLR_ENABLE_LPIS;
    HWREG32(gicr + GICR_CTLR) = val;

    /* Make sure any change to GICR_CTLR is observable by the GIC */
    rt_hw_barrier(dsb, sy);

    /*
     * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs
     * from 1 to 0 before programming GICR_PEND{PROP}BASER registers.
     * Error out if we time out waiting for RWP to clear.
     */
    while (HWREG32(gicr + GICR_CTLR) & GICR_CTLR_RWP)
    {
        if (!timeout)
        {
            LOG_E("CPU#%d: Timeout while disabling LPIs", rt_hw_cpu_id());
            return -RT_ETIMEOUT;
        }

        rt_hw_us_delay(1);
        --timeout;
    }

    /*
     * After it has been written to 1, it is IMPLEMENTATION
     * DEFINED whether GICR_CTLR.EnableLPI becomes RES1 or can be
     * cleared to 0. Error out if clearing the bit failed.
     */
    if (HWREG32(gicr + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS)
    {
        LOG_E("CPU#%d: Failed to disable LPIs", rt_hw_cpu_id());
        return -RT_EBUSY;
    }

    return RT_EOK;
}
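
/*
 * Program this CPU's redistributor for LPIs: point GICR_PROPBASER at
 * the shared LPI configuration table and GICR_PENDBASER at the pending
 * table, downgrading to non-cacheable attributes (plus explicit cache
 * flushing) when the hardware does not honour the requested
 * shareability.
 */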
static void gicv3_its_cpu_init_lpis(struct gicv3_its *its)
{
    void *gicr;
    rt_ubase_t paddr;
    rt_uint64_t val, tmp;

    if (*gicr_rd_flags(its) & RD_LOCAL_LPI_ENABLED)
    {
        return;
    }

    gicr = gicr_rd_base(its);

    val = HWREG32(gicr + GICR_CTLR);
    if ((its->gic->redist_flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) &&
        (val & GICR_CTLR_ENABLE_LPIS))
    {
        *gicr_rd_flags(its) |= RD_LOCAL_PENDTABLE_PREALLOCATED;
        goto _out;
    }

    paddr = (rt_ubase_t)rt_kmem_v2p(lpi_pending_table);

    /* Set PROPBASE */
    val = ((rt_ubase_t)rt_kmem_v2p(lpi_table) |
            GITS_CBASER_InnerShareable |
            GITS_CBASER_RaWaWb |
            ((lpi_id_bits - 1) & GICR_PROPBASER_IDBITS_MASK));

    HWREG64(gicr + GICR_PROPBASER) = val;
    tmp = HWREG64(gicr + GICR_PROPBASER);

    if (its->gic->redist_flags & RDIST_FLAGS_FORCE_NON_SHAREABLE)
    {
        tmp &= ~GICR_PBASER_SHARE_MASK_ALL;
    }

    if ((tmp ^ val) & GICR_PBASER_SHARE_MASK_ALL)
    {
        if (!(tmp & GICR_PBASER_SHARE_MASK_ALL))
        {
            /*
             * The HW reports non-shareable,
             * we must remove the cacheability attributes as well.
             */
            val &= ~(GICR_PBASER_SHARE_MASK_ALL | GICR_PBASER_INNER_MASK_ALL);
            val |= GICR_PBASER_nC;

            HWREG64(gicr + GICR_PROPBASER) = val;
        }

        if (!rt_hw_cpu_id())
        {
            LOG_I("Using cache flushing for LPI property table");
        }
        its->gic->redist_flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
    }

    val = (paddr | GICR_PBASER_InnerShareable | GICR_PBASER_RaWaWb);

    HWREG64(gicr + GICR_PENDBASER) = val;
    tmp = HWREG64(gicr + GICR_PENDBASER);

    if (its->gic->redist_flags & RDIST_FLAGS_FORCE_NON_SHAREABLE)
    {
        tmp &= ~GICR_PBASER_SHARE_MASK_ALL;
    }

    if (!(tmp & GICR_PBASER_SHARE_MASK_ALL))
    {
        /*
         * The HW reports non-shareable, we must remove the
         * cacheability attributes as well.
         */
        val &= ~(GICR_PBASER_SHARE_MASK_ALL | GICR_PBASER_INNER_MASK_ALL);
        val |= GICR_PBASER_nC;

        HWREG64(gicr + GICR_PENDBASER) = val;
    }

    /* Enable LPIs */
    val = HWREG32(gicr + GICR_CTLR);
    val |= GICR_CTLR_ENABLE_LPIS;
    HWREG32(gicr + GICR_CTLR) = val;

    rt_hw_barrier(dsb, sy);

_out:
    *gicr_rd_flags(its) |= RD_LOCAL_LPI_ENABLED;
}
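
/*
 * Map this CPU into an ITS collection. The target is either the
 * physical redistributor address (when GITS_TYPER.PTA is set) or the
 * GICR processor number; MAPC then binds the collection and INVALL
 * drops any stale cached configuration for it.
 */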
static void gicv3_its_cpu_init_collection(struct gicv3_its *its)
{
    rt_uint64_t target;
    int cpu_id = rt_hw_cpu_id();
    struct its_command cmd;
    struct its_collection *collection;

    if (HWREG64(its->base + GITS_TYPER) & GITS_TYPER_PTA)
    {
        target = (rt_uint64_t)rt_kmem_v2p(gicr_rd_base(its));
    }
    else
    {
        /* Linear by GICR processor number */
        target = HWREG64(gicr_rd_base(its) + GICR_TYPER);
        target = GICR_TYPER_CPU_NO(target) << 16;
    }

    collection = &its->collections[cpu_id];
    collection->target_address = target;
    collection->id = cpu_id;

    rt_memset(&cmd, 0, sizeof(cmd));
    its_encode_cmd(&cmd, GITS_CMD_MAPC);
    its_encode_collection(&cmd, collection->id);
    its_encode_target(&cmd, target);
    its_encode_valid(&cmd, RT_TRUE);
    its_cmd_submit_nomap(its, &cmd, cpu_id, RT_TRUE);

    rt_memset(&cmd, 0, sizeof(cmd));
    its_encode_cmd(&cmd, GITS_CMD_INVALL);
    its_encode_collection(&cmd, collection->id);
    its_cmd_submit_nomap(its, &cmd, cpu_id, RT_TRUE);
}
static rt_err_t gicv3_its_irq_init(struct rt_pic *pic)
{
    rt_err_t err;
    struct gicv3_its *its = raw_to_gicv3_its(pic);

    if ((err = redist_disable_lpis(its)))
    {
        return err;
    }

    gicv3_its_cpu_init_lpis(its);
    gicv3_its_cpu_init_collection(its);

    return RT_EOK;
}
static void gicv3_its_irq_mask(struct rt_pic_irq *pirq)
{
    rt_uint8_t *conf = lpi_base_config(pirq->hwirq);
    struct gicv3_its *its = raw_to_gicv3_its(pirq->pic);

    *conf &= ~GITS_LPI_CFG_ENABLED;
    lpi_flush_config(its, conf, pirq);

    rt_pci_msi_mask_irq(pirq);
}

static void gicv3_its_irq_unmask(struct rt_pic_irq *pirq)
{
    rt_uint8_t *conf = lpi_base_config(pirq->hwirq);
    struct gicv3_its *its = raw_to_gicv3_its(pirq->pic);

    *conf |= GITS_LPI_CFG_ENABLED;
    lpi_flush_config(its, conf, pirq);

    rt_pci_msi_unmask_irq(pirq);
}

static rt_err_t gicv3_its_irq_set_priority(struct rt_pic_irq *pirq, rt_uint32_t priority)
{
    rt_uint8_t *conf = lpi_base_config(pirq->hwirq);
    struct gicv3_its *its = raw_to_gicv3_its(pirq->pic);

    *conf = (priority << ITS_LPI_CONFIG_PROP_SHIFT) | (*conf & (~ITS_LPI_CONFIG_PROP_MASK));

    return lpi_flush_config(its, conf, pirq);
}
static rt_err_t gicv3_its_irq_set_affinity(struct rt_pic_irq *pirq, rt_bitmap_t *affinity)
{
    int cpu_id;
    rt_err_t err;
    struct its_map *map;
    struct its_command cmd;
    struct its_collection *collection;
    struct gicv3_its *its = raw_to_gicv3_its(pirq->pic);

    map = pirq->msi_desc->priv;
    cpu_id = rt_bitmap_next_set_bit(affinity, 0, RT_CPUS_NR);
    collection = &its->collections[cpu_id];

    if (collection->target_address == ~0ULL)
    {
        return -RT_EIO;
    }

    if (map->cpu_id == cpu_id)
    {
        return RT_EOK;
    }

    rt_memset(&cmd, 0, sizeof(cmd));
    its_encode_cmd(&cmd, GITS_CMD_MOVI);
    its_encode_device_id(&cmd, map->device_id);
    its_encode_event_id(&cmd, its_pirq_event_id(its, pirq));
    its_encode_collection(&cmd, collection->id);

    if (!(err = its_cmd_submit(its, &cmd, map, RT_TRUE)))
    {
        map->cpu_id = cpu_id;
    }

    return err;
}
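
/*
 * The MSI doorbell is always GITS_TRANSLATER; the ITS tells the writers
 * apart by the device ID presented on the bus, so the message payload
 * only needs to carry the per-device event ID.
 */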
static void gicv3_its_irq_compose_msi_msg(struct rt_pic_irq *pirq, struct rt_pci_msi_msg *msg)
{
    rt_ubase_t addr;
    struct gicv3_its *its = raw_to_gicv3_its(pirq->pic);

    addr = (rt_ubase_t)its->base_phy + GITS_TRANSLATER;

    msg->address_hi = rt_upper_32_bits(addr);
    msg->address_lo = rt_lower_32_bits(addr);
    msg->data = its_pirq_event_id(its, pirq);
}
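
/*
 * MSI allocation walks through:
 *  1. resolve the device ID (PCI RID, or "msi-parent" from the devicetree);
 *  2. create or reuse the per-device map (ITT, plus a level-2 device
 *     table entry when the device table is indirect);
 *  3. grab a free LPI from the bitmap and route it through the parent GIC;
 *  4. issue MAPD and MAPTI so the ITS translates (device, event) to the LPI.
 */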
static int gicv3_its_irq_alloc_msi(struct rt_pic *pic, struct rt_pci_msi_desc *msi_desc)
{
    rt_ubase_t level;
    rt_uint32_t device_id = -1;
    int irq = -1, hwirq, parent_irq, hwirq_index, lpi_base = 0;
    struct its_map *map = RT_NULL, *map_tmp;
    struct its_table *tbl;
    struct its_command cmd;
    struct rt_pic_irq *pirq;
    struct rt_pci_device *pdev = msi_desc->pdev;
    struct gicv3_its *its = raw_to_gicv3_its(pic);
    struct rt_pic *ppic = &its->gic->parent;

    tbl = its_baser_type(its, GITS_BASER_TYPE_DEVICE);
    RT_ASSERT(tbl != RT_NULL);

    if (!pdev->parent.ofw_node)
    {
        device_id = rt_pci_dev_id(pdev);
    }
    else
    {
        struct rt_ofw_cell_args args;

        for (int index = 0; ; ++index)
        {
            rt_err_t err = rt_ofw_parse_phandle_cells(pdev->parent.ofw_node,
                    "msi-parent", "#msi-cells", index, &args);

            if (err)
            {
                return (int)err;
            }

            if (args.data == its->np)
            {
                device_id = args.args[0];
            }

            rt_ofw_node_put(args.data);

            if ((rt_int32_t)device_id >= 0)
            {
                break;
            }
        }
    }

    if (device_id >= (1 << tbl->size_bits))
    {
        LOG_E("Device ID = %x is not supported", device_id);
        return -RT_EINVAL;
    }

    /* Find old map info */
    level = rt_spin_lock_irqsave(&map_lock);
    rt_list_for_each_entry(map_tmp, &map_nodes, list)
    {
        if (map_tmp->device_id == device_id)
        {
            map = map_tmp;
            lpi_base = map->lpi_base - 8192;
            break;
        }
    }
    rt_spin_unlock_irqrestore(&map_lock, level);

    if (!map)
    {
        rt_size_t itt_size;

        if (!(map = rt_calloc(1, sizeof(*map))))
        {
            return -RT_ENOMEM;
        }

        itt_size = tbl->itt_entries * (RT_FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE,
                HWREG64(its->base + GITS_TYPER)) + 1);
        itt_size = rt_max_t(rt_size_t, itt_size, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;

        map->itt = rt_malloc_align(itt_size, ITS_ITT_ALIGN);

        if (!map->itt)
        {
            rt_free(map);
            return -RT_ENOMEM;
        }

        if (tbl->lvl2_bits)
        {
            void *lvl2_dte;
            rt_uint64_t *entry;

            entry = tbl->base;
            entry += device_id / (tbl->page_size / GITS_LVL1_ENTRY_SIZE);

            if (*entry)
            {
                lvl2_dte = (void *)(*entry - PV_OFFSET);
                rt_page_ref_inc(lvl2_dte, tbl->lvl2_bits);
            }
            else
            {
                rt_size_t dte_size;

                lvl2_dte = rt_pages_alloc(tbl->lvl2_bits);

                if (!lvl2_dte)
                {
                    rt_free_align(map->itt);
                    rt_free(map);
                    return -RT_ENOMEM;
                }

                dte_size = rt_page_bits(tbl->lvl2_bits);
                rt_memset(lvl2_dte, 0, dte_size);

                if (!(tbl->val & GITS_BASER_SHARE_MASK_ALL))
                {
                    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, lvl2_dte, dte_size);
                }

                *entry = rt_cpu_to_le64((rt_uint64_t)rt_kmem_v2p(lvl2_dte) | GITS_BASER_VALID);

                if (!(tbl->val & GITS_BASER_SHARE_MASK_ALL))
                {
                    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, entry, sizeof(*entry));
                }

                rt_hw_dsb();
            }

            map->lvl2_dte = lvl2_dte;
        }

        rt_memset(map->itt, 0, itt_size);
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, map->itt, itt_size);
    }
    msi_desc->priv = map;

    /* Alloc the LPI base on the first LPI */
    level = rt_spin_lock_irqsave(&lpis_lock);
    hwirq_index = rt_bitmap_next_clear_bit(lpis_vectors, lpi_base, lpi_nr);

    if (hwirq_index >= lpi_nr)
    {
        irq = -RT_EEMPTY;
        goto _out_lock;
    }

    hwirq = 8192 + hwirq_index;

    parent_irq = ppic->ops->irq_map(ppic, hwirq, RT_IRQ_MODE_EDGE_RISING);

    if (parent_irq < 0)
    {
        irq = parent_irq;
        goto _out_lock;
    }

    irq = rt_pic_config_irq(pic, hwirq_index, hwirq);

    if (irq < 0)
    {
        goto _out_lock;
    }
    pirq = rt_pic_find_irq(pic, hwirq_index);

    pirq->mode = RT_IRQ_MODE_EDGE_RISING;
    rt_pic_cascade(pirq, parent_irq);

    rt_bitmap_set_bit(lpis_vectors, hwirq_index);

_out_lock:
    rt_spin_unlock_irqrestore(&lpis_lock, level);

    if (irq < 0)
    {
        return irq;
    }

    if (map->its)
    {
        rt_ref_get(&map->ref);
    }
    else
    {
        rt_list_init(&map->list);
        rt_ref_init(&map->ref);
        map->its = its;
        map->device_id = device_id;
        map->lpi_base = hwirq;

        level = rt_spin_lock_irqsave(&map_lock);
        rt_list_insert_before(&map_nodes, &map->list);
        rt_spin_unlock_irqrestore(&map_lock, level);
    }

    /* Default to CPU#0 */
    map->cpu_id = 0;
    RT_IRQ_AFFINITY_SET(pirq->affinity, map->cpu_id);

    rt_memset(&cmd, 0, sizeof(cmd));
    its_encode_cmd(&cmd, GITS_CMD_MAPD);
    its_encode_device_id(&cmd, device_id);
    its_encode_size(&cmd, rt_ilog2(tbl->itt_entries) - 1);
    its_encode_itt(&cmd, (rt_uint64_t)rt_kmem_v2p(map->itt));
    its_encode_valid(&cmd, RT_TRUE);
    its_cmd_submit(its, &cmd, map, RT_FALSE);

    rt_memset(&cmd, 0, sizeof(cmd));
    its_encode_cmd(&cmd, GITS_CMD_MAPTI);
    its_encode_device_id(&cmd, device_id);
    its_encode_event_id(&cmd, its_pirq_event_id(its, pirq));
    its_encode_phys_id(&cmd, hwirq);
    its_encode_collection(&cmd, its->collections[map->cpu_id].id);
    its_cmd_submit(its, &cmd, map, RT_TRUE);

    return irq;
}
static void its_map_release(struct rt_ref *r)
{
    rt_ubase_t level;
    struct gicv3_its *its;
    struct its_table *tbl;
    struct its_command cmd;
    struct its_map *map = rt_container_of(r, struct its_map, ref);

    its = map->its;
    tbl = its_baser_type(its, GITS_BASER_TYPE_DEVICE);

    rt_memset(&cmd, 0, sizeof(cmd));
    its_encode_cmd(&cmd, GITS_CMD_MAPD);
    its_encode_device_id(&cmd, map->device_id);
    its_encode_size(&cmd, rt_ilog2(tbl->itt_entries) - 1);
    its_encode_itt(&cmd, (rt_uint64_t)rt_kmem_v2p(map->itt));
    its_encode_valid(&cmd, RT_FALSE);
    its_cmd_submit(its, &cmd, map, RT_TRUE);

    /* The last reference is gone: take the map off the list before freeing it */
    level = rt_spin_lock_irqsave(&map_lock);
    rt_list_remove(&map->list);
    rt_spin_unlock_irqrestore(&map_lock, level);

    if (map->itt)
    {
        rt_free_align(map->itt);
    }

    if (map->lvl2_dte)
    {
        if (rt_page_ref_get(map->lvl2_dte, tbl->lvl2_bits) == 1)
        {
            rt_uint64_t *entry;

            entry = tbl->base + (map->device_id / (tbl->page_size / GITS_LVL1_ENTRY_SIZE));
            *entry = rt_cpu_to_le64(0);

            if (!(tbl->val & GITS_BASER_SHARE_MASK_ALL))
            {
                rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, entry, sizeof(*entry));
            }
        }
        rt_pages_free(map->lvl2_dte, tbl->lvl2_bits);
    }

    rt_free(map);
}
static void gicv3_its_irq_free_msi(struct rt_pic *pic, int irq)
{
    rt_ubase_t level;
    struct its_map *map;
    struct its_command cmd;
    struct rt_pic_irq *pirq;
    struct gicv3_its *its = raw_to_gicv3_its(pic);

    pirq = rt_pic_find_pirq(pic, irq);

    if (!pirq)
    {
        return;
    }

    map = pirq->msi_desc->priv;

    rt_memset(&cmd, 0, sizeof(cmd));
    its_encode_cmd(&cmd, GITS_CMD_DISCARD);
    its_encode_device_id(&cmd, map->device_id);
    its_encode_event_id(&cmd, its_pirq_event_id(its, pirq));
    its_cmd_submit(its, &cmd, map, RT_TRUE);

    rt_pic_uncascade(pirq);

    level = rt_spin_lock_irqsave(&lpis_lock);
    rt_bitmap_clear_bit(lpis_vectors, pirq->hwirq - 8192);
    rt_spin_unlock_irqrestore(&lpis_lock, level);

    rt_ref_put(&map->ref, its_map_release);
}
static rt_err_t gicv3_its_irq_set_state(struct rt_pic *pic, int hwirq, int type, rt_bool_t state)
{
    struct its_map *map;
    struct its_command cmd;
    struct rt_pic_irq *pirq;
    struct gicv3_its *its = raw_to_gicv3_its(pic);

    if (type != RT_IRQ_STATE_PENDING || hwirq > 8192 + lpi_nr)
    {
        return -RT_ENOSYS;
    }

    if (!(pirq = rt_pic_find_irq(pic, hwirq - 8192)))
    {
        return -RT_ENOSYS;
    }

    map = pirq->msi_desc->priv;
    rt_memset(&cmd, 0, sizeof(cmd));

    if (state)
    {
        its_encode_cmd(&cmd, GITS_CMD_INT);
        its_encode_device_id(&cmd, map->device_id);
        its_encode_event_id(&cmd, its_pirq_event_id(its, pirq));
    }
    else
    {
        its_encode_cmd(&cmd, GITS_CMD_CLEAR);
        its_encode_device_id(&cmd, map->device_id);
        its_encode_event_id(&cmd, its_pirq_event_id(its, pirq));
    }

    its_cmd_submit(its, &cmd, map, RT_TRUE);

    return RT_EOK;
}
const static struct rt_pic_ops gicv3_its_ops =
{
    .name = "GICv3-ITS",
    .irq_init = gicv3_its_irq_init,
    .irq_ack = rt_pic_irq_parent_ack,
    .irq_mask = gicv3_its_irq_mask,
    .irq_unmask = gicv3_its_irq_unmask,
    .irq_eoi = rt_pic_irq_parent_eoi,
    .irq_set_priority = gicv3_its_irq_set_priority,
    .irq_set_affinity = gicv3_its_irq_set_affinity,
    .irq_compose_msi_msg = gicv3_its_irq_compose_msi_msg,
    .irq_alloc_msi = gicv3_its_irq_alloc_msi,
    .irq_free_msi = gicv3_its_irq_free_msi,
    .irq_set_state = gicv3_its_irq_set_state,
    .flags = RT_PIC_F_IRQ_ROUTING,
};
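
/*
 * Probe the largest ITS page size the BASER register actually accepts:
 * write 64K, read it back, and fall through 16K down to 4K until the
 * programmed value sticks.
 */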
static rt_ssize_t its_baser_page_size(struct gicv3_its *its, struct its_table *tbl)
{
    rt_size_t page_size = 64 * SIZE_KB;

    while (page_size)
    {
        rt_uint64_t val, baser_page_size;
        rt_off_t baser = GITS_BASERn((int)(tbl - its->tbls));

        val = its_readq(its, baser);
        val &= ~GITS_BASER_PAGE_SIZE_MASK;

        switch (page_size)
        {
        case 64 * SIZE_KB:
            baser_page_size = GITS_BASER_PAGE_SIZE_64K;
            break;
        case 16 * SIZE_KB:
            baser_page_size = GITS_BASER_PAGE_SIZE_16K;
            break;
        case 4 * SIZE_KB:
        default:
            baser_page_size = GITS_BASER_PAGE_SIZE_4K;
            break;
        }

        baser_page_size >>= GITS_BASER_PAGE_SIZE_SHIFT;

        val |= RT_FIELD_PREP(GITS_BASER_PAGE_SIZE_MASK, baser_page_size);
        its_writeq(its, baser, val);
        tbl->val = its_readq(its, baser);

        if (RT_FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, tbl->val) == baser_page_size)
        {
            break;
        }

        switch (page_size)
        {
        case 64 * SIZE_KB:
            page_size = 16 * SIZE_KB;
            break;
        case 16 * SIZE_KB:
            page_size = 4 * SIZE_KB;
            break;
        case 4 * SIZE_KB:
        default:
            return -RT_EINVAL;
        }
    }

    return page_size;
}
static rt_err_t its_table_init(struct gicv3_its *its)
{
    int inited = 0;
    rt_off_t baser;
    rt_bool_t indirect = RT_FALSE;
    rt_ssize_t page_size;
    rt_size_t pages_nr, alloc_size;
    rt_uint64_t val, type, entry_size, share, cache;
    struct its_table *tbl;

    share = GITS_BASER_InnerShareable;
    cache = GITS_BASER_RaWaWb;

    for (int i = 0; i < RT_ARRAY_SIZE(its->tbls); ++i)
    {
        tbl = &its->tbls[i];

        val = its_readq(its, GITS_BASERn(i));
        type = GITS_BASER_TYPE(val);

        if (type != GITS_BASER_TYPE_DEVICE &&
            type != GITS_BASER_TYPE_COLLECTION)
        {
            continue;
        }

        /* Check the signed return value before storing into the unsigned field */
        page_size = its_baser_page_size(its, tbl);

        if (page_size < 0)
        {
            continue;
        }
        tbl->page_size = page_size;

        baser = GITS_BASERn((int)(tbl - its->tbls));
        entry_size = GITS_BASER_ENTRY_SIZE(val);

        if (type == GITS_BASER_TYPE_DEVICE)
        {
            tbl->size_bits = its_device_id_bits(its);
            LOG_D("Device Max IDs = %lu", 1UL << tbl->size_bits);

            /* For MSI-X */
            tbl->itt_entries = 2048;
            while (MAX_HANDLERS / tbl->itt_entries < (1 << tbl->size_bits) &&
                tbl->itt_entries > 32)
            {
                tbl->itt_entries >>= 1;
            }
        }

        its_writeq(its, baser, tbl->val | GITS_BASER_INDIRECT);
        tbl->val = its_readq(its, baser);

        indirect = !!(tbl->val & GITS_BASER_INDIRECT);

        if (indirect && type == GITS_BASER_TYPE_DEVICE)
        {
            /* The size of the level 2 table is equal to ITS page size */
            tbl->lvl2_bits = tbl->size_bits - rt_ilog2(tbl->page_size / (int)entry_size);

            /* Get level 1 entries count */
            alloc_size = (1 << tbl->size_bits) / (tbl->page_size / entry_size);
            alloc_size *= GITS_LVL1_ENTRY_SIZE;
        }
        else
        {
            alloc_size = (1 << tbl->size_bits) * entry_size;
            indirect = RT_FALSE;
        }

        tbl->base = rt_malloc_align(alloc_size, tbl->page_size);
        pages_nr = alloc_size / tbl->page_size;

        if (!tbl->base)
        {
            return -RT_ENOMEM;
        }

        if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
        {
            cache = GITS_BASER_nCnB;
        }

        if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE)
        {
            cache = GITS_BASER_nC;
            share = 0;
        }

        val = ((rt_ubase_t)rt_kmem_v2p(tbl->base) |
                (type << GITS_BASER_TYPE_SHIFT) |
                ((entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
                (pages_nr << GITS_BASER_PAGES_SHIFT) |
                cache | share | GITS_BASER_VALID);
        val |= indirect ? GITS_BASER_INDIRECT : 0;

        switch (tbl->page_size)
        {
        case 4 * SIZE_KB:
            val |= GITS_BASER_PAGE_SIZE_4K;
            break;
        case 16 * SIZE_KB:
            val |= GITS_BASER_PAGE_SIZE_16K;
            break;
        case 64 * SIZE_KB:
            val |= GITS_BASER_PAGE_SIZE_64K;
            break;
        }

        its_writeq(its, baser, val);
        tbl->val = its_readq(its, baser);

        rt_memset(tbl->base, 0, alloc_size);
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, tbl->base, alloc_size);

        cache = tbl->val & GITS_BASER_INNER_MASK_ALL;
        share = tbl->val & GITS_BASER_SHARE_MASK_ALL;

        ++inited;
    }

    return inited == 2 ? RT_EOK : -RT_ENOSYS;
}
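
/*
 * The command queue lives in normal memory described by GITS_CBASER.
 * If the ITS refuses the shareability we asked for, drop back to
 * non-cacheable attributes and flush each command out of the D-cache
 * by hand.
 */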
static rt_err_t its_cmd_queue_init(struct gicv3_its *its)
{
    void *cmd_phy_base;
    rt_uint64_t baser, tmp;

    its->cmd_base = rt_malloc_align(ITS_CMD_QUEUE_SIZE, ITS_CMD_QUEUE_ALIGN);

    if (!its->cmd_base)
    {
        return -RT_ENOMEM;
    }

    its->cmd_idx = 0;
    rt_memset(its->cmd_base, 0, ITS_CMD_QUEUE_SIZE);

    cmd_phy_base = rt_kmem_v2p(its->cmd_base);

    baser = GITS_CBASER_VALID | GITS_CBASER_RaWaWb | GITS_CBASER_InnerShareable | \
            ((rt_uint64_t)cmd_phy_base) | (ITS_CMD_QUEUE_SIZE / (4 * SIZE_KB) - 1);

    its_writeq(its, GITS_CBASER, baser);
    tmp = its_readq(its, GITS_CBASER);

    if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE)
    {
        tmp &= ~GITS_CBASER_SHARE_MASK_ALL;
    }

    if ((tmp ^ baser) & GITS_CBASER_SHARE_MASK_ALL)
    {
        if (!(tmp & GITS_CBASER_SHARE_MASK_ALL))
        {
            /* The HW reports non-shareable, we must remove the cacheability attributes as well */
            baser &= ~(GITS_CBASER_SHARE_MASK_ALL | GITS_CBASER_INNER_MASK_ALL);
            baser |= GITS_CBASER_nC;

            its_writeq(its, GITS_CBASER, baser);
        }

        LOG_I("Using cache flushing for CMD queue");
        its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;

        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, its->cmd_base, ITS_CMD_QUEUE_SIZE);
    }

    /* Get the next command from the start of the buffer */
    its_writeq(its, GITS_CWRITER, 0);

    return RT_EOK;
}
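
/*
 * The LPI configuration table holds one byte of priority/enable bits
 * per LPI and is shared by every redistributor; the pending table is
 * likewise allocated once here. Both are flushed so the GIC observes
 * clean data.
 */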
static rt_err_t its_lpi_table_init(struct gicv3 *gic)
{
    rt_size_t lpi_table_size, lpi_pending_table_size;
    rt_uint32_t numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic->gicd_typer);

    if (HWREG32(gicr_rd_base_percpu(gic) + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS)
    {
        gic->redist_flags |= RDIST_FLAGS_RD_TABLES_PREALLOCATED;
        gic->redist_flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;

        LOG_I("Using preallocated redistributor tables");
    }

    lpi_id_bits = GICD_TYPER_ID_BITS(gic->gicd_typer);

    if (gic->redist_flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED)
    {
        rt_uint64_t val = HWREG64(gicr_rd_base_percpu(gic) + GICR_PROPBASER);

        lpi_id_bits = rt_min_t(rt_uint32_t, lpi_id_bits, (val & GICR_PROPBASER_IDBITS_MASK) + 1);
    }

    lpi_nr = rt_min_t(rt_size_t, (1UL << lpi_id_bits) - 8192, gic->lpi_nr);
    lpi_id_bits = __rt_clz(lpi_nr + 8192);

    if (numlpis > 2 && numlpis > lpi_nr)
    {
        lpi_nr = numlpis;
        LOG_W("Using hypervisor restricted LPI range [%u]", lpi_nr);
    }

    gic->lpi_nr = lpi_nr;

    /* LPI Configuration table entry is 1 byte, Pending table bytes is N / 8. */
    lpi_table_size = RT_GENMASK(lpi_id_bits, 0);
    lpi_pending_table_size = lpi_table_size / 8;

    lpi_table = rt_malloc_align(lpi_table_size, ITS_LPI_CONFIG_TABLE_ALIGN);
    lpi_pending_table = rt_malloc_align(lpi_pending_table_size, ITS_LPI_PENDING_TABLE_ALIGN);
    lpis_vectors = rt_calloc(1, RT_BITMAP_LEN(lpi_nr) * sizeof(rt_bitmap_t));

    if (!lpi_table || !lpi_pending_table || !lpis_vectors)
    {
        if (lpi_table)
        {
            rt_free_align(lpi_table);
        }

        if (lpi_pending_table)
        {
            rt_free_align(lpi_pending_table);
        }

        if (lpis_vectors)
        {
            /* Allocated with rt_calloc, so release with rt_free */
            rt_free(lpis_vectors);
        }

        lpi_table = RT_NULL;
        lpi_pending_table = RT_NULL;
        lpis_vectors = RT_NULL;

        return -RT_ENOMEM;
    }

    /* Set the default configuration */
    rt_memset(lpi_table, ITS_LPI_CONFIG_PROP_DEFAULT_PRIO | GITS_LPI_CFG_GROUP1, lpi_table_size);

    /*
     * We should make a full mask size with lpi_id_bits,
     * otherwise 'undefined' LPI will occur.
     */
    rt_memset(lpi_pending_table, 0, lpi_pending_table_size);

    /* Flush the table to memory */
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, lpi_table, lpi_table_size);
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, lpi_pending_table, lpi_pending_table_size);

    LOG_D("ITS: Allocator initialized for %u LPIs", lpi_nr);

    return RT_EOK;
}
static void its_init_fail(struct gicv3_its *its)
{
    if (its->base)
    {
        rt_iounmap(its->base);
    }

    if (its->cmd_base)
    {
        rt_free_align(its->cmd_base);
    }

    for (int i = 0; i < RT_ARRAY_SIZE(its->tbls); ++i)
    {
        struct its_table *tbl = &its->tbls[i];

        if (tbl->base)
        {
            rt_free_align(tbl->base);
        }
    }

    rt_list_remove(&its->list);
    rt_free(its);
}
static rt_err_t its_quirk_cavium_22375(void *data)
{
    struct gicv3_its *its = data;

    its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;

    return RT_EOK;
}

static rt_err_t its_enable_rockchip(void *data)
{
    struct gicv3_its *its = data;
    struct gicv3 *gic = its->gic;

    if (!rt_ofw_machine_is_compatible("rockchip,rk3566") &&
        !rt_ofw_machine_is_compatible("rockchip,rk3567") &&
        !rt_ofw_machine_is_compatible("rockchip,rk3568") &&
        !rt_ofw_machine_is_compatible("rockchip,rk3588") &&
        !rt_ofw_machine_is_compatible("rockchip,rk3588s"))
    {
        return -RT_EINVAL;
    }

    its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE;
    gic->redist_flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE;

    return RT_EOK;
}

static rt_err_t its_set_non_coherent(void *data)
{
    struct gicv3_its *its = data;

    if (!rt_ofw_prop_read_bool(its->np, "dma-noncoherent"))
    {
        return -RT_EINVAL;
    }

    its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE;

    return RT_EOK;
}

static const struct gic_quirk _its_quirks[] =
{
    {
        .desc       = "ITS: Cavium ThunderX errata: 22375, 24313",
        .iidr       = 0xa100034c,
        .iidr_mask  = 0xffff0fff,
        .init       = its_quirk_cavium_22375,
    },
    {
        .desc       = "ITS: Rockchip erratum RK3566 ~ RK3588",
        .iidr       = 0x0201743b,
        .iidr_mask  = 0xffffffff,
        .init       = its_enable_rockchip,
    },
    {
        .desc       = "ITS: non-coherent attribute",
        .compatible = "arm,gic-v3-its",
        .init       = its_set_non_coherent,
    },
    { /* sentinel */ }
};
static const struct rt_ofw_node_id gicv3_its_ofw_match[] =
{
    { .compatible = "arm,gic-v3-its" },
    { /* sentinel */ }
};
rt_err_t gicv3_its_ofw_probe(struct rt_ofw_node *np, const struct rt_ofw_node_id *id)
{
    rt_err_t err = -RT_EEMPTY;
    struct rt_ofw_node *its_np;
    struct gicv3_its *its, *its_next;

    rt_ofw_foreach_available_child_node(np, its_np)
    {
        if (!rt_ofw_node_match(its_np, gicv3_its_ofw_match))
        {
            continue;
        }

        if (!rt_ofw_prop_read_bool(its_np, "msi-controller"))
        {
            continue;
        }

        if (!(its = rt_calloc(1, sizeof(struct gicv3_its))))
        {
            rt_ofw_node_put(its_np);

            err = -RT_ENOMEM;
            goto _free_all;
        }

        its->base = rt_ofw_iomap(its_np, 0);

        if (!its->base)
        {
            LOG_E("%s: IO map failed", rt_ofw_node_full_name(its_np));
            its_init_fail(its);
            continue;
        }

        /*
         * Make sure ALL the ITS are reset before we probe any,
         * as they may be sharing memory
         */
        for (int i = 0; i < GITS_BASER_NR_REGS; ++i)
        {
            its_writeq(its, GITS_BASER + (i << 3), 0);
        }

        its->np = its_np;

        rt_list_init(&its->list);
        rt_list_insert_before(&its_nodes, &its->list);
    }

    if (!rt_list_isempty(&its_nodes))
    {
        if ((err = its_lpi_table_init(rt_ofw_data(np))))
        {
            goto _free_all;
        }
    }

    rt_list_for_each_entry_safe(its, its_next, &its_nodes, list)
    {
        rt_uint32_t ctlr;

        its->base_phy = rt_kmem_v2p(its->base);
        its->gic = rt_ofw_data(np);

        gic_common_init_quirk_hw(HWREG32(its->base + GITS_IIDR), _its_quirks, its);
        gic_common_init_quirk_ofw(its->np, _its_quirks, its);

        if ((err = its_cmd_queue_init(its)))
        {
            goto _fail;
        }
        rt_spin_lock_init(&its->cmd_lock);

        if ((err = its_table_init(its)))
        {
            goto _fail;
        }

        for (int i = 0; i < RT_CPUS_NR; ++i)
        {
            its->collections[i].target_address = ~0ULL;
        }

        ctlr = its_readl(its, GITS_CTLR);
        ctlr |= GITS_CTLR_ENABLE;
        its_writel(its, GITS_CTLR, ctlr);

        its->parent.priv_data = its;
        its->parent.ops = &gicv3_its_ops;

        rt_pic_linear_irq(&its->parent, its->gic->lpi_nr);
        rt_pic_user_extends(&its->parent);

        its_np = its->np;
        rt_ofw_data(its_np) = &its->parent;
        rt_ofw_node_set_flag(its_np, RT_OFW_F_READLY);

        continue;

    _fail:
        its_init_fail(its);

        if (err == -RT_ENOMEM)
        {
            break;
        }
    }

    if (rt_list_isempty(&its_nodes) && lpis_vectors)
    {
        rt_free(lpis_vectors);
        rt_free_align(lpi_table);
        rt_free_align(lpi_pending_table);
        lpis_vectors = RT_NULL;
    }

    return err;

_free_all:
    rt_list_for_each_entry_safe(its, its_next, &its_nodes, list)
    {
        /* Unlink before freeing to avoid touching freed memory */
        rt_list_remove(&its->list);
        rt_free(its);
    }

    return err;
}