pic-gicv3-its.c

/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2023-01-30     GuEe-GUI     first version
 */
#include <rthw.h>
#include <rtthread.h>
#include <rtdevice.h>

#define DBG_TAG "pic.gicv3-its"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <mmu.h>
#include <mm_page.h>
#include <cpuport.h>
#include <dt-bindings/size.h>

#include "pic-gicv3.h"
#include "pic-gic-common.h"

#define ITS_CMD_QUEUE_SIZE                  (64 * SIZE_KB)
#define ITS_CMD_QUEUE_ALIGN                 (64 * SIZE_KB)
#define ITS_CMD_QUEUE_NR                    (ITS_CMD_QUEUE_SIZE / sizeof(struct its_command))

#define ITS_ITT_ALIGN                       (256 * SIZE_KB)

#define ITS_LPI_CONFIG_TABLE_ALIGN          (64 * SIZE_KB)
#define ITS_LPI_CONFIG_PROP_DEFAULT_PRIO    GICD_INT_DEF_PRI
#define ITS_LPI_CONFIG_PROP_SHIFT           2
#define ITS_LPI_CONFIG_PROP_MASK            RT_GENMASK(7, ITS_LPI_CONFIG_PROP_SHIFT)
#define ITS_LPI_PENDING_TABLE_ALIGN         (64 * SIZE_KB)

#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING RT_BIT(0)
#define RDIST_FLAGS_RD_TABLES_PREALLOCATED  RT_BIT(1)
#define RDIST_FLAGS_FORCE_NON_SHAREABLE     RT_BIT(2)

#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING       RT_BIT(0)
#define ITS_FLAGS_WORKAROUND_CAVIUM_22375   RT_BIT(1)
#define ITS_FLAGS_FORCE_NON_SHAREABLE       RT_BIT(2)

#define RD_LOCAL_LPI_ENABLED                RT_BIT(0)
#define RD_LOCAL_PENDTABLE_PREALLOCATED     RT_BIT(1)
#define RD_LOCAL_MEMRESERVE_DONE            RT_BIT(2)
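
/*
 * Per the GICv3 architecture, every ITS command is a 32-byte block of four
 * little-endian 64-bit doublewords (DW0..DW3). DW0[7:0] holds the command
 * number; the remaining fields (DeviceID, EventID, ICID, ITT address, ...)
 * are packed into the doublewords by the its_encode_* helpers below.
 */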
struct its_command
{
    union
    {
        rt_le64_t code_raw[4];
        rt_uint64_t code[4];
    };
};
struct its_table
{
    void *base;
    rt_uint64_t val;
    rt_uint32_t size_bits;
    rt_uint32_t page_size;
    union
    {
        struct
        {
            rt_uint32_t itt_entries;
            rt_uint32_t lvl2_bits;
        };
    };
};

struct its_collection
{
    rt_uint64_t target_address;
    rt_uint16_t id;
};

struct gicv3_its;

struct its_map
{
    rt_list_t list;
    struct rt_ref ref;
    struct gicv3_its *its;

    int device_id;
    int lpi_base;
    int cpu_id;

    void *itt;
    void *lvl2_dte;
};

struct gicv3_its
{
    struct rt_pic parent;
    rt_list_t list;

    void *base;
    void *base_phy;

    void *cmd_base;
    rt_ubase_t cmd_idx;
    rt_uint32_t flags;
    struct rt_spinlock cmd_lock;

    struct its_table tbls[GITS_BASER_NR_REGS];
    struct its_collection collections[RT_CPUS_NR];

    struct gicv3 *gic;
    struct rt_ofw_node *np;
};
#define raw_to_gicv3_its(raw) rt_container_of(raw, struct gicv3_its, parent)

static rt_size_t lpi_nr;
static rt_uint32_t lpi_id_bits;
static void *lpi_table;
static void *lpi_pending_table;
static rt_bitmap_t *lpis_vectors = RT_NULL;
static struct rt_spinlock lpis_lock = {}, map_lock = {};
static rt_list_t its_nodes = RT_LIST_OBJECT_INIT(its_nodes);
static rt_list_t map_nodes = RT_LIST_OBJECT_INIT(map_nodes);
rt_inline rt_uint64_t its_readq(struct gicv3_its *its, int off)
{
    return HWREG32(its->base + off) |
            (rt_uint64_t)HWREG32(its->base + off + 4) << 32;
}

rt_inline void its_writeq(struct gicv3_its *its, int off, rt_uint64_t value)
{
    HWREG32(its->base + off) = (rt_uint32_t)value;
    HWREG32(its->base + off + 4) = (rt_uint32_t)(value >> 32);
}

rt_inline rt_uint32_t its_readl(struct gicv3_its *its, int off)
{
    return HWREG32(its->base + off);
}

rt_inline void its_writel(struct gicv3_its *its, int off, rt_uint32_t value)
{
    HWREG32(its->base + off) = value;
}
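
/*
 * LPI INTIDs start at 8192 (GICv3 reserves the IDs below that for SGIs,
 * PPIs, SPIs and special interrupt IDs), so the ITS event ID for a hwirq is
 * simply hwirq - 8192 here, and the LPI config table and allocation bitmap
 * below index with the same offset.
 */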
rt_inline rt_uint32_t its_pirq_event_id(struct gicv3_its *its, struct rt_pic_irq *pirq)
{
    return pirq->hwirq - 8192;
}

rt_inline rt_uint32_t its_pirq_device_id(struct gicv3_its *its, struct rt_pic_irq *pirq)
{
    struct its_map *map = pirq->msi_desc->priv;

    return map->device_id;
}

rt_inline rt_size_t its_device_id_bits(struct gicv3_its *its)
{
    return RT_FIELD_GET(GITS_TYPER_DEVBITS, HWREG64(its->base + GITS_TYPER)) + 1;
}

rt_inline void *lpi_base_config(int index)
{
    return &((rt_uint8_t *)lpi_table)[index - 8192];
}
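
/*
 * Write 'val' into bits [h:l] of a command doubleword, leaving the other
 * bits untouched. For example, its_encode_device_id() below expands to
 * its_mask_encode(&cmd->code[0], device_id, 63, 32), i.e. the DeviceID
 * lands in DW0[63:32] as the GICv3 ITS command encoding requires.
 */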
static void its_mask_encode(rt_uint64_t *raw_code, rt_uint64_t val, int h, int l)
{
    rt_uint64_t mask = RT_GENMASK_ULL(h, l);

    *raw_code &= ~mask;
    *raw_code |= (val << l) & mask;
}

rt_inline void its_encode_cmd(struct its_command *cmd, rt_uint8_t cmd_nr)
{
    its_mask_encode(&cmd->code[0], cmd_nr, 7, 0);
}

rt_inline void its_encode_valid(struct its_command *cmd, rt_bool_t valid)
{
    its_mask_encode(&cmd->code[2], !!valid, 63, 63);
}

rt_inline void its_encode_phys_id(struct its_command *cmd, rt_uint32_t phys_id)
{
    its_mask_encode(&cmd->code[1], phys_id, 63, 32);
}

rt_inline void its_encode_size(struct its_command *cmd, rt_uint8_t size)
{
    its_mask_encode(&cmd->code[1], size, 4, 0);
}

rt_inline void its_encode_itt(struct its_command *cmd, rt_uint64_t itt_addr)
{
    its_mask_encode(&cmd->code[2], itt_addr >> 8, 51, 8);
}

rt_inline void its_encode_target(struct its_command *cmd, rt_uint64_t target_addr)
{
    its_mask_encode(&cmd->code[2], target_addr >> 16, 51, 16);
}

rt_inline void its_encode_device_id(struct its_command *cmd, rt_uint32_t device_id)
{
    its_mask_encode(&cmd->code[0], device_id, 63, 32);
}

rt_inline void its_encode_event_id(struct its_command *cmd, rt_uint32_t event_id)
{
    its_mask_encode(&cmd->code[1], event_id, 31, 0);
}

rt_inline void its_encode_collection(struct its_command *cmd, rt_uint16_t collection_id)
{
    its_mask_encode(&cmd->code[2], collection_id, 15, 0);
}
static struct its_table *its_baser_type(struct gicv3_its *its, int type)
{
    for (int i = 0; i < RT_ARRAY_SIZE(its->tbls); ++i)
    {
        if (GITS_BASER_TYPE(its->tbls[i].val) == type)
        {
            return &its->tbls[i];
        }
    }

    return RT_NULL;
}
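
/*
 * The command queue is a ring: software advances GITS_CWRITER after writing
 * a command and the ITS advances GITS_CREADR as it consumes them. One slot
 * is always left unused so that a full queue can be told apart from an
 * empty one; the allocator spins (on the order of 100 ms worst case here)
 * waiting for the ITS to drain a slot.
 */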
static struct its_command *its_cmd_alloc(struct gicv3_its *its)
{
    struct its_command *cmd = RT_NULL;

    for (rt_uint32_t count = 0; count <= 10000; ++count)
    {
        if ((its->cmd_idx + 1) % ITS_CMD_QUEUE_NR != its_readl(its, GITS_CREADR) / sizeof(*cmd))
        {
            struct its_command *cmds = its->cmd_base;

            cmd = &cmds[its->cmd_idx++];
            its->cmd_idx %= ITS_CMD_QUEUE_NR;

            rt_memset(cmd, 0, sizeof(*cmd));
            break;
        }

        rt_hw_us_delay(10);
    }

    return cmd;
}
static rt_err_t its_cmd_submit_raw(struct gicv3_its *its, struct its_command *cmd)
{
    rt_uint64_t cwriter;
    rt_bool_t retry = RT_FALSE;

    cwriter = (void *)(cmd + 1) - its->cmd_base;
    rt_hw_rmb();

#ifdef ARCH_CPU_BIG_ENDIAN
    cmd->code_raw[0] = rt_cpu_to_le64(cmd->code[0]);
    cmd->code_raw[1] = rt_cpu_to_le64(cmd->code[1]);
    cmd->code_raw[2] = rt_cpu_to_le64(cmd->code[2]);
    cmd->code_raw[3] = rt_cpu_to_le64(cmd->code[3]);
#endif /* ARCH_CPU_BIG_ENDIAN */

    /* Make sure the commands written to memory are observable by the ITS */
    if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
    {
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cmd, sizeof(*cmd));
    }
    else
    {
        rt_hw_wmb();
    }

    its_writel(its, GITS_CWRITER, cwriter);

    for (rt_uint32_t count = 0; count < 10000; ++count)
    {
        if (its_readl(its, GITS_CREADR) == cwriter)
        {
            return RT_EOK;
        }

        /* Stalled */
        if (!retry && its_readl(its, GITS_CREADR) & 1)
        {
            /* Retry */
            its_writel(its, GITS_CWRITER, cwriter);
            retry = RT_TRUE;
        }
        else if (retry)
        {
            LOG_E("Retry command 0x%02x failed", cmd->code[0] & 0xff);
            return -RT_EIO;
        }

        rt_hw_us_delay(10);
    }

    return -RT_ETIMEOUT;
}
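
/*
 * Submit a command, optionally followed by a SYNC targeting the collection
 * of 'cpu_id'. Per the GICv3 spec, SYNC forces completion of the preceding
 * ITS operations for that redistributor, so callers that must observe the
 * effect (e.g. MAPC, MOVI, DISCARD) pass sync = RT_TRUE.
 */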
static rt_err_t its_cmd_submit_nomap(struct gicv3_its *its, struct its_command *cmd,
        int cpu_id, rt_bool_t sync)
{
    rt_err_t err;
    struct its_command *hw_cmd;

    rt_hw_spin_lock(&its->cmd_lock.lock);

    if (!(hw_cmd = its_cmd_alloc(its)))
    {
        err = -RT_EBUSY;
        goto _out_lock;
    }

    rt_memcpy(hw_cmd, cmd, sizeof(*hw_cmd));

    if ((err = its_cmd_submit_raw(its, hw_cmd)))
    {
        goto _out_lock;
    }

    if (sync)
    {
        if (!(hw_cmd = its_cmd_alloc(its)))
        {
            err = -RT_EBUSY;
            goto _out_lock;
        }

        its_encode_cmd(hw_cmd, GITS_CMD_SYNC);
        its_encode_target(hw_cmd, its->collections[cpu_id].target_address);

        err = its_cmd_submit_raw(its, hw_cmd);
    }

_out_lock:
    rt_hw_spin_unlock(&its->cmd_lock.lock);

    return err;
}

static rt_err_t its_cmd_submit(struct gicv3_its *its, struct its_command *cmd,
        struct its_map *map, rt_bool_t sync)
{
    return its_cmd_submit_nomap(its, cmd, map->cpu_id, sync);
}
static rt_err_t lpi_flush_config(struct gicv3_its *its, rt_uint8_t *conf,
        struct rt_pic_irq *pirq)
{
    struct its_command cmd;
    struct its_map *map = pirq->msi_desc->priv;

    if (its->gic->redist_flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
    {
        /* Clean D-cache under command */
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, conf, sizeof(*conf));
    }
    else
    {
        /* DSB inner shareable, store */
        rt_hw_wmb();
    }

    rt_memset(&cmd, 0, sizeof(cmd));
    its_encode_cmd(&cmd, GITS_CMD_INV);
    its_encode_device_id(&cmd, its_pirq_device_id(its, pirq));
    its_encode_event_id(&cmd, its_pirq_event_id(its, pirq));

    return its_cmd_submit(its, &cmd, map, RT_FALSE);
}
rt_inline void *gicr_rd_base_percpu(struct gicv3 *gic)
{
    return gic->redist_regions[rt_hw_cpu_id()].base;
}

rt_inline void *gicr_rd_base(struct gicv3_its *its)
{
    return its->gic->redist_percpu_base[rt_hw_cpu_id()];
}

rt_inline rt_uint64_t *gicr_rd_flags(struct gicv3_its *its)
{
    return &its->gic->redist_percpu_flags[rt_hw_cpu_id()];
}

static rt_bool_t gicr_supports_plpis(struct gicv3_its *its)
{
    return !!(HWREG64(gicr_rd_base(its) + GICR_TYPER) & GICR_TYPER_PLPIS);
}
static rt_err_t redist_disable_lpis(struct gicv3_its *its)
{
    void *gicr = gicr_rd_base(its);
    rt_uint64_t timeout = 1000000L, val;

    if (!gicr_supports_plpis(its))
    {
        LOG_E("CPU#%d: LPIs not supported", rt_hw_cpu_id());
        return -RT_ENOSYS;
    }

    val = HWREG32(gicr + GICR_CTLR);
    if (!(val & GICR_CTLR_ENABLE_LPIS))
    {
        return RT_EOK;
    }

    /*
     * If coming via a CPU hotplug event, we don't need to disable
     * LPIs before trying to re-enable them. They are already
     * configured and all is well in the world.
     *
     * If running with preallocated tables, there is nothing to do.
     */
    if ((*gicr_rd_flags(its) & RD_LOCAL_LPI_ENABLED) ||
        (its->gic->redist_flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED))
    {
        return RT_EOK;
    }

    /* From that point on, we only try to do some damage control */
    LOG_W("CPU#%d: Booted with LPIs enabled, memory probably corrupted", rt_hw_cpu_id());

    /* Disable LPIs */
    val &= ~GICR_CTLR_ENABLE_LPIS;
    HWREG32(gicr + GICR_CTLR) = val;

    /* Make sure any change to GICR_CTLR is observable by the GIC */
    rt_hw_barrier(dsb, sy);

    /*
     * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs
     * from 1 to 0 before programming GICR_PEND{PROP}BASER registers.
     * Error out if we time out waiting for RWP to clear.
     */
    while (HWREG32(gicr + GICR_CTLR) & GICR_CTLR_RWP)
    {
        if (!timeout)
        {
            LOG_E("CPU#%d: Timeout while disabling LPIs", rt_hw_cpu_id());
            return -RT_ETIMEOUT;
        }

        rt_hw_us_delay(1);
        --timeout;
    }

    /*
     * After it has been written to 1, it is IMPLEMENTATION
     * DEFINED whether GICR_CTLR.EnableLPIs becomes RES1 or can be
     * cleared to 0. Error out if clearing the bit failed.
     */
    if (HWREG32(gicr + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS)
    {
        LOG_E("CPU#%d: Failed to disable LPIs", rt_hw_cpu_id());
        return -RT_EBUSY;
    }

    return RT_EOK;
}
static void gicv3_its_cpu_init_lpis(struct gicv3_its *its)
{
    void *gicr;
    rt_ubase_t paddr;
    rt_uint64_t val, tmp;

    if (*gicr_rd_flags(its) & RD_LOCAL_LPI_ENABLED)
    {
        return;
    }

    gicr = gicr_rd_base(its);

    val = HWREG32(gicr + GICR_CTLR);
    if ((its->gic->redist_flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) &&
        (val & GICR_CTLR_ENABLE_LPIS))
    {
        *gicr_rd_flags(its) |= RD_LOCAL_PENDTABLE_PREALLOCATED;
        goto _out;
    }

    paddr = (rt_ubase_t)rt_kmem_v2p(lpi_pending_table);

    /* Set PROPBASE */
    val = ((rt_ubase_t)rt_kmem_v2p(lpi_table) |
            GITS_CBASER_InnerShareable |
            GITS_CBASER_RaWaWb |
            ((lpi_id_bits - 1) & GICR_PROPBASER_IDBITS_MASK));

    HWREG64(gicr + GICR_PROPBASER) = val;
    tmp = HWREG64(gicr + GICR_PROPBASER);

    if (its->gic->redist_flags & RDIST_FLAGS_FORCE_NON_SHAREABLE)
    {
        tmp &= ~GICR_PBASER_SHARE_MASK_ALL;
    }

    if ((tmp ^ val) & GICR_PBASER_SHARE_MASK_ALL)
    {
        if (!(tmp & GICR_PBASER_SHARE_MASK_ALL))
        {
            /*
             * The HW reports non-shareable,
             * we must remove the cacheability attributes as well.
             */
            val &= ~(GICR_PBASER_SHARE_MASK_ALL | GICR_PBASER_INNER_MASK_ALL);
            val |= GICR_PBASER_nC;

            HWREG64(gicr + GICR_PROPBASER) = val;
        }

        if (!rt_hw_cpu_id())
        {
            LOG_I("Using cache flushing for LPI property table");
        }
        its->gic->redist_flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
    }

    val = (paddr | GICR_PBASER_InnerShareable | GICR_PBASER_RaWaWb);

    HWREG64(gicr + GICR_PENDBASER) = val;
    tmp = HWREG64(gicr + GICR_PENDBASER);

    if (its->gic->redist_flags & RDIST_FLAGS_FORCE_NON_SHAREABLE)
    {
        tmp &= ~GICR_PBASER_SHARE_MASK_ALL;
    }

    if (!(tmp & GICR_PBASER_SHARE_MASK_ALL))
    {
        /*
         * The HW reports non-shareable, we must remove the
         * cacheability attributes as well.
         */
        val &= ~(GICR_PBASER_SHARE_MASK_ALL | GICR_PBASER_INNER_MASK_ALL);
        val |= GICR_PBASER_nC;

        HWREG64(gicr + GICR_PENDBASER) = val;
    }

    /* Enable LPIs */
    val = HWREG32(gicr + GICR_CTLR);
    val |= GICR_CTLR_ENABLE_LPIS;
    HWREG32(gicr + GICR_CTLR) = val;

    rt_hw_barrier(dsb, sy);

_out:
    *gicr_rd_flags(its) |= RD_LOCAL_LPI_ENABLED;
}
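
/*
 * Map one collection per CPU onto its redistributor. Depending on
 * GITS_TYPER.PTA, the MAPC target is either the physical address of the
 * redistributor or its processor number from GICR_TYPER; the INVALL that
 * follows makes the ITS reload any cached configuration for the collection.
 */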
static void gicv3_its_cpu_init_collection(struct gicv3_its *its)
{
    rt_uint64_t target;
    int cpu_id = rt_hw_cpu_id();
    struct its_command cmd;
    struct its_collection *collection;

    if (HWREG64(its->base + GITS_TYPER) & GITS_TYPER_PTA)
    {
        target = (rt_uint64_t)rt_kmem_v2p(gicr_rd_base(its));
    }
    else
    {
        /* Linear by GICR processor number */
        target = HWREG64(gicr_rd_base(its) + GICR_TYPER);
        target = GICR_TYPER_CPU_NO(target) << 16;
    }

    collection = &its->collections[cpu_id];
    collection->target_address = target;
    collection->id = cpu_id;

    rt_memset(&cmd, 0, sizeof(cmd));
    its_encode_cmd(&cmd, GITS_CMD_MAPC);
    its_encode_collection(&cmd, collection->id);
    its_encode_target(&cmd, target);
    its_encode_valid(&cmd, RT_TRUE);
    its_cmd_submit_nomap(its, &cmd, cpu_id, RT_TRUE);

    rt_memset(&cmd, 0, sizeof(cmd));
    its_encode_cmd(&cmd, GITS_CMD_INVALL);
    its_encode_collection(&cmd, collection->id);
    its_cmd_submit_nomap(its, &cmd, cpu_id, RT_TRUE);
}
static rt_err_t gicv3_its_irq_init(struct rt_pic *pic)
{
    rt_err_t err;
    struct gicv3_its *its = raw_to_gicv3_its(pic);

    if ((err = redist_disable_lpis(its)))
    {
        return err;
    }

    gicv3_its_cpu_init_lpis(its);
    gicv3_its_cpu_init_collection(its);

    return RT_EOK;
}
static void gicv3_its_irq_mask(struct rt_pic_irq *pirq)
{
    rt_uint8_t *conf = lpi_base_config(pirq->hwirq);
    struct gicv3_its *its = raw_to_gicv3_its(pirq->pic);

    *conf &= ~GITS_LPI_CFG_ENABLED;
    lpi_flush_config(its, conf, pirq);

    rt_pci_msi_mask_irq(pirq);
}

static void gicv3_its_irq_unmask(struct rt_pic_irq *pirq)
{
    rt_uint8_t *conf = lpi_base_config(pirq->hwirq);
    struct gicv3_its *its = raw_to_gicv3_its(pirq->pic);

    *conf |= GITS_LPI_CFG_ENABLED;
    lpi_flush_config(its, conf, pirq);

    rt_pci_msi_unmask_irq(pirq);
}

static rt_err_t gicv3_its_irq_set_priority(struct rt_pic_irq *pirq, rt_uint32_t priority)
{
    rt_uint8_t *conf = lpi_base_config(pirq->hwirq);
    struct gicv3_its *its = raw_to_gicv3_its(pirq->pic);

    *conf = (priority << ITS_LPI_CONFIG_PROP_SHIFT) | (*conf & (~ITS_LPI_CONFIG_PROP_MASK));

    return lpi_flush_config(its, conf, pirq);
}
static rt_err_t gicv3_its_irq_set_affinity(struct rt_pic_irq *pirq, rt_bitmap_t *affinity)
{
    int cpu_id;
    rt_err_t err;
    struct its_map *map;
    struct its_command cmd;
    struct its_collection *collection;
    struct gicv3_its *its = raw_to_gicv3_its(pirq->pic);

    map = pirq->msi_desc->priv;
    cpu_id = rt_bitmap_next_set_bit(affinity, 0, RT_CPUS_NR);
    collection = &its->collections[cpu_id];

    if (collection->target_address == ~0ULL)
    {
        return -RT_EIO;
    }

    if (map->cpu_id == cpu_id)
    {
        return RT_EOK;
    }

    rt_memset(&cmd, 0, sizeof(cmd));
    its_encode_cmd(&cmd, GITS_CMD_MOVI);
    its_encode_device_id(&cmd, map->device_id);
    its_encode_event_id(&cmd, its_pirq_event_id(its, pirq));
    its_encode_collection(&cmd, collection->id);

    if (!(err = its_cmd_submit(its, &cmd, map, RT_TRUE)))
    {
        map->cpu_id = cpu_id;
    }

    return err;
}
static void gicv3_its_irq_compose_msi_msg(struct rt_pic_irq *pirq, struct rt_pci_msi_msg *msg)
{
    rt_ubase_t addr;
    struct gicv3_its *its = raw_to_gicv3_its(pirq->pic);

    addr = (rt_ubase_t)its->base_phy + GITS_TRANSLATER;

    msg->address_hi = rt_upper_32_bits(addr);
    msg->address_lo = rt_lower_32_bits(addr);
    msg->data = its_pirq_event_id(its, pirq);
}
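
/*
 * MSI allocation flow: resolve the device ID (the PCI RID, or the ID found
 * through the "msi-parent" phandle for OFW devices), make sure the device
 * has an ITT (and a level-2 DTE page when the device table is two-level),
 * pick a free LPI from the global bitmap, then issue MAPD to bind
 * DeviceID -> ITT and MAPTI to bind (DeviceID, EventID) -> (LPI, collection).
 */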
static int gicv3_its_irq_alloc_msi(struct rt_pic *pic, struct rt_pci_msi_desc *msi_desc)
{
    rt_ubase_t level;
    rt_uint32_t device_id = -1;
    int irq = -1, hwirq, parent_irq, hwirq_index, lpi_base = 0;
    struct its_map *map = RT_NULL, *map_tmp;
    struct its_table *tbl;
    struct its_command cmd;
    struct rt_pic_irq *pirq;
    struct rt_pci_device *pdev = msi_desc->pdev;
    struct gicv3_its *its = raw_to_gicv3_its(pic);
    struct rt_pic *ppic = &its->gic->parent;

    tbl = its_baser_type(its, GITS_BASER_TYPE_DEVICE);
    RT_ASSERT(tbl != RT_NULL);

    if (!pdev->parent.ofw_node)
    {
        device_id = rt_pci_dev_id(pdev);
    }
    else
    {
        struct rt_ofw_cell_args args;

        for (int index = 0; ; ++index)
        {
            rt_err_t err = rt_ofw_parse_phandle_cells(pdev->parent.ofw_node,
                    "msi-parent", "#msi-cells", index, &args);

            if (err)
            {
                return (int)err;
            }

            if (args.data == its->np)
            {
                device_id = args.args[0];
            }

            rt_ofw_node_put(args.data);

            if ((rt_int32_t)device_id >= 0)
            {
                break;
            }
        }
    }
    if (device_id >= (1 << tbl->size_bits))
    {
        LOG_E("Device ID %x is not supported", device_id);
        return -RT_EINVAL;
    }
    /* Find old map info */
    level = rt_spin_lock_irqsave(&map_lock);
    rt_list_for_each_entry(map_tmp, &map_nodes, list)
    {
        if (map_tmp->device_id == device_id)
        {
            map = map_tmp;
            lpi_base = map->lpi_base - 8192;
            break;
        }
    }
    rt_spin_unlock_irqrestore(&map_lock, level);

    if (!map)
    {
        rt_size_t itt_size;

        if (!(map = rt_calloc(1, sizeof(*map))))
        {
            return -RT_ENOMEM;
        }

        itt_size = tbl->itt_entries * (RT_FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE,
                HWREG64(its->base + GITS_TYPER)) + 1);
        itt_size = rt_max_t(rt_size_t, itt_size, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;

        map->itt = rt_malloc_align(itt_size, ITS_ITT_ALIGN);

        if (!map->itt)
        {
            rt_free(map);
            return -RT_ENOMEM;
        }

        if (tbl->lvl2_bits)
        {
            void *lvl2_dte;
            rt_uint64_t *entry;

            entry = tbl->base;
            entry += device_id / (tbl->page_size / GITS_LVL1_ENTRY_SIZE);

            if (*entry)
            {
                lvl2_dte = (void *)(*entry - PV_OFFSET);
                rt_page_ref_inc(lvl2_dte, tbl->lvl2_bits);
            }
            else
            {
                rt_size_t dte_size;

                lvl2_dte = rt_pages_alloc(tbl->lvl2_bits);

                if (!lvl2_dte)
                {
                    rt_free_align(map->itt);
                    rt_free(map);
                    return -RT_ENOMEM;
                }

                dte_size = rt_page_bits(tbl->lvl2_bits);
                rt_memset(lvl2_dte, 0, dte_size);

                if (!(tbl->val & GITS_BASER_SHARE_MASK_ALL))
                {
                    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, lvl2_dte, dte_size);
                }

                *entry = rt_cpu_to_le64((rt_uint64_t)rt_kmem_v2p(lvl2_dte) | GITS_BASER_VALID);

                if (!(tbl->val & GITS_BASER_SHARE_MASK_ALL))
                {
                    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, entry, sizeof(*entry));
                }

                rt_hw_dsb();
            }

            map->lvl2_dte = lvl2_dte;
        }

        rt_memset(map->itt, 0, itt_size);
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, map->itt, itt_size);
    }
    msi_desc->priv = map;

    /* Alloc the LPI base on the first LPI */
    level = rt_spin_lock_irqsave(&lpis_lock);
    hwirq_index = rt_bitmap_next_clear_bit(lpis_vectors, lpi_base, lpi_nr);

    if (hwirq_index >= lpi_nr)
    {
        irq = -RT_EEMPTY;
        goto _out_lock;
    }

    hwirq = 8192 + hwirq_index;

    parent_irq = ppic->ops->irq_map(ppic, hwirq, RT_IRQ_MODE_EDGE_RISING);
    if (parent_irq < 0)
    {
        irq = parent_irq;
        goto _out_lock;
    }

    irq = rt_pic_config_irq(pic, hwirq_index, hwirq);
    if (irq < 0)
    {
        goto _out_lock;
    }
    pirq = rt_pic_find_irq(pic, hwirq_index);

    pirq->mode = RT_IRQ_MODE_EDGE_RISING;
    rt_pic_cascade(pirq, parent_irq);

    rt_bitmap_set_bit(lpis_vectors, hwirq_index);

_out_lock:
    rt_spin_unlock_irqrestore(&lpis_lock, level);

    if (irq < 0)
    {
        return irq;
    }

    if (map->its)
    {
        rt_ref_get(&map->ref);
    }
    else
    {
        rt_list_init(&map->list);
        rt_ref_init(&map->ref);
        map->its = its;
        map->device_id = device_id;
        map->lpi_base = hwirq;

        level = rt_spin_lock_irqsave(&map_lock);
        rt_list_insert_before(&map_nodes, &map->list);
        rt_spin_unlock_irqrestore(&map_lock, level);
    }

    /* Default to CPU#0 */
    map->cpu_id = 0;
    RT_IRQ_AFFINITY_SET(pirq->affinity, map->cpu_id);

    rt_memset(&cmd, 0, sizeof(cmd));
    its_encode_cmd(&cmd, GITS_CMD_MAPD);
    its_encode_device_id(&cmd, device_id);
    its_encode_size(&cmd, rt_ilog2(tbl->itt_entries) - 1);
    its_encode_itt(&cmd, (rt_uint64_t)rt_kmem_v2p(map->itt));
    its_encode_valid(&cmd, RT_TRUE);
    its_cmd_submit(its, &cmd, map, RT_FALSE);

    rt_memset(&cmd, 0, sizeof(cmd));
    its_encode_cmd(&cmd, GITS_CMD_MAPTI);
    its_encode_device_id(&cmd, device_id);
    its_encode_event_id(&cmd, its_pirq_event_id(its, pirq));
    its_encode_phys_id(&cmd, hwirq);
    its_encode_collection(&cmd, its->collections[map->cpu_id].id);
    its_cmd_submit(its, &cmd, map, RT_TRUE);

    return irq;
}
static void its_map_release(struct rt_ref *r)
{
    rt_ubase_t level;
    struct gicv3_its *its;
    struct its_table *tbl;
    struct its_command cmd;
    struct its_map *map = rt_container_of(r, struct its_map, ref);

    its = map->its;
    tbl = its_baser_type(its, GITS_BASER_TYPE_DEVICE);

    /* Unmap the device */
    rt_memset(&cmd, 0, sizeof(cmd));
    its_encode_cmd(&cmd, GITS_CMD_MAPD);
    its_encode_device_id(&cmd, map->device_id);
    its_encode_size(&cmd, rt_ilog2(tbl->itt_entries) - 1);
    its_encode_itt(&cmd, (rt_uint64_t)rt_kmem_v2p(map->itt));
    its_encode_valid(&cmd, RT_FALSE);
    its_cmd_submit(its, &cmd, map, RT_TRUE);
    level = rt_spin_lock_irqsave(&map_lock);
    /* Drop this map from the global list before freeing it */
    rt_list_remove(&map->list);
    rt_spin_unlock_irqrestore(&map_lock, level);
    if (map->itt)
    {
        rt_free_align(map->itt);
    }

    if (map->lvl2_dte)
    {
        if (rt_page_ref_get(map->lvl2_dte, tbl->lvl2_bits) == 1)
        {
            rt_uint64_t *entry;

            entry = tbl->base + (map->device_id / (tbl->page_size / GITS_LVL1_ENTRY_SIZE));
            *entry = rt_cpu_to_le64(0);

            if (!(tbl->val & GITS_BASER_SHARE_MASK_ALL))
            {
                rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, entry, sizeof(*entry));
            }
        }
        rt_pages_free(map->lvl2_dte, tbl->lvl2_bits);
    }

    rt_free(map);
}
static void gicv3_its_irq_free_msi(struct rt_pic *pic, int irq)
{
    rt_ubase_t level;
    struct its_map *map;
    struct its_command cmd;
    struct rt_pic_irq *pirq;
    struct gicv3_its *its = raw_to_gicv3_its(pic);

    pirq = rt_pic_find_pirq(pic, irq);

    if (!pirq)
    {
        return;
    }

    map = pirq->msi_desc->priv;

    rt_memset(&cmd, 0, sizeof(cmd));
    its_encode_cmd(&cmd, GITS_CMD_DISCARD);
    its_encode_device_id(&cmd, map->device_id);
    its_encode_event_id(&cmd, its_pirq_event_id(its, pirq));
    its_cmd_submit(its, &cmd, map, RT_TRUE);

    rt_pic_uncascade(pirq);

    level = rt_spin_lock_irqsave(&lpis_lock);
    rt_bitmap_clear_bit(lpis_vectors, pirq->hwirq - 8192);
    rt_spin_unlock_irqrestore(&lpis_lock, level);

    rt_ref_put(&map->ref, its_map_release);
}
static rt_err_t gicv3_its_irq_set_state(struct rt_pic *pic, int hwirq, int type, rt_bool_t state)
{
    struct its_map *map;
    struct its_command cmd;
    struct rt_pic_irq *pirq;
    struct gicv3_its *its = raw_to_gicv3_its(pic);

    if (type != RT_IRQ_STATE_PENDING || hwirq > 8192 + lpi_nr)
    {
        return -RT_ENOSYS;
    }

    if (!(pirq = rt_pic_find_irq(pic, hwirq - 8192)))
    {
        return -RT_ENOSYS;
    }

    map = pirq->msi_desc->priv;
    rt_memset(&cmd, 0, sizeof(cmd));

    if (state)
    {
        its_encode_cmd(&cmd, GITS_CMD_INT);
        its_encode_device_id(&cmd, map->device_id);
        its_encode_event_id(&cmd, its_pirq_event_id(its, pirq));
    }
    else
    {
        its_encode_cmd(&cmd, GITS_CMD_CLEAR);
        its_encode_device_id(&cmd, map->device_id);
        its_encode_event_id(&cmd, its_pirq_event_id(its, pirq));
    }

    its_cmd_submit(its, &cmd, map, RT_TRUE);

    return RT_EOK;
}
static const struct rt_pic_ops gicv3_its_ops =
{
    .name = "GICv3-ITS",
    .irq_init = gicv3_its_irq_init,
    .irq_ack = rt_pic_irq_parent_ack,
    .irq_mask = gicv3_its_irq_mask,
    .irq_unmask = gicv3_its_irq_unmask,
    .irq_eoi = rt_pic_irq_parent_eoi,
    .irq_set_priority = gicv3_its_irq_set_priority,
    .irq_set_affinity = gicv3_its_irq_set_affinity,
    .irq_compose_msi_msg = gicv3_its_irq_compose_msi_msg,
    .irq_alloc_msi = gicv3_its_irq_alloc_msi,
    .irq_free_msi = gicv3_its_irq_free_msi,
    .irq_set_state = gicv3_its_irq_set_state,
    .flags = RT_PIC_F_IRQ_ROUTING,
};
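
/*
 * GITS_BASERn.Page_Size is discoverable by write-then-read-back: if the ITS
 * does not support the requested size, the field reads back with a
 * different value. Probe from 64K down to 4K and report what stuck, or
 * -RT_EINVAL if even 4K pages are refused.
 */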
static rt_ssize_t its_baser_page_size(struct gicv3_its *its, struct its_table *tbl)
{
    rt_size_t page_size = 64 * SIZE_KB;

    while (page_size)
    {
        rt_uint64_t val, baser_page_size;
        rt_off_t baser = GITS_BASERn((int)(tbl - its->tbls));

        val = its_readq(its, baser);
        val &= ~GITS_BASER_PAGE_SIZE_MASK;

        switch (page_size)
        {
        case 64 * SIZE_KB:
            baser_page_size = GITS_BASER_PAGE_SIZE_64K;
            break;
        case 16 * SIZE_KB:
            baser_page_size = GITS_BASER_PAGE_SIZE_16K;
            break;
        case 4 * SIZE_KB:
        default:
            baser_page_size = GITS_BASER_PAGE_SIZE_4K;
            break;
        }

        baser_page_size >>= GITS_BASER_PAGE_SIZE_SHIFT;

        val |= RT_FIELD_PREP(GITS_BASER_PAGE_SIZE_MASK, baser_page_size);
        its_writeq(its, baser, val);
        tbl->val = its_readq(its, baser);

        if (RT_FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, tbl->val) == baser_page_size)
        {
            break;
        }

        switch (page_size)
        {
        case 64 * SIZE_KB:
            page_size = 16 * SIZE_KB;
            break;
        case 16 * SIZE_KB:
            page_size = 4 * SIZE_KB;
            break;
        case 4 * SIZE_KB:
        default:
            return -RT_EINVAL;
        }
    }

    return page_size;
}
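
/*
 * Device/collection table setup. When GITS_BASERn accepts the INDIRECT bit,
 * the device table becomes two-level: each 8-byte (GITS_LVL1_ENTRY_SIZE)
 * level-1 entry points to a level-2 page of DTEs, which keeps memory usage
 * proportional to the device IDs actually in use rather than to the full
 * DeviceID space.
 */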
static rt_err_t its_table_init(struct gicv3_its *its)
{
    int inited = 0;
    rt_off_t baser;
    rt_bool_t indirect = RT_FALSE;
    rt_ssize_t page_size;
    rt_size_t pages_nr, alloc_size;
    rt_uint64_t val, type, entry_size, share, cache;
    struct its_table *tbl;

    share = GITS_BASER_InnerShareable;
    cache = GITS_BASER_RaWaWb;

    for (int i = 0; i < RT_ARRAY_SIZE(its->tbls); ++i)
    {
        tbl = &its->tbls[i];

        val = its_readq(its, GITS_BASERn(i));
        type = GITS_BASER_TYPE(val);

        if (type != GITS_BASER_TYPE_DEVICE &&
            type != GITS_BASER_TYPE_COLLECTION)
        {
            continue;
        }

        /*
         * tbl->page_size is unsigned, so check the signed return value
         * for -RT_EINVAL before storing it.
         */
        page_size = its_baser_page_size(its, tbl);

        if (page_size < 0)
        {
            continue;
        }
        tbl->page_size = page_size;
        baser = GITS_BASERn((int)(tbl - its->tbls));
        entry_size = GITS_BASER_ENTRY_SIZE(val);

        if (type == GITS_BASER_TYPE_DEVICE)
        {
            tbl->size_bits = its_device_id_bits(its);
            LOG_D("Device Max IDs = %lu", 1UL << tbl->size_bits);

            /* For MSI-X */
            tbl->itt_entries = 2048;
            while (MAX_HANDLERS / tbl->itt_entries < (1 << tbl->size_bits) &&
                tbl->itt_entries > 32)
            {
                tbl->itt_entries >>= 1;
            }
        }

        its_writeq(its, baser, tbl->val | GITS_BASER_INDIRECT);
        tbl->val = its_readq(its, baser);

        indirect = !!(tbl->val & GITS_BASER_INDIRECT);

        if (indirect && type == GITS_BASER_TYPE_DEVICE)
        {
            /* The size of the level 2 table is equal to the ITS page size */
            tbl->lvl2_bits = tbl->size_bits - rt_ilog2(tbl->page_size / (int)entry_size);

            /* Get level 1 entries count */
            alloc_size = (1 << tbl->size_bits) / (tbl->page_size / entry_size);
            alloc_size *= GITS_LVL1_ENTRY_SIZE;
        }
        else
        {
            alloc_size = (1 << tbl->size_bits) * entry_size;
            indirect = RT_FALSE;
        }

        tbl->base = rt_malloc_align(alloc_size, tbl->page_size);
        pages_nr = alloc_size / tbl->page_size;

        if (!tbl->base)
        {
            return -RT_ENOMEM;
        }

        if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
        {
            cache = GITS_BASER_nCnB;
        }

        if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE)
        {
            cache = GITS_BASER_nC;
            share = 0;
        }

        val = ((rt_ubase_t)rt_kmem_v2p(tbl->base) |
                (type << GITS_BASER_TYPE_SHIFT) |
                ((entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
                (pages_nr << GITS_BASER_PAGES_SHIFT) |
                cache | share | GITS_BASER_VALID);
        val |= indirect ? GITS_BASER_INDIRECT : 0;

        switch (tbl->page_size)
        {
        case 4 * SIZE_KB:
            val |= GITS_BASER_PAGE_SIZE_4K;
            break;
        case 16 * SIZE_KB:
            val |= GITS_BASER_PAGE_SIZE_16K;
            break;
        case 64 * SIZE_KB:
            val |= GITS_BASER_PAGE_SIZE_64K;
            break;
        }

        its_writeq(its, baser, val);
        tbl->val = its_readq(its, baser);

        rt_memset(tbl->base, 0, alloc_size);
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, tbl->base, alloc_size);

        cache = tbl->val & GITS_BASER_INNER_MASK_ALL;
        share = tbl->val & GITS_BASER_SHARE_MASK_ALL;

        ++inited;
    }

    return inited == 2 ? RT_EOK : -RT_ENOSYS;
}
static rt_err_t its_cmd_queue_init(struct gicv3_its *its)
{
    void *cmd_phy_base;
    rt_uint64_t baser, tmp;

    its->cmd_base = rt_malloc_align(ITS_CMD_QUEUE_SIZE, ITS_CMD_QUEUE_ALIGN);

    if (!its->cmd_base)
    {
        return -RT_ENOMEM;
    }

    its->cmd_idx = 0;
    rt_memset(its->cmd_base, 0, ITS_CMD_QUEUE_SIZE);

    cmd_phy_base = rt_kmem_v2p(its->cmd_base);

    baser = GITS_CBASER_VALID | GITS_CBASER_RaWaWb | GITS_CBASER_InnerShareable |
            ((rt_uint64_t)cmd_phy_base) | (ITS_CMD_QUEUE_SIZE / (4 * SIZE_KB) - 1);

    its_writeq(its, GITS_CBASER, baser);
    tmp = its_readq(its, GITS_CBASER);

    if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE)
    {
        tmp &= ~GITS_CBASER_SHARE_MASK_ALL;
    }

    if ((tmp ^ baser) & GITS_CBASER_SHARE_MASK_ALL)
    {
        if (!(tmp & GITS_CBASER_SHARE_MASK_ALL))
        {
            /* The HW reports non-shareable, we must remove the cacheability attributes as well */
            baser &= ~(GITS_CBASER_SHARE_MASK_ALL | GITS_CBASER_INNER_MASK_ALL);
            baser |= GITS_CBASER_nC;

            its_writeq(its, GITS_CBASER, baser);
        }

        LOG_I("Using cache flushing for CMD queue");
        its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;

        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, its->cmd_base, ITS_CMD_QUEUE_SIZE);
    }

    /* Get the next command from the start of the buffer */
    its_writeq(its, GITS_CWRITER, 0);

    return RT_EOK;
}
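
/*
 * The LPI tables are global and shared by every redistributor: the
 * configuration table holds one byte per LPI (priority in bits [7:2],
 * group and enable in the low bits, matching the GITS_LPI_CFG_* flags),
 * and the pending table holds one bit per LPI, hence the size / 8 below.
 */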
static rt_err_t its_lpi_table_init(struct gicv3 *gic)
{
    rt_size_t lpi_table_size, lpi_pending_table_size;
    rt_uint32_t numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic->gicd_typer);

    if (HWREG32(gicr_rd_base_percpu(gic) + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS)
    {
        gic->redist_flags |= RDIST_FLAGS_RD_TABLES_PREALLOCATED;
        gic->redist_flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;

        LOG_I("Using preallocated redistributor tables");
    }

    lpi_id_bits = GICD_TYPER_ID_BITS(gic->gicd_typer);

    if (gic->redist_flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED)
    {
        rt_uint64_t val = HWREG64(gicr_rd_base_percpu(gic) + GICR_PROPBASER);

        lpi_id_bits = rt_min_t(rt_uint32_t, lpi_id_bits, (val & GICR_PROPBASER_IDBITS_MASK) + 1);
    }

    lpi_nr = rt_min_t(rt_size_t, (1UL << lpi_id_bits) - 8192, gic->lpi_nr);
    lpi_id_bits = __rt_clz(lpi_nr + 8192);

    if (numlpis > 2 && numlpis > lpi_nr)
    {
        lpi_nr = numlpis;
        LOG_W("Using hypervisor restricted LPI range [%u]", lpi_nr);
    }

    gic->lpi_nr = lpi_nr;

    /* An LPI Configuration table entry is 1 byte; the Pending table needs N / 8 bytes. */
    lpi_table_size = RT_GENMASK(lpi_id_bits, 0);
    lpi_pending_table_size = lpi_table_size / 8;

    lpi_table = rt_malloc_align(lpi_table_size, ITS_LPI_CONFIG_TABLE_ALIGN);
    lpi_pending_table = rt_malloc_align(lpi_pending_table_size, ITS_LPI_PENDING_TABLE_ALIGN);
    lpis_vectors = rt_calloc(1, RT_BITMAP_LEN(lpi_nr) * sizeof(rt_bitmap_t));

    if (!lpi_table || !lpi_pending_table || !lpis_vectors)
    {
        if (lpi_table)
        {
            rt_free_align(lpi_table);
        }
        if (lpi_pending_table)
        {
            rt_free_align(lpi_pending_table);
        }
        if (lpis_vectors)
        {
            /* lpis_vectors was allocated with rt_calloc, so pair it with rt_free */
            rt_free(lpis_vectors);
        }
        lpi_table = RT_NULL;
        lpi_pending_table = RT_NULL;
        lpis_vectors = RT_NULL;

        return -RT_ENOMEM;
    }

    /* Set the default configuration */
    rt_memset(lpi_table, ITS_LPI_CONFIG_PROP_DEFAULT_PRIO | GITS_LPI_CFG_GROUP1, lpi_table_size);

    /*
     * We should make a full mask size with lpi_id_bits,
     * otherwise an 'undefined' LPI will occur.
     */
    rt_memset(lpi_pending_table, 0, lpi_pending_table_size);

    /* Flush the table to memory */
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, lpi_table, lpi_table_size);
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, lpi_pending_table, lpi_pending_table_size);

    LOG_D("ITS: Allocator initialized for %u LPIs", lpi_nr);

    return RT_EOK;
}
static void its_init_fail(struct gicv3_its *its)
{
    if (its->base)
    {
        rt_iounmap(its->base);
    }

    if (its->cmd_base)
    {
        rt_free_align(its->cmd_base);
    }

    for (int i = 0; i < RT_ARRAY_SIZE(its->tbls); ++i)
    {
        struct its_table *tbl = &its->tbls[i];

        if (tbl->base)
        {
            rt_free_align(tbl->base);
        }
    }

    rt_list_remove(&its->list);
    rt_free(its);
}
static rt_err_t its_quirk_cavium_22375(void *data)
{
    struct gicv3_its *its = data;

    its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;

    return RT_EOK;
}

static rt_err_t its_enable_rockchip(void *data)
{
    struct gicv3_its *its = data;
    struct gicv3 *gic = its->gic;

    if (!rt_ofw_machine_is_compatible("rockchip,rk3566") &&
        !rt_ofw_machine_is_compatible("rockchip,rk3567") &&
        !rt_ofw_machine_is_compatible("rockchip,rk3568") &&
        !rt_ofw_machine_is_compatible("rockchip,rk3588") &&
        !rt_ofw_machine_is_compatible("rockchip,rk3588s"))
    {
        return -RT_EINVAL;
    }

    its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE;
    gic->redist_flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE;

    return RT_EOK;
}

static rt_err_t its_set_non_coherent(void *data)
{
    struct gicv3_its *its = data;

    if (!rt_ofw_prop_read_bool(its->np, "dma-noncoherent"))
    {
        return -RT_EINVAL;
    }

    its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE;

    return RT_EOK;
}
static const struct gic_quirk _its_quirks[] =
{
    {
        .desc = "ITS: Cavium ThunderX errata: 22375, 24313",
        .iidr = 0xa100034c,
        .iidr_mask = 0xffff0fff,
        .init = its_quirk_cavium_22375,
    },
    {
        .desc = "ITS: Rockchip erratum RK3566 ~ RK3588",
        .iidr = 0x0201743b,
        .iidr_mask = 0xffffffff,
        .init = its_enable_rockchip,
    },
    {
        .desc = "ITS: non-coherent attribute",
        .compatible = "arm,gic-v3-its",
        .init = its_set_non_coherent,
    },
    { /* sentinel */ }
};

static const struct rt_ofw_node_id gicv3_its_ofw_match[] =
{
    { .compatible = "arm,gic-v3-its" },
    { /* sentinel */ }
};
rt_err_t gicv3_its_ofw_probe(struct rt_ofw_node *np, const struct rt_ofw_node_id *id)
{
    rt_err_t err = -RT_EEMPTY;
    struct rt_ofw_node *its_np;
    struct gicv3_its *its, *its_next;

    rt_ofw_foreach_available_child_node(np, its_np)
    {
        if (!rt_ofw_node_match(its_np, gicv3_its_ofw_match))
        {
            continue;
        }

        if (!rt_ofw_prop_read_bool(its_np, "msi-controller"))
        {
            continue;
        }

        if (!(its = rt_calloc(1, sizeof(struct gicv3_its))))
        {
            rt_ofw_node_put(its_np);
            err = -RT_ENOMEM;
            goto _free_all;
        }

        /* Init the list node now so its_init_fail() can safely unlink it */
        rt_list_init(&its->list);

        its->base = rt_ofw_iomap(its_np, 0);

        if (!its->base)
        {
            LOG_E("%s: IO map failed", rt_ofw_node_full_name(its_np));
            its_init_fail(its);
            continue;
        }

        /*
         * Make sure ALL the ITS are reset before we probe any,
         * as they may be sharing memory
         */
        for (int i = 0; i < GITS_BASER_NR_REGS; ++i)
        {
            its_writeq(its, GITS_BASER + (i << 3), 0);
        }

        its->np = its_np;
        rt_list_insert_before(&its_nodes, &its->list);
    }

    if (!rt_list_isempty(&its_nodes))
    {
        if ((err = its_lpi_table_init(rt_ofw_data(np))))
        {
            goto _free_all;
        }
    }

    rt_list_for_each_entry_safe(its, its_next, &its_nodes, list)
    {
        rt_uint32_t ctlr;

        its->base_phy = rt_kmem_v2p(its->base);
        its->gic = rt_ofw_data(np);

        gic_common_init_quirk_hw(HWREG32(its->base + GITS_IIDR), _its_quirks, its);
        gic_common_init_quirk_ofw(its->np, _its_quirks, its);

        if ((err = its_cmd_queue_init(its)))
        {
            goto _fail;
        }
        rt_spin_lock_init(&its->cmd_lock);

        if ((err = its_table_init(its)))
        {
            goto _fail;
        }

        for (int i = 0; i < RT_CPUS_NR; ++i)
        {
            its->collections[i].target_address = ~0ULL;
        }

        ctlr = its_readl(its, GITS_CTLR);
        ctlr |= GITS_CTLR_ENABLE;
        its_writel(its, GITS_CTLR, ctlr);

        its->parent.priv_data = its;
        its->parent.ops = &gicv3_its_ops;

        rt_pic_linear_irq(&its->parent, its->gic->lpi_nr);
        rt_pic_user_extends(&its->parent);

        its_np = its->np;
        rt_ofw_data(its_np) = &its->parent;
        rt_ofw_node_set_flag(its_np, RT_OFW_F_READLY);

        continue;

    _fail:
        its_init_fail(its);

        if (err == -RT_ENOMEM)
        {
            break;
        }
    }

    if (rt_list_isempty(&its_nodes) && lpis_vectors)
    {
        rt_free(lpis_vectors);
        rt_free_align(lpi_table);
        rt_free_align(lpi_pending_table);
        lpis_vectors = RT_NULL;
    }

    return err;
_free_all:
    rt_list_for_each_entry_safe(its, its_next, &its_nodes, list)
    {
        /* Unlink before freeing to avoid touching freed memory */
        rt_list_remove(&its->list);
        rt_free(its);
    }

    return err;
}