pic.c

/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2022-08-24     GuEe-GUI     first version
 */

#include <rthw.h>
#include <rtthread.h>

#define DBG_TAG "rtdm.pic"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <drivers/pic.h>
#include <ktime.h>

struct irq_traps
{
    rt_list_t list;

    void *data;
    rt_bool_t (*handler)(void *);
};

static int _ipi_hash[] =
{
#ifdef RT_USING_SMP
    [RT_SCHEDULE_IPI] = RT_SCHEDULE_IPI,
    [RT_STOP_IPI] = RT_STOP_IPI,
#endif
};

/* reserved ipi */
static int _pirq_hash_idx = RT_ARRAY_SIZE(_ipi_hash);
static struct rt_pic_irq _pirq_hash[MAX_HANDLERS] =
{
    [0 ... MAX_HANDLERS - 1] =
    {
        .irq = -1,
        .hwirq = -1,
        .mode = RT_IRQ_MODE_NONE,
        .priority = RT_UINT32_MAX,
        .rw_lock = { },
    }
};

static struct rt_spinlock _pic_lock = { };
static rt_size_t _pic_name_max = sizeof("PIC");
static rt_list_t _pic_nodes = RT_LIST_OBJECT_INIT(_pic_nodes);
static rt_list_t _traps_nodes = RT_LIST_OBJECT_INIT(_traps_nodes);

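/*
 * The first RT_ARRAY_SIZE(_ipi_hash) slots of _pirq_hash are reserved for
 * IPIs; _pirq_hash_idx marks the next free slot handed out linearly by
 * rt_pic_linear_irq(). irq2pirq() below maps a global IRQ number back to its
 * descriptor and treats never-configured slots (irq < 0) as invalid.
 */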
static struct rt_pic_irq *irq2pirq(int irq)
{
    struct rt_pic_irq *pirq = RT_NULL;

    if ((irq >= 0) && (irq < MAX_HANDLERS))
    {
        pirq = &_pirq_hash[irq];

        if (pirq->irq < 0)
        {
            pirq = RT_NULL;
        }
    }

    if (!pirq)
    {
        LOG_E("irq = %d is invalid", irq);
    }

    return pirq;
}

static void append_pic(struct rt_pic *pic)
{
    int pic_name_len = rt_strlen(pic->ops->name);

    rt_list_insert_before(&_pic_nodes, &pic->list);

    if (pic_name_len > _pic_name_max)
    {
        _pic_name_max = pic_name_len;
    }
}

void rt_pic_default_name(struct rt_pic *pic)
{
    if (pic)
    {
#if RT_NAME_MAX > 0
        rt_strncpy(pic->parent.name, "PIC", RT_NAME_MAX - 1);
        pic->parent.name[RT_NAME_MAX - 1] = '\0';
#else
        pic->parent.name = "PIC";
#endif
    }
}

struct rt_pic *rt_pic_dynamic_cast(void *ptr)
{
    struct rt_pic *pic = RT_NULL, *tmp = RT_NULL;

    if (ptr)
    {
        struct rt_object *obj = ptr;

        if (obj->type == RT_Object_Class_Unknown)
        {
            tmp = (void *)obj;
        }
        else if (obj->type == RT_Object_Class_Device)
        {
            tmp = (void *)obj + sizeof(struct rt_device);
        }
        else
        {
            tmp = (void *)obj + sizeof(struct rt_object);
        }

        if (tmp && !rt_strcmp(tmp->parent.name, "PIC"))
        {
            pic = tmp;
        }
    }

    return pic;
}

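/*
 * rt_pic_linear_irq() reserves a contiguous block of `irq_nr` global IRQ
 * numbers for a controller and points pic->pirqs at the matching descriptors;
 * rt_pic_config_irq()/rt_pic_config_ipi() then bind each slot to a hardware
 * IRQ. Illustrative sketch only: the controller name, ops object and the 1:1
 * hwirq mapping below are hypothetical, and the ops type is assumed to be
 * struct rt_pic_ops from drivers/pic.h.
 *
 *     static struct rt_pic_ops demo_ops = { .name = "DEMO-INTC" };
 *     static struct rt_pic demo_pic = { .ops = &demo_ops };
 *
 *     if (rt_pic_linear_irq(&demo_pic, 32) == RT_EOK)
 *     {
 *         for (int i = 0; i < 32; ++i)
 *         {
 *             rt_pic_config_irq(&demo_pic, i, i);
 *         }
 *     }
 */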
rt_err_t rt_pic_linear_irq(struct rt_pic *pic, rt_size_t irq_nr)
{
    rt_err_t err = RT_EOK;

    if (pic && pic->ops && pic->ops->name)
    {
        rt_ubase_t level = rt_spin_lock_irqsave(&_pic_lock);

        if (_pirq_hash_idx + irq_nr <= RT_ARRAY_SIZE(_pirq_hash))
        {
            rt_list_init(&pic->list);

            rt_pic_default_name(pic);
            pic->parent.type = RT_Object_Class_Unknown;

            pic->irq_start = _pirq_hash_idx;
            pic->irq_nr = irq_nr;
            pic->pirqs = &_pirq_hash[_pirq_hash_idx];

            _pirq_hash_idx += irq_nr;

            append_pic(pic);

            LOG_D("%s alloc irqs ranges [%d, %d]", pic->ops->name,
                    pic->irq_start, pic->irq_start + pic->irq_nr);
        }
        else
        {
            LOG_E("%s alloc %d irqs is overflow", pic->ops->name, irq_nr);

            err = -RT_EEMPTY;
        }

        rt_spin_unlock_irqrestore(&_pic_lock, level);
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}

static void config_pirq(struct rt_pic *pic, struct rt_pic_irq *pirq, int irq, int hwirq)
{
    rt_ubase_t level = rt_spin_lock_irqsave(&pirq->rw_lock);

    pirq->irq = irq;
    pirq->hwirq = hwirq;
    pirq->pic = pic;

    rt_list_init(&pirq->list);
    rt_list_init(&pirq->children_nodes);
    rt_list_init(&pirq->isr.list);

    rt_spin_unlock_irqrestore(&pirq->rw_lock, level);
}

int rt_pic_config_ipi(struct rt_pic *pic, int ipi_index, int hwirq)
{
    int ipi = ipi_index;
    struct rt_pic_irq *pirq;

    if (pic && ipi < RT_ARRAY_SIZE(_ipi_hash) && hwirq >= 0 && pic->ops->irq_send_ipi)
    {
        pirq = &_pirq_hash[ipi];
        config_pirq(pic, pirq, ipi, hwirq);

        for (int cpuid = 0; cpuid < RT_CPUS_NR; ++cpuid)
        {
            RT_IRQ_AFFINITY_SET(pirq->affinity, cpuid);
        }

        LOG_D("%s config %s %d to hwirq %d", pic->ops->name, "ipi", ipi, hwirq);
    }
    else
    {
        ipi = -RT_EINVAL;
    }

    return ipi;
}

int rt_pic_config_irq(struct rt_pic *pic, int irq_index, int hwirq)
{
    int irq;

    if (pic && hwirq >= 0)
    {
        irq = pic->irq_start + irq_index;

        if (irq >= 0 && irq < MAX_HANDLERS)
        {
            config_pirq(pic, &_pirq_hash[irq], irq, hwirq);

            LOG_D("%s config %s %d to hwirq %d", pic->ops->name, "irq", irq, hwirq);
        }
        else
        {
            irq = -RT_ERROR;
        }
    }
    else
    {
        irq = -RT_EINVAL;
    }

    return irq;
}

struct rt_pic_irq *rt_pic_find_ipi(struct rt_pic *pic, int ipi_index)
{
    struct rt_pic_irq *pirq = &_pirq_hash[ipi_index];

    RT_ASSERT(ipi_index < RT_ARRAY_SIZE(_ipi_hash));
    RT_ASSERT(pirq->pic == pic);

    return pirq;
}

struct rt_pic_irq *rt_pic_find_pirq(struct rt_pic *pic, int irq)
{
    /* valid global IRQs for this pic are [irq_start, irq_start + irq_nr) */
    if (pic && irq >= pic->irq_start && irq < pic->irq_start + pic->irq_nr)
    {
        return &pic->pirqs[irq - pic->irq_start];
    }

    return RT_NULL;
}

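/*
 * rt_pic_cascade() links `pirq` under the descriptor of `parent_irq`, so a
 * secondary controller's input inherits the parent's priority and affinity;
 * when the child's PIC advertises RT_PIC_F_IRQ_ROUTING the child is also put
 * on the parent's children list and dispatched from rt_pic_handle_isr().
 * rt_pic_uncascade() undoes the link.
 */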
rt_err_t rt_pic_cascade(struct rt_pic_irq *pirq, int parent_irq)
{
    rt_err_t err = RT_EOK;

    if (pirq && !pirq->parent && parent_irq >= 0)
    {
        struct rt_pic_irq *parent;

        rt_spin_lock(&pirq->rw_lock);

        parent = irq2pirq(parent_irq);

        if (parent)
        {
            pirq->parent = parent;
            pirq->priority = parent->priority;
            rt_memcpy(&pirq->affinity, &parent->affinity, sizeof(pirq->affinity));
        }

        rt_spin_unlock(&pirq->rw_lock);

        if (parent && pirq->pic->ops->flags & RT_PIC_F_IRQ_ROUTING)
        {
            rt_spin_lock(&parent->rw_lock);
            rt_list_insert_before(&parent->children_nodes, &pirq->list);
            rt_spin_unlock(&parent->rw_lock);
        }
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}

rt_err_t rt_pic_uncascade(struct rt_pic_irq *pirq)
{
    rt_err_t err = RT_EOK;

    if (pirq && pirq->parent)
    {
        struct rt_pic_irq *parent;

        rt_spin_lock(&pirq->rw_lock);

        parent = pirq->parent;
        pirq->parent = RT_NULL;

        rt_spin_unlock(&pirq->rw_lock);

        if (parent && pirq->pic->ops->flags & RT_PIC_F_IRQ_ROUTING)
        {
            rt_spin_lock(&parent->rw_lock);
            rt_list_remove(&pirq->list);
            rt_spin_unlock(&parent->rw_lock);
        }
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}

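/*
 * rt_pic_attach_irq() installs `handler` on an already-configured IRQ: the
 * first handler is stored in the descriptor itself and any later ones are
 * chained behind it, all sharing the line; `uid` doubles as the token that
 * rt_pic_detach_irq() matches on. Illustrative sketch only (the device type,
 * IRQ number and flags value are hypothetical):
 *
 *     static void demo_isr(int irq, void *param)
 *     {
 *         struct demo_device *dev = param;
 *
 *         (void)dev;
 *     }
 *
 *     rt_pic_attach_irq(irq, demo_isr, dev, "demo", 0);
 *     rt_pic_irq_enable(irq);
 */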
rt_err_t rt_pic_attach_irq(int irq, rt_isr_handler_t handler, void *uid, const char *name, int flags)
{
    rt_err_t err = -RT_EINVAL;
    struct rt_pic_irq *pirq;

    if (handler && name && (pirq = irq2pirq(irq)))
    {
        struct rt_pic_isr *isr = RT_NULL;
        rt_ubase_t level = rt_spin_lock_irqsave(&pirq->rw_lock);

        err = RT_EOK;

        if (!pirq->isr.action.handler)
        {
            /* first attach */
            isr = &pirq->isr;
            rt_list_init(&isr->list);
        }
        else
        {
            rt_spin_unlock_irqrestore(&pirq->rw_lock, level);

            if ((isr = rt_malloc(sizeof(*isr))))
            {
                rt_list_init(&isr->list);

                level = rt_spin_lock_irqsave(&pirq->rw_lock);

                rt_list_insert_after(&pirq->isr.list, &isr->list);
            }
            else
            {
                LOG_E("No memory to save '%s' isr", name);
                err = -RT_ERROR;
            }
        }

        if (!err)
        {
            isr->flags = flags;
            isr->action.handler = handler;
            isr->action.param = uid;
#ifdef RT_USING_INTERRUPT_INFO
            isr->action.counter = 0;
            rt_strncpy(isr->action.name, name, RT_NAME_MAX - 1);
            isr->action.name[RT_NAME_MAX - 1] = '\0';
#ifdef RT_USING_SMP
            rt_memset(isr->action.cpu_counter, 0, sizeof(isr->action.cpu_counter));
#endif
#endif
            rt_spin_unlock_irqrestore(&pirq->rw_lock, level);
        }
    }

    return err;
}

rt_err_t rt_pic_detach_irq(int irq, void *uid)
{
    rt_err_t err = -RT_EINVAL;
    struct rt_pic_irq *pirq = irq2pirq(irq);

    if (pirq)
    {
        rt_bool_t will_free = RT_FALSE;
        struct rt_pic_isr *isr = RT_NULL;
        rt_ubase_t level = rt_spin_lock_irqsave(&pirq->rw_lock);

        isr = &pirq->isr;

        if (isr->action.param == uid)
        {
            if (rt_list_isempty(&isr->list))
            {
                isr->action.handler = RT_NULL;
                isr->action.param = RT_NULL;
            }
            else
            {
                struct rt_pic_isr *next_isr = rt_list_first_entry(&isr->list, struct rt_pic_isr, list);

                rt_list_remove(&next_isr->list);

                isr->action.handler = next_isr->action.handler;
                isr->action.param = next_isr->action.param;
#ifdef RT_USING_INTERRUPT_INFO
                isr->action.counter = next_isr->action.counter;
                rt_strncpy(isr->action.name, next_isr->action.name, RT_NAME_MAX);
#ifdef RT_USING_SMP
                rt_memcpy(isr->action.cpu_counter, next_isr->action.cpu_counter, sizeof(next_isr->action.cpu_counter));
#endif
#endif
                isr = next_isr;

                will_free = RT_TRUE;
            }

            err = RT_EOK;
        }
        else
        {
            rt_list_for_each_entry(isr, &pirq->isr.list, list)
            {
                if (isr->action.param == uid)
                {
                    err = RT_EOK;
                    will_free = RT_TRUE;

                    rt_list_remove(&isr->list);
                    break;
                }
            }
        }

        rt_spin_unlock_irqrestore(&pirq->rw_lock, level);

        if (will_free)
        {
            rt_free(isr);
        }
    }

    return err;
}

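/*
 * "Traps" are last-resort hooks: rt_pic_do_traps() walks the handlers
 * registered by rt_pic_add_traps() in registration order and returns RT_EOK
 * at the first one that claims the event by returning RT_TRUE, otherwise
 * -RT_ERROR. A typical caller is the architecture interrupt entry offering
 * an interrupt that no PIC claimed, though this file does not mandate that.
 */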
rt_err_t rt_pic_add_traps(rt_bool_t (*handler)(void *), void *data)
{
    rt_err_t err = -RT_EINVAL;

    if (handler)
    {
        struct irq_traps *traps = rt_malloc(sizeof(*traps));

        if (traps)
        {
            rt_ubase_t level = rt_hw_interrupt_disable();

            rt_list_init(&traps->list);

            traps->data = data;
            traps->handler = handler;

            rt_list_insert_before(&_traps_nodes, &traps->list);
            err = RT_EOK;

            rt_hw_interrupt_enable(level);
        }
        else
        {
            LOG_E("No memory to save '%p' handler", handler);
            err = -RT_ENOMEM;
        }
    }

    return err;
}

rt_err_t rt_pic_do_traps(void)
{
    rt_err_t err = -RT_ERROR;
    struct irq_traps *traps;

    rt_list_for_each_entry(traps, &_traps_nodes, list)
    {
        if (traps->handler(traps->data))
        {
            err = RT_EOK;
            break;
        }
    }

    return err;
}

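/*
 * rt_pic_handle_isr() dispatches one decoded interrupt: cascaded children are
 * ack'ed, handled and eoi'ed first, then the primary action and every chained
 * action attached to this line run in order (counters are updated under
 * RT_USING_INTERRUPT_INFO, timing under RT_USING_PIC_STATISTICS).
 *
 * Illustrative sketch only of a caller, e.g. a controller's top-level handler
 * after it has decoded `irq` (demo_pic and irq are hypothetical):
 *
 *     struct rt_pic_irq *pirq = rt_pic_find_pirq(&demo_pic, irq);
 *
 *     if (pirq)
 *     {
 *         rt_pic_irq_ack(pirq->irq);
 *         rt_pic_handle_isr(pirq);
 *         rt_pic_irq_eoi(pirq->irq);
 *     }
 */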
rt_err_t rt_pic_handle_isr(struct rt_pic_irq *pirq)
{
    rt_err_t err = -RT_EEMPTY;
    rt_list_t *handler_nodes;
    struct rt_irq_desc *action;
#ifdef RT_USING_PIC_STATISTICS
    struct timespec ts;
    rt_ubase_t irq_time_ns;
    rt_ubase_t current_irq_begin;
#endif

    RT_ASSERT(pirq != RT_NULL);
    RT_ASSERT(pirq->pic != RT_NULL);

#ifdef RT_USING_PIC_STATISTICS
    rt_ktime_boottime_get_ns(&ts);
    current_irq_begin = ts.tv_sec * (1000UL * 1000 * 1000) + ts.tv_nsec;
#endif

    handler_nodes = &pirq->isr.list;
    action = &pirq->isr.action;

    if (!rt_list_isempty(&pirq->children_nodes))
    {
        struct rt_pic_irq *child;

        rt_list_for_each_entry(child, &pirq->children_nodes, list)
        {
            rt_pic_irq_ack(child->irq);

            err = rt_pic_handle_isr(child);

            rt_pic_irq_eoi(child->irq);
        }
    }

    if (action->handler)
    {
        action->handler(pirq->irq, action->param);
#ifdef RT_USING_INTERRUPT_INFO
        action->counter++;
#ifdef RT_USING_SMP
        action->cpu_counter[rt_hw_cpu_id()]++;
#endif
#endif

        if (!rt_list_isempty(handler_nodes))
        {
            struct rt_pic_isr *isr;

            rt_list_for_each_entry(isr, handler_nodes, list)
            {
                action = &isr->action;

                RT_ASSERT(action->handler != RT_NULL);

                action->handler(pirq->irq, action->param);
#ifdef RT_USING_INTERRUPT_INFO
                action->counter++;
#ifdef RT_USING_SMP
                action->cpu_counter[rt_hw_cpu_id()]++;
#endif
#endif
            }
        }

        err = RT_EOK;
    }

#ifdef RT_USING_PIC_STATISTICS
    rt_ktime_boottime_get_ns(&ts);
    irq_time_ns = ts.tv_sec * (1000UL * 1000 * 1000) + ts.tv_nsec - current_irq_begin;
    pirq->stat.sum_irq_time_ns += irq_time_ns;
    if (irq_time_ns < pirq->stat.min_irq_time_ns || pirq->stat.min_irq_time_ns == 0)
    {
        pirq->stat.min_irq_time_ns = irq_time_ns;
    }
    if (irq_time_ns > pirq->stat.max_irq_time_ns)
    {
        pirq->stat.max_irq_time_ns = irq_time_ns;
    }
#endif

    return err;
}

rt_weak rt_err_t rt_pic_user_extends(struct rt_pic *pic)
{
    return -RT_ENOSYS;
}

rt_err_t rt_pic_irq_init(void)
{
    rt_err_t err = RT_EOK;
    struct rt_pic *pic;

    rt_list_for_each_entry(pic, &_pic_nodes, list)
    {
        if (pic->ops->irq_init)
        {
            err = pic->ops->irq_init(pic);

            if (err)
            {
                LOG_E("PIC = %s init fail", pic->ops->name);
                break;
            }
        }
    }

    return err;
}

rt_err_t rt_pic_irq_finit(void)
{
    rt_err_t err = RT_EOK;
    struct rt_pic *pic;

    rt_list_for_each_entry(pic, &_pic_nodes, list)
    {
        if (pic->ops->irq_finit)
        {
            err = pic->ops->irq_finit(pic);

            if (err)
            {
                LOG_E("PIC = %s finit fail", pic->ops->name);
                break;
            }
        }
    }

    return err;
}

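/*
 * The rt_pic_irq_*() wrappers below resolve the global IRQ number, take the
 * descriptor's spinlock and delegate to the owning PIC's optional callback;
 * a missing callback is simply a no-op.
 */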
void rt_pic_irq_enable(int irq)
{
    struct rt_pic_irq *pirq = irq2pirq(irq);

    RT_ASSERT(pirq != RT_NULL);

    rt_hw_spin_lock(&pirq->rw_lock.lock);

    if (pirq->pic->ops->irq_enable)
    {
        pirq->pic->ops->irq_enable(pirq);
    }

    rt_hw_spin_unlock(&pirq->rw_lock.lock);
}

void rt_pic_irq_disable(int irq)
{
    struct rt_pic_irq *pirq = irq2pirq(irq);

    RT_ASSERT(pirq != RT_NULL);

    rt_hw_spin_lock(&pirq->rw_lock.lock);

    if (pirq->pic->ops->irq_disable)
    {
        pirq->pic->ops->irq_disable(pirq);
    }

    rt_hw_spin_unlock(&pirq->rw_lock.lock);
}

void rt_pic_irq_ack(int irq)
{
    struct rt_pic_irq *pirq = irq2pirq(irq);

    RT_ASSERT(pirq != RT_NULL);

    rt_hw_spin_lock(&pirq->rw_lock.lock);

    if (pirq->pic->ops->irq_ack)
    {
        pirq->pic->ops->irq_ack(pirq);
    }

    rt_hw_spin_unlock(&pirq->rw_lock.lock);
}

void rt_pic_irq_mask(int irq)
{
    struct rt_pic_irq *pirq = irq2pirq(irq);

    RT_ASSERT(pirq != RT_NULL);

    rt_hw_spin_lock(&pirq->rw_lock.lock);

    if (pirq->pic->ops->irq_mask)
    {
        pirq->pic->ops->irq_mask(pirq);
    }

    rt_hw_spin_unlock(&pirq->rw_lock.lock);
}

void rt_pic_irq_unmask(int irq)
{
    struct rt_pic_irq *pirq = irq2pirq(irq);

    RT_ASSERT(pirq != RT_NULL);

    rt_hw_spin_lock(&pirq->rw_lock.lock);

    if (pirq->pic->ops->irq_unmask)
    {
        pirq->pic->ops->irq_unmask(pirq);
    }

    rt_hw_spin_unlock(&pirq->rw_lock.lock);
}

void rt_pic_irq_eoi(int irq)
{
    struct rt_pic_irq *pirq = irq2pirq(irq);

    RT_ASSERT(pirq != RT_NULL);

    rt_hw_spin_lock(&pirq->rw_lock.lock);

    if (pirq->pic->ops->irq_eoi)
    {
        pirq->pic->ops->irq_eoi(pirq);
    }

    rt_hw_spin_unlock(&pirq->rw_lock.lock);
}

rt_err_t rt_pic_irq_set_priority(int irq, rt_uint32_t priority)
{
    rt_err_t err = -RT_EINVAL;
    struct rt_pic_irq *pirq = irq2pirq(irq);

    if (pirq)
    {
        rt_hw_spin_lock(&pirq->rw_lock.lock);

        if (pirq->pic->ops->irq_set_priority)
        {
            err = pirq->pic->ops->irq_set_priority(pirq, priority);

            if (!err)
            {
                pirq->priority = priority;
            }
        }
        else
        {
            err = -RT_ENOSYS;
        }

        rt_hw_spin_unlock(&pirq->rw_lock.lock);
    }

    return err;
}

rt_uint32_t rt_pic_irq_get_priority(int irq)
{
    rt_uint32_t priority = RT_UINT32_MAX;
    struct rt_pic_irq *pirq = irq2pirq(irq);

    if (pirq)
    {
        rt_hw_spin_lock(&pirq->rw_lock.lock);

        priority = pirq->priority;

        rt_hw_spin_unlock(&pirq->rw_lock.lock);
    }

    return priority;
}

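/*
 * Illustrative sketch only: pinning an IRQ to CPU 1. The `irq` value is
 * hypothetical, and the bitmap declaration assumes the RT_BITMAP_DECLARE
 * helper from rtbitmap.h alongside the RT_IRQ_AFFINITY_SET macro already
 * used in this file.
 *
 *     RT_BITMAP_DECLARE(affinity, RT_CPUS_NR) = { };
 *
 *     RT_IRQ_AFFINITY_SET(affinity, 1);
 *     rt_pic_irq_set_affinity(irq, affinity);
 */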
rt_err_t rt_pic_irq_set_affinity(int irq, rt_bitmap_t *affinity)
{
    rt_err_t err = -RT_EINVAL;
    struct rt_pic_irq *pirq;

    if (affinity && (pirq = irq2pirq(irq)))
    {
        rt_hw_spin_lock(&pirq->rw_lock.lock);

        if (pirq->pic->ops->irq_set_affinity)
        {
            err = pirq->pic->ops->irq_set_affinity(pirq, affinity);

            if (!err)
            {
                rt_memcpy(pirq->affinity, affinity, sizeof(pirq->affinity));
            }
        }
        else
        {
            err = -RT_ENOSYS;
        }

        rt_hw_spin_unlock(&pirq->rw_lock.lock);
    }

    return err;
}

rt_err_t rt_pic_irq_get_affinity(int irq, rt_bitmap_t *out_affinity)
{
    rt_err_t err = -RT_EINVAL;
    struct rt_pic_irq *pirq;

    if (out_affinity && (pirq = irq2pirq(irq)))
    {
        rt_hw_spin_lock(&pirq->rw_lock.lock);

        rt_memcpy(out_affinity, pirq->affinity, sizeof(pirq->affinity));
        err = RT_EOK;

        rt_hw_spin_unlock(&pirq->rw_lock.lock);
    }

    return err;
}

rt_err_t rt_pic_irq_set_triger_mode(int irq, rt_uint32_t mode)
{
    rt_err_t err = -RT_EINVAL;
    struct rt_pic_irq *pirq;

    if ((~mode & RT_IRQ_MODE_MASK) && (pirq = irq2pirq(irq)))
    {
        rt_hw_spin_lock(&pirq->rw_lock.lock);

        if (pirq->pic->ops->irq_set_triger_mode)
        {
            err = pirq->pic->ops->irq_set_triger_mode(pirq, mode);

            if (!err)
            {
                pirq->mode = mode;
            }
        }
        else
        {
            err = -RT_ENOSYS;
        }

        rt_hw_spin_unlock(&pirq->rw_lock.lock);
    }

    return err;
}

rt_uint32_t rt_pic_irq_get_triger_mode(int irq)
{
    rt_uint32_t mode = RT_UINT32_MAX;
    struct rt_pic_irq *pirq = irq2pirq(irq);

    if (pirq)
    {
        rt_hw_spin_lock(&pirq->rw_lock.lock);

        mode = pirq->mode;

        rt_hw_spin_unlock(&pirq->rw_lock.lock);
    }

    return mode;
}

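/*
 * rt_pic_irq_send_ipi() expects one of the IPI slots reserved at the front of
 * _pirq_hash (e.g. RT_SCHEDULE_IPI under RT_USING_SMP) plus a CPU bitmap of
 * the targets; the owning PIC must have supplied irq_send_ipi for
 * rt_pic_config_ipi() to have accepted the slot in the first place.
 */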
void rt_pic_irq_send_ipi(int irq, rt_bitmap_t *cpumask)
{
    struct rt_pic_irq *pirq;

    if (cpumask && (pirq = irq2pirq(irq)))
    {
        rt_hw_spin_lock(&pirq->rw_lock.lock);

        if (pirq->pic->ops->irq_send_ipi)
        {
            pirq->pic->ops->irq_send_ipi(pirq, cpumask);
        }

        rt_hw_spin_unlock(&pirq->rw_lock.lock);
    }
}

rt_err_t rt_pic_irq_set_state_raw(struct rt_pic *pic, int hwirq, int type, rt_bool_t state)
{
    rt_err_t err;

    if (pic && hwirq >= 0)
    {
        if (pic->ops->irq_set_state)
        {
            err = pic->ops->irq_set_state(pic, hwirq, type, state);
        }
        else
        {
            err = -RT_ENOSYS;
        }
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}

rt_err_t rt_pic_irq_get_state_raw(struct rt_pic *pic, int hwirq, int type, rt_bool_t *out_state)
{
    rt_err_t err;

    if (pic && hwirq >= 0)
    {
        if (pic->ops->irq_get_state)
        {
            rt_bool_t state;

            if (!(err = pic->ops->irq_get_state(pic, hwirq, type, &state)) && out_state)
            {
                *out_state = state;
            }
        }
        else
        {
            err = -RT_ENOSYS;
        }
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}

rt_err_t rt_pic_irq_set_state(int irq, int type, rt_bool_t state)
{
    rt_err_t err;
    struct rt_pic_irq *pirq = irq2pirq(irq);

    RT_ASSERT(pirq != RT_NULL);

    rt_hw_spin_lock(&pirq->rw_lock.lock);
    err = rt_pic_irq_set_state_raw(pirq->pic, pirq->hwirq, type, state);
    rt_hw_spin_unlock(&pirq->rw_lock.lock);

    return err;
}

rt_err_t rt_pic_irq_get_state(int irq, int type, rt_bool_t *out_state)
{
    rt_err_t err;
    struct rt_pic_irq *pirq = irq2pirq(irq);

    RT_ASSERT(pirq != RT_NULL);

    rt_hw_spin_lock(&pirq->rw_lock.lock);
    err = rt_pic_irq_get_state_raw(pirq->pic, pirq->hwirq, type, out_state);
    rt_hw_spin_unlock(&pirq->rw_lock.lock);

    return err;
}

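/*
 * The rt_pic_irq_parent_*() helpers are intended for cascaded controller
 * drivers: they forward an operation to the parent descriptor set up by
 * rt_pic_cascade(), and they assume that parent exists.
 */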
void rt_pic_irq_parent_enable(struct rt_pic_irq *pirq)
{
    RT_ASSERT(pirq != RT_NULL);
    pirq = pirq->parent;

    if (pirq->pic->ops->irq_enable)
    {
        pirq->pic->ops->irq_enable(pirq);
    }
}

void rt_pic_irq_parent_disable(struct rt_pic_irq *pirq)
{
    RT_ASSERT(pirq != RT_NULL);
    pirq = pirq->parent;

    if (pirq->pic->ops->irq_disable)
    {
        pirq->pic->ops->irq_disable(pirq);
    }
}

void rt_pic_irq_parent_ack(struct rt_pic_irq *pirq)
{
    RT_ASSERT(pirq != RT_NULL);
    pirq = pirq->parent;

    if (pirq->pic->ops->irq_ack)
    {
        pirq->pic->ops->irq_ack(pirq);
    }
}

void rt_pic_irq_parent_mask(struct rt_pic_irq *pirq)
{
    RT_ASSERT(pirq != RT_NULL);
    pirq = pirq->parent;

    if (pirq->pic->ops->irq_mask)
    {
        pirq->pic->ops->irq_mask(pirq);
    }
}

void rt_pic_irq_parent_unmask(struct rt_pic_irq *pirq)
{
    RT_ASSERT(pirq != RT_NULL);
    pirq = pirq->parent;

    if (pirq->pic->ops->irq_unmask)
    {
        pirq->pic->ops->irq_unmask(pirq);
    }
}

void rt_pic_irq_parent_eoi(struct rt_pic_irq *pirq)
{
    RT_ASSERT(pirq != RT_NULL);
    pirq = pirq->parent;

    if (pirq->pic->ops->irq_eoi)
    {
        pirq->pic->ops->irq_eoi(pirq);
    }
}

rt_err_t rt_pic_irq_parent_set_priority(struct rt_pic_irq *pirq, rt_uint32_t priority)
{
    rt_err_t err = -RT_ENOSYS;

    RT_ASSERT(pirq != RT_NULL);
    pirq = pirq->parent;

    if (pirq->pic->ops->irq_set_priority)
    {
        if (!(err = pirq->pic->ops->irq_set_priority(pirq, priority)))
        {
            pirq->priority = priority;
        }
    }

    return err;
}

rt_err_t rt_pic_irq_parent_set_affinity(struct rt_pic_irq *pirq, rt_bitmap_t *affinity)
{
    rt_err_t err = -RT_ENOSYS;

    RT_ASSERT(pirq != RT_NULL);
    pirq = pirq->parent;

    if (pirq->pic->ops->irq_set_affinity)
    {
        if (!(err = pirq->pic->ops->irq_set_affinity(pirq, affinity)))
        {
            rt_memcpy(pirq->affinity, affinity, sizeof(pirq->affinity));
        }
    }

    return err;
}

rt_err_t rt_pic_irq_parent_set_triger_mode(struct rt_pic_irq *pirq, rt_uint32_t mode)
{
    rt_err_t err = -RT_ENOSYS;

    RT_ASSERT(pirq != RT_NULL);
    pirq = pirq->parent;

    if (pirq->pic->ops->irq_set_triger_mode)
    {
        if (!(err = pirq->pic->ops->irq_set_triger_mode(pirq, mode)))
        {
            pirq->mode = mode;
        }
    }

    return err;
}

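/*
 * With RT_USING_OFW enabled, rt_pic_init() walks every device-tree node that
 * carries an "interrupt-controller" property and probes it against the PIC
 * driver stubs exported into the [_pic_ofw_start, _pic_ofw_end) section via
 * RT_OFW_STUB_RANGE_EXPORT; without OFW the step is a no-op.
 */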
#ifdef RT_USING_OFW
RT_OFW_STUB_RANGE_EXPORT(pic, _pic_ofw_start, _pic_ofw_end);

static rt_err_t ofw_pic_init(void)
{
    struct rt_ofw_node *ic_np;

    rt_ofw_foreach_node_by_prop(ic_np, "interrupt-controller")
    {
        rt_ofw_stub_probe_range(ic_np, &_pic_ofw_start, &_pic_ofw_end);
    }

    return RT_EOK;
}
#else
static rt_err_t ofw_pic_init(void)
{
    return RT_EOK;
}
#endif /* !RT_USING_OFW */

rt_err_t rt_pic_init(void)
{
    rt_err_t err;

    LOG_D("init start");

    err = ofw_pic_init();

    LOG_D("init end");

    return err;
}

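/*
 * Shell helper: `list_irq` prints the IRQs that currently have a handler
 * attached, and `list_irq all` also includes configured-but-unused lines.
 */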
#if defined(RT_USING_CONSOLE) && defined(RT_USING_MSH)
static int list_irq(int argc, char **argv)
{
    rt_size_t irq_nr = 0;
    rt_bool_t dump_all = RT_FALSE;
    const char *const irq_modes[] =
    {
        [RT_IRQ_MODE_NONE] = "None",
        [RT_IRQ_MODE_EDGE_RISING] = "Edge-Rising",
        [RT_IRQ_MODE_EDGE_FALLING] = "Edge-Falling",
        [RT_IRQ_MODE_EDGE_BOTH] = "Edge-Both",
        [RT_IRQ_MODE_LEVEL_HIGH] = "Level-High",
        [RT_IRQ_MODE_LEVEL_LOW] = "Level-Low",
    };
    static char info[RT_CONSOLEBUF_SIZE];
#ifdef RT_USING_SMP
    static char cpumask[RT_CPUS_NR + 1] = { [RT_CPUS_NR] = '\0' };
#endif

    if (argc > 1)
    {
        if (!rt_strcmp(argv[1], "all"))
        {
            dump_all = RT_TRUE;
        }
    }

    rt_kprintf("%-*.s %-*.s %s %-*.s %-*.s %-*.s %-*.sUsers%-*.s",
            6, "IRQ",
            6, "HW-IRQ",
            "MSI",
            _pic_name_max, "PIC",
            12, "Mode",
#ifdef RT_USING_SMP
            RT_CPUS_NR, "CPUs",
#else
            0, 0,
#endif
#ifdef RT_USING_INTERRUPT_INFO
            11, "Count",
            5, ""
#else
            0, 0,
            10, "-Number"
#endif
            );

#if defined(RT_USING_SMP) && defined(RT_USING_INTERRUPT_INFO)
    for (int i = 0; i < RT_CPUS_NR; i++)
    {
        rt_kprintf(" cpu%2d ", i);
    }
#endif
#ifdef RT_USING_PIC_STATISTICS
    rt_kprintf(" max/ns avg/ns min/ns");
#endif
    rt_kputs("\n");

    for (int i = 0; i < RT_ARRAY_SIZE(_pirq_hash); ++i)
    {
        struct rt_pic_irq *pirq = &_pirq_hash[i];

        if (!pirq->pic || !(dump_all || pirq->isr.action.handler))
        {
            continue;
        }

        rt_snprintf(info, sizeof(info), "%-6d %-6d %c %-*.s %-*.s ",
                pirq->irq,
                pirq->hwirq,
                pirq->msi_desc ? 'Y' : 'N',
                _pic_name_max, pirq->pic->ops->name,
                12, irq_modes[pirq->mode]);

#ifdef RT_USING_SMP
        for (int group = 0, id = 0; group < RT_ARRAY_SIZE(pirq->affinity); ++group)
        {
            rt_bitmap_t mask = pirq->affinity[group];

            for (int idx = 0; id < RT_CPUS_NR && idx < RT_BITMAP_BIT_LEN(1); ++idx, ++id)
            {
                cpumask[RT_ARRAY_SIZE(cpumask) - id - 2] = '0' + ((mask >> idx) & 1);
            }
        }
#endif /* RT_USING_SMP */

        rt_kputs(info);
#ifdef RT_USING_SMP
        rt_kputs(cpumask);
#endif

#ifdef RT_USING_INTERRUPT_INFO
        rt_kprintf(" %-10d ", pirq->isr.action.counter);
        rt_kprintf("%-*.s", 10, pirq->isr.action.name);
#ifdef RT_USING_SMP
        for (int cpuid = 0; cpuid < RT_CPUS_NR; cpuid++)
        {
            rt_kprintf(" %-10d", pirq->isr.action.cpu_counter[cpuid]);
        }
#endif
#ifdef RT_USING_PIC_STATISTICS
        /* guard against a zero counter when the IRQ has never fired */
        rt_kprintf(" %-10d %-10d %-10d", pirq->stat.max_irq_time_ns,
                pirq->isr.action.counter ? pirq->stat.sum_irq_time_ns / pirq->isr.action.counter : 0,
                pirq->stat.min_irq_time_ns);
#endif
        rt_kputs("\n");

        if (!rt_list_isempty(&pirq->isr.list))
        {
            struct rt_pic_isr *repeat_isr;

            rt_list_for_each_entry(repeat_isr, &pirq->isr.list, list)
            {
                rt_kputs(info);
#ifdef RT_USING_SMP
                rt_kputs(cpumask);
#endif
                rt_kprintf("%-10d ", repeat_isr->action.counter);
                rt_kprintf("%-*.s", 10, repeat_isr->action.name);
#ifdef RT_USING_SMP
                for (int cpuid = 0; cpuid < RT_CPUS_NR; cpuid++)
                {
                    rt_kprintf(" %-10d", repeat_isr->action.cpu_counter[cpuid]);
                }
#endif
#ifdef RT_USING_PIC_STATISTICS
                rt_kprintf(" --- --- ---");
#endif
                rt_kputs("\n");
            }
        }
#else
        rt_kprintf(" %d\n", rt_list_len(&pirq->isr.list));
#endif
        ++irq_nr;
    }

    rt_kprintf("%d IRQs found\n", irq_nr);

    return 0;
}
MSH_CMD_EXPORT(list_irq, dump irq information. pass arg 'all' to list every configured irq);
#endif /* RT_USING_CONSOLE && RT_USING_MSH */