/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2022-08-24     GuEe-GUI     first version
 */

#include <rthw.h>
#include <rtthread.h>

#define DBG_TAG "rtdm.pic"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <drivers/pic.h>
#include <ktime.h>
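
/*
 * One registered trap hook: rt_pic_do_traps() walks these records in order
 * and calls each handler(data) until one of them returns RT_TRUE.
 */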
struct irq_traps
{
    rt_list_t list;
    void *data;
    rt_bool_t (*handler)(void *);
};

static int _ipi_hash[] =
{
#ifdef RT_USING_SMP
    [RT_SCHEDULE_IPI] = RT_SCHEDULE_IPI,
    [RT_STOP_IPI] = RT_STOP_IPI,
    [RT_SMP_CALL_IPI] = RT_SMP_CALL_IPI,
#endif
};

/* reserved ipi */
static int _pirq_hash_idx = RT_ARRAY_SIZE(_ipi_hash);
static struct rt_pic_irq _pirq_hash[MAX_HANDLERS] =
{
    [0 ... MAX_HANDLERS - 1] =
    {
        .irq = -1,
        .hwirq = -1,
        .mode = RT_IRQ_MODE_NONE,
        .priority = RT_UINT32_MAX,
        .rw_lock = { },
    }
};

static struct rt_spinlock _pic_lock = { };
static rt_size_t _pic_name_max = sizeof("PIC");
static rt_list_t _pic_nodes = RT_LIST_OBJECT_INIT(_pic_nodes);
static rt_list_t _traps_nodes = RT_LIST_OBJECT_INIT(_traps_nodes);
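
/*
 * Resolve a Kernel IRQ number to its slot in _pirq_hash. Returns RT_NULL
 * (and logs an error) when the number is out of range or the slot has not
 * been configured by a PIC yet.
 */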
static struct rt_pic_irq *irq2pirq(int irq)
{
    struct rt_pic_irq *pirq = RT_NULL;

    if ((irq >= 0) && (irq < MAX_HANDLERS))
    {
        pirq = &_pirq_hash[irq];

        if (pirq->irq < 0)
        {
            pirq = RT_NULL;
        }
    }

    if (!pirq)
    {
        LOG_E("irq = %d is invalid", irq);
    }

    return pirq;
}

static void append_pic(struct rt_pic *pic)
{
    int pic_name_len = rt_strlen(pic->ops->name);

    rt_list_insert_before(&_pic_nodes, &pic->list);

    if (pic_name_len > _pic_name_max)
    {
        _pic_name_max = pic_name_len;
    }
}

void rt_pic_default_name(struct rt_pic *pic)
{
    if (pic)
    {
#if RT_NAME_MAX > 0
        rt_strncpy(pic->parent.name, "PIC", RT_NAME_MAX - 1);
        pic->parent.name[RT_NAME_MAX - 1] = '\0';
#else
        pic->parent.name = "PIC";
#endif
    }
}
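
/*
 * Try to interpret an object pointer as a struct rt_pic. For device or
 * generic objects the candidate PIC is taken to sit right behind the
 * embedded header, and it is accepted only when its name is "PIC" (see
 * rt_pic_default_name()).
 */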
struct rt_pic *rt_pic_dynamic_cast(void *ptr)
{
    struct rt_pic *pic = RT_NULL, *tmp = RT_NULL;

    if (ptr)
    {
        struct rt_object *obj = ptr;

        if (obj->type == RT_Object_Class_Unknown)
        {
            tmp = (void *)obj;
        }
        else if (obj->type == RT_Object_Class_Device)
        {
            tmp = (void *)obj + sizeof(struct rt_device);
        }
        else
        {
            tmp = (void *)obj + sizeof(struct rt_object);
        }

        if (tmp && !rt_strcmp(tmp->parent.name, "PIC"))
        {
            pic = tmp;
        }
    }

    return pic;
}
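
/*
 * Allocate a linear range of Kernel IRQ numbers from the global _pirq_hash
 * table for a PIC. A controller driver normally calls this once, then maps
 * each hardware IRQ with rt_pic_config_irq(). Illustrative sketch only
 * ("my_pic" and the numbers are placeholders):
 *
 *     rt_pic_linear_irq(&my_pic, 32);
 *     int irq = rt_pic_config_irq(&my_pic, 5, 5);
 */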
rt_err_t rt_pic_linear_irq(struct rt_pic *pic, rt_size_t irq_nr)
{
    rt_err_t err = RT_EOK;

    if (pic && pic->ops && pic->ops->name)
    {
        rt_ubase_t level = rt_spin_lock_irqsave(&_pic_lock);

        if (_pirq_hash_idx + irq_nr <= RT_ARRAY_SIZE(_pirq_hash))
        {
            rt_list_init(&pic->list);

            rt_pic_default_name(pic);
            pic->parent.type = RT_Object_Class_Unknown;

            pic->irq_start = _pirq_hash_idx;
            pic->irq_nr = irq_nr;
            pic->pirqs = &_pirq_hash[_pirq_hash_idx];

            _pirq_hash_idx += irq_nr;

            append_pic(pic);

            LOG_D("%s alloc irqs ranges [%d, %d]", pic->ops->name,
                    pic->irq_start, pic->irq_start + pic->irq_nr);
        }
        else
        {
            LOG_E("%s alloc %d irqs is overflow", pic->ops->name, irq_nr);

            err = -RT_EEMPTY;
        }

        rt_spin_unlock_irqrestore(&_pic_lock, level);
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}

rt_err_t rt_pic_cancel_irq(struct rt_pic *pic)
{
    rt_err_t err = RT_EOK;

    if (pic && pic->pirqs)
    {
        rt_ubase_t level = rt_spin_lock_irqsave(&_pic_lock);

        /*
         * This only keeps the running system safe;
         * we do not recommend that PICs unregister.
         */
        rt_list_remove(&pic->list);

        rt_spin_unlock_irqrestore(&_pic_lock, level);
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}
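
/*
 * Bind a _pirq_hash slot to (pic, irq, hwirq). The slot's lists are
 * initialized on first use; re-binding to a different PIC asserts that the
 * slot is no longer linked anywhere.
 */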
static void config_pirq(struct rt_pic *pic, struct rt_pic_irq *pirq, int irq, int hwirq)
{
    rt_ubase_t level = rt_spin_lock_irqsave(&pirq->rw_lock);

    if (pirq->irq < 0)
    {
        rt_list_init(&pirq->list);
        rt_list_init(&pirq->children_nodes);
        rt_list_init(&pirq->isr.list);
    }
    else if (pirq->pic != pic)
    {
        RT_ASSERT(rt_list_isempty(&pirq->list) == RT_TRUE);
        RT_ASSERT(rt_list_isempty(&pirq->children_nodes) == RT_TRUE);
        RT_ASSERT(rt_list_isempty(&pirq->isr.list) == RT_TRUE);
    }

    pirq->irq = irq;
    pirq->hwirq = hwirq;
    pirq->pic = pic;

    rt_spin_unlock_irqrestore(&pirq->rw_lock, level);
}

int rt_pic_config_ipi(struct rt_pic *pic, int ipi_index, int hwirq)
{
    int ipi = ipi_index;
    struct rt_pic_irq *pirq;

    if (pic && ipi < RT_ARRAY_SIZE(_ipi_hash) && hwirq >= 0 && pic->ops->irq_send_ipi)
    {
        pirq = &_pirq_hash[ipi];
        config_pirq(pic, pirq, ipi, hwirq);

        for (int cpuid = 0; cpuid < RT_CPUS_NR; ++cpuid)
        {
            RT_IRQ_AFFINITY_SET(pirq->affinity, cpuid);
        }

        LOG_D("%s config %s %d to hwirq %d", pic->ops->name, "ipi", ipi, hwirq);
    }
    else
    {
        ipi = -RT_EINVAL;
    }

    return ipi;
}

int rt_pic_config_irq(struct rt_pic *pic, int irq_index, int hwirq)
{
    int irq;

    if (pic && hwirq >= 0)
    {
        irq = pic->irq_start + irq_index;

        if (irq >= 0 && irq < MAX_HANDLERS)
        {
            config_pirq(pic, &_pirq_hash[irq], irq, hwirq);

            LOG_D("%s config %s %d to hwirq %d", pic->ops->name, "irq", irq, hwirq);
        }
        else
        {
            irq = -RT_ERROR;
        }
    }
    else
    {
        irq = -RT_EINVAL;
    }

    return irq;
}

struct rt_pic_irq *rt_pic_find_ipi(struct rt_pic *pic, int ipi_index)
{
    struct rt_pic_irq *pirq = &_pirq_hash[ipi_index];

    RT_ASSERT(ipi_index < RT_ARRAY_SIZE(_ipi_hash));
    RT_ASSERT(pirq->pic == pic);

    return pirq;
}

struct rt_pic_irq *rt_pic_find_pirq(struct rt_pic *pic, int irq)
{
    if (pic && irq >= pic->irq_start && irq <= pic->irq_start + pic->irq_nr)
    {
        return &pic->pirqs[irq - pic->irq_start];
    }

    return RT_NULL;
}
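
/*
 * Cascade: route this pirq through a parent IRQ (for example a secondary
 * controller wired to one line of its parent). The child inherits the
 * parent's priority and affinity; when the PIC declares RT_PIC_F_IRQ_ROUTING,
 * the child is also linked into the parent's children_nodes so that
 * rt_pic_handle_isr() can walk it.
 */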
rt_err_t rt_pic_cascade(struct rt_pic_irq *pirq, int parent_irq)
{
    rt_err_t err = RT_EOK;

    if (pirq && !pirq->parent && parent_irq >= 0)
    {
        struct rt_pic_irq *parent;

        rt_spin_lock(&pirq->rw_lock);

        parent = irq2pirq(parent_irq);

        if (parent)
        {
            pirq->parent = parent;
            pirq->priority = parent->priority;
            rt_memcpy(&pirq->affinity, &parent->affinity, sizeof(pirq->affinity));
        }

        rt_spin_unlock(&pirq->rw_lock);

        if (parent && pirq->pic->ops->flags & RT_PIC_F_IRQ_ROUTING)
        {
            rt_spin_lock(&parent->rw_lock);
            rt_list_insert_before(&parent->children_nodes, &pirq->list);
            rt_spin_unlock(&parent->rw_lock);
        }
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}

rt_err_t rt_pic_uncascade(struct rt_pic_irq *pirq)
{
    rt_err_t err = RT_EOK;

    if (pirq && pirq->parent)
    {
        struct rt_pic_irq *parent;

        rt_spin_lock(&pirq->rw_lock);

        parent = pirq->parent;
        pirq->parent = RT_NULL;

        rt_spin_unlock(&pirq->rw_lock);

        if (parent && pirq->pic->ops->flags & RT_PIC_F_IRQ_ROUTING)
        {
            rt_spin_lock(&parent->rw_lock);
            rt_list_remove(&pirq->list);
            rt_spin_unlock(&parent->rw_lock);
        }
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}
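
/*
 * Attach an ISR to a Kernel IRQ. The first handler is stored inline in
 * pirq->isr; further handlers on a shared IRQ are allocated and chained on
 * pirq->isr.list. Typical call sequence (illustrative only; MY_IRQ, my_isr
 * and my_dev are placeholders):
 *
 *     rt_pic_attach_irq(MY_IRQ, my_isr, my_dev, "my-dev", 0);
 *     rt_pic_irq_unmask(MY_IRQ);
 */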
rt_err_t rt_pic_attach_irq(int irq, rt_isr_handler_t handler, void *uid, const char *name, int flags)
{
    rt_err_t err = -RT_EINVAL;
    struct rt_pic_irq *pirq;

    if (handler && name && (pirq = irq2pirq(irq)))
    {
        struct rt_pic_isr *isr = RT_NULL;
        rt_ubase_t level = rt_spin_lock_irqsave(&pirq->rw_lock);

        err = RT_EOK;

        if (!pirq->isr.action.handler)
        {
            /* first attach */
            isr = &pirq->isr;
            rt_list_init(&isr->list);
        }
        else
        {
            rt_spin_unlock_irqrestore(&pirq->rw_lock, level);

            if ((isr = rt_malloc(sizeof(*isr))))
            {
                rt_list_init(&isr->list);

                level = rt_spin_lock_irqsave(&pirq->rw_lock);

                rt_list_insert_after(&pirq->isr.list, &isr->list);
            }
            else
            {
                LOG_E("No memory to save '%s' isr", name);
                err = -RT_ERROR;
            }
        }

        if (!err)
        {
            isr->flags = flags;
            isr->action.handler = handler;
            isr->action.param = uid;
#ifdef RT_USING_INTERRUPT_INFO
            isr->action.counter = 0;
            rt_strncpy(isr->action.name, name, RT_NAME_MAX - 1);
            isr->action.name[RT_NAME_MAX - 1] = '\0';
#ifdef RT_USING_SMP
            rt_memset(isr->action.cpu_counter, 0, sizeof(isr->action.cpu_counter));
#endif
#endif

            rt_spin_unlock_irqrestore(&pirq->rw_lock, level);
        }
    }

    return err;
}

rt_err_t rt_pic_detach_irq(int irq, void *uid)
{
    rt_err_t err = -RT_EINVAL;
    struct rt_pic_irq *pirq = irq2pirq(irq);

    if (pirq)
    {
        rt_bool_t will_free = RT_FALSE;
        struct rt_pic_isr *isr = RT_NULL;
        rt_ubase_t level = rt_spin_lock_irqsave(&pirq->rw_lock);

        isr = &pirq->isr;

        if (isr->action.param == uid)
        {
            if (rt_list_isempty(&isr->list))
            {
                isr->action.handler = RT_NULL;
                isr->action.param = RT_NULL;
            }
            else
            {
                struct rt_pic_isr *next_isr = rt_list_first_entry(&isr->list, struct rt_pic_isr, list);

                rt_list_remove(&next_isr->list);

                isr->action.handler = next_isr->action.handler;
                isr->action.param = next_isr->action.param;
#ifdef RT_USING_INTERRUPT_INFO
                isr->action.counter = next_isr->action.counter;
                rt_strncpy(isr->action.name, next_isr->action.name, RT_NAME_MAX);
#ifdef RT_USING_SMP
                rt_memcpy(isr->action.cpu_counter, next_isr->action.cpu_counter, sizeof(next_isr->action.cpu_counter));
#endif
#endif

                isr = next_isr;
                will_free = RT_TRUE;
            }

            err = RT_EOK;
        }
        else
        {
            rt_list_for_each_entry(isr, &pirq->isr.list, list)
            {
                if (isr->action.param == uid)
                {
                    err = RT_EOK;
                    will_free = RT_TRUE;

                    rt_list_remove(&isr->list);
                    break;
                }
            }
        }

        rt_spin_unlock_irqrestore(&pirq->rw_lock, level);

        if (will_free)
        {
            rt_free(isr);
        }
    }

    return err;
}
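
/*
 * Register a trap hook. rt_pic_do_traps() invokes every registered handler
 * in interrupt context until one returns RT_TRUE.
 */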
rt_err_t rt_pic_add_traps(rt_bool_t (*handler)(void *), void *data)
{
    rt_err_t err = -RT_EINVAL;

    if (handler)
    {
        struct irq_traps *traps = rt_malloc(sizeof(*traps));

        if (traps)
        {
            rt_ubase_t level = rt_hw_interrupt_disable();

            rt_list_init(&traps->list);

            traps->data = data;
            traps->handler = handler;

            rt_list_insert_before(&_traps_nodes, &traps->list);
            err = RT_EOK;

            rt_hw_interrupt_enable(level);
        }
        else
        {
            LOG_E("No memory to save '%p' handler", handler);
            err = -RT_ENOMEM;
        }
    }

    return err;
}

rt_err_t rt_pic_do_traps(void)
{
    rt_err_t err = -RT_ERROR;
    struct irq_traps *traps;

    rt_interrupt_enter();

    rt_list_for_each_entry(traps, &_traps_nodes, list)
    {
        if (traps->handler(traps->data))
        {
            err = RT_EOK;

            break;
        }
    }

    rt_interrupt_leave();

    return err;
}
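
/*
 * Dispatch one interrupt: ack/handle/eoi any routed children first, then run
 * the primary action and every chained ISR of this pirq. When
 * RT_USING_PIC_STATISTICS is enabled, the min/max/total handling time is
 * recorded in pirq->stat.
 */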
rt_err_t rt_pic_handle_isr(struct rt_pic_irq *pirq)
{
    rt_err_t err = -RT_EEMPTY;
    rt_list_t *handler_nodes;
    struct rt_irq_desc *action;
#ifdef RT_USING_PIC_STATISTICS
    struct timespec ts;
    rt_ubase_t irq_time_ns;
    rt_ubase_t current_irq_begin;
#endif

    RT_ASSERT(pirq != RT_NULL);
    RT_ASSERT(pirq->pic != RT_NULL);

#ifdef RT_USING_PIC_STATISTICS
    rt_ktime_boottime_get_ns(&ts);
    current_irq_begin = ts.tv_sec * (1000UL * 1000 * 1000) + ts.tv_nsec;
#endif

    handler_nodes = &pirq->isr.list;
    action = &pirq->isr.action;

    if (!rt_list_isempty(&pirq->children_nodes))
    {
        struct rt_pic_irq *child;

        rt_list_for_each_entry(child, &pirq->children_nodes, list)
        {
            if (child->pic->ops->irq_ack)
            {
                child->pic->ops->irq_ack(child);
            }

            err = rt_pic_handle_isr(child);

            if (child->pic->ops->irq_eoi)
            {
                child->pic->ops->irq_eoi(child);
            }
        }
    }

    if (action->handler)
    {
        action->handler(pirq->irq, action->param);
#ifdef RT_USING_INTERRUPT_INFO
        action->counter++;
#ifdef RT_USING_SMP
        action->cpu_counter[rt_hw_cpu_id()]++;
#endif
#endif

        if (!rt_list_isempty(handler_nodes))
        {
            struct rt_pic_isr *isr;

            rt_list_for_each_entry(isr, handler_nodes, list)
            {
                action = &isr->action;
                RT_ASSERT(action->handler != RT_NULL);

                action->handler(pirq->irq, action->param);
#ifdef RT_USING_INTERRUPT_INFO
                action->counter++;
#ifdef RT_USING_SMP
                action->cpu_counter[rt_hw_cpu_id()]++;
#endif
#endif
            }
        }

        err = RT_EOK;
    }

#ifdef RT_USING_PIC_STATISTICS
    rt_ktime_boottime_get_ns(&ts);
    irq_time_ns = ts.tv_sec * (1000UL * 1000 * 1000) + ts.tv_nsec - current_irq_begin;
    pirq->stat.sum_irq_time_ns += irq_time_ns;
    if (irq_time_ns < pirq->stat.min_irq_time_ns || pirq->stat.min_irq_time_ns == 0)
    {
        pirq->stat.min_irq_time_ns = irq_time_ns;
    }
    if (irq_time_ns > pirq->stat.max_irq_time_ns)
    {
        pirq->stat.max_irq_time_ns = irq_time_ns;
    }
#endif

    return err;
}

rt_weak rt_err_t rt_pic_user_extends(struct rt_pic *pic)
{
    return -RT_ENOSYS;
}

rt_err_t rt_pic_irq_init(void)
{
    rt_err_t err = RT_EOK;
    struct rt_pic *pic;

    rt_list_for_each_entry(pic, &_pic_nodes, list)
    {
        if (pic->ops->irq_init)
        {
            err = pic->ops->irq_init(pic);

            if (err)
            {
                LOG_E("PIC = %s init fail", pic->ops->name);
                break;
            }
        }
    }

    return err;
}

rt_err_t rt_pic_irq_finit(void)
{
    rt_err_t err = RT_EOK;
    struct rt_pic *pic;

    rt_list_for_each_entry(pic, &_pic_nodes, list)
    {
        if (pic->ops->irq_finit)
        {
            err = pic->ops->irq_finit(pic);

            if (err)
            {
                LOG_E("PIC = %s finit fail", pic->ops->name);
                break;
            }
        }
    }

    return err;
}
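
/*
 * Per-IRQ wrappers: look up the pirq for a Kernel IRQ number and call the
 * owning PIC's optional irq_enable/irq_disable/irq_ack/irq_mask/irq_unmask/
 * irq_eoi callback while holding the pirq spinlock.
 */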
void rt_pic_irq_enable(int irq)
{
    struct rt_pic_irq *pirq = irq2pirq(irq);

    RT_ASSERT(pirq != RT_NULL);

    rt_hw_spin_lock(&pirq->rw_lock.lock);

    if (pirq->pic->ops->irq_enable)
    {
        pirq->pic->ops->irq_enable(pirq);
    }

    rt_hw_spin_unlock(&pirq->rw_lock.lock);
}

void rt_pic_irq_disable(int irq)
{
    struct rt_pic_irq *pirq = irq2pirq(irq);

    RT_ASSERT(pirq != RT_NULL);

    rt_hw_spin_lock(&pirq->rw_lock.lock);

    if (pirq->pic->ops->irq_disable)
    {
        pirq->pic->ops->irq_disable(pirq);
    }

    rt_hw_spin_unlock(&pirq->rw_lock.lock);
}

void rt_pic_irq_ack(int irq)
{
    struct rt_pic_irq *pirq = irq2pirq(irq);

    RT_ASSERT(pirq != RT_NULL);

    rt_hw_spin_lock(&pirq->rw_lock.lock);

    if (pirq->pic->ops->irq_ack)
    {
        pirq->pic->ops->irq_ack(pirq);
    }

    rt_hw_spin_unlock(&pirq->rw_lock.lock);
}

void rt_pic_irq_mask(int irq)
{
    struct rt_pic_irq *pirq = irq2pirq(irq);

    RT_ASSERT(pirq != RT_NULL);

    rt_hw_spin_lock(&pirq->rw_lock.lock);

    if (pirq->pic->ops->irq_mask)
    {
        pirq->pic->ops->irq_mask(pirq);
    }

    rt_hw_spin_unlock(&pirq->rw_lock.lock);
}

void rt_pic_irq_unmask(int irq)
{
    struct rt_pic_irq *pirq = irq2pirq(irq);

    RT_ASSERT(pirq != RT_NULL);

    rt_hw_spin_lock(&pirq->rw_lock.lock);

    if (pirq->pic->ops->irq_unmask)
    {
        pirq->pic->ops->irq_unmask(pirq);
    }

    rt_hw_spin_unlock(&pirq->rw_lock.lock);
}

void rt_pic_irq_eoi(int irq)
{
    struct rt_pic_irq *pirq = irq2pirq(irq);

    RT_ASSERT(pirq != RT_NULL);

    rt_hw_spin_lock(&pirq->rw_lock.lock);

    if (pirq->pic->ops->irq_eoi)
    {
        pirq->pic->ops->irq_eoi(pirq);
    }

    rt_hw_spin_unlock(&pirq->rw_lock.lock);
}

rt_err_t rt_pic_irq_set_priority(int irq, rt_uint32_t priority)
{
    rt_err_t err = -RT_EINVAL;
    struct rt_pic_irq *pirq = irq2pirq(irq);

    if (pirq)
    {
        rt_hw_spin_lock(&pirq->rw_lock.lock);

        if (pirq->pic->ops->irq_set_priority)
        {
            err = pirq->pic->ops->irq_set_priority(pirq, priority);

            if (!err)
            {
                pirq->priority = priority;
            }
        }
        else
        {
            err = -RT_ENOSYS;
        }

        rt_hw_spin_unlock(&pirq->rw_lock.lock);
    }

    return err;
}

rt_uint32_t rt_pic_irq_get_priority(int irq)
{
    rt_uint32_t priority = RT_UINT32_MAX;
    struct rt_pic_irq *pirq = irq2pirq(irq);

    if (pirq)
    {
        rt_hw_spin_lock(&pirq->rw_lock.lock);

        priority = pirq->priority;

        rt_hw_spin_unlock(&pirq->rw_lock.lock);
    }

    return priority;
}

rt_err_t rt_pic_irq_set_affinity(int irq, rt_bitmap_t *affinity)
{
    rt_err_t err = -RT_EINVAL;
    struct rt_pic_irq *pirq;

    if (affinity && (pirq = irq2pirq(irq)))
    {
        rt_hw_spin_lock(&pirq->rw_lock.lock);

        if (pirq->pic->ops->irq_set_affinity)
        {
            err = pirq->pic->ops->irq_set_affinity(pirq, affinity);

            if (!err)
            {
                rt_memcpy(pirq->affinity, affinity, sizeof(pirq->affinity));
            }
        }
        else
        {
            err = -RT_ENOSYS;
        }

        rt_hw_spin_unlock(&pirq->rw_lock.lock);
    }

    return err;
}

rt_err_t rt_pic_irq_get_affinity(int irq, rt_bitmap_t *out_affinity)
{
    rt_err_t err = -RT_EINVAL;
    struct rt_pic_irq *pirq;

    if (out_affinity && (pirq = irq2pirq(irq)))
    {
        rt_hw_spin_lock(&pirq->rw_lock.lock);

        rt_memcpy(out_affinity, pirq->affinity, sizeof(pirq->affinity));
        err = RT_EOK;

        rt_hw_spin_unlock(&pirq->rw_lock.lock);
    }

    return err;
}

rt_err_t rt_pic_irq_set_triger_mode(int irq, rt_uint32_t mode)
{
    rt_err_t err = -RT_EINVAL;
    struct rt_pic_irq *pirq;

    if ((~mode & RT_IRQ_MODE_MASK) && (pirq = irq2pirq(irq)))
    {
        rt_hw_spin_lock(&pirq->rw_lock.lock);

        if (pirq->pic->ops->irq_set_triger_mode)
        {
            err = pirq->pic->ops->irq_set_triger_mode(pirq, mode);

            if (!err)
            {
                pirq->mode = mode;
            }
        }
        else
        {
            err = -RT_ENOSYS;
        }

        rt_hw_spin_unlock(&pirq->rw_lock.lock);
    }

    return err;
}

rt_uint32_t rt_pic_irq_get_triger_mode(int irq)
{
    rt_uint32_t mode = RT_UINT32_MAX;
    struct rt_pic_irq *pirq = irq2pirq(irq);

    if (pirq)
    {
        rt_hw_spin_lock(&pirq->rw_lock.lock);

        mode = pirq->mode;

        rt_hw_spin_unlock(&pirq->rw_lock.lock);
    }

    return mode;
}

void rt_pic_irq_send_ipi(int irq, rt_bitmap_t *cpumask)
{
    struct rt_pic_irq *pirq;

    if (cpumask && (pirq = irq2pirq(irq)))
    {
        rt_hw_spin_lock(&pirq->rw_lock.lock);

        if (pirq->pic->ops->irq_send_ipi)
        {
            pirq->pic->ops->irq_send_ipi(pirq, cpumask);
        }

        rt_hw_spin_unlock(&pirq->rw_lock.lock);
    }
}

rt_err_t rt_pic_irq_set_state_raw(struct rt_pic *pic, int hwirq, int type, rt_bool_t state)
{
    rt_err_t err;

    if (pic && hwirq >= 0)
    {
        if (pic->ops->irq_set_state)
        {
            err = pic->ops->irq_set_state(pic, hwirq, type, state);
        }
        else
        {
            err = -RT_ENOSYS;
        }
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}

rt_err_t rt_pic_irq_get_state_raw(struct rt_pic *pic, int hwirq, int type, rt_bool_t *out_state)
{
    rt_err_t err;

    if (pic && hwirq >= 0)
    {
        if (pic->ops->irq_get_state)
        {
            rt_bool_t state;

            if (!(err = pic->ops->irq_get_state(pic, hwirq, type, &state)) && out_state)
            {
                *out_state = state;
            }
        }
        else
        {
            err = -RT_ENOSYS;
        }
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}

rt_err_t rt_pic_irq_set_state(int irq, int type, rt_bool_t state)
{
    rt_err_t err;
    struct rt_pic_irq *pirq = irq2pirq(irq);

    RT_ASSERT(pirq != RT_NULL);

    rt_hw_spin_lock(&pirq->rw_lock.lock);

    err = rt_pic_irq_set_state_raw(pirq->pic, pirq->hwirq, type, state);

    rt_hw_spin_unlock(&pirq->rw_lock.lock);

    return err;
}

rt_err_t rt_pic_irq_get_state(int irq, int type, rt_bool_t *out_state)
{
    rt_err_t err;
    struct rt_pic_irq *pirq = irq2pirq(irq);

    RT_ASSERT(pirq != RT_NULL);

    rt_hw_spin_lock(&pirq->rw_lock.lock);

    err = rt_pic_irq_get_state_raw(pirq->pic, pirq->hwirq, type, out_state);

    rt_hw_spin_unlock(&pirq->rw_lock.lock);

    return err;
}
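
/*
 * Parent helpers for cascaded IRQs: forward the operation to the PIC ops of
 * pirq->parent. Callers must ensure a parent exists, i.e. rt_pic_cascade()
 * was called for this pirq.
 */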
void rt_pic_irq_parent_enable(struct rt_pic_irq *pirq)
{
    RT_ASSERT(pirq != RT_NULL);
    pirq = pirq->parent;

    if (pirq->pic->ops->irq_enable)
    {
        pirq->pic->ops->irq_enable(pirq);
    }
}

void rt_pic_irq_parent_disable(struct rt_pic_irq *pirq)
{
    RT_ASSERT(pirq != RT_NULL);
    pirq = pirq->parent;

    if (pirq->pic->ops->irq_disable)
    {
        pirq->pic->ops->irq_disable(pirq);
    }
}

void rt_pic_irq_parent_ack(struct rt_pic_irq *pirq)
{
    RT_ASSERT(pirq != RT_NULL);
    pirq = pirq->parent;

    if (pirq->pic->ops->irq_ack)
    {
        pirq->pic->ops->irq_ack(pirq);
    }
}

void rt_pic_irq_parent_mask(struct rt_pic_irq *pirq)
{
    RT_ASSERT(pirq != RT_NULL);
    pirq = pirq->parent;

    if (pirq->pic->ops->irq_mask)
    {
        pirq->pic->ops->irq_mask(pirq);
    }
}

void rt_pic_irq_parent_unmask(struct rt_pic_irq *pirq)
{
    RT_ASSERT(pirq != RT_NULL);
    pirq = pirq->parent;

    if (pirq->pic->ops->irq_unmask)
    {
        pirq->pic->ops->irq_unmask(pirq);
    }
}

void rt_pic_irq_parent_eoi(struct rt_pic_irq *pirq)
{
    RT_ASSERT(pirq != RT_NULL);
    pirq = pirq->parent;

    if (pirq->pic->ops->irq_eoi)
    {
        pirq->pic->ops->irq_eoi(pirq);
    }
}

rt_err_t rt_pic_irq_parent_set_priority(struct rt_pic_irq *pirq, rt_uint32_t priority)
{
    rt_err_t err = -RT_ENOSYS;

    RT_ASSERT(pirq != RT_NULL);
    pirq = pirq->parent;

    if (pirq->pic->ops->irq_set_priority)
    {
        if (!(err = pirq->pic->ops->irq_set_priority(pirq, priority)))
        {
            pirq->priority = priority;
        }
    }

    return err;
}

rt_err_t rt_pic_irq_parent_set_affinity(struct rt_pic_irq *pirq, rt_bitmap_t *affinity)
{
    rt_err_t err = -RT_ENOSYS;

    RT_ASSERT(pirq != RT_NULL);
    pirq = pirq->parent;

    if (pirq->pic->ops->irq_set_affinity)
    {
        if (!(err = pirq->pic->ops->irq_set_affinity(pirq, affinity)))
        {
            rt_memcpy(pirq->affinity, affinity, sizeof(pirq->affinity));
        }
    }

    return err;
}

rt_err_t rt_pic_irq_parent_set_triger_mode(struct rt_pic_irq *pirq, rt_uint32_t mode)
{
    rt_err_t err = -RT_ENOSYS;

    RT_ASSERT(pirq != RT_NULL);
    pirq = pirq->parent;

    if (pirq->pic->ops->irq_set_triger_mode)
    {
        if (!(err = pirq->pic->ops->irq_set_triger_mode(pirq, mode)))
        {
            pirq->mode = mode;
        }
    }

    return err;
}

#ifdef RT_USING_OFW
RT_OFW_STUB_RANGE_EXPORT(pic, _pic_ofw_start, _pic_ofw_end);

static rt_err_t ofw_pic_init(void)
{
    struct rt_ofw_node *ic_np;

    rt_ofw_foreach_node_by_prop(ic_np, "interrupt-controller")
    {
        rt_ofw_stub_probe_range(ic_np, &_pic_ofw_start, &_pic_ofw_end);
    }

    return RT_EOK;
}
#else
static rt_err_t ofw_pic_init(void)
{
    return RT_EOK;
}
#endif /* !RT_USING_OFW */

rt_err_t rt_pic_init(void)
{
    rt_err_t err;

    LOG_D("init start");

    err = ofw_pic_init();

    LOG_D("init end");

    return err;
}
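
/*
 * MSH command: "list_irq" prints the IRQs that have a handler attached;
 * "list_irq all" prints every IRQ owned by a registered PIC.
 */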
#if defined(RT_USING_CONSOLE) && defined(RT_USING_MSH)
static int list_irq(int argc, char **argv)
{
    rt_size_t irq_nr = 0;
    rt_bool_t dump_all = RT_FALSE;
    const char *const irq_modes[] =
    {
        [RT_IRQ_MODE_NONE] = "None",
        [RT_IRQ_MODE_EDGE_RISING] = "Edge-Rising",
        [RT_IRQ_MODE_EDGE_FALLING] = "Edge-Falling",
        [RT_IRQ_MODE_EDGE_BOTH] = "Edge-Both",
        [RT_IRQ_MODE_LEVEL_HIGH] = "Level-High",
        [RT_IRQ_MODE_LEVEL_LOW] = "Level-Low",
    };
    static char info[RT_CONSOLEBUF_SIZE];
#ifdef RT_USING_SMP
    static char cpumask[RT_CPUS_NR + 1] = { [RT_CPUS_NR] = '\0' };
#endif

    if (argc > 1)
    {
        if (!rt_strcmp(argv[1], "all"))
        {
            dump_all = RT_TRUE;
        }
    }

    rt_kprintf("%-*.s %-*.s %s %-*.s %-*.s %-*.s %-*.sUsers%-*.s",
            6, "IRQ",
            6, "HW-IRQ",
            "MSI",
            _pic_name_max, "PIC",
            12, "Mode",
#ifdef RT_USING_SMP
            RT_CPUS_NR, "CPUs",
#else
            0, 0,
#endif
#ifdef RT_USING_INTERRUPT_INFO
            11, "Count",
            5, ""
#else
            0, 0,
            10, "-Number"
#endif
            );

#if defined(RT_USING_SMP) && defined(RT_USING_INTERRUPT_INFO)
    for (int i = 0; i < RT_CPUS_NR; i++)
    {
        rt_kprintf(" cpu%2d ", i);
    }
#endif

#ifdef RT_USING_PIC_STATISTICS
    rt_kprintf(" max/ns avg/ns min/ns");
#endif
    rt_kputs("\n");

    for (int i = 0; i < RT_ARRAY_SIZE(_pirq_hash); ++i)
    {
        struct rt_pic_irq *pirq = &_pirq_hash[i];

        if (!pirq->pic || !(dump_all || pirq->isr.action.handler))
        {
            continue;
        }

        rt_snprintf(info, sizeof(info), "%-6d %-6d %c %-*.s %-*.s ",
                pirq->irq,
                pirq->hwirq,
                pirq->msi_desc ? 'Y' : 'N',
                _pic_name_max, pirq->pic->ops->name,
                12, irq_modes[pirq->mode]);

#ifdef RT_USING_SMP
        for (int group = 0, id = 0; group < RT_ARRAY_SIZE(pirq->affinity); ++group)
        {
            rt_bitmap_t mask = pirq->affinity[group];

            for (int idx = 0; id < RT_CPUS_NR && idx < RT_BITMAP_BIT_LEN(1); ++idx, ++id)
            {
                cpumask[RT_ARRAY_SIZE(cpumask) - id - 2] = '0' + ((mask >> idx) & 1);
            }
        }
#endif /* RT_USING_SMP */

        rt_kputs(info);
#ifdef RT_USING_SMP
        rt_kputs(cpumask);
#endif

#ifdef RT_USING_INTERRUPT_INFO
        rt_kprintf(" %-10d ", pirq->isr.action.counter);
        rt_kprintf("%-*.s", 10, pirq->isr.action.name);
#ifdef RT_USING_SMP
        for (int cpuid = 0; cpuid < RT_CPUS_NR; cpuid++)
        {
            rt_kprintf(" %-10d", pirq->isr.action.cpu_counter[cpuid]);
        }
#endif
#ifdef RT_USING_PIC_STATISTICS
        rt_kprintf(" %-10d %-10d %-10d", pirq->stat.max_irq_time_ns, pirq->stat.sum_irq_time_ns/pirq->isr.action.counter, pirq->stat.min_irq_time_ns);
#endif
        rt_kputs("\n");

        if (!rt_list_isempty(&pirq->isr.list))
        {
            struct rt_pic_isr *repeat_isr;

            rt_list_for_each_entry(repeat_isr, &pirq->isr.list, list)
            {
                rt_kputs(info);
#ifdef RT_USING_SMP
                rt_kputs(cpumask);
#endif
                rt_kprintf("%-10d ", repeat_isr->action.counter);
                rt_kprintf("%-*.s", 10, repeat_isr->action.name);
#ifdef RT_USING_SMP
                for (int cpuid = 0; cpuid < RT_CPUS_NR; cpuid++)
                {
                    rt_kprintf(" %-10d", repeat_isr->action.cpu_counter[cpuid]);
                }
#endif
#ifdef RT_USING_PIC_STATISTICS
                rt_kprintf(" --- --- ---");
#endif
                rt_kputs("\n");
            }
        }
#else
        rt_kprintf(" %d\n", rt_list_len(&pirq->isr.list));
#endif
        ++irq_nr;
    }

    rt_kprintf("%d IRQs found\n", irq_nr);

    return 0;
}
MSH_CMD_EXPORT(list_irq, dump irqs in use or pass arg all to dump every irq);
#endif /* RT_USING_CONSOLE && RT_USING_MSH */