/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2022-08-24     GuEe-GUI     first version
 */
#include <rthw.h>
#include <rtthread.h>

#define DBG_TAG "rtdm.pic"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <drivers/pic.h>
#include <ktime.h>
struct irq_traps
{
    rt_list_t list;

    void *data;
    rt_bool_t (*handler)(void *);
};

static int _ipi_hash[] =
{
#ifdef RT_USING_SMP
    [RT_SCHEDULE_IPI] = RT_SCHEDULE_IPI,
    [RT_STOP_IPI] = RT_STOP_IPI,
#endif
};

/* reserved ipi */
static int _pirq_hash_idx = RT_ARRAY_SIZE(_ipi_hash);
static struct rt_pic_irq _pirq_hash[MAX_HANDLERS] =
{
    [0 ... MAX_HANDLERS - 1] =
    {
        .irq = -1,
        .hwirq = -1,
        .mode = RT_IRQ_MODE_NONE,
        .priority = RT_UINT32_MAX,
        .rw_lock = { },
    }
};

static struct rt_spinlock _pic_lock = { };
static rt_size_t _pic_name_max = sizeof("PIC");
static rt_list_t _pic_nodes = RT_LIST_OBJECT_INIT(_pic_nodes);
static rt_list_t _traps_nodes = RT_LIST_OBJECT_INIT(_traps_nodes);
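
/*
 * Map a logical IRQ number to its descriptor in the global _pirq_hash table.
 * Entries whose irq field is still -1 have never been configured and are
 * reported as invalid.
 */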
static struct rt_pic_irq *irq2pirq(int irq)
{
    struct rt_pic_irq *pirq = RT_NULL;

    if ((irq >= 0) && (irq < MAX_HANDLERS))
    {
        pirq = &_pirq_hash[irq];

        if (pirq->irq < 0)
        {
            pirq = RT_NULL;
        }
    }

    if (!pirq)
    {
        LOG_E("irq = %d is invalid", irq);
    }

    return pirq;
}
static void append_pic(struct rt_pic *pic)
{
    int pic_name_len = rt_strlen(pic->ops->name);

    rt_list_insert_before(&_pic_nodes, &pic->list);

    if (pic_name_len > _pic_name_max)
    {
        _pic_name_max = pic_name_len;
    }
}
void rt_pic_default_name(struct rt_pic *pic)
{
    if (pic)
    {
#if RT_NAME_MAX > 0
        rt_strncpy(pic->parent.name, "PIC", RT_NAME_MAX - 1);
        pic->parent.name[RT_NAME_MAX - 1] = '\0';
#else
        pic->parent.name = "PIC";
#endif
    }
}
struct rt_pic *rt_pic_dynamic_cast(void *ptr)
{
    struct rt_pic *pic = RT_NULL, *tmp = RT_NULL;

    if (ptr)
    {
        struct rt_object *obj = ptr;

        if (obj->type == RT_Object_Class_Unknown)
        {
            tmp = (void *)obj;
        }
        else if (obj->type == RT_Object_Class_Device)
        {
            tmp = (void *)obj + sizeof(struct rt_device);
        }
        else
        {
            tmp = (void *)obj + sizeof(struct rt_object);
        }

        if (tmp && !rt_strcmp(tmp->parent.name, "PIC"))
        {
            pic = tmp;
        }
    }

    return pic;
}
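
/*
 * Reserve a linear range of irq_nr logical IRQs from the global table for a
 * controller. The first RT_ARRAY_SIZE(_ipi_hash) slots stay reserved for
 * IPIs, so allocation starts at _pirq_hash_idx and fails with -RT_EEMPTY once
 * the table is exhausted.
 */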
rt_err_t rt_pic_linear_irq(struct rt_pic *pic, rt_size_t irq_nr)
{
    rt_err_t err = RT_EOK;

    if (pic && pic->ops && pic->ops->name)
    {
        rt_ubase_t level = rt_spin_lock_irqsave(&_pic_lock);

        if (_pirq_hash_idx + irq_nr <= RT_ARRAY_SIZE(_pirq_hash))
        {
            rt_list_init(&pic->list);

            rt_pic_default_name(pic);
            pic->parent.type = RT_Object_Class_Unknown;

            pic->irq_start = _pirq_hash_idx;
            pic->irq_nr = irq_nr;
            pic->pirqs = &_pirq_hash[_pirq_hash_idx];

            _pirq_hash_idx += irq_nr;

            append_pic(pic);

            LOG_D("%s alloc irqs ranges [%d, %d]", pic->ops->name,
                    pic->irq_start, pic->irq_start + pic->irq_nr);
        }
        else
        {
            LOG_E("%s alloc %d irqs is overflow", pic->ops->name, irq_nr);

            err = -RT_EEMPTY;
        }

        rt_spin_unlock_irqrestore(&_pic_lock, level);
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}
static void config_pirq(struct rt_pic *pic, struct rt_pic_irq *pirq, int irq, int hwirq)
{
    rt_ubase_t level = rt_spin_lock_irqsave(&pirq->rw_lock);

    pirq->irq = irq;
    pirq->hwirq = hwirq;
    pirq->pic = pic;

    rt_list_init(&pirq->list);
    rt_list_init(&pirq->children_nodes);
    rt_list_init(&pirq->isr.list);

    rt_spin_unlock_irqrestore(&pirq->rw_lock, level);
}
int rt_pic_config_ipi(struct rt_pic *pic, int ipi_index, int hwirq)
{
    int ipi = ipi_index;
    struct rt_pic_irq *pirq;

    if (pic && ipi < RT_ARRAY_SIZE(_ipi_hash) && hwirq >= 0 && pic->ops->irq_send_ipi)
    {
        pirq = &_pirq_hash[ipi];
        config_pirq(pic, pirq, ipi, hwirq);

        for (int cpuid = 0; cpuid < RT_CPUS_NR; ++cpuid)
        {
            RT_IRQ_AFFINITY_SET(pirq->affinity, cpuid);
        }

        LOG_D("%s config %s %d to hwirq %d", pic->ops->name, "ipi", ipi, hwirq);
    }
    else
    {
        ipi = -RT_EINVAL;
    }

    return ipi;
}
int rt_pic_config_irq(struct rt_pic *pic, int irq_index, int hwirq)
{
    int irq;

    if (pic && hwirq >= 0)
    {
        irq = pic->irq_start + irq_index;

        if (irq >= 0 && irq < MAX_HANDLERS)
        {
            config_pirq(pic, &_pirq_hash[irq], irq, hwirq);

            LOG_D("%s config %s %d to hwirq %d", pic->ops->name, "irq", irq, hwirq);
        }
        else
        {
            irq = -RT_ERROR;
        }
    }
    else
    {
        irq = -RT_EINVAL;
    }

    return irq;
}
struct rt_pic_irq *rt_pic_find_ipi(struct rt_pic *pic, int ipi_index)
{
    struct rt_pic_irq *pirq = &_pirq_hash[ipi_index];

    RT_ASSERT(ipi_index < RT_ARRAY_SIZE(_ipi_hash));
    RT_ASSERT(pirq->pic == pic);

    return pirq;
}

struct rt_pic_irq *rt_pic_find_pirq(struct rt_pic *pic, int irq)
{
    if (pic && irq >= pic->irq_start && irq <= pic->irq_start + pic->irq_nr)
    {
        return &pic->pirqs[irq - pic->irq_start];
    }

    return RT_NULL;
}
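
/*
 * Cascade support: a child IRQ can be chained to a parent IRQ (for example a
 * line of a secondary controller routed through the root controller). The
 * child inherits the parent's priority and affinity, and when the owning PIC
 * declares RT_PIC_F_IRQ_ROUTING the child is also linked into the parent's
 * children_nodes list so rt_pic_handle_isr() can dispatch it.
 */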
rt_err_t rt_pic_cascade(struct rt_pic_irq *pirq, int parent_irq)
{
    rt_err_t err = RT_EOK;

    if (pirq && !pirq->parent && parent_irq >= 0)
    {
        struct rt_pic_irq *parent;

        rt_spin_lock(&pirq->rw_lock);

        parent = irq2pirq(parent_irq);

        if (parent)
        {
            pirq->parent = parent;
            pirq->priority = parent->priority;
            rt_memcpy(&pirq->affinity, &parent->affinity, sizeof(pirq->affinity));
        }

        rt_spin_unlock(&pirq->rw_lock);

        if (parent && pirq->pic->ops->flags & RT_PIC_F_IRQ_ROUTING)
        {
            rt_spin_lock(&parent->rw_lock);
            rt_list_insert_before(&parent->children_nodes, &pirq->list);
            rt_spin_unlock(&parent->rw_lock);
        }
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}
rt_err_t rt_pic_uncascade(struct rt_pic_irq *pirq)
{
    rt_err_t err = RT_EOK;

    if (pirq && pirq->parent)
    {
        struct rt_pic_irq *parent;

        rt_spin_lock(&pirq->rw_lock);

        parent = pirq->parent;
        pirq->parent = RT_NULL;

        rt_spin_unlock(&pirq->rw_lock);

        if (parent && pirq->pic->ops->flags & RT_PIC_F_IRQ_ROUTING)
        {
            rt_spin_lock(&parent->rw_lock);
            rt_list_remove(&pirq->list);
            rt_spin_unlock(&parent->rw_lock);
        }
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}
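
/*
 * Attach a handler to a logical IRQ. The first handler is stored inline in
 * pirq->isr; further handlers for a shared line are allocated from the heap
 * and linked behind it, so several drivers can share one IRQ.
 *
 * Typical driver usage (illustrative sketch only; "demo_isr", "demo_dev" and
 * the irq value are placeholders, not part of this file):
 *
 *     static void demo_isr(int irq, void *param)
 *     {
 *         // handle the device interrupt
 *     }
 *
 *     rt_pic_attach_irq(irq, demo_isr, &demo_dev, "demo", 0);
 *     rt_pic_irq_unmask(irq);
 */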
rt_err_t rt_pic_attach_irq(int irq, rt_isr_handler_t handler, void *uid, const char *name, int flags)
{
    rt_err_t err = -RT_EINVAL;
    struct rt_pic_irq *pirq;

    if (handler && name && (pirq = irq2pirq(irq)))
    {
        struct rt_pic_isr *isr = RT_NULL;
        rt_ubase_t level = rt_spin_lock_irqsave(&pirq->rw_lock);

        err = RT_EOK;

        if (!pirq->isr.action.handler)
        {
            /* first attach */
            isr = &pirq->isr;
            rt_list_init(&isr->list);
        }
        else
        {
            rt_spin_unlock_irqrestore(&pirq->rw_lock, level);

            if ((isr = rt_malloc(sizeof(*isr))))
            {
                rt_list_init(&isr->list);

                level = rt_spin_lock_irqsave(&pirq->rw_lock);
                rt_list_insert_after(&pirq->isr.list, &isr->list);
            }
            else
            {
                LOG_E("No memory to save '%s' isr", name);
                err = -RT_ERROR;
            }
        }

        if (!err)
        {
            isr->flags = flags;
            isr->action.handler = handler;
            isr->action.param = uid;
#ifdef RT_USING_INTERRUPT_INFO
            isr->action.counter = 0;
            rt_strncpy(isr->action.name, name, RT_NAME_MAX - 1);
            isr->action.name[RT_NAME_MAX - 1] = '\0';
#ifdef RT_USING_SMP
            rt_memset(isr->action.cpu_counter, 0, sizeof(isr->action.cpu_counter));
#endif
#endif
            rt_spin_unlock_irqrestore(&pirq->rw_lock, level);
        }
    }

    return err;
}
rt_err_t rt_pic_detach_irq(int irq, void *uid)
{
    rt_err_t err = -RT_EINVAL;
    struct rt_pic_irq *pirq = irq2pirq(irq);

    if (pirq)
    {
        rt_bool_t will_free = RT_FALSE;
        struct rt_pic_isr *isr = RT_NULL;
        rt_ubase_t level = rt_spin_lock_irqsave(&pirq->rw_lock);

        isr = &pirq->isr;

        if (isr->action.param == uid)
        {
            if (rt_list_isempty(&isr->list))
            {
                isr->action.handler = RT_NULL;
                isr->action.param = RT_NULL;
            }
            else
            {
                struct rt_pic_isr *next_isr = rt_list_first_entry(&isr->list, struct rt_pic_isr, list);

                rt_list_remove(&next_isr->list);

                isr->action.handler = next_isr->action.handler;
                isr->action.param = next_isr->action.param;
#ifdef RT_USING_INTERRUPT_INFO
                isr->action.counter = next_isr->action.counter;
                rt_strncpy(isr->action.name, next_isr->action.name, RT_NAME_MAX);
#ifdef RT_USING_SMP
                rt_memcpy(isr->action.cpu_counter, next_isr->action.cpu_counter, sizeof(next_isr->action.cpu_counter));
#endif
#endif
                isr = next_isr;
                will_free = RT_TRUE;
            }

            err = RT_EOK;
        }
        else
        {
            rt_list_for_each_entry(isr, &pirq->isr.list, list)
            {
                if (isr->action.param == uid)
                {
                    err = RT_EOK;
                    will_free = RT_TRUE;

                    rt_list_remove(&isr->list);
                    break;
                }
            }
        }

        rt_spin_unlock_irqrestore(&pirq->rw_lock, level);

        if (will_free)
        {
            rt_free(isr);
        }
    }

    return err;
}
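
/*
 * Trap handlers registered here are polled by rt_pic_do_traps(); the first
 * handler that returns RT_TRUE claims the event and stops the scan.
 */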
rt_err_t rt_pic_add_traps(rt_bool_t (*handler)(void *), void *data)
{
    rt_err_t err = -RT_EINVAL;

    if (handler)
    {
        struct irq_traps *traps = rt_malloc(sizeof(*traps));

        if (traps)
        {
            rt_ubase_t level = rt_hw_interrupt_disable();

            rt_list_init(&traps->list);
            traps->data = data;
            traps->handler = handler;

            rt_list_insert_before(&_traps_nodes, &traps->list);
            err = RT_EOK;

            rt_hw_interrupt_enable(level);
        }
        else
        {
            LOG_E("No memory to save '%p' handler", handler);
            err = -RT_ENOMEM;
        }
    }

    return err;
}
rt_err_t rt_pic_do_traps(void)
{
    rt_err_t err = -RT_ERROR;
    struct irq_traps *traps;

    rt_list_for_each_entry(traps, &_traps_nodes, list)
    {
        if (traps->handler(traps->data))
        {
            err = RT_EOK;
            break;
        }
    }

    return err;
}
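
/*
 * Dispatch one decoded interrupt: first ack/handle/eoi every cascaded child,
 * then run the primary action and any shared handlers attached to this pirq.
 * With RT_USING_PIC_STATISTICS enabled, the per-IRQ min/max/total handling
 * times are updated from the boottime clock.
 */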
rt_err_t rt_pic_handle_isr(struct rt_pic_irq *pirq)
{
    rt_err_t err = -RT_EEMPTY;
    rt_list_t *handler_nodes;
    struct rt_irq_desc *action;
#ifdef RT_USING_PIC_STATISTICS
    struct timespec ts;
    rt_ubase_t irq_time_ns;
#endif

    RT_ASSERT(pirq != RT_NULL);
    RT_ASSERT(pirq->pic != RT_NULL);

#ifdef RT_USING_PIC_STATISTICS
    rt_ktime_boottime_get_ns(&ts);
    pirq->stat.current_irq_begin[rt_hw_cpu_id()] = ts.tv_sec * (1000UL * 1000 * 1000) + ts.tv_nsec;
#endif

    handler_nodes = &pirq->isr.list;
    action = &pirq->isr.action;

    if (!rt_list_isempty(&pirq->children_nodes))
    {
        struct rt_pic_irq *child;

        rt_list_for_each_entry(child, &pirq->children_nodes, list)
        {
            rt_pic_irq_ack(child->irq);

            err = rt_pic_handle_isr(child);

            rt_pic_irq_eoi(child->irq);
        }
    }

    if (action->handler)
    {
        action->handler(pirq->irq, action->param);
#ifdef RT_USING_INTERRUPT_INFO
        action->counter++;
#ifdef RT_USING_SMP
        action->cpu_counter[rt_hw_cpu_id()]++;
#endif
#endif

        if (!rt_list_isempty(handler_nodes))
        {
            struct rt_pic_isr *isr;

            rt_list_for_each_entry(isr, handler_nodes, list)
            {
                action = &isr->action;
                RT_ASSERT(action->handler != RT_NULL);

                action->handler(pirq->irq, action->param);
#ifdef RT_USING_INTERRUPT_INFO
                action->counter++;
#ifdef RT_USING_SMP
                action->cpu_counter[rt_hw_cpu_id()]++;
#endif
#endif
            }
        }

        err = RT_EOK;
    }

#ifdef RT_USING_PIC_STATISTICS
    rt_ktime_boottime_get_ns(&ts);
    irq_time_ns = ts.tv_sec * (1000UL * 1000 * 1000) + ts.tv_nsec - pirq->stat.current_irq_begin[rt_hw_cpu_id()];
    pirq->stat.sum_irq_time_ns += irq_time_ns;

    if (irq_time_ns < pirq->stat.min_irq_time_ns || pirq->stat.min_irq_time_ns == 0)
    {
        pirq->stat.min_irq_time_ns = irq_time_ns;
    }
    if (irq_time_ns > pirq->stat.max_irq_time_ns)
    {
        pirq->stat.max_irq_time_ns = irq_time_ns;
    }
#endif

    return err;
}
rt_weak rt_err_t rt_pic_user_extends(struct rt_pic *pic)
{
    return -RT_ENOSYS;
}

rt_err_t rt_pic_irq_init(void)
{
    rt_err_t err = RT_EOK;
    struct rt_pic *pic;

    rt_list_for_each_entry(pic, &_pic_nodes, list)
    {
        if (pic->ops->irq_init)
        {
            err = pic->ops->irq_init(pic);

            if (err)
            {
                LOG_E("PIC = %s init fail", pic->ops->name);
                break;
            }
        }
    }

    return err;
}
rt_err_t rt_pic_irq_finit(void)
{
    rt_err_t err = RT_EOK;
    struct rt_pic *pic;

    rt_list_for_each_entry(pic, &_pic_nodes, list)
    {
        if (pic->ops->irq_finit)
        {
            err = pic->ops->irq_finit(pic);

            if (err)
            {
                LOG_E("PIC = %s finit fail", pic->ops->name);
                break;
            }
        }
    }

    return err;
}
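
/*
 * The rt_pic_irq_* helpers below look up the pirq for a logical IRQ, take the
 * per-IRQ spinlock and forward the request to the owning controller's ops
 * callback when that callback is implemented.
 */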
void rt_pic_irq_enable(int irq)
{
    struct rt_pic_irq *pirq = irq2pirq(irq);

    RT_ASSERT(pirq != RT_NULL);

    rt_hw_spin_lock(&pirq->rw_lock.lock);
    if (pirq->pic->ops->irq_enable)
    {
        pirq->pic->ops->irq_enable(pirq);
    }
    rt_hw_spin_unlock(&pirq->rw_lock.lock);
}

void rt_pic_irq_disable(int irq)
{
    struct rt_pic_irq *pirq = irq2pirq(irq);

    RT_ASSERT(pirq != RT_NULL);

    rt_hw_spin_lock(&pirq->rw_lock.lock);
    if (pirq->pic->ops->irq_disable)
    {
        pirq->pic->ops->irq_disable(pirq);
    }
    rt_hw_spin_unlock(&pirq->rw_lock.lock);
}

void rt_pic_irq_ack(int irq)
{
    struct rt_pic_irq *pirq = irq2pirq(irq);

    RT_ASSERT(pirq != RT_NULL);

    rt_hw_spin_lock(&pirq->rw_lock.lock);
    if (pirq->pic->ops->irq_ack)
    {
        pirq->pic->ops->irq_ack(pirq);
    }
    rt_hw_spin_unlock(&pirq->rw_lock.lock);
}

void rt_pic_irq_mask(int irq)
{
    struct rt_pic_irq *pirq = irq2pirq(irq);

    RT_ASSERT(pirq != RT_NULL);

    rt_hw_spin_lock(&pirq->rw_lock.lock);
    if (pirq->pic->ops->irq_mask)
    {
        pirq->pic->ops->irq_mask(pirq);
    }
    rt_hw_spin_unlock(&pirq->rw_lock.lock);
}

void rt_pic_irq_unmask(int irq)
{
    struct rt_pic_irq *pirq = irq2pirq(irq);

    RT_ASSERT(pirq != RT_NULL);

    rt_hw_spin_lock(&pirq->rw_lock.lock);
    if (pirq->pic->ops->irq_unmask)
    {
        pirq->pic->ops->irq_unmask(pirq);
    }
    rt_hw_spin_unlock(&pirq->rw_lock.lock);
}

void rt_pic_irq_eoi(int irq)
{
    struct rt_pic_irq *pirq = irq2pirq(irq);

    RT_ASSERT(pirq != RT_NULL);

    rt_hw_spin_lock(&pirq->rw_lock.lock);
    if (pirq->pic->ops->irq_eoi)
    {
        pirq->pic->ops->irq_eoi(pirq);
    }
    rt_hw_spin_unlock(&pirq->rw_lock.lock);
}
rt_err_t rt_pic_irq_set_priority(int irq, rt_uint32_t priority)
{
    rt_err_t err = -RT_EINVAL;
    struct rt_pic_irq *pirq = irq2pirq(irq);

    if (pirq)
    {
        rt_hw_spin_lock(&pirq->rw_lock.lock);
        if (pirq->pic->ops->irq_set_priority)
        {
            err = pirq->pic->ops->irq_set_priority(pirq, priority);

            if (!err)
            {
                pirq->priority = priority;
            }
        }
        else
        {
            err = -RT_ENOSYS;
        }
        rt_hw_spin_unlock(&pirq->rw_lock.lock);
    }

    return err;
}

rt_uint32_t rt_pic_irq_get_priority(int irq)
{
    rt_uint32_t priority = RT_UINT32_MAX;
    struct rt_pic_irq *pirq = irq2pirq(irq);

    if (pirq)
    {
        rt_hw_spin_lock(&pirq->rw_lock.lock);
        priority = pirq->priority;
        rt_hw_spin_unlock(&pirq->rw_lock.lock);
    }

    return priority;
}
rt_err_t rt_pic_irq_set_affinity(int irq, rt_bitmap_t *affinity)
{
    rt_err_t err = -RT_EINVAL;
    struct rt_pic_irq *pirq;

    if (affinity && (pirq = irq2pirq(irq)))
    {
        rt_hw_spin_lock(&pirq->rw_lock.lock);
        if (pirq->pic->ops->irq_set_affinity)
        {
            err = pirq->pic->ops->irq_set_affinity(pirq, affinity);

            if (!err)
            {
                rt_memcpy(pirq->affinity, affinity, sizeof(pirq->affinity));
            }
        }
        else
        {
            err = -RT_ENOSYS;
        }
        rt_hw_spin_unlock(&pirq->rw_lock.lock);
    }

    return err;
}

rt_err_t rt_pic_irq_get_affinity(int irq, rt_bitmap_t *out_affinity)
{
    rt_err_t err = -RT_EINVAL;
    struct rt_pic_irq *pirq;

    if (out_affinity && (pirq = irq2pirq(irq)))
    {
        rt_hw_spin_lock(&pirq->rw_lock.lock);
        rt_memcpy(out_affinity, pirq->affinity, sizeof(pirq->affinity));
        err = RT_EOK;
        rt_hw_spin_unlock(&pirq->rw_lock.lock);
    }

    return err;
}
rt_err_t rt_pic_irq_set_triger_mode(int irq, rt_uint32_t mode)
{
    rt_err_t err = -RT_EINVAL;
    struct rt_pic_irq *pirq;

    if ((~mode & RT_IRQ_MODE_MASK) && (pirq = irq2pirq(irq)))
    {
        rt_hw_spin_lock(&pirq->rw_lock.lock);
        if (pirq->pic->ops->irq_set_triger_mode)
        {
            err = pirq->pic->ops->irq_set_triger_mode(pirq, mode);

            if (!err)
            {
                pirq->mode = mode;
            }
        }
        else
        {
            err = -RT_ENOSYS;
        }
        rt_hw_spin_unlock(&pirq->rw_lock.lock);
    }

    return err;
}

rt_uint32_t rt_pic_irq_get_triger_mode(int irq)
{
    rt_uint32_t mode = RT_UINT32_MAX;
    struct rt_pic_irq *pirq = irq2pirq(irq);

    if (pirq)
    {
        rt_hw_spin_lock(&pirq->rw_lock.lock);
        mode = pirq->mode;
        rt_hw_spin_unlock(&pirq->rw_lock.lock);
    }

    return mode;
}
void rt_pic_irq_send_ipi(int irq, rt_bitmap_t *cpumask)
{
    struct rt_pic_irq *pirq;

    if (cpumask && (pirq = irq2pirq(irq)))
    {
        rt_hw_spin_lock(&pirq->rw_lock.lock);
        if (pirq->pic->ops->irq_send_ipi)
        {
            pirq->pic->ops->irq_send_ipi(pirq, cpumask);
        }
        rt_hw_spin_unlock(&pirq->rw_lock.lock);
    }
}
rt_err_t rt_pic_irq_set_state_raw(struct rt_pic *pic, int hwirq, int type, rt_bool_t state)
{
    rt_err_t err;

    if (pic && hwirq >= 0)
    {
        if (pic->ops->irq_set_state)
        {
            err = pic->ops->irq_set_state(pic, hwirq, type, state);
        }
        else
        {
            err = -RT_ENOSYS;
        }
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}

rt_err_t rt_pic_irq_get_state_raw(struct rt_pic *pic, int hwirq, int type, rt_bool_t *out_state)
{
    rt_err_t err;

    if (pic && hwirq >= 0)
    {
        if (pic->ops->irq_get_state)
        {
            rt_bool_t state;

            if (!(err = pic->ops->irq_get_state(pic, hwirq, type, &state)) && out_state)
            {
                *out_state = state;
            }
        }
        else
        {
            err = -RT_ENOSYS;
        }
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}
rt_err_t rt_pic_irq_set_state(int irq, int type, rt_bool_t state)
{
    rt_err_t err;
    struct rt_pic_irq *pirq = irq2pirq(irq);

    RT_ASSERT(pirq != RT_NULL);

    rt_hw_spin_lock(&pirq->rw_lock.lock);
    err = rt_pic_irq_set_state_raw(pirq->pic, pirq->hwirq, type, state);
    rt_hw_spin_unlock(&pirq->rw_lock.lock);

    return err;
}

rt_err_t rt_pic_irq_get_state(int irq, int type, rt_bool_t *out_state)
{
    rt_err_t err;
    struct rt_pic_irq *pirq = irq2pirq(irq);

    RT_ASSERT(pirq != RT_NULL);

    rt_hw_spin_lock(&pirq->rw_lock.lock);
    err = rt_pic_irq_get_state_raw(pirq->pic, pirq->hwirq, type, out_state);
    rt_hw_spin_unlock(&pirq->rw_lock.lock);

    return err;
}
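
/*
 * The rt_pic_irq_parent_* helpers operate on pirq->parent and are intended to
 * be called from a cascaded controller's ops to forward an operation to the
 * parent line; they assume the pirq was cascaded with rt_pic_cascade() first.
 */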
void rt_pic_irq_parent_enable(struct rt_pic_irq *pirq)
{
    RT_ASSERT(pirq != RT_NULL);
    pirq = pirq->parent;

    if (pirq->pic->ops->irq_enable)
    {
        pirq->pic->ops->irq_enable(pirq);
    }
}

void rt_pic_irq_parent_disable(struct rt_pic_irq *pirq)
{
    RT_ASSERT(pirq != RT_NULL);
    pirq = pirq->parent;

    if (pirq->pic->ops->irq_disable)
    {
        pirq->pic->ops->irq_disable(pirq);
    }
}

void rt_pic_irq_parent_ack(struct rt_pic_irq *pirq)
{
    RT_ASSERT(pirq != RT_NULL);
    pirq = pirq->parent;

    if (pirq->pic->ops->irq_ack)
    {
        pirq->pic->ops->irq_ack(pirq);
    }
}

void rt_pic_irq_parent_mask(struct rt_pic_irq *pirq)
{
    RT_ASSERT(pirq != RT_NULL);
    pirq = pirq->parent;

    if (pirq->pic->ops->irq_mask)
    {
        pirq->pic->ops->irq_mask(pirq);
    }
}

void rt_pic_irq_parent_unmask(struct rt_pic_irq *pirq)
{
    RT_ASSERT(pirq != RT_NULL);
    pirq = pirq->parent;

    if (pirq->pic->ops->irq_unmask)
    {
        pirq->pic->ops->irq_unmask(pirq);
    }
}

void rt_pic_irq_parent_eoi(struct rt_pic_irq *pirq)
{
    RT_ASSERT(pirq != RT_NULL);
    pirq = pirq->parent;

    if (pirq->pic->ops->irq_eoi)
    {
        pirq->pic->ops->irq_eoi(pirq);
    }
}
rt_err_t rt_pic_irq_parent_set_priority(struct rt_pic_irq *pirq, rt_uint32_t priority)
{
    rt_err_t err = -RT_ENOSYS;

    RT_ASSERT(pirq != RT_NULL);
    pirq = pirq->parent;

    if (pirq->pic->ops->irq_set_priority)
    {
        if (!(err = pirq->pic->ops->irq_set_priority(pirq, priority)))
        {
            pirq->priority = priority;
        }
    }

    return err;
}

rt_err_t rt_pic_irq_parent_set_affinity(struct rt_pic_irq *pirq, rt_bitmap_t *affinity)
{
    rt_err_t err = -RT_ENOSYS;

    RT_ASSERT(pirq != RT_NULL);
    pirq = pirq->parent;

    if (pirq->pic->ops->irq_set_affinity)
    {
        if (!(err = pirq->pic->ops->irq_set_affinity(pirq, affinity)))
        {
            rt_memcpy(pirq->affinity, affinity, sizeof(pirq->affinity));
        }
    }

    return err;
}

rt_err_t rt_pic_irq_parent_set_triger_mode(struct rt_pic_irq *pirq, rt_uint32_t mode)
{
    rt_err_t err = -RT_ENOSYS;

    RT_ASSERT(pirq != RT_NULL);
    pirq = pirq->parent;

    if (pirq->pic->ops->irq_set_triger_mode)
    {
        if (!(err = pirq->pic->ops->irq_set_triger_mode(pirq, mode)))
        {
            pirq->mode = mode;
        }
    }

    return err;
}
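
/*
 * With RT_USING_OFW enabled, every device-tree node carrying an
 * "interrupt-controller" property is probed against the PIC driver stubs
 * exported into the [_pic_ofw_start, _pic_ofw_end) section; otherwise
 * ofw_pic_init() is a no-op.
 */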
#ifdef RT_USING_OFW
RT_OFW_STUB_RANGE_EXPORT(pic, _pic_ofw_start, _pic_ofw_end);

static rt_err_t ofw_pic_init(void)
{
    struct rt_ofw_node *ic_np;

    rt_ofw_foreach_node_by_prop(ic_np, "interrupt-controller")
    {
        rt_ofw_stub_probe_range(ic_np, &_pic_ofw_start, &_pic_ofw_end);
    }

    return RT_EOK;
}
#else
static rt_err_t ofw_pic_init(void)
{
    return RT_EOK;
}
#endif /* !RT_USING_OFW */

rt_err_t rt_pic_init(void)
{
    rt_err_t err;

    LOG_D("init start");

    err = ofw_pic_init();

    LOG_D("init end");

    return err;
}
#if defined(RT_USING_CONSOLE) && defined(RT_USING_MSH)
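/*
 * MSH command "list_irq": print a table of the configured IRQs (logical and
 * hardware number, MSI flag, owning PIC, trigger mode, affinity and handler
 * information); "list_irq all" also includes IRQs without an attached handler.
 */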
static int list_irq(int argc, char **argv)
{
    rt_size_t irq_nr = 0;
    rt_bool_t dump_all = RT_FALSE;
    const char *const irq_modes[] =
    {
        [RT_IRQ_MODE_NONE] = "None",
        [RT_IRQ_MODE_EDGE_RISING] = "Edge-Rising",
        [RT_IRQ_MODE_EDGE_FALLING] = "Edge-Falling",
        [RT_IRQ_MODE_EDGE_BOTH] = "Edge-Both",
        [RT_IRQ_MODE_LEVEL_HIGH] = "Level-High",
        [RT_IRQ_MODE_LEVEL_LOW] = "Level-Low",
    };
    static char info[RT_CONSOLEBUF_SIZE];
#ifdef RT_USING_SMP
    static char cpumask[RT_CPUS_NR + 1] = { [RT_CPUS_NR] = '\0' };
#endif

    if (argc > 1)
    {
        if (!rt_strcmp(argv[1], "all"))
        {
            dump_all = RT_TRUE;
        }
    }

    rt_kprintf("%-*.s %-*.s %s %-*.s %-*.s %-*.s %-*.sUsers%-*.s",
            6, "IRQ",
            6, "HW-IRQ",
            "MSI",
            _pic_name_max, "PIC",
            12, "Mode",
#ifdef RT_USING_SMP
            RT_CPUS_NR, "CPUs",
#else
            0, 0,
#endif
#ifdef RT_USING_INTERRUPT_INFO
            11, "Count",
            5, ""
#else
            0, 0,
            10, "-Number"
#endif
            );

#if defined(RT_USING_SMP) && defined(RT_USING_INTERRUPT_INFO)
    for (int i = 0; i < RT_CPUS_NR; i++)
    {
        rt_kprintf(" cpu%2d ", i);
    }
#endif
#ifdef RT_USING_PIC_STATISTICS
    rt_kprintf(" max/ns avg/ns min/ns");
#endif
    rt_kputs("\n");

    for (int i = 0; i < RT_ARRAY_SIZE(_pirq_hash); ++i)
    {
        struct rt_pic_irq *pirq = &_pirq_hash[i];

        if (!pirq->pic || !(dump_all || pirq->isr.action.handler))
        {
            continue;
        }

        rt_snprintf(info, sizeof(info), "%-6d %-6d %c %-*.s %-*.s ",
                pirq->irq,
                pirq->hwirq,
                pirq->msi_desc ? 'Y' : 'N',
                _pic_name_max, pirq->pic->ops->name,
                12, irq_modes[pirq->mode]);

#ifdef RT_USING_SMP
        for (int group = 0, id = 0; group < RT_ARRAY_SIZE(pirq->affinity); ++group)
        {
            rt_bitmap_t mask = pirq->affinity[group];

            for (int idx = 0; id < RT_CPUS_NR && idx < RT_BITMAP_BIT_LEN(1); ++idx, ++id)
            {
                cpumask[RT_ARRAY_SIZE(cpumask) - id - 2] = '0' + ((mask >> idx) & 1);
            }
        }
#endif /* RT_USING_SMP */

        rt_kputs(info);
#ifdef RT_USING_SMP
        rt_kputs(cpumask);
#endif

#ifdef RT_USING_INTERRUPT_INFO
        rt_kprintf(" %-10d ", pirq->isr.action.counter);
        rt_kprintf("%-*.s", 10, pirq->isr.action.name);
#ifdef RT_USING_SMP
        for (int cpuid = 0; cpuid < RT_CPUS_NR; cpuid++)
        {
            rt_kprintf(" %-10d", pirq->isr.action.cpu_counter[cpuid]);
        }
#endif
#ifdef RT_USING_PIC_STATISTICS
        rt_kprintf(" %-10d %-10d %-10d", pirq->stat.max_irq_time_ns, pirq->stat.sum_irq_time_ns/pirq->isr.action.counter, pirq->stat.min_irq_time_ns);
#endif
        rt_kputs("\n");

        if (!rt_list_isempty(&pirq->isr.list))
        {
            struct rt_pic_isr *repeat_isr;

            rt_list_for_each_entry(repeat_isr, &pirq->isr.list, list)
            {
                rt_kputs(info);
#ifdef RT_USING_SMP
                rt_kputs(cpumask);
#endif
                rt_kprintf("%-10d ", repeat_isr->action.counter);
                rt_kprintf("%-*.s", 10, repeat_isr->action.name);
#ifdef RT_USING_SMP
                for (int cpuid = 0; cpuid < RT_CPUS_NR; cpuid++)
                {
                    rt_kprintf(" %-10d", repeat_isr->action.cpu_counter[cpuid]);
                }
#endif
#ifdef RT_USING_PIC_STATISTICS
                rt_kprintf(" --- --- ---");
#endif
                rt_kputs("\n");
            }
        }
#else
        rt_kprintf(" %d\n", rt_list_len(&pirq->isr.list));
#endif
        ++irq_nr;
    }

    rt_kprintf("%d IRQs found\n", irq_nr);

    return 0;
}
MSH_CMD_EXPORT(list_irq, dump the used irqs or pass all to dump every irq);
#endif /* RT_USING_CONSOLE && RT_USING_MSH */