/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2022-08-24     GuEe-GUI     first version
 */

#include <rthw.h>
#include <rtthread.h>

#define DBG_TAG "rtdm.pic"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <drivers/pic.h>

#ifdef RT_USING_PIC_STATISTICS
#include <ktime.h>
#endif
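
/*
 * A trap is a raw callback consulted by rt_pic_do_traps(); a handler returns
 * RT_TRUE when it has consumed the event, which stops the walk.
 */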
struct irq_traps
{
    rt_list_t list;

    void *data;
    rt_bool_t (*handler)(void *);
};

static int _ipi_hash[] =
{
#ifdef RT_USING_SMP
    [RT_SCHEDULE_IPI] = RT_SCHEDULE_IPI,
    [RT_STOP_IPI] = RT_STOP_IPI,
    [RT_SMP_CALL_IPI] = RT_SMP_CALL_IPI,
#endif
};

/* reserved ipi */
static int _pirq_hash_idx = RT_ARRAY_SIZE(_ipi_hash);
static struct rt_pic_irq _pirq_hash[MAX_HANDLERS] =
{
    [0 ... MAX_HANDLERS - 1] =
    {
        .irq = -1,
        .hwirq = -1,
        .mode = RT_IRQ_MODE_NONE,
        .priority = RT_UINT32_MAX,
        .rw_lock = { },
    }
};

static RT_DEFINE_SPINLOCK(_pic_lock);
static rt_size_t _pic_name_max = sizeof("PIC");
static rt_list_t _pic_nodes = RT_LIST_OBJECT_INIT(_pic_nodes);
static rt_list_t _traps_nodes = RT_LIST_OBJECT_INIT(_traps_nodes);
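
/* Map a logical IRQ number to its descriptor; unused slots (irq < 0) yield RT_NULL. */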
static struct rt_pic_irq *irq2pirq(int irq)
{
    struct rt_pic_irq *pirq = RT_NULL;

    if ((irq >= 0) && (irq < MAX_HANDLERS))
    {
        pirq = &_pirq_hash[irq];

        if (pirq->irq < 0)
        {
            pirq = RT_NULL;
        }
    }

    if (!pirq)
    {
        LOG_E("irq = %d is invalid", irq);
    }

    return pirq;
}

static void append_pic(struct rt_pic *pic)
{
    int pic_name_len = rt_strlen(pic->ops->name);

    rt_list_insert_before(&_pic_nodes, &pic->list);

    if (pic_name_len > _pic_name_max)
    {
        _pic_name_max = pic_name_len;
    }
}

void rt_pic_default_name(struct rt_pic *pic)
{
    if (pic)
    {
#if RT_NAME_MAX > 0
        rt_strncpy(pic->parent.name, "PIC", RT_NAME_MAX - 1);
        pic->parent.name[RT_NAME_MAX - 1] = '\0';
#else
        pic->parent.name = "PIC";
#endif
    }
}
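
/*
 * Recover a struct rt_pic pointer from an object/device pointer by skipping
 * the embedded rt_device/rt_object header and checking for the "PIC" name
 * set by rt_pic_default_name().
 */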
struct rt_pic *rt_pic_dynamic_cast(void *ptr)
{
    struct rt_pic *pic = RT_NULL, *tmp = RT_NULL;

    if (ptr)
    {
        struct rt_object *obj = ptr;

        if (obj->type == RT_Object_Class_Unknown)
        {
            tmp = (void *)obj;
        }
        else if (obj->type == RT_Object_Class_Device)
        {
            tmp = (void *)obj + sizeof(struct rt_device);
        }
        else
        {
            tmp = (void *)obj + sizeof(struct rt_object);
        }

        if (tmp && !rt_strcmp(tmp->parent.name, "PIC"))
        {
            pic = tmp;
        }
    }

    return pic;
}
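
/*
 * Reserve irq_nr consecutive logical IRQs from the global _pirq_hash table
 * for this PIC; on success the range starts at pic->irq_start and
 * pic->pirqs points at the first descriptor.
 */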
rt_err_t rt_pic_linear_irq(struct rt_pic *pic, rt_size_t irq_nr)
{
    rt_err_t err = RT_EOK;

    if (pic && pic->ops && pic->ops->name)
    {
        rt_ubase_t level = rt_spin_lock_irqsave(&_pic_lock);

        if (_pirq_hash_idx + irq_nr <= RT_ARRAY_SIZE(_pirq_hash))
        {
            rt_list_init(&pic->list);

            rt_pic_default_name(pic);
            pic->parent.type = RT_Object_Class_Unknown;

            pic->irq_start = _pirq_hash_idx;
            pic->irq_nr = irq_nr;
            pic->pirqs = &_pirq_hash[_pirq_hash_idx];

            _pirq_hash_idx += irq_nr;

            append_pic(pic);

            LOG_D("%s alloc irqs ranges [%d, %d]", pic->ops->name,
                    pic->irq_start, pic->irq_start + pic->irq_nr);
        }
        else
        {
            LOG_E("%s alloc %d irqs is overflow", pic->ops->name, irq_nr);

            err = -RT_EEMPTY;
        }

        rt_spin_unlock_irqrestore(&_pic_lock, level);
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}
rt_err_t rt_pic_cancel_irq(struct rt_pic *pic)
{
    rt_err_t err = RT_EOK;

    if (pic && pic->pirqs)
    {
        rt_ubase_t level = rt_spin_lock_irqsave(&_pic_lock);

        /*
         * This only keeps the system runtime safe;
         * we don't recommend that PICs unregister.
         */
        rt_list_remove(&pic->list);

        rt_spin_unlock_irqrestore(&_pic_lock, level);
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}
static void config_pirq(struct rt_pic *pic, struct rt_pic_irq *pirq, int irq, int hwirq)
{
    rt_ubase_t level = rt_spin_lock_irqsave(&pirq->rw_lock);

    if (pirq->irq < 0)
    {
        rt_list_init(&pirq->list);
        rt_list_init(&pirq->children_nodes);
        rt_list_init(&pirq->isr.list);
    }
    else if (pirq->pic != pic)
    {
        RT_ASSERT(rt_list_isempty(&pirq->list) == RT_TRUE);
        RT_ASSERT(rt_list_isempty(&pirq->children_nodes) == RT_TRUE);
        RT_ASSERT(rt_list_isempty(&pirq->isr.list) == RT_TRUE);
    }

    pirq->irq = irq;
    pirq->hwirq = hwirq;
    pirq->pic = pic;

    rt_spin_unlock_irqrestore(&pirq->rw_lock, level);
}

int rt_pic_config_ipi(struct rt_pic *pic, int ipi_index, int hwirq)
{
    int ipi = ipi_index;
    struct rt_pic_irq *pirq;

    if (pic && ipi < RT_ARRAY_SIZE(_ipi_hash) && hwirq >= 0 && pic->ops->irq_send_ipi)
    {
        pirq = &_pirq_hash[ipi];
        config_pirq(pic, pirq, ipi, hwirq);

        for (int cpuid = 0; cpuid < RT_CPUS_NR; ++cpuid)
        {
            RT_IRQ_AFFINITY_SET(pirq->affinity, cpuid);
        }

        LOG_D("%s config %s %d to hwirq %d", pic->ops->name, "ipi", ipi, hwirq);
    }
    else
    {
        ipi = -RT_EINVAL;
    }

    return ipi;
}

int rt_pic_config_irq(struct rt_pic *pic, int irq_index, int hwirq)
{
    int irq;

    if (pic && hwirq >= 0)
    {
        irq = pic->irq_start + irq_index;

        if (irq >= 0 && irq < MAX_HANDLERS)
        {
            config_pirq(pic, &_pirq_hash[irq], irq, hwirq);

            LOG_D("%s config %s %d to hwirq %d", pic->ops->name, "irq", irq, hwirq);
        }
        else
        {
            irq = -RT_ERROR;
        }
    }
    else
    {
        irq = -RT_EINVAL;
    }

    return irq;
}
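
/*
 * A minimal registration sketch for a PIC driver, assuming a hypothetical
 * "demo_pic" instance with a "demo_pic_ops" table and 32 hardware lines:
 *
 *     static struct rt_pic demo_pic = { .ops = &demo_pic_ops };
 *
 *     if (!rt_pic_linear_irq(&demo_pic, 32))
 *     {
 *         for (int i = 0; i < 32; ++i)
 *         {
 *             rt_pic_config_irq(&demo_pic, i, i);
 *         }
 *     }
 */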
struct rt_pic_irq *rt_pic_find_ipi(struct rt_pic *pic, int ipi_index)
{
    struct rt_pic_irq *pirq = &_pirq_hash[ipi_index];

    RT_ASSERT(ipi_index < RT_ARRAY_SIZE(_ipi_hash));
    RT_ASSERT(pirq->pic == pic);

    return pirq;
}

struct rt_pic_irq *rt_pic_find_pirq(struct rt_pic *pic, int irq)
{
    if (pic && irq >= pic->irq_start && irq <= pic->irq_start + pic->irq_nr)
    {
        return &pic->pirqs[irq - pic->irq_start];
    }

    return RT_NULL;
}
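
/*
 * Cascade/uncascade route an IRQ through a parent IRQ (e.g. a secondary
 * controller chained behind one line of its parent): the child inherits
 * the parent's priority and affinity, and is linked into the parent's
 * children_nodes when the PIC advertises RT_PIC_F_IRQ_ROUTING.
 */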
rt_err_t rt_pic_cascade(struct rt_pic_irq *pirq, int parent_irq)
{
    rt_err_t err = RT_EOK;

    if (pirq && !pirq->parent && parent_irq >= 0)
    {
        struct rt_pic_irq *parent;

        rt_spin_lock(&pirq->rw_lock);

        parent = irq2pirq(parent_irq);

        if (parent)
        {
            pirq->parent = parent;
            pirq->priority = parent->priority;
            rt_memcpy(&pirq->affinity, &parent->affinity, sizeof(pirq->affinity));
        }

        rt_spin_unlock(&pirq->rw_lock);

        if (parent && pirq->pic->ops->flags & RT_PIC_F_IRQ_ROUTING)
        {
            rt_spin_lock(&parent->rw_lock);
            rt_list_insert_before(&parent->children_nodes, &pirq->list);
            rt_spin_unlock(&parent->rw_lock);
        }
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}

rt_err_t rt_pic_uncascade(struct rt_pic_irq *pirq)
{
    rt_err_t err = RT_EOK;

    if (pirq && pirq->parent)
    {
        struct rt_pic_irq *parent;

        rt_spin_lock(&pirq->rw_lock);

        parent = pirq->parent;
        pirq->parent = RT_NULL;

        rt_spin_unlock(&pirq->rw_lock);

        if (parent && pirq->pic->ops->flags & RT_PIC_F_IRQ_ROUTING)
        {
            rt_spin_lock(&parent->rw_lock);
            rt_list_remove(&pirq->list);
            rt_spin_unlock(&parent->rw_lock);
        }
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}
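
/*
 * Attach an ISR to a logical IRQ. The first handler lives in pirq->isr;
 * later ones are allocated and chained on pirq->isr.list, so an IRQ can be
 * shared. A minimal sketch, assuming a hypothetical handler demo_isr() and a
 * device context demo_dev obtained elsewhere:
 *
 *     rt_pic_attach_irq(irq, demo_isr, &demo_dev, "demo", 0);
 *     rt_pic_irq_set_triger_mode(irq, RT_IRQ_MODE_LEVEL_HIGH);
 *     rt_pic_irq_enable(irq);
 */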
rt_err_t rt_pic_attach_irq(int irq, rt_isr_handler_t handler, void *uid, const char *name, int flags)
{
    rt_err_t err = -RT_EINVAL;
    struct rt_pic_irq *pirq;

    if (handler && name && (pirq = irq2pirq(irq)))
    {
        struct rt_pic_isr *isr = RT_NULL;
        rt_ubase_t level = rt_spin_lock_irqsave(&pirq->rw_lock);

        err = RT_EOK;

        if (!pirq->isr.action.handler)
        {
            /* first attach */
            isr = &pirq->isr;
            rt_list_init(&isr->list);
        }
        else
        {
            rt_spin_unlock_irqrestore(&pirq->rw_lock, level);

            if ((isr = rt_malloc(sizeof(*isr))))
            {
                rt_list_init(&isr->list);

                level = rt_spin_lock_irqsave(&pirq->rw_lock);

                rt_list_insert_after(&pirq->isr.list, &isr->list);
            }
            else
            {
                LOG_E("No memory to save '%s' isr", name);
                err = -RT_ERROR;
            }
        }

        if (!err)
        {
            isr->flags = flags;
            isr->action.handler = handler;
            isr->action.param = uid;
#ifdef RT_USING_INTERRUPT_INFO
            isr->action.counter = 0;
            rt_strncpy(isr->action.name, name, RT_NAME_MAX - 1);
            isr->action.name[RT_NAME_MAX - 1] = '\0';
#ifdef RT_USING_SMP
            rt_memset(isr->action.cpu_counter, 0, sizeof(isr->action.cpu_counter));
#endif
#endif

            rt_spin_unlock_irqrestore(&pirq->rw_lock, level);
        }
    }

    return err;
}

rt_err_t rt_pic_detach_irq(int irq, void *uid)
{
    rt_err_t err = -RT_EINVAL;
    struct rt_pic_irq *pirq = irq2pirq(irq);

    if (pirq)
    {
        rt_bool_t will_free = RT_FALSE;
        struct rt_pic_isr *isr = RT_NULL;
        rt_ubase_t level = rt_spin_lock_irqsave(&pirq->rw_lock);

        isr = &pirq->isr;

        if (isr->action.param == uid)
        {
            if (rt_list_isempty(&isr->list))
            {
                isr->action.handler = RT_NULL;
                isr->action.param = RT_NULL;
            }
            else
            {
                struct rt_pic_isr *next_isr = rt_list_first_entry(&isr->list, struct rt_pic_isr, list);

                rt_list_remove(&next_isr->list);

                isr->action.handler = next_isr->action.handler;
                isr->action.param = next_isr->action.param;
#ifdef RT_USING_INTERRUPT_INFO
                isr->action.counter = next_isr->action.counter;
                rt_strncpy(isr->action.name, next_isr->action.name, RT_NAME_MAX);
#ifdef RT_USING_SMP
                rt_memcpy(isr->action.cpu_counter, next_isr->action.cpu_counter, sizeof(next_isr->action.cpu_counter));
#endif
#endif

                isr = next_isr;
                will_free = RT_TRUE;
            }

            err = RT_EOK;
        }
        else
        {
            rt_list_for_each_entry(isr, &pirq->isr.list, list)
            {
                if (isr->action.param == uid)
                {
                    err = RT_EOK;
                    will_free = RT_TRUE;

                    rt_list_remove(&isr->list);
                    break;
                }
            }
        }

        rt_spin_unlock_irqrestore(&pirq->rw_lock, level);

        if (will_free)
        {
            rt_free(isr);
        }
    }

    return err;
}
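
/*
 * Register a raw trap handler. rt_pic_do_traps() walks the registered
 * handlers in interrupt context and stops at the first one that returns
 * RT_TRUE.
 */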
rt_err_t rt_pic_add_traps(rt_bool_t (*handler)(void *), void *data)
{
    rt_err_t err = -RT_EINVAL;

    if (handler)
    {
        struct irq_traps *traps = rt_malloc(sizeof(*traps));

        if (traps)
        {
            rt_ubase_t level = rt_hw_interrupt_disable();

            rt_list_init(&traps->list);
            traps->data = data;
            traps->handler = handler;

            rt_list_insert_before(&_traps_nodes, &traps->list);
            err = RT_EOK;

            rt_hw_interrupt_enable(level);
        }
        else
        {
            LOG_E("No memory to save '%p' handler", handler);
            err = -RT_ENOMEM;
        }
    }

    return err;
}

rt_err_t rt_pic_do_traps(void)
{
    rt_err_t err = -RT_ERROR;
    struct irq_traps *traps;

    rt_interrupt_enter();

    rt_list_for_each_entry(traps, &_traps_nodes, list)
    {
        if (traps->handler(traps->data))
        {
            err = RT_EOK;

            break;
        }
    }

    rt_interrupt_leave();

    return err;
}
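
/*
 * Dispatch one pirq: ack/eoi and recurse into cascaded children first, then
 * run the primary action and every shared handler chained on isr.list.
 * With RT_USING_PIC_STATISTICS the handling time is accumulated per IRQ.
 */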
rt_err_t rt_pic_handle_isr(struct rt_pic_irq *pirq)
{
    rt_err_t err = -RT_EEMPTY;
    rt_list_t *handler_nodes;
    struct rt_irq_desc *action;
#ifdef RT_USING_PIC_STATISTICS
    struct timespec ts;
    rt_ubase_t irq_time_ns;
    rt_ubase_t current_irq_begin;
#endif

    RT_ASSERT(pirq != RT_NULL);
    RT_ASSERT(pirq->pic != RT_NULL);

#ifdef RT_USING_PIC_STATISTICS
    rt_ktime_boottime_get_ns(&ts);
    current_irq_begin = ts.tv_sec * (1000UL * 1000 * 1000) + ts.tv_nsec;
#endif

    handler_nodes = &pirq->isr.list;
    action = &pirq->isr.action;

    if (!rt_list_isempty(&pirq->children_nodes))
    {
        struct rt_pic_irq *child;

        rt_list_for_each_entry(child, &pirq->children_nodes, list)
        {
            if (child->pic->ops->irq_ack)
            {
                child->pic->ops->irq_ack(child);
            }

            err = rt_pic_handle_isr(child);

            if (child->pic->ops->irq_eoi)
            {
                child->pic->ops->irq_eoi(child);
            }
        }
    }

    if (action->handler)
    {
        action->handler(pirq->irq, action->param);
#ifdef RT_USING_INTERRUPT_INFO
        action->counter++;
#ifdef RT_USING_SMP
        action->cpu_counter[rt_hw_cpu_id()]++;
#endif
#endif

        if (!rt_list_isempty(handler_nodes))
        {
            struct rt_pic_isr *isr;

            rt_list_for_each_entry(isr, handler_nodes, list)
            {
                action = &isr->action;
                RT_ASSERT(action->handler != RT_NULL);

                action->handler(pirq->irq, action->param);
#ifdef RT_USING_INTERRUPT_INFO
                action->counter++;
#ifdef RT_USING_SMP
                action->cpu_counter[rt_hw_cpu_id()]++;
#endif
#endif
            }
        }

        err = RT_EOK;
    }

#ifdef RT_USING_PIC_STATISTICS
    rt_ktime_boottime_get_ns(&ts);
    irq_time_ns = ts.tv_sec * (1000UL * 1000 * 1000) + ts.tv_nsec - current_irq_begin;
    pirq->stat.sum_irq_time_ns += irq_time_ns;
    if (irq_time_ns < pirq->stat.min_irq_time_ns || pirq->stat.min_irq_time_ns == 0)
    {
        pirq->stat.min_irq_time_ns = irq_time_ns;
    }
    if (irq_time_ns > pirq->stat.max_irq_time_ns)
    {
        pirq->stat.max_irq_time_ns = irq_time_ns;
    }
#endif

    return err;
}
rt_weak rt_err_t rt_pic_user_extends(struct rt_pic *pic)
{
    return -RT_ENOSYS;
}

rt_err_t rt_pic_irq_init(void)
{
    rt_err_t err = RT_EOK;
    struct rt_pic *pic;

    rt_list_for_each_entry(pic, &_pic_nodes, list)
    {
        if (pic->ops->irq_init)
        {
            err = pic->ops->irq_init(pic);

            if (err)
            {
                LOG_E("PIC = %s init fail", pic->ops->name);
                break;
            }
        }
    }

    return err;
}

rt_err_t rt_pic_irq_finit(void)
{
    rt_err_t err = RT_EOK;
    struct rt_pic *pic;

    rt_list_for_each_entry(pic, &_pic_nodes, list)
    {
        if (pic->ops->irq_finit)
        {
            err = pic->ops->irq_finit(pic);

            if (err)
            {
                LOG_E("PIC = %s finit fail", pic->ops->name);
                break;
            }
        }
    }

    return err;
}
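
/*
 * Per-IRQ wrappers: each takes a logical IRQ number, resolves the pirq,
 * and forwards to the owning PIC's ops callback (when present) under the
 * pirq spinlock.
 */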
void rt_pic_irq_enable(int irq)
{
    struct rt_pic_irq *pirq = irq2pirq(irq);

    RT_ASSERT(pirq != RT_NULL);

    rt_hw_spin_lock(&pirq->rw_lock.lock);

    if (pirq->pic->ops->irq_enable)
    {
        pirq->pic->ops->irq_enable(pirq);
    }

    rt_hw_spin_unlock(&pirq->rw_lock.lock);
}

void rt_pic_irq_disable(int irq)
{
    struct rt_pic_irq *pirq = irq2pirq(irq);

    RT_ASSERT(pirq != RT_NULL);

    rt_hw_spin_lock(&pirq->rw_lock.lock);

    if (pirq->pic->ops->irq_disable)
    {
        pirq->pic->ops->irq_disable(pirq);
    }

    rt_hw_spin_unlock(&pirq->rw_lock.lock);
}

void rt_pic_irq_ack(int irq)
{
    struct rt_pic_irq *pirq = irq2pirq(irq);

    RT_ASSERT(pirq != RT_NULL);

    rt_hw_spin_lock(&pirq->rw_lock.lock);

    if (pirq->pic->ops->irq_ack)
    {
        pirq->pic->ops->irq_ack(pirq);
    }

    rt_hw_spin_unlock(&pirq->rw_lock.lock);
}

void rt_pic_irq_mask(int irq)
{
    struct rt_pic_irq *pirq = irq2pirq(irq);

    RT_ASSERT(pirq != RT_NULL);

    rt_hw_spin_lock(&pirq->rw_lock.lock);

    if (pirq->pic->ops->irq_mask)
    {
        pirq->pic->ops->irq_mask(pirq);
    }

    rt_hw_spin_unlock(&pirq->rw_lock.lock);
}

void rt_pic_irq_unmask(int irq)
{
    struct rt_pic_irq *pirq = irq2pirq(irq);

    RT_ASSERT(pirq != RT_NULL);

    rt_hw_spin_lock(&pirq->rw_lock.lock);

    if (pirq->pic->ops->irq_unmask)
    {
        pirq->pic->ops->irq_unmask(pirq);
    }

    rt_hw_spin_unlock(&pirq->rw_lock.lock);
}

void rt_pic_irq_eoi(int irq)
{
    struct rt_pic_irq *pirq = irq2pirq(irq);

    RT_ASSERT(pirq != RT_NULL);

    rt_hw_spin_lock(&pirq->rw_lock.lock);

    if (pirq->pic->ops->irq_eoi)
    {
        pirq->pic->ops->irq_eoi(pirq);
    }

    rt_hw_spin_unlock(&pirq->rw_lock.lock);
}

rt_err_t rt_pic_irq_set_priority(int irq, rt_uint32_t priority)
{
    rt_err_t err = -RT_EINVAL;
    struct rt_pic_irq *pirq = irq2pirq(irq);

    if (pirq)
    {
        rt_hw_spin_lock(&pirq->rw_lock.lock);

        if (pirq->pic->ops->irq_set_priority)
        {
            err = pirq->pic->ops->irq_set_priority(pirq, priority);

            if (!err)
            {
                pirq->priority = priority;
            }
        }
        else
        {
            err = -RT_ENOSYS;
        }

        rt_hw_spin_unlock(&pirq->rw_lock.lock);
    }

    return err;
}

rt_uint32_t rt_pic_irq_get_priority(int irq)
{
    rt_uint32_t priority = RT_UINT32_MAX;
    struct rt_pic_irq *pirq = irq2pirq(irq);

    if (pirq)
    {
        rt_hw_spin_lock(&pirq->rw_lock.lock);

        priority = pirq->priority;

        rt_hw_spin_unlock(&pirq->rw_lock.lock);
    }

    return priority;
}

rt_err_t rt_pic_irq_set_affinity(int irq, rt_bitmap_t *affinity)
{
    rt_err_t err = -RT_EINVAL;
    struct rt_pic_irq *pirq;

    if (affinity && (pirq = irq2pirq(irq)))
    {
        rt_hw_spin_lock(&pirq->rw_lock.lock);

        if (pirq->pic->ops->irq_set_affinity)
        {
            err = pirq->pic->ops->irq_set_affinity(pirq, affinity);

            if (!err)
            {
                rt_memcpy(pirq->affinity, affinity, sizeof(pirq->affinity));
            }
        }
        else
        {
            err = -RT_ENOSYS;
        }

        rt_hw_spin_unlock(&pirq->rw_lock.lock);
    }

    return err;
}

rt_err_t rt_pic_irq_get_affinity(int irq, rt_bitmap_t *out_affinity)
{
    rt_err_t err = -RT_EINVAL;
    struct rt_pic_irq *pirq;

    if (out_affinity && (pirq = irq2pirq(irq)))
    {
        rt_hw_spin_lock(&pirq->rw_lock.lock);

        rt_memcpy(out_affinity, pirq->affinity, sizeof(pirq->affinity));
        err = RT_EOK;

        rt_hw_spin_unlock(&pirq->rw_lock.lock);
    }

    return err;
}

rt_err_t rt_pic_irq_set_triger_mode(int irq, rt_uint32_t mode)
{
    rt_err_t err = -RT_EINVAL;
    struct rt_pic_irq *pirq;

    if ((~mode & RT_IRQ_MODE_MASK) && (pirq = irq2pirq(irq)))
    {
        rt_hw_spin_lock(&pirq->rw_lock.lock);

        if (pirq->pic->ops->irq_set_triger_mode)
        {
            err = pirq->pic->ops->irq_set_triger_mode(pirq, mode);

            if (!err)
            {
                pirq->mode = mode;
            }
        }
        else
        {
            err = -RT_ENOSYS;
        }

        rt_hw_spin_unlock(&pirq->rw_lock.lock);
    }

    return err;
}

rt_uint32_t rt_pic_irq_get_triger_mode(int irq)
{
    rt_uint32_t mode = RT_UINT32_MAX;
    struct rt_pic_irq *pirq = irq2pirq(irq);

    if (pirq)
    {
        rt_hw_spin_lock(&pirq->rw_lock.lock);

        mode = pirq->mode;

        rt_hw_spin_unlock(&pirq->rw_lock.lock);
    }

    return mode;
}

void rt_pic_irq_send_ipi(int irq, rt_bitmap_t *cpumask)
{
    struct rt_pic_irq *pirq;

    if (cpumask && (pirq = irq2pirq(irq)))
    {
        rt_hw_spin_lock(&pirq->rw_lock.lock);

        if (pirq->pic->ops->irq_send_ipi)
        {
            pirq->pic->ops->irq_send_ipi(pirq, cpumask);
        }

        rt_hw_spin_unlock(&pirq->rw_lock.lock);
    }
}

rt_err_t rt_pic_irq_set_state_raw(struct rt_pic *pic, int hwirq, int type, rt_bool_t state)
{
    rt_err_t err;

    if (pic && hwirq >= 0)
    {
        if (pic->ops->irq_set_state)
        {
            err = pic->ops->irq_set_state(pic, hwirq, type, state);
        }
        else
        {
            err = -RT_ENOSYS;
        }
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}

rt_err_t rt_pic_irq_get_state_raw(struct rt_pic *pic, int hwirq, int type, rt_bool_t *out_state)
{
    rt_err_t err;

    if (pic && hwirq >= 0)
    {
        if (pic->ops->irq_get_state)
        {
            rt_bool_t state;

            if (!(err = pic->ops->irq_get_state(pic, hwirq, type, &state)) && out_state)
            {
                *out_state = state;
            }
        }
        else
        {
            err = -RT_ENOSYS;
        }
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}

rt_err_t rt_pic_irq_set_state(int irq, int type, rt_bool_t state)
{
    rt_err_t err;
    struct rt_pic_irq *pirq = irq2pirq(irq);

    RT_ASSERT(pirq != RT_NULL);

    rt_hw_spin_lock(&pirq->rw_lock.lock);
    err = rt_pic_irq_set_state_raw(pirq->pic, pirq->hwirq, type, state);
    rt_hw_spin_unlock(&pirq->rw_lock.lock);

    return err;
}

rt_err_t rt_pic_irq_get_state(int irq, int type, rt_bool_t *out_state)
{
    rt_err_t err;
    struct rt_pic_irq *pirq = irq2pirq(irq);

    RT_ASSERT(pirq != RT_NULL);

    rt_hw_spin_lock(&pirq->rw_lock.lock);
    err = rt_pic_irq_get_state_raw(pirq->pic, pirq->hwirq, type, out_state);
    rt_hw_spin_unlock(&pirq->rw_lock.lock);

    return err;
}
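
/*
 * rt_pic_irq_parent_*() helpers forward an operation to pirq->parent, for
 * cascaded IRQs set up with rt_pic_cascade(); the caller must ensure the
 * parent exists.
 */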
void rt_pic_irq_parent_enable(struct rt_pic_irq *pirq)
{
    RT_ASSERT(pirq != RT_NULL);
    pirq = pirq->parent;

    if (pirq->pic->ops->irq_enable)
    {
        pirq->pic->ops->irq_enable(pirq);
    }
}

void rt_pic_irq_parent_disable(struct rt_pic_irq *pirq)
{
    RT_ASSERT(pirq != RT_NULL);
    pirq = pirq->parent;

    if (pirq->pic->ops->irq_disable)
    {
        pirq->pic->ops->irq_disable(pirq);
    }
}

void rt_pic_irq_parent_ack(struct rt_pic_irq *pirq)
{
    RT_ASSERT(pirq != RT_NULL);
    pirq = pirq->parent;

    if (pirq->pic->ops->irq_ack)
    {
        pirq->pic->ops->irq_ack(pirq);
    }
}

void rt_pic_irq_parent_mask(struct rt_pic_irq *pirq)
{
    RT_ASSERT(pirq != RT_NULL);
    pirq = pirq->parent;

    if (pirq->pic->ops->irq_mask)
    {
        pirq->pic->ops->irq_mask(pirq);
    }
}

void rt_pic_irq_parent_unmask(struct rt_pic_irq *pirq)
{
    RT_ASSERT(pirq != RT_NULL);
    pirq = pirq->parent;

    if (pirq->pic->ops->irq_unmask)
    {
        pirq->pic->ops->irq_unmask(pirq);
    }
}

void rt_pic_irq_parent_eoi(struct rt_pic_irq *pirq)
{
    RT_ASSERT(pirq != RT_NULL);
    pirq = pirq->parent;

    if (pirq->pic->ops->irq_eoi)
    {
        pirq->pic->ops->irq_eoi(pirq);
    }
}

rt_err_t rt_pic_irq_parent_set_priority(struct rt_pic_irq *pirq, rt_uint32_t priority)
{
    rt_err_t err = -RT_ENOSYS;

    RT_ASSERT(pirq != RT_NULL);
    pirq = pirq->parent;

    if (pirq->pic->ops->irq_set_priority)
    {
        if (!(err = pirq->pic->ops->irq_set_priority(pirq, priority)))
        {
            pirq->priority = priority;
        }
    }

    return err;
}

rt_err_t rt_pic_irq_parent_set_affinity(struct rt_pic_irq *pirq, rt_bitmap_t *affinity)
{
    rt_err_t err = -RT_ENOSYS;

    RT_ASSERT(pirq != RT_NULL);
    pirq = pirq->parent;

    if (pirq->pic->ops->irq_set_affinity)
    {
        if (!(err = pirq->pic->ops->irq_set_affinity(pirq, affinity)))
        {
            rt_memcpy(pirq->affinity, affinity, sizeof(pirq->affinity));
        }
    }

    return err;
}

rt_err_t rt_pic_irq_parent_set_triger_mode(struct rt_pic_irq *pirq, rt_uint32_t mode)
{
    rt_err_t err = -RT_ENOSYS;

    RT_ASSERT(pirq != RT_NULL);
    pirq = pirq->parent;

    if (pirq->pic->ops->irq_set_triger_mode)
    {
        if (!(err = pirq->pic->ops->irq_set_triger_mode(pirq, mode)))
        {
            pirq->mode = mode;
        }
    }

    return err;
}
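
/*
 * With RT_USING_OFW, interrupt controllers are discovered from the device
 * tree: every node carrying an "interrupt-controller" property is probed
 * against the PIC stubs exported into the [_pic_ofw_start, _pic_ofw_end) range.
 */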
#ifdef RT_USING_OFW
RT_OFW_STUB_RANGE_EXPORT(pic, _pic_ofw_start, _pic_ofw_end);

static rt_err_t ofw_pic_init(void)
{
    struct rt_ofw_node *ic_np;

    rt_ofw_foreach_node_by_prop(ic_np, "interrupt-controller")
    {
        rt_ofw_stub_probe_range(ic_np, &_pic_ofw_start, &_pic_ofw_end);
    }

    return RT_EOK;
}
#else
static rt_err_t ofw_pic_init(void)
{
    return RT_EOK;
}
#endif /* !RT_USING_OFW */

rt_err_t rt_pic_init(void)
{
    rt_err_t err;

    LOG_D("init start");

    err = ofw_pic_init();

    LOG_D("init end");

    return err;
}
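
/*
 * "list_irq" msh command: prints one row per configured IRQ (or every slot
 * with "list_irq all"), including hwirq, owning PIC, trigger mode and,
 * depending on RT_USING_SMP / RT_USING_INTERRUPT_INFO / RT_USING_PIC_STATISTICS,
 * affinity, per-CPU counters and handling-time statistics.
 */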
#if defined(RT_USING_CONSOLE) && defined(RT_USING_MSH)
static int list_irq(int argc, char**argv)
{
    rt_size_t irq_nr = 0;
    rt_bool_t dump_all = RT_FALSE;
    const char *const irq_modes[] =
    {
        [RT_IRQ_MODE_NONE] = "None",
        [RT_IRQ_MODE_EDGE_RISING] = "Edge-Rising",
        [RT_IRQ_MODE_EDGE_FALLING] = "Edge-Falling",
        [RT_IRQ_MODE_EDGE_BOTH] = "Edge-Both",
        [RT_IRQ_MODE_LEVEL_HIGH] = "Level-High",
        [RT_IRQ_MODE_LEVEL_LOW] = "Level-Low",
    };
    static char info[RT_CONSOLEBUF_SIZE];
#ifdef RT_USING_SMP
    static char cpumask[RT_CPUS_NR + 1] = { [RT_CPUS_NR] = '\0' };
#endif

    if (argc > 1)
    {
        if (!rt_strcmp(argv[1], "all"))
        {
            dump_all = RT_TRUE;
        }
    }

    rt_kprintf("%-*.s %-*.s %s %-*.s %-*.s %-*.s %-*.sUsers%-*.s",
            6, "IRQ",
            6, "HW-IRQ",
            "MSI",
            _pic_name_max, "PIC",
            12, "Mode",
#ifdef RT_USING_SMP
            RT_CPUS_NR, "CPUs",
#else
            0, 0,
#endif
#ifdef RT_USING_INTERRUPT_INFO
            11, "Count",
            5, ""
#else
            0, 0,
            10, "-Number"
#endif
            );

#if defined(RT_USING_SMP) && defined(RT_USING_INTERRUPT_INFO)
    for (int i = 0; i < RT_CPUS_NR; i++)
    {
        rt_kprintf(" cpu%2d ", i);
    }
#endif
#ifdef RT_USING_PIC_STATISTICS
    rt_kprintf(" max/ns avg/ns min/ns");
#endif
    rt_kputs("\n");

    for (int i = 0; i < RT_ARRAY_SIZE(_pirq_hash); ++i)
    {
        struct rt_pic_irq *pirq = &_pirq_hash[i];

        if (!pirq->pic || !(dump_all || pirq->isr.action.handler))
        {
            continue;
        }

        rt_snprintf(info, sizeof(info), "%-6d %-6d %c %-*.s %-*.s ",
                pirq->irq,
                pirq->hwirq,
                pirq->msi_desc ? 'Y' : 'N',
                _pic_name_max, pirq->pic->ops->name,
                12, irq_modes[pirq->mode]);

#ifdef RT_USING_SMP
        for (int group = 0, id = 0; group < RT_ARRAY_SIZE(pirq->affinity); ++group)
        {
            rt_bitmap_t mask = pirq->affinity[group];

            for (int idx = 0; id < RT_CPUS_NR && idx < RT_BITMAP_BIT_LEN(1); ++idx, ++id)
            {
                cpumask[RT_ARRAY_SIZE(cpumask) - id - 2] = '0' + ((mask >> idx) & 1);
            }
        }
#endif /* RT_USING_SMP */

        rt_kputs(info);
#ifdef RT_USING_SMP
        rt_kputs(cpumask);
#endif

#ifdef RT_USING_INTERRUPT_INFO
        rt_kprintf(" %-10d ", pirq->isr.action.counter);
        rt_kprintf("%-*.s", 10, pirq->isr.action.name);
#ifdef RT_USING_SMP
        for (int cpuid = 0; cpuid < RT_CPUS_NR; cpuid++)
        {
            rt_kprintf(" %-10d", pirq->isr.action.cpu_counter[cpuid]);
        }
#endif
#ifdef RT_USING_PIC_STATISTICS
        rt_kprintf(" %-10d %-10d %-10d", pirq->stat.max_irq_time_ns, pirq->stat.sum_irq_time_ns/pirq->isr.action.counter, pirq->stat.min_irq_time_ns);
#endif
        rt_kputs("\n");

        if (!rt_list_isempty(&pirq->isr.list))
        {
            struct rt_pic_isr *repeat_isr;

            rt_list_for_each_entry(repeat_isr, &pirq->isr.list, list)
            {
                rt_kputs(info);
#ifdef RT_USING_SMP
                rt_kputs(cpumask);
#endif
                rt_kprintf("%-10d ", repeat_isr->action.counter);
                rt_kprintf("%-*.s", 10, repeat_isr->action.name);
#ifdef RT_USING_SMP
                for (int cpuid = 0; cpuid < RT_CPUS_NR; cpuid++)
                {
                    rt_kprintf(" %-10d", repeat_isr->action.cpu_counter[cpuid]);
                }
#endif
#ifdef RT_USING_PIC_STATISTICS
                rt_kprintf(" --- --- ---");
#endif
                rt_kputs("\n");
            }
        }
#else
        rt_kprintf(" %d\n", rt_list_len(&pirq->isr.list));
#endif
        ++irq_nr;
    }

    rt_kprintf("%d IRQs found\n", irq_nr);

    return 0;
}
MSH_CMD_EXPORT(list_irq, dump using or args = all of irq information);
#endif /* RT_USING_CONSOLE && RT_USING_MSH */