/*
 * Copyright (c) 2021-2023 HPMicro
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */
#ifndef HPM_INTERRUPT_H
#define HPM_INTERRUPT_H

#include "riscv/riscv_core.h"
#include "hpm_common.h"
#include "hpm_plic_drv.h"

/**
 * @brief INTERRUPT driver APIs
 * @defgroup irq_interface INTERRUPT driver APIs
 * @{
 */

#define M_MODE 0 /*!< Machine mode */
#define S_MODE 1 /*!< Supervisor mode */

#ifdef __cplusplus
extern "C" {
#endif

/* Machine mode API: these APIs are supposed to be called at machine mode */

/**
 * @brief Enable global IRQ with mask
 *
 * @param[in] mask interrupt mask to be enabled
 */
ATTR_ALWAYS_INLINE static inline void enable_global_irq(uint32_t mask)
{
    set_csr(CSR_MSTATUS, mask);
}

/**
 * @brief Disable global IRQ with mask and return mstatus
 *
 * @param[in] mask interrupt mask to be disabled
 * @retval current mstatus value before the mask is disabled
 */
ATTR_ALWAYS_INLINE static inline uint32_t disable_global_irq(uint32_t mask)
{
    return read_clear_csr(CSR_MSTATUS, mask);
}

/**
 * @brief Restore global IRQ with mask
 *
 * @param[in] mask interrupt mask to be restored
 */
ATTR_ALWAYS_INLINE static inline void restore_global_irq(uint32_t mask)
{
    set_csr(CSR_MSTATUS, mask);
}
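
/*
 * Usage sketch (assumption, not part of the original header): a short
 * critical section that masks the machine-mode global interrupt enable
 * and restores the previous state afterwards.
 *
 *   uint32_t prev = disable_global_irq(CSR_MSTATUS_MIE_MASK);
 *   // ... timing-critical work ...
 *   restore_global_irq(prev & CSR_MSTATUS_MIE_MASK);
 */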

/**
 * @brief Enable IRQ from interrupt controller
 *
 */
ATTR_ALWAYS_INLINE static inline void enable_irq_from_intc(void)
{
    set_csr(CSR_MIE, CSR_MIE_MEIE_MASK);
}

/**
 * @brief Disable IRQ from interrupt controller
 *
 */
ATTR_ALWAYS_INLINE static inline void disable_irq_from_intc(void)
{
    clear_csr(CSR_MIE, CSR_MIE_MEIE_MASK);
}

/**
 * @brief Enable machine timer IRQ
 */
ATTR_ALWAYS_INLINE static inline void enable_mchtmr_irq(void)
{
    set_csr(CSR_MIE, CSR_MIE_MTIE_MASK);
}

/**
 * @brief Disable machine timer IRQ
 *
 */
ATTR_ALWAYS_INLINE static inline void disable_mchtmr_irq(void)
{
    clear_csr(CSR_MIE, CSR_MIE_MTIE_MASK);
}

/**
 * @brief Delegate IRQ handling
 *
 * @param[in] mask interrupt mask to be delegated
 */
ATTR_ALWAYS_INLINE static inline void delegate_irq(uint32_t mask)
{
    set_csr(CSR_MIDELEG, mask);
}

/**
 * @brief Undelegate IRQ handling
 *
 * @param[in] mask interrupt mask to be undelegated
 */
ATTR_ALWAYS_INLINE static inline void undelegate_irq(uint32_t mask)
{
    clear_csr(CSR_MIDELEG, mask);
}

/* Supervisor mode API: these APIs are supposed to be called at supervisor mode */

/**
 * @brief Enable global IRQ with mask for supervisor mode
 *
 * @param[in] mask interrupt mask to be enabled
 */
ATTR_ALWAYS_INLINE static inline void enable_s_global_irq(uint32_t mask)
{
    set_csr(CSR_SSTATUS, mask);
}

/**
 * @brief Disable global IRQ with mask and return sstatus for supervisor mode
 *
 * @param[in] mask interrupt mask to be disabled
 * @retval current sstatus value before the mask is disabled
 */
ATTR_ALWAYS_INLINE static inline uint32_t disable_s_global_irq(uint32_t mask)
{
    return read_clear_csr(CSR_SSTATUS, mask);
}

/**
 * @brief Restore global IRQ with mask for supervisor mode
 *
 * @param[in] mask interrupt mask to be restored
 */
ATTR_ALWAYS_INLINE static inline void restore_s_global_irq(uint32_t mask)
{
    set_csr(CSR_SSTATUS, mask);
}

/**
 * @brief Disable IRQ from interrupt controller for supervisor mode
 *
 */
ATTR_ALWAYS_INLINE static inline void disable_s_irq_from_intc(void)
{
    clear_csr(CSR_SIE, CSR_SIE_SEIE_MASK);
}

/**
 * @brief Enable IRQ from interrupt controller for supervisor mode
 *
 */
ATTR_ALWAYS_INLINE static inline void enable_s_irq_from_intc(void)
{
    set_csr(CSR_SIE, CSR_SIE_SEIE_MASK);
}

/**
 * @brief Enable machine timer IRQ for supervisor mode
 */
ATTR_ALWAYS_INLINE static inline void enable_s_mchtmr_irq(void)
{
    set_csr(CSR_SIE, CSR_SIE_STIE_MASK);
}

/**
 * @brief Disable machine timer IRQ for supervisor mode
 *
 */
ATTR_ALWAYS_INLINE static inline void disable_s_mchtmr_irq(void)
{
    clear_csr(CSR_SIE, CSR_SIE_STIE_MASK);
}

/*
 * CPU Machine SWI control
 *
 * Machine SWI (MSIP) is connected to PLICSW irq 1.
 */
#define PLICSWI 1

/**
 * @brief Initialize software interrupt
 *
 */
ATTR_ALWAYS_INLINE static inline void intc_m_init_swi(void)
{
    __plic_enable_irq(HPM_PLICSW_BASE, HPM_PLIC_TARGET_M_MODE, PLICSWI);
}

/**
 * @brief Enable software interrupt
 *
 */
ATTR_ALWAYS_INLINE static inline void intc_m_enable_swi(void)
{
    set_csr(CSR_MIE, CSR_MIE_MSIE_MASK);
}

/**
 * @brief Disable software interrupt
 *
 */
ATTR_ALWAYS_INLINE static inline void intc_m_disable_swi(void)
{
    clear_csr(CSR_MIE, CSR_MIE_MSIE_MASK);
}

/**
 * @brief Trigger software interrupt
 *
 */
ATTR_ALWAYS_INLINE static inline void intc_m_trigger_swi(void)
{
    __plic_set_irq_pending(HPM_PLICSW_BASE, PLICSWI);
}

/**
 * @brief Claim software interrupt
 *
 */
ATTR_ALWAYS_INLINE static inline void intc_m_claim_swi(void)
{
    __plic_claim_irq(HPM_PLICSW_BASE, 0);
}

/**
 * @brief Complete software interrupt
 *
 */
ATTR_ALWAYS_INLINE static inline void intc_m_complete_swi(void)
{
    __plic_complete_irq(HPM_PLICSW_BASE, HPM_PLIC_TARGET_M_MODE, PLICSWI);
}
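
/*
 * Usage sketch (assumption, not part of the original header): raise and
 * service the machine-mode software interrupt through PLICSW.
 *
 *   intc_m_init_swi();
 *   intc_m_enable_swi();
 *   intc_m_trigger_swi();        // pend PLICSW irq 1
 *
 *   // inside the machine SWI handler:
 *   intc_m_claim_swi();
 *   // ... handle the event ...
 *   intc_m_complete_swi();
 */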

/**
 * @brief Enable IRQ for machine mode
 *
 * @param[in] irq Interrupt number
 */
#define intc_m_enable_irq(irq) \
    intc_enable_irq(HPM_PLIC_TARGET_M_MODE, irq)

/**
 * @brief Disable IRQ for machine mode
 *
 * @param[in] irq Interrupt number
 */
#define intc_m_disable_irq(irq) \
    intc_disable_irq(HPM_PLIC_TARGET_M_MODE, irq)

#define intc_m_set_threshold(threshold) \
    intc_set_threshold(HPM_PLIC_TARGET_M_MODE, threshold)

/**
 * @brief Complete IRQ for machine mode
 *
 * @param[in] irq Interrupt number
 */
#define intc_m_complete_irq(irq) \
    intc_complete_irq(HPM_PLIC_TARGET_M_MODE, irq)

/**
 * @brief Claim IRQ for machine mode
 *
 */
#define intc_m_claim_irq() intc_claim_irq(HPM_PLIC_TARGET_M_MODE)

/**
 * @brief Enable IRQ for machine mode with priority
 *
 * @param[in] irq Interrupt number
 * @param[in] priority Priority of interrupt
 */
#define intc_m_enable_irq_with_priority(irq, priority) \
    do { \
        intc_set_irq_priority(irq, priority); \
        intc_m_enable_irq(irq); \
    } while (0)
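
/*
 * Usage sketch (assumption, not part of the original header): IRQn_GPTMR0
 * stands in for a SoC-specific IRQ number.
 *
 *   intc_m_enable_irq_with_priority(IRQn_GPTMR0, 1);
 *   // ...
 *   intc_m_disable_irq(IRQn_GPTMR0);
 */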

/* Supervisor mode */

/**
 * @brief Enable software interrupt for supervisor mode
 *
 */
ATTR_ALWAYS_INLINE static inline void intc_s_enable_swi(void)
{
    set_csr(CSR_SIE, CSR_SIE_SSIE_MASK);
}

/**
 * @brief Disable software interrupt for supervisor mode
 *
 */
ATTR_ALWAYS_INLINE static inline void intc_s_disable_swi(void)
{
    clear_csr(CSR_SIE, CSR_SIE_SSIE_MASK);
}

/**
 * @brief Trigger software interrupt for supervisor mode
 *
 */
ATTR_ALWAYS_INLINE static inline void intc_s_trigger_swi(void)
{
    set_csr(CSR_SIP, CSR_SIP_SSIP_MASK);
}

/**
 * @brief Complete software interrupt for supervisor mode
 *
 */
ATTR_ALWAYS_INLINE static inline void intc_s_complete_swi(void)
{
    clear_csr(CSR_SIP, CSR_SIP_SSIP_MASK);
}

/**
 * @brief Enable IRQ for supervisor mode
 *
 * @param[in] irq Interrupt number
 */
#define intc_s_enable_irq(irq) \
    intc_enable_irq(HPM_PLIC_TARGET_S_MODE, irq)

/**
 * @brief Disable IRQ for supervisor mode
 *
 * @param[in] irq Interrupt number
 */
#define intc_s_disable_irq(irq) \
    intc_disable_irq(HPM_PLIC_TARGET_S_MODE, irq)

#define intc_set_s_threshold(threshold) \
    intc_set_threshold(HPM_PLIC_TARGET_S_MODE, threshold)

/**
 * @brief Complete IRQ for supervisor mode
 *
 * @param[in] irq Interrupt number
 */
#define intc_s_complete_irq(irq) \
    intc_complete_irq(HPM_PLIC_TARGET_S_MODE, irq)

/**
 * @brief Claim IRQ for supervisor mode
 *
 */
#define intc_s_claim_irq() intc_claim_irq(HPM_PLIC_TARGET_S_MODE)

/**
 * @brief Enable IRQ for supervisor mode with priority
 *
 * @param[in] irq Interrupt number
 * @param[in] priority Priority of interrupt
 */
#define intc_s_enable_irq_with_priority(irq, priority) \
    do { \
        intc_set_irq_priority(irq, priority); \
        intc_s_enable_irq(irq); \
    } while (0)

/**
 * @brief Enable specific interrupt
 *
 * @param[in] target Target to handle specific interrupt
 * @param[in] irq Interrupt number
 */
ATTR_ALWAYS_INLINE static inline void intc_enable_irq(uint32_t target, uint32_t irq)
{
    __plic_enable_irq(HPM_PLIC_BASE, target, irq);
}

/**
 * @brief Set interrupt priority
 *
 * @param[in] irq Interrupt number
 * @param[in] priority Priority of interrupt
 */
ATTR_ALWAYS_INLINE static inline void intc_set_irq_priority(uint32_t irq, uint32_t priority)
{
    __plic_set_irq_priority(HPM_PLIC_BASE, irq, priority);
}

/**
 * @brief Disable specific interrupt
 *
 * @param[in] target Target to handle specific interrupt
 * @param[in] irq Interrupt number
 */
ATTR_ALWAYS_INLINE static inline void intc_disable_irq(uint32_t target, uint32_t irq)
{
    __plic_disable_irq(HPM_PLIC_BASE, target, irq);
}

/**
 * @brief Set interrupt threshold
 *
 * @param[in] target Target to handle specific interrupt
 * @param[in] threshold Threshold of IRQ that can be serviced
 */
ATTR_ALWAYS_INLINE static inline void intc_set_threshold(uint32_t target, uint32_t threshold)
{
    __plic_set_threshold(HPM_PLIC_BASE, target, threshold);
}

/**
 * @brief Claim IRQ
 *
 * @param[in] target Target to handle specific interrupt
 *
 */
ATTR_ALWAYS_INLINE static inline uint32_t intc_claim_irq(uint32_t target)
{
    return __plic_claim_irq(HPM_PLIC_BASE, target);
}

/**
 * @brief Complete IRQ
 *
 * @param[in] target Target to handle specific interrupt
 * @param[in] irq Specific IRQ to be completed
 *
 */
ATTR_ALWAYS_INLINE static inline void intc_complete_irq(uint32_t target, uint32_t irq)
{
    __plic_complete_irq(HPM_PLIC_BASE, target, irq);
}

/*
 * Vector-table based IRQ install and uninstall
 */

/* Machine mode */
extern int __vector_table[];
extern void default_irq_entry(void);

/**
 * @brief Install ISR for certain IRQ for ram based vector table
 *
 * @param[in] irq Target interrupt number
 * @param[in] isr Interrupt service routine
 *
 */
ATTR_ALWAYS_INLINE static inline void install_isr(uint32_t irq, uint32_t isr)
{
    __vector_table[irq] = isr;
}

/**
 * @brief Uninstall ISR for certain IRQ for ram based vector table
 *
 * @param[in] irq Target interrupt number
 *
 */
ATTR_ALWAYS_INLINE static inline void uninstall_isr(uint32_t irq)
{
    __vector_table[irq] = (int) default_irq_entry;
}
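
/*
 * Usage sketch (assumption, not part of the original header): IRQn_UART0
 * and uart0_isr are placeholder names.
 *
 *   void uart0_isr(void);
 *   install_isr(IRQn_UART0, (uint32_t) uart0_isr);
 *   // ...
 *   uninstall_isr(IRQn_UART0);   // route back to default_irq_entry
 */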

/* Supervisor mode */
extern int __vector_s_table[];
extern void default_s_irq_entry(void);

/**
 * @brief Install ISR for certain IRQ for ram based vector table for supervisor mode
 *
 * @param[in] irq Target interrupt number
 * @param[in] isr Interrupt service routine
 *
 */
ATTR_ALWAYS_INLINE static inline void install_s_isr(uint32_t irq, uint32_t isr)
{
    __vector_s_table[irq] = isr;
}

/**
 * @brief Uninstall ISR for certain IRQ for ram based vector table for supervisor mode
 *
 * @param[in] irq Target interrupt number
 *
 */
ATTR_ALWAYS_INLINE static inline void uninstall_s_isr(uint32_t irq)
{
    __vector_s_table[irq] = (int) default_s_irq_entry;
}

/*
 * Inline nested irq entry/exit macros
 */

/**
 * @brief Save CSR
 * @param[in] r Target CSR to be saved
 */
#define SAVE_CSR(r) register long __##r = read_csr(r);

/**
 * @brief Restore CSR
 *
 * @param[in] r Target CSR to be restored
 */
#define RESTORE_CSR(r) write_csr(r, __##r);

#if defined(SUPPORT_PFT_ARCH) && SUPPORT_PFT_ARCH
#define SAVE_MXSTATUS() SAVE_CSR(CSR_MXSTATUS)
#define RESTORE_MXSTATUS() RESTORE_CSR(CSR_MXSTATUS)
#else
#define SAVE_MXSTATUS()
#define RESTORE_MXSTATUS()
#endif

#ifdef __riscv_flen
#define SAVE_FCSR() register int __fcsr = read_fcsr();
#define RESTORE_FCSR() write_fcsr(__fcsr);
#else
#define SAVE_FCSR()
#define RESTORE_FCSR()
#endif

#ifdef __riscv_dsp
#define SAVE_UCODE() SAVE_CSR(CSR_UCODE)
#define RESTORE_UCODE() RESTORE_CSR(CSR_UCODE)
#else
#define SAVE_UCODE()
#define RESTORE_UCODE()
#endif

#ifdef __riscv_flen
#if __riscv_flen == 32
/* RV32I caller registers + MCAUSE + MEPC + MSTATUS + MXSTATUS + 20 FPU caller registers + FCSR + UCODE (DSP) */
#define CONTEXT_REG_NUM (4 * (16 + 4 + 20))
#else /* __riscv_flen == 64 */
/* RV32I caller registers + MCAUSE + MEPC + MSTATUS + MXSTATUS + 20 DFPU caller registers + FCSR + UCODE (DSP) */
#define CONTEXT_REG_NUM (4 * (16 + 4 + 20 * 2))
#endif
#else
/* RV32I caller registers + MCAUSE + MEPC + MSTATUS + MXSTATUS + UCODE (DSP) */
#define CONTEXT_REG_NUM (4 * (16 + 4))
#endif

#ifdef __riscv_flen
/*
 * Save FPU caller registers:
 * NOTE: To simplify the logic, the FPU caller registers are always stored at word offset 20 in the stack
 */
#if __riscv_flen == 32
#define SAVE_FPU_CONTEXT() { \
    __asm volatile("\n\
            c.fswsp ft0, 20*4(sp) \n\
            c.fswsp ft1, 21*4(sp) \n\
            c.fswsp ft2, 22*4(sp) \n\
            c.fswsp ft3, 23*4(sp) \n\
            c.fswsp ft4, 24*4(sp) \n\
            c.fswsp ft5, 25*4(sp) \n\
            c.fswsp ft6, 26*4(sp) \n\
            c.fswsp ft7, 27*4(sp) \n\
            c.fswsp fa0, 28*4(sp) \n\
            c.fswsp fa1, 29*4(sp) \n\
            c.fswsp fa2, 30*4(sp) \n\
            c.fswsp fa3, 31*4(sp) \n\
            c.fswsp fa4, 32*4(sp) \n\
            c.fswsp fa5, 33*4(sp) \n\
            c.fswsp fa6, 34*4(sp) \n\
            c.fswsp fa7, 35*4(sp) \n\
            c.fswsp ft8, 36*4(sp) \n\
            c.fswsp ft9, 37*4(sp) \n\
            c.fswsp ft10, 38*4(sp) \n\
            c.fswsp ft11, 39*4(sp) \n"); \
}

/*
 * Restore FPU caller registers:
 * NOTE: To simplify the logic, the FPU caller registers are always stored at word offset 20 in the stack
 */
#define RESTORE_FPU_CONTEXT() { \
    __asm volatile("\n\
            c.flwsp ft0, 20*4(sp) \n\
            c.flwsp ft1, 21*4(sp) \n\
            c.flwsp ft2, 22*4(sp) \n\
            c.flwsp ft3, 23*4(sp) \n\
            c.flwsp ft4, 24*4(sp) \n\
            c.flwsp ft5, 25*4(sp) \n\
            c.flwsp ft6, 26*4(sp) \n\
            c.flwsp ft7, 27*4(sp) \n\
            c.flwsp fa0, 28*4(sp) \n\
            c.flwsp fa1, 29*4(sp) \n\
            c.flwsp fa2, 30*4(sp) \n\
            c.flwsp fa3, 31*4(sp) \n\
            c.flwsp fa4, 32*4(sp) \n\
            c.flwsp fa5, 33*4(sp) \n\
            c.flwsp fa6, 34*4(sp) \n\
            c.flwsp fa7, 35*4(sp) \n\
            c.flwsp ft8, 36*4(sp) \n\
            c.flwsp ft9, 37*4(sp) \n\
            c.flwsp ft10, 38*4(sp) \n\
            c.flwsp ft11, 39*4(sp) \n"); \
}
#else /* __riscv_flen == 64 */
#define SAVE_FPU_CONTEXT() { \
    __asm volatile("\n\
            c.fsdsp ft0, 20*4(sp) \n\
            c.fsdsp ft1, 22*4(sp) \n\
            c.fsdsp ft2, 24*4(sp) \n\
            c.fsdsp ft3, 26*4(sp) \n\
            c.fsdsp ft4, 28*4(sp) \n\
            c.fsdsp ft5, 30*4(sp) \n\
            c.fsdsp ft6, 32*4(sp) \n\
            c.fsdsp ft7, 34*4(sp) \n\
            c.fsdsp fa0, 36*4(sp) \n\
            c.fsdsp fa1, 38*4(sp) \n\
            c.fsdsp fa2, 40*4(sp) \n\
            c.fsdsp fa3, 42*4(sp) \n\
            c.fsdsp fa4, 44*4(sp) \n\
            c.fsdsp fa5, 46*4(sp) \n\
            c.fsdsp fa6, 48*4(sp) \n\
            c.fsdsp fa7, 50*4(sp) \n\
            c.fsdsp ft8, 52*4(sp) \n\
            c.fsdsp ft9, 54*4(sp) \n\
            c.fsdsp ft10, 56*4(sp) \n\
            c.fsdsp ft11, 58*4(sp) \n"); \
}

/*
 * Restore FPU caller registers:
 * NOTE: To simplify the logic, the FPU caller registers are always stored at word offset 20 in the stack
 */
#define RESTORE_FPU_CONTEXT() { \
    __asm volatile("\n\
            c.fldsp ft0, 20*4(sp) \n\
            c.fldsp ft1, 22*4(sp) \n\
            c.fldsp ft2, 24*4(sp) \n\
            c.fldsp ft3, 26*4(sp) \n\
            c.fldsp ft4, 28*4(sp) \n\
            c.fldsp ft5, 30*4(sp) \n\
            c.fldsp ft6, 32*4(sp) \n\
            c.fldsp ft7, 34*4(sp) \n\
            c.fldsp fa0, 36*4(sp) \n\
            c.fldsp fa1, 38*4(sp) \n\
            c.fldsp fa2, 40*4(sp) \n\
            c.fldsp fa3, 42*4(sp) \n\
            c.fldsp fa4, 44*4(sp) \n\
            c.fldsp fa5, 46*4(sp) \n\
            c.fldsp fa6, 48*4(sp) \n\
            c.fldsp fa7, 50*4(sp) \n\
            c.fldsp ft8, 52*4(sp) \n\
            c.fldsp ft9, 54*4(sp) \n\
            c.fldsp ft10, 56*4(sp) \n\
            c.fldsp ft11, 58*4(sp) \n"); \
}
#endif
#else
#define SAVE_FPU_CONTEXT()
#define RESTORE_FPU_CONTEXT()
#endif

/**
 * @brief Save the caller registers based on the RISC-V ABI specification
 */
#define SAVE_CALLER_CONTEXT() { \
    __asm volatile("addi sp, sp, %0" : : "i"(-CONTEXT_REG_NUM) :); \
    __asm volatile("\n\
            c.swsp ra, 0*4(sp) \n\
            c.swsp t0, 1*4(sp) \n\
            c.swsp t1, 2*4(sp) \n\
            c.swsp t2, 3*4(sp) \n\
            c.swsp s0, 4*4(sp) \n\
            c.swsp s1, 5*4(sp) \n\
            c.swsp a0, 6*4(sp) \n\
            c.swsp a1, 7*4(sp) \n\
            c.swsp a2, 8*4(sp) \n\
            c.swsp a3, 9*4(sp) \n\
            c.swsp a4, 10*4(sp) \n\
            c.swsp a5, 11*4(sp) \n\
            c.swsp a6, 12*4(sp) \n\
            c.swsp a7, 13*4(sp) \n\
            c.swsp s2, 14*4(sp) \n\
            c.swsp s3, 15*4(sp) \n\
            c.swsp t3, 16*4(sp) \n\
            c.swsp t4, 17*4(sp) \n\
            c.swsp t5, 18*4(sp) \n\
            c.swsp t6, 19*4(sp)"); \
    SAVE_FPU_CONTEXT(); \
}

/**
 * @brief Restore the caller registers based on the RISC-V ABI specification
 */
#define RESTORE_CALLER_CONTEXT() { \
    __asm volatile("\n\
            c.lwsp ra, 0*4(sp) \n\
            c.lwsp t0, 1*4(sp) \n\
            c.lwsp t1, 2*4(sp) \n\
            c.lwsp t2, 3*4(sp) \n\
            c.lwsp s0, 4*4(sp) \n\
            c.lwsp s1, 5*4(sp) \n\
            c.lwsp a0, 6*4(sp) \n\
            c.lwsp a1, 7*4(sp) \n\
            c.lwsp a2, 8*4(sp) \n\
            c.lwsp a3, 9*4(sp) \n\
            c.lwsp a4, 10*4(sp) \n\
            c.lwsp a5, 11*4(sp) \n\
            c.lwsp a6, 12*4(sp) \n\
            c.lwsp a7, 13*4(sp) \n\
            c.lwsp s2, 14*4(sp) \n\
            c.lwsp s3, 15*4(sp) \n\
            c.lwsp t3, 16*4(sp) \n\
            c.lwsp t4, 17*4(sp) \n\
            c.lwsp t5, 18*4(sp) \n\
            c.lwsp t6, 19*4(sp) \n"); \
    RESTORE_FPU_CONTEXT(); \
    __asm volatile("addi sp, sp, %0" : : "i"(CONTEXT_REG_NUM) :); \
}

#ifdef __riscv_flen
#define SAVE_FPU_STATE() { \
    __asm volatile("frsr s1\n"); \
}
#define RESTORE_FPU_STATE() { \
    __asm volatile("fssr s1\n"); \
}
#else
#define SAVE_FPU_STATE()
#define RESTORE_FPU_STATE()
#endif

#ifdef __riscv_dsp
/*
 * Save DSP context
 * NOTE: DSP context registers are stored at word offset 41 in the stack
 */
#define SAVE_DSP_CONTEXT() { \
    __asm volatile("rdov s0\n"); \
}
/*
 * Restore DSP context
 * NOTE: DSP context registers are stored at word offset 41 in the stack
 */
#define RESTORE_DSP_CONTEXT() { \
    __asm volatile("csrw ucode, s0\n"); \
}
#else
#define SAVE_DSP_CONTEXT()
#define RESTORE_DSP_CONTEXT()
#endif

/**
 * @brief Enter nested IRQ handling
 * @note To simplify the logic, nested-IRQ related registers are stored on the stack as below:
 *       MCAUSE   - word offset 16 (not used in the vectored mode)
 *       EPC      - word offset 17
 *       MSTATUS  - word offset 18
 *       MXSTATUS - word offset 19
 */
#define ENTER_NESTED_IRQ_HANDLING_M() { \
    __asm volatile("\n\
            csrr s2, mepc \n\
            csrr s3, mstatus \n"); \
    SAVE_FPU_STATE(); \
    SAVE_DSP_CONTEXT(); \
    __asm volatile("\n\
            c.li a5, 8 \n\
            csrs mstatus, a5 \n"); \
}

/**
 * @brief Complete IRQ handling
 */
#define COMPLETE_IRQ_HANDLING_M(irq_num) { \
    __asm volatile("\n\
            lui a5, 0x1 \n\
            addi a5, a5, -2048 \n\
            csrc mie, a5 \n"); \
    __asm volatile("\n\
            lui a4, 0xe4200 \n"); \
    __asm volatile("li a3, %0" : : "i" (irq_num) :); \
    __asm volatile("sw a3, 4(a4) \n\
            fence io, io \n"); \
    __asm volatile("csrs mie, a5"); \
}

/**
 * @brief Exit nested IRQ handling
 * @note To simplify the logic, nested-IRQ related registers are stored on the stack as below:
 *       MCAUSE   - word offset 16 (not used in the vectored mode)
 *       EPC      - word offset 17
 *       MSTATUS  - word offset 18
 *       MXSTATUS - word offset 19
 */
#define EXIT_NESTED_IRQ_HANDLING_M() { \
    __asm volatile("\n\
            csrw mstatus, s3 \n\
            csrw mepc, s2 \n"); \
    RESTORE_FPU_STATE(); \
    RESTORE_DSP_CONTEXT(); \
}

#define ENTER_NESTED_IRQ_HANDLING_S() { \
    __asm volatile("\n\
            csrr s2, sepc \n\
            csrr s3, sstatus \n"); \
    SAVE_FPU_STATE(); \
    SAVE_DSP_CONTEXT(); \
    __asm volatile("\n\
            c.li a5, 8 \n\
            csrs sstatus, a5 \n"); \
}

#define COMPLETE_IRQ_HANDLING_S(irq_num) { \
    __asm volatile("\n\
            lui a5, 0x1 \n\
            addi a5, a5, -2048 \n\
            csrc sie, a5 \n"); \
    __asm volatile("\n\
            lui a4, 0xe4201 \n"); \
    __asm volatile("li a3, %0" : : "i" (irq_num) :); \
    __asm volatile("sw a3, 4(a4) \n\
            fence io, io \n"); \
    __asm volatile("csrs sie, a5"); \
}

/**
 * @brief Exit nested IRQ handling at supervisor mode
 * @note To simplify the logic, nested-IRQ related registers are stored on the stack as below:
 *       SCAUSE  - word offset 16 (not used in the vectored mode)
 *       EPC     - word offset 17
 *       SSTATUS - word offset 18
 */
#define EXIT_NESTED_IRQ_HANDLING_S() { \
    __asm volatile("\n\
            csrw sstatus, s3 \n\
            csrw sepc, s2 \n"); \
    RESTORE_FPU_STATE(); \
    RESTORE_DSP_CONTEXT(); \
}

/** @brief Nested IRQ entry macro: save CSRs and enable global IRQ. */
#define NESTED_IRQ_ENTER() \
    SAVE_CSR(CSR_MEPC) \
    SAVE_CSR(CSR_MSTATUS) \
    SAVE_MXSTATUS() \
    SAVE_FCSR() \
    SAVE_UCODE() \
    set_csr(CSR_MSTATUS, CSR_MSTATUS_MIE_MASK);

/** @brief Nested IRQ exit macro: restore CSRs. */
#define NESTED_IRQ_EXIT() \
    RESTORE_CSR(CSR_MSTATUS) \
    RESTORE_CSR(CSR_MEPC) \
    RESTORE_MXSTATUS() \
    RESTORE_FCSR() \
    RESTORE_UCODE()

/**
 * @brief Complete the given interrupt at the vectored PLIC from within a nested handler
 * @param[in] irq Target interrupt number
 */
#define NESTED_VPLIC_COMPLETE_INTERRUPT(irq) \
    do { \
        clear_csr(CSR_MIE, CSR_MIP_MEIP_MASK); \
        __plic_complete_irq(HPM_PLIC_BASE, HPM_PLIC_TARGET_M_MODE, irq); \
        __asm volatile("fence io, io"); \
        set_csr(CSR_MIE, CSR_MIP_MEIP_MASK); \
    } while (0)
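
/*
 * Usage sketch (assumption, not part of the original header): a handler
 * body that allows preemption while it runs by saving the relevant CSRs,
 * re-enabling global interrupts, and completing the IRQ at the PLIC before
 * returning. IRQn_UART0 and uart0_isr are placeholder names.
 *
 *   void uart0_isr(void)
 *   {
 *       NESTED_IRQ_ENTER();
 *       // ... service the peripheral ...
 *       NESTED_VPLIC_COMPLETE_INTERRUPT(IRQn_UART0);
 *       NESTED_IRQ_EXIT();
 *   }
 */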

#ifdef __cplusplus
#define EXTERN_C extern "C"
#else
#define EXTERN_C
#endif

#define ISR_NAME_M(irq_num) default_isr_##irq_num
#define ISR_NAME_S(irq_num) default_isr_s_##irq_num

/**
 * @brief Declare an external interrupt handler for machine mode
 *
 * @param[in] irq_num - IRQ number index
 * @param[in] isr - Application IRQ handler function pointer
 */
#ifndef USE_NONVECTOR_MODE
#define SDK_DECLARE_EXT_ISR_M(irq_num, isr) \
    void isr(void) __attribute__((section(".isr_vector"))); \
    EXTERN_C void ISR_NAME_M(irq_num)(void) __attribute__((section(".isr_vector"))); \
    void ISR_NAME_M(irq_num)(void) \
    { \
        SAVE_CALLER_CONTEXT(); \
        ENTER_NESTED_IRQ_HANDLING_M(); \
        __asm volatile("la t1, %0\n\t" : : "i" (isr) :); \
        __asm volatile("jalr t1\n"); \
        COMPLETE_IRQ_HANDLING_M(irq_num); \
        EXIT_NESTED_IRQ_HANDLING_M(); \
        RESTORE_CALLER_CONTEXT(); \
        __asm volatile("mret\n"); \
    }

/**
 * @brief Declare an external interrupt handler for supervisor mode
 *
 * @param[in] irq_num - IRQ number index
 * @param[in] isr - Application IRQ handler function pointer
 */
#define SDK_DECLARE_EXT_ISR_S(irq_num, isr) \
    void isr(void) __attribute__((section(".isr_s_vector"))); \
    EXTERN_C void ISR_NAME_S(irq_num)(void) __attribute__((section(".isr_s_vector"))); \
    void ISR_NAME_S(irq_num)(void) \
    { \
        SAVE_CALLER_CONTEXT(); \
        ENTER_NESTED_IRQ_HANDLING_S(); \
        __asm volatile("la t1, %0\n\t" : : "i" (isr) :); \
        __asm volatile("jalr t1\n"); \
        COMPLETE_IRQ_HANDLING_S(irq_num); \
        EXIT_NESTED_IRQ_HANDLING_S(); \
        RESTORE_CALLER_CONTEXT(); \
        __asm volatile("sret\n"); \
    }
#else
#define SDK_DECLARE_EXT_ISR_M(irq_num, isr) \
    void isr(void) __attribute__((section(".isr_vector"))); \
    EXTERN_C void ISR_NAME_M(irq_num)(void) __attribute__((section(".isr_vector"))); \
    void ISR_NAME_M(irq_num)(void) \
    { \
        isr(); \
    }

#define SDK_DECLARE_EXT_ISR_S(irq_num, isr) \
    void isr(void) __attribute__((section(".isr_vector"))); \
    EXTERN_C void ISR_NAME_S(irq_num)(void) __attribute__((section(".isr_vector"))); \
    void ISR_NAME_S(irq_num)(void) \
    { \
        isr(); \
    }
#endif
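
/*
 * Usage sketch (assumption, not part of the original header): bind an
 * application handler to an external IRQ and enable it at priority 1.
 * IRQn_UART0 and uart0_isr are placeholder names.
 *
 *   void uart0_isr(void)
 *   {
 *       // clear the peripheral status flags and move the data
 *   }
 *   SDK_DECLARE_EXT_ISR_M(IRQn_UART0, uart0_isr)
 *
 *   // in the application init code:
 *   intc_m_enable_irq_with_priority(IRQn_UART0, 1);
 */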

/**
 * @brief Declare machine timer interrupt handler
 *
 * @param[in] isr - MCHTMR IRQ handler function pointer
 */
#define SDK_DECLARE_MCHTMR_ISR(isr) \
    void isr(void) __attribute__((section(".isr_vector"))); \
    EXTERN_C void mchtmr_isr(void) __attribute__((section(".isr_vector"))); \
    void mchtmr_isr(void) \
    { \
        isr(); \
    }

/**
 * @brief Declare machine software interrupt handler
 *
 * @param[in] isr - SWI IRQ handler function pointer
 */
#define SDK_DECLARE_SWI_ISR(isr) \
    void isr(void) __attribute__((section(".isr_vector"))); \
    EXTERN_C void swi_isr(void) __attribute__((section(".isr_vector"))); \
    void swi_isr(void) \
    { \
        isr(); \
    }
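
/*
 * Usage sketch (assumption, not part of the original header): hook the
 * machine timer interrupt; isr_mchtmr is a placeholder handler name.
 *
 *   void isr_mchtmr(void)
 *   {
 *       // reload mtimecmp for the next tick, then handle the tick
 *   }
 *   SDK_DECLARE_MCHTMR_ISR(isr_mchtmr)
 *
 *   // in the application init code:
 *   enable_mchtmr_irq();
 */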

/* Supervisor mode */

/**
 * @brief Declare machine timer interrupt handler for supervisor mode
 *
 * @param[in] isr - MCHTMR IRQ handler function pointer
 */
#define SDK_DECLARE_MCHTMR_ISR_S(isr) \
    void isr(void) __attribute__((section(".isr_vector"))); \
    EXTERN_C void mchtmr_s_isr(void) __attribute__((section(".isr_vector"))); \
    void mchtmr_s_isr(void) \
    { \
        isr(); \
    }

/**
 * @brief Declare software interrupt handler for supervisor mode
 *
 * @param[in] isr - SWI IRQ handler function pointer
 */
#define SDK_DECLARE_SWI_ISR_S(isr) \
    void isr(void) __attribute__((section(".isr_vector"))); \
    EXTERN_C void swi_s_isr(void) __attribute__((section(".isr_vector"))); \
    void swi_s_isr(void) \
    { \
        isr(); \
    }

#define CSR_MSTATUS_MPP_S_MODE (0x1)

#define MODE_SWITCH_FROM_M(mstatus, mepc, label, mode) \
    do { \
        if (label) { \
            write_csr(mepc, label); \
        } \
        clear_csr(mstatus, CSR_MSTATUS_MPP_MASK); \
        set_csr(mstatus, CSR_MSTATUS_MPP_SET(mode)); \
    } while (0)

typedef void (*s_mode_entry)(void);

/**
 * @brief Switch mode to supervisor from machine
 *
 * @param[in] entry - entry point after mode is switched
 */
static inline void switch_to_s_mode(s_mode_entry entry)
{
    write_csr(CSR_SEPC, entry);
    MODE_SWITCH_FROM_M(CSR_MSTATUS, CSR_MEPC, entry, CSR_MSTATUS_MPP_S_MODE);
    if (entry) {
        __asm("mret");
    }
}
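
/*
 * Usage sketch (assumption, not part of the original header): enter a
 * supervisor-mode entry point from machine mode; s_mode_main is a
 * placeholder name.
 *
 *   void s_mode_main(void)
 *   {
 *       // code here executes in supervisor mode
 *   }
 *   // ... in machine-mode startup code:
 *   switch_to_s_mode(s_mode_main);
 */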

#ifdef __cplusplus
}
#endif

/**
 * @}
 */
#endif /* HPM_INTERRUPT_H */