Browse Source

feat: Added rt_interrupt_context* family for nested interrupt handling

These changes introduce the rt_interrupt_context family, providing a
mechanism for managing nested interrupts. The context management
ensures proper storage and retrieval of interrupt states, improving
reliability in nested interrupt scenarios by enabling context tracking
across different interrupt levels. This enhancement is essential for
platforms where nested interrupt handling is crucial, such as in
real-time or multi-threaded applications.

Changes:
- Defined rt_interrupt_context structure with context and node fields
  in `rtdef.h` to support nested interrupts.
- Added rt_slist_pop function in `rtservice.h` for simplified node
  removal in singly linked lists.
- Declared rt_interrupt_context_push, rt_interrupt_context_pop, and
  rt_interrupt_context_get functions in `rtthread.h` to manage the
  interrupt/exception stack.
- Modified AArch64 CPU support in `cpuport.h` to include
  rt_hw_show_register for debugging registers.
- Refactored `_rt_hw_trap_irq` in `trap.c` for context-aware IRQ
  handling, with stack push/pop logic to handle nested contexts.
- Implemented interrupt context push, pop, and retrieval logic in
  `irq.c` to manage context at the CPU level.

Signed-off-by: Shell <smokewood@qq.com>
Shell 8 months ago
parent
commit
40cd8cce99
7 changed files with 76 additions and 2 deletions
  1. 16 0
      include/rtdef.h
  2. 15 0
      include/rtservice.h
  3. 4 0
      include/rtthread.h
  4. 5 0
      libcpu/Kconfig
  5. 2 0
      libcpu/aarch64/common/include/armv8.h
  6. 14 2
      libcpu/aarch64/common/trap.c
  7. 20 0
      src/irq.c

+ 16 - 0
include/rtdef.h

@@ -723,6 +723,9 @@ struct rt_cpu
 #ifdef RT_USING_CPU_USAGE_TRACER
     struct rt_cpu_usage_stats   cpu_stat;
 #endif /* RT_USING_CPU_USAGE_TRACER */
+#ifdef ARCH_USING_IRQ_CTX_LIST
+    rt_slist_t                  irq_ctx_head;
+#endif /* ARCH_USING_IRQ_CTX_LIST */
 };
 
 #else /* !RT_USING_SMP */
@@ -734,6 +737,9 @@ struct rt_cpu
 #ifdef RT_USING_CPU_USAGE_TRACER
     struct rt_cpu_usage_stats   cpu_stat;
 #endif /* RT_USING_CPU_USAGE_TRACER */
+#ifdef ARCH_USING_IRQ_CTX_LIST
+    rt_slist_t                  irq_ctx_head;
+#endif /* ARCH_USING_IRQ_CTX_LIST */
 };
 
 #endif /* RT_USING_SMP */
@@ -744,6 +750,16 @@ typedef struct rt_cpu *rt_cpu_t;
 
 struct rt_thread;
 
+/**
+ * interrupt/exception frame handling
+ *
+ */
+
+typedef struct rt_interrupt_context {
+    void *context;      /**< arch-specific saved frame (e.g. struct rt_hw_exp_stack * on AArch64) */
+    rt_slist_t node;    /**< links this frame into the per-CPU irq_ctx_head stack */
+} *rt_interrupt_context_t;
+
 #ifdef RT_USING_SMART
 typedef rt_err_t (*rt_wakeup_func_t)(void *object, struct rt_thread *thread);
 

+ 15 - 0
include/rtservice.h

@@ -11,6 +11,7 @@
  * 2012-03-22     Bernard      rename kservice.h to rtservice.h
  * 2017-11-15     JasonJia     Modify rt_slist_foreach to rt_slist_for_each_entry.
  *                             Make code cleanup.
+ * 2024-01-03     Shell        add rt_slist_pop()
  */
 
 #ifndef __RT_SERVICE_H__
@@ -224,6 +225,20 @@ rt_inline unsigned int rt_slist_len(const rt_slist_t *l)
     return len;
 }
 
+/**
+ * @brief Detach and return the first node of a singly linked list.
+ *
+ * @param l the list header
+ * @return the removed first node, or RT_NULL if the list is empty
+ */
+rt_inline rt_slist_t *rt_slist_pop(rt_slist_t *l)
+{
+    /* rt_slist_t is the node type itself, so no cast of `l` is needed */
+    rt_slist_t *node = l->next;
+
+    if (node != (rt_slist_t *)0)
+    {
+        /* unlink the node right after the header */
+        l->next = node->next;
+    }
+
+    return node;
+}
+
 rt_inline rt_slist_t *rt_slist_remove(rt_slist_t *l, rt_slist_t *n)
 {
     /* remove slist head */

+ 4 - 0
include/rtthread.h

@@ -713,6 +713,10 @@ rt_err_t  rt_device_control(rt_device_t dev, int cmd, void *arg);
 void rt_interrupt_enter(void);
 void rt_interrupt_leave(void);
 
+void rt_interrupt_context_push(rt_interrupt_context_t this_ctx);
+void rt_interrupt_context_pop(void);
+void *rt_interrupt_context_get(void);
+
 /**
  * CPU object
  */

+ 5 - 0
libcpu/Kconfig

@@ -219,6 +219,7 @@ config ARCH_ARMV8
     select ARCH_ARM
     select ARCH_ARM_MMU
     select RT_USING_CPU_FFS
+    select ARCH_USING_IRQ_CTX_LIST
 
 config ARCH_MIPS
     bool
@@ -325,3 +326,7 @@ config ARCH_CPU_STACK_GROWS_UPWARD
 config ARCH_USING_HW_THREAD_SELF
     bool
     default n
+
+config ARCH_USING_IRQ_CTX_LIST
+    bool
+    default n

+ 2 - 0
libcpu/aarch64/common/include/armv8.h

@@ -145,6 +145,8 @@ struct rt_hw_exp_stack
     rt_uint128_t fpu[32];
 };
 
+void rt_hw_show_register(struct rt_hw_exp_stack *regs);
+
 #define SP_ELx     ((unsigned long)0x01)
 #define SP_EL0     ((unsigned long)0x00)
 #define PSTATE_EL1 ((unsigned long)0x04)

+ 14 - 2
libcpu/aarch64/common/trap.c

@@ -167,7 +167,7 @@ void rt_hw_show_register(struct rt_hw_exp_stack *regs)
 }
 
 #ifndef RT_USING_PIC
-void rt_hw_trap_irq(void)
+static void _rt_hw_trap_irq(rt_interrupt_context_t irq_context)
 {
 #ifdef SOC_BCM283x
     extern rt_uint8_t core_timer_flag;
@@ -269,12 +269,24 @@ void rt_hw_trap_irq(void)
 #endif
 }
 #else
-void rt_hw_trap_irq(void)
+/* PIC-based IRQ dispatch: the context frame was already published by the
+ * rt_hw_trap_irq() wrapper, so it is not consumed here. */
+static void _rt_hw_trap_irq(struct rt_interrupt_context *this_ctx)
 {
+    /* parameter kept for signature parity with the non-PIC path;
+     * silence the unused-parameter warning */
+    (void)this_ctx;
     rt_pic_do_traps();
 }
 #endif
 
+/* IRQ trap entry for AArch64: publishes the saved register frame on the
+ * per-CPU interrupt context stack so nested handlers can retrieve it via
+ * rt_interrupt_context_get(), then dispatches to the real handler. */
+void rt_hw_trap_irq(struct rt_hw_exp_stack *regs)
+{
+    struct rt_interrupt_context this_ctx = {
+        .context = regs,
+        /* self-reference is legal: this_ctx is in scope within its own initializer */
+        .node = RT_SLIST_OBJECT_INIT(this_ctx.node),
+    };
+
+    rt_interrupt_context_push(&this_ctx);
+    _rt_hw_trap_irq(&this_ctx);
+    /* must pop before returning: this_ctx lives on this stack frame */
+    rt_interrupt_context_pop();
+}
+
 #ifdef RT_USING_SMART
 #define DBG_CHECK_EVENT(regs, esr) dbg_check_event(regs, esr)
 #else

+ 20 - 0
src/irq.c

@@ -14,6 +14,7 @@
  * 2022-07-04     Yunjie       fix RT_DEBUG_LOG
  * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
  * 2024-01-05     Shell        Fixup of data racing in rt_interrupt_get_nest
+ * 2024-01-03     Shell        Support for interrupt context
  */
 
 #include <rthw.h>
@@ -69,6 +70,25 @@ void rt_interrupt_leave_sethook(void (*hook)(void))
 volatile rt_atomic_t rt_interrupt_nest = 0;
 #endif /* RT_USING_SMP */
 
+#ifdef ARCH_USING_IRQ_CTX_LIST
+/**
+ * @brief Push an interrupt context frame onto the current CPU's context stack.
+ *
+ * @param this_ctx the context frame to publish (caller-owned storage)
+ */
+void rt_interrupt_context_push(rt_interrupt_context_t this_ctx)
+{
+    rt_slist_insert(&rt_cpu_self()->irq_ctx_head, &this_ctx->node);
+}
+
+/**
+ * @brief Discard the most recently pushed interrupt context frame
+ *        on the current CPU.
+ */
+void rt_interrupt_context_pop(void)
+{
+    rt_slist_pop(&rt_cpu_self()->irq_ctx_head);
+}
+
+/**
+ * @brief Get the arch-specific context of the innermost interrupt frame.
+ *
+ * @return the saved context pointer, or RT_NULL when no interrupt context
+ *         has been pushed on the current CPU
+ */
+void *rt_interrupt_context_get(void)
+{
+    struct rt_cpu *this_cpu = rt_cpu_self();
+    rt_slist_t *node = this_cpu->irq_ctx_head.next;
+
+    /* guard the empty case: rt_slist_first_entry on an empty list would
+     * fabricate an entry pointer from RT_NULL and crash on dereference */
+    if (node == RT_NULL)
+    {
+        return RT_NULL;
+    }
+
+    return rt_slist_entry(node, struct rt_interrupt_context, node)->context;
+}
+#endif /* ARCH_USING_IRQ_CTX_LIST */
 
 /**
  * @brief This function will be invoked by BSP, when enter interrupt service routine