
Merge pull request #4843 from fenghuijie/master

Add dcache invalidate and dcache clean & invalidate interfaces
Bernard Xiong 3 years ago
commit 3a29bf4889
6 changed files with 215 additions and 42 deletions
  1. include/rtthread.h (+4 -1)
  2. libcpu/arm/cortex-a/cache.c (+48 -0)
  3. src/Kconfig (+5 -0)
  4. src/idle.c (+133 -21)
  5. src/scheduler.c (+0 -5)
  6. src/thread.c (+25 -15)

+ 4 - 1
include/rtthread.h

@@ -168,7 +168,6 @@ void rt_thread_idle_init(void);
 rt_err_t rt_thread_idle_sethook(void (*hook)(void));
 rt_err_t rt_thread_idle_delhook(void (*hook)(void));
 #endif
-void rt_thread_idle_excute(void);
 rt_thread_t rt_thread_idle_gethandler(void);
 
 /*
@@ -396,6 +395,10 @@ rt_err_t rt_mq_recv(rt_mq_t    mq,
 rt_err_t rt_mq_control(rt_mq_t mq, int cmd, void *arg);
 #endif
 
+/* defunct */
+void rt_thread_defunct_enqueue(rt_thread_t thread);
+rt_thread_t rt_thread_defunct_dequeue(void);
+
 /*
  * spinlock
  */
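
The two declarations above are the new public interface to the defunct list; the comments added in src/idle.c below require both calls to be made with interrupts disabled. A condensed, hedged sketch of the consumer side (essentially what rt_defunct_execute() in src/idle.c does, minus the module, signal and system-object handling):

#include <rthw.h>
#include <rtthread.h>

/* Illustrative only: drain the defunct queue and run each thread's cleanup hook. */
void reap_defunct_threads(void)
{
    rt_thread_t thread;
    rt_base_t lock;

    while (1)
    {
        lock = rt_hw_interrupt_disable();      /* both queue calls need the interrupt lock */
        thread = rt_thread_defunct_dequeue();  /* returns RT_NULL when the queue is empty */
        rt_hw_interrupt_enable(lock);

        if (thread == RT_NULL)
            break;

        if (thread->cleanup != RT_NULL)
            thread->cleanup(thread);           /* user-supplied cleanup hook */
    }
}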

+ 48 - 0
libcpu/arm/cortex-a/cache.c

@@ -58,6 +58,37 @@ void rt_hw_cpu_dcache_invalidate(void *addr, int size)
     asm volatile ("dsb":::"memory");
 }
 
+void rt_hw_cpu_dcache_inv_range(void *addr, int size)
+{
+    rt_uint32_t line_size = rt_cpu_dcache_line_size();
+    rt_uint32_t start_addr = (rt_uint32_t)addr;
+    rt_uint32_t end_addr = (rt_uint32_t)addr + size;
+
+    asm volatile ("dmb":::"memory");
+
+    if ((start_addr & (line_size - 1)) != 0)
+    {
+        start_addr &= ~(line_size - 1);
+        asm volatile ("mcr p15, 0, %0, c7, c14, 1" :: "r"(start_addr));
+        start_addr += line_size;
+        asm volatile ("dsb":::"memory");
+    }
+
+    if ((end_addr & (line_size - 1)) != 0)
+    {
+        end_addr &= ~(line_size - 1);
+        asm volatile ("mcr p15, 0, %0, c7, c14, 1" :: "r"(end_addr));
+        asm volatile ("dsb":::"memory");
+    }
+
+    while (start_addr < end_addr)
+    {
+        asm volatile ("mcr p15, 0, %0, c7, c6, 1" :: "r"(start_addr));  /* dcimvac */
+        start_addr += line_size;
+    }
+    asm volatile ("dsb":::"memory");
+}
+
 void rt_hw_cpu_dcache_clean(void *addr, int size)
 {
     rt_uint32_t line_size = rt_cpu_dcache_line_size();
@@ -75,6 +106,23 @@ void rt_hw_cpu_dcache_clean(void *addr, int size)
     asm volatile ("dsb":::"memory");
 }
 
+void rt_hw_cpu_dcache_clean_inv(void *addr, int size)
+{
+    rt_uint32_t line_size = rt_cpu_dcache_line_size();
+    rt_uint32_t start_addr = (rt_uint32_t)addr;
+    rt_uint32_t end_addr = (rt_uint32_t) addr + size + line_size - 1;
+
+    asm volatile ("dmb":::"memory");
+    start_addr &= ~(line_size-1);
+    end_addr &= ~(line_size-1);
+    while (start_addr < end_addr)
+    {
+        asm volatile ("mcr p15, 0, %0, c7, c14, 1" :: "r"(start_addr));
+        start_addr += line_size;
+    }
+    asm volatile ("dsb":::"memory");
+}
+
 void rt_hw_cpu_icache_ops(int ops, void *addr, int size)
 {
     if (ops == RT_HW_CACHE_INVALIDATE)
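
The two new range operations are what a driver typically wraps around a DMA transfer: clean & invalidate before the device writes the buffer, invalidate again before the CPU reads it. A hedged sketch of that pattern; dma_start_rx() and dma_wait_done() are assumed placeholders, not part of this patch:

#include <rtthread.h>

/* prototypes of the functions added above */
void rt_hw_cpu_dcache_inv_range(void *addr, int size);
void rt_hw_cpu_dcache_clean_inv(void *addr, int size);

/* hypothetical DMA driver hooks */
extern void dma_start_rx(void *buf, int len);
extern void dma_wait_done(void);

static rt_uint8_t rx_buf[1024];

void dma_receive_example(void)
{
    /* push out (and drop) any dirty lines so none are evicted into the buffer mid-transfer */
    rt_hw_cpu_dcache_clean_inv(rx_buf, sizeof(rx_buf));

    dma_start_rx(rx_buf, sizeof(rx_buf));
    dma_wait_done();

    /* discard stale lines before the CPU reads the freshly DMA'd data */
    rt_hw_cpu_dcache_inv_range(rx_buf, sizeof(rx_buf));
}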

+ 5 - 0
src/Kconfig

@@ -98,6 +98,11 @@ config IDLE_THREAD_STACK_SIZE
     int "The stack size of idle thread"
     default 256
 
+config SYSTEM_THREAD_STACK_SIZE
+    int "The stack size of system thread (for defunct etc.)"
+    depends on RT_USING_SMP
+    default IDLE_THREAD_STACK_SIZE
+
 config RT_USING_TIMER_SOFT
     bool "Enable software timer with a timer thread"
     default y
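
When RT_USING_SMP is enabled, the new option ends up in the generated rtconfig.h next to the idle stack size, roughly as below (illustrative values only; the actual numbers come from the board configuration):

/* excerpt of a generated rtconfig.h, illustrative values */
#define RT_USING_SMP
#define IDLE_THREAD_STACK_SIZE   256
#define SYSTEM_THREAD_STACK_SIZE 256  /* defaults to IDLE_THREAD_STACK_SIZE */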

+ 133 - 21
src/idle.c

@@ -44,12 +44,22 @@
 #define _CPUS_NR                1
 #endif /* RT_USING_SMP */
 
-extern rt_list_t rt_thread_defunct;
+static rt_list_t _rt_thread_defunct = RT_LIST_OBJECT_INIT(_rt_thread_defunct);
 
 static struct rt_thread idle[_CPUS_NR];
 ALIGN(RT_ALIGN_SIZE)
 static rt_uint8_t rt_thread_stack[_CPUS_NR][IDLE_THREAD_STACK_SIZE];
 
+#ifdef RT_USING_SMP
+#ifndef SYSTEM_THREAD_STACK_SIZE
+#define SYSTEM_THREAD_STACK_SIZE IDLE_THREAD_STACK_SIZE
+#endif
+static struct rt_thread rt_system_thread;
+ALIGN(RT_ALIGN_SIZE)
+static rt_uint8_t rt_system_stack[SYSTEM_THREAD_STACK_SIZE];
+static struct rt_semaphore system_sem;
+#endif
+
 #ifdef RT_USING_IDLE_HOOK
 #ifndef RT_IDLE_HOOK_LIST_SIZE
 #define RT_IDLE_HOOK_LIST_SIZE  4
@@ -127,43 +137,75 @@ rt_err_t rt_thread_idle_delhook(void (*hook)(void))
 
 #endif /* RT_USING_IDLE_HOOK */
 
-#ifdef RT_USING_HEAP
+#ifdef RT_USING_MODULE
 /* Return whether there is defunctional thread to be deleted. */
 rt_inline int _has_defunct_thread(void)
 {
     /* The rt_list_isempty has prototype of "int rt_list_isempty(const rt_list_t *l)".
-     * So the compiler has a good reason that the rt_thread_defunct list does
-     * not change within rt_thread_idle_excute thus optimize the "while" loop
+     * So the compiler has a good reason that the _rt_thread_defunct list does
+     * not change within rt_defunct_execute thus optimize the "while" loop
      * into a "if".
      *
      * So add the volatile qualifier here. */
-    const volatile rt_list_t *l = (const volatile rt_list_t *)&rt_thread_defunct;
+    const volatile rt_list_t *l = (const volatile rt_list_t *)&_rt_thread_defunct;
 
     return l->next != l;
 }
-#endif /* RT_USING_HEAP */
+#endif /* RT_USING_MODULE */
+
+/* enqueue a thread to defunct queue
+ * it must be called between rt_hw_interrupt_disable and rt_hw_interrupt_enable
+ */
+void rt_thread_defunct_enqueue(rt_thread_t thread)
+{
+    rt_list_insert_after(&_rt_thread_defunct, &thread->tlist);
+#ifdef RT_USING_SMP
+    rt_sem_release(&system_sem);
+#endif
+}
+
+/* dequeue a thread from defunct queue
+ * it must be called between rt_hw_interrupt_disable and rt_hw_interrupt_enable
+ */
+rt_thread_t rt_thread_defunct_dequeue(void)
+{
+    rt_thread_t thread = RT_NULL;
+    rt_list_t *l = &_rt_thread_defunct;
+
+    if (l->next != l)
+    {
+        thread = rt_list_entry(l->next,
+                struct rt_thread,
+                tlist);
+        rt_list_remove(&(thread->tlist));
+    }
+    return thread;
+}
 
 /**
  * @ingroup Thread
  *
  * This function will perform system background job when system idle.
  */
-void rt_thread_idle_excute(void)
+static void rt_defunct_execute(void)
 {
-    /* Loop until there is no dead thread. So one call to rt_thread_idle_excute
+    /* Loop until there is no dead thread. So one call to rt_defunct_execute
     * will do all the cleanups. */
-    /* disable interrupt */
-
-    RT_DEBUG_NOT_IN_INTERRUPT;
-
-#ifdef RT_USING_HEAP
     while (1)
     {
         rt_base_t lock;
         rt_thread_t thread;
+        void (*cleanup)(struct rt_thread *tid);
 
+#ifdef RT_USING_MODULE
+        struct rt_dlmodule *module = RT_NULL;
+#endif
+        RT_DEBUG_NOT_IN_INTERRUPT;
+
+        /* disable interrupt */
         lock = rt_hw_interrupt_disable();
 
+#ifdef RT_USING_MODULE
         /* check whether list is empty */
         if (!_has_defunct_thread())
         {
@@ -171,18 +213,56 @@ void rt_thread_idle_excute(void)
             break;
         }
         /* get defunct thread */
-        thread = rt_list_entry(rt_thread_defunct.next,
+        thread = rt_list_entry(_rt_thread_defunct.next,
                 struct rt_thread,
                 tlist);
+        module = (struct rt_dlmodule*)thread->module_id;
+        if (module)
+        {
+            dlmodule_destroy(module);
+        }
         /* remove defunct thread */
         rt_list_remove(&(thread->tlist));
-        /* release thread's stack */
-        RT_KERNEL_FREE(thread->stack_addr);
-        /* delete thread object */
-        rt_object_delete((rt_object_t)thread);
-        rt_hw_interrupt_enable(lock);
+#else
+        thread = rt_thread_defunct_dequeue();
+        if (!thread)
+        {
+            rt_hw_interrupt_enable(lock);
+            break;
+        }
+#endif
+        /* invoke thread cleanup */
+        cleanup = thread->cleanup;
+        if (cleanup != RT_NULL)
+        {
+            rt_hw_interrupt_enable(lock);
+            cleanup(thread);
+            lock = rt_hw_interrupt_disable();
+        }
+
+#ifdef RT_USING_SIGNALS
+        rt_thread_free_sig(thread);
+#endif
+
+        /* if it's a system object, not delete it */
+        if (rt_object_is_systemobject((rt_object_t)thread) == RT_TRUE)
+        {
+            /* detach this object */
+            rt_object_detach((rt_object_t)thread);
+            /* enable interrupt */
+            rt_hw_interrupt_enable(lock);
+        }
+        else
+        {
+            rt_hw_interrupt_enable(lock);
+#ifdef RT_USING_HEAP
+            /* release thread's stack */
+            RT_KERNEL_FREE(thread->stack_addr);
+            /* delete thread object */
+            rt_object_delete((rt_object_t)thread);
+#endif
+        }
     }
-#endif /* RT_USING_HEAP */
 }
 
 extern void rt_system_power_manager(void);
@@ -214,13 +294,27 @@ static void rt_thread_idle_entry(void *parameter)
         }
 #endif /* RT_USING_IDLE_HOOK */
 
-        rt_thread_idle_excute();
+#ifndef RT_USING_SMP
+        rt_defunct_execute();
+#endif /* RT_USING_SMP */
+
 #ifdef RT_USING_PM
         rt_system_power_manager();
 #endif /* RT_USING_PM */
     }
 }
 
+#ifdef RT_USING_SMP
+static void rt_thread_system_entry(void *parameter)
+{
+    while (1)
+    {
+        rt_sem_take(&system_sem, RT_WAITING_FOREVER);
+        rt_defunct_execute();
+    }
+}
+#endif
+
 /**
  * @ingroup SystemInit
  *
@@ -250,6 +344,24 @@ void rt_thread_idle_init(void)
         /* startup */
         rt_thread_startup(&idle[i]);
     }
+
+#ifdef RT_USING_SMP
+    RT_ASSERT(RT_THREAD_PRIORITY_MAX > 2);
+
+    rt_sem_init(&system_sem, "defunct", 1, RT_IPC_FLAG_FIFO);
+
+    /* create defunct thread */
+    rt_thread_init(&rt_system_thread,
+            "tsystem",
+            rt_thread_system_entry,
+            RT_NULL,
+            rt_system_stack,
+            sizeof(rt_system_stack),
+            RT_THREAD_PRIORITY_MAX - 2,
+            32);
+    /* startup */
+    rt_thread_startup(&rt_system_thread);
+#endif
 }
 
 /**
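
The reworked rt_defunct_execute() above now runs thread->cleanup before releasing a dead thread, on both the idle path (single core) and the new "tsystem" thread (SMP). A hedged sketch of a dynamically created thread whose cleanup hook is reached through that path; the worker names are illustrative:

#include <rtthread.h>

/* called from the defunct path after the thread has exited */
static void worker_cleanup(struct rt_thread *tid)
{
    rt_kprintf("%s finished, releasing its resources\n", tid->name);
}

static void worker_entry(void *parameter)
{
    /* do the work, then simply return; _rt_thread_exit() enqueues the thread as defunct */
}

void start_worker(void)
{
    rt_thread_t tid = rt_thread_create("worker", worker_entry, RT_NULL,
                                       1024, 20, 10);
    if (tid != RT_NULL)
    {
        tid->cleanup = worker_cleanup;   /* reaped later by rt_defunct_execute() */
        rt_thread_startup(tid);
    }
}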

+ 0 - 5
src/scheduler.c

@@ -47,8 +47,6 @@ struct rt_thread *rt_current_thread = RT_NULL;
 rt_uint8_t rt_current_priority;
 #endif /* RT_USING_SMP */
 
-rt_list_t rt_thread_defunct;
-
 #ifdef RT_USING_HOOK
 static void (*rt_scheduler_hook)(struct rt_thread *from, struct rt_thread *to);
 
@@ -224,9 +222,6 @@ void rt_system_scheduler_init(void)
     /* initialize ready table */
     rt_memset(rt_thread_ready_table, 0, sizeof(rt_thread_ready_table));
 #endif /* RT_THREAD_PRIORITY_MAX > 32 */
-
-    /* initialize thread defunct */
-    rt_list_init(&rt_thread_defunct);
 }
 
 /**

+ 25 - 15
src/thread.c

@@ -32,8 +32,6 @@
 #include <rthw.h>
 #include <rtthread.h>
 
-extern rt_list_t rt_thread_defunct;
-
 #ifdef RT_USING_HOOK
 static void (*rt_thread_suspend_hook)(rt_thread_t thread);
 static void (*rt_thread_resume_hook) (rt_thread_t thread);
@@ -131,7 +129,7 @@ static void _rt_thread_exit(void)
     else
     {
         /* insert to defunct thread list */
-        rt_list_insert_after(&rt_thread_defunct, &(thread->tlist));
+        rt_thread_defunct_enqueue(thread);
     }
 
     /* switch to next task */
@@ -379,17 +377,22 @@ rt_err_t rt_thread_detach(rt_thread_t thread)
     /* release thread timer */
     rt_timer_detach(&(thread->thread_timer));
 
-    /* disable interrupt */
-    lock = rt_hw_interrupt_disable();
-
     /* change stat */
     thread->stat = RT_THREAD_CLOSE;
 
-    /* detach thread object */
-    rt_object_detach((rt_object_t)thread);
-
-    /* enable interrupt */
-    rt_hw_interrupt_enable(lock);
+    if (rt_object_is_systemobject((rt_object_t)thread) == RT_TRUE)
+    {
+        rt_object_detach((rt_object_t)thread);
+    }
+    else
+    {
+        /* disable interrupt */
+        lock = rt_hw_interrupt_disable();
+        /* insert to defunct thread list */
+        rt_thread_defunct_enqueue(thread);
+        /* enable interrupt */
+        rt_hw_interrupt_enable(lock);
+    }
 
     return RT_EOK;
 }
@@ -484,7 +487,7 @@ rt_err_t rt_thread_delete(rt_thread_t thread)
     thread->stat = RT_THREAD_CLOSE;
 
     /* insert to defunct thread list */
-    rt_list_insert_after(&rt_thread_defunct, &(thread->tlist));
+    rt_thread_defunct_enqueue(thread);
 
     /* enable interrupt */
     rt_hw_interrupt_enable(lock);
@@ -847,12 +850,12 @@ rt_err_t rt_thread_resume(rt_thread_t thread)
 
     rt_timer_stop(&thread->thread_timer);
 
-    /* enable interrupt */
-    rt_hw_interrupt_enable(temp);
-
     /* insert to schedule ready list */
     rt_schedule_insert_thread(thread);
 
+    /* enable interrupt */
+    rt_hw_interrupt_enable(temp);
+
     RT_OBJECT_HOOK_CALL(rt_thread_resume_hook, (thread));
     return RT_EOK;
 }
@@ -867,6 +870,7 @@ RTM_EXPORT(rt_thread_resume);
 void rt_thread_timeout(void *parameter)
 {
     struct rt_thread *thread;
+    register rt_base_t temp;
 
     thread = (struct rt_thread *)parameter;
 
@@ -875,6 +879,9 @@ void rt_thread_timeout(void *parameter)
     RT_ASSERT((thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_SUSPEND);
     RT_ASSERT(rt_object_get_type((rt_object_t)thread) == RT_Object_Class_Thread);
 
+    /* disable interrupt */
+    temp = rt_hw_interrupt_disable();
+
     /* set error number */
     thread->error = -RT_ETIMEOUT;
 
@@ -884,6 +891,9 @@ void rt_thread_timeout(void *parameter)
     /* insert to schedule ready list */
     rt_schedule_insert_thread(thread);
 
+    /* enable interrupt */
+    rt_hw_interrupt_enable(temp);
+
     /* do schedule */
     rt_schedule();
 }
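
With this change rt_thread_detach() only detaches statically initialized (system-object) threads on the spot; anything else goes through the defunct queue so its cleanup can run. A hedged sketch of the static case, using the same rt_thread_init() signature seen in src/idle.c above:

#include <rtthread.h>

ALIGN(RT_ALIGN_SIZE)
static rt_uint8_t demo_stack[512];
static struct rt_thread demo_thread;

static void demo_entry(void *parameter)
{
    while (1)
    {
        rt_thread_mdelay(1000);
    }
}

void demo_start_and_stop(void)
{
    rt_thread_init(&demo_thread, "demo", demo_entry, RT_NULL,
                   demo_stack, sizeof(demo_stack), 20, 10);
    rt_thread_startup(&demo_thread);

    /* ... later: the thread is a system object, so it is detached
     * immediately instead of being enqueued as defunct */
    rt_thread_detach(&demo_thread);
}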