
Expose the spinlock-related functions

shaojinchun 5 years ago
commit cb07e5fb24
7 changed files with 148 additions and 3 deletions
  1. include/rtdef.h (+1 -0)
  2. include/rthw.h (+14 -0)
  3. libcpu/arm/cortex-a/cpu.c (+6 -0)
  4. libcpu/risc-v/k210/cpuport_smp.c (+5 -0)
  5. src/cpu.c (+113 -0)
  6. src/scheduler.c (+8 -3)
  7. src/thread.c (+1 -0)

+ 1 - 0
include/rtdef.h

@@ -577,6 +577,7 @@ struct rt_thread
 
     rt_uint16_t scheduler_lock_nest;                    /**< scheduler lock count */
     rt_uint16_t cpus_lock_nest;                         /**< cpus lock count */
+    rt_uint16_t critical_lock_nest;                     /**< critical lock count */
 #endif /*RT_USING_SMP*/
 
     /* priority */

+ 14 - 0
include/rthw.h

@@ -143,6 +143,12 @@ typedef union {
     } tickets;
 } rt_hw_spinlock_t;
 
+typedef struct
+{
+    rt_hw_spinlock_t lock;
+} rt_spinlock_t;
+
+void rt_hw_spin_lock_init(rt_hw_spinlock_t *lock);
 void rt_hw_spin_lock(rt_hw_spinlock_t *lock);
 void rt_hw_spin_unlock(rt_hw_spinlock_t *lock);
 
@@ -181,8 +187,16 @@ void rt_hw_secondary_cpu_idle_exec(void);
 #define rt_hw_spin_lock(lock)     *(lock) = rt_hw_interrupt_disable()
 #define rt_hw_spin_unlock(lock)   rt_hw_interrupt_enable(*(lock))
 
+typedef int rt_spinlock_t;
+
 #endif
 
+void rt_spin_lock_init(rt_spinlock_t *lock);
+void rt_spin_lock(rt_spinlock_t *lock);
+void rt_spin_unlock(rt_spinlock_t *lock);
+rt_base_t rt_spin_lock_irqsave(rt_spinlock_t *lock);
+void rt_spin_unlock_irqrestore(rt_spinlock_t *lock, rt_base_t level);
+
 #ifdef __cplusplus
 }
 #endif
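
With these declarations made public, kernel code and drivers can take a spinlock through the rt_spin_* API rather than the rt_hw_* layer. A minimal usage sketch (demo_lock/demo_count are illustrative names, not part of this commit; the functions themselves are implemented in src/cpu.c below, where the non-SMP build falls back to rt_enter_critical()/rt_exit_critical()):

    #include <rtthread.h>
    #include <rthw.h>

    static rt_spinlock_t demo_lock;    /* protects demo_count */
    static rt_uint32_t   demo_count;

    void demo_init(void)
    {
        rt_spin_lock_init(&demo_lock);
    }

    void demo_add(void)
    {
        /* thread context: pair lock/unlock tightly around the shared data */
        rt_spin_lock(&demo_lock);
        demo_count++;
        rt_spin_unlock(&demo_lock);
    }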

+ 6 - 0
libcpu/arm/cortex-a/cpu.c

@@ -14,6 +14,7 @@
 #include <board.h>
 
 #ifdef RT_USING_SMP
+
 int rt_hw_cpu_id(void)
 {
     int cpu_id;
@@ -25,6 +26,11 @@ int rt_hw_cpu_id(void)
     return cpu_id;
 };
 
+void rt_hw_spin_lock_init(rt_hw_spinlock_t *lock)
+{
+    lock->slock = 0;
+}
+
 void rt_hw_spin_lock(rt_hw_spinlock_t *lock)
 {
     unsigned long tmp;

+ 5 - 0
libcpu/risc-v/k210/cpuport_smp.c

@@ -25,6 +25,11 @@ int rt_hw_cpu_id(void)
     return read_csr(mhartid);
 }
 
+void rt_hw_spin_lock_init(rt_hw_spinlock_t *lock)
+{
+    ((spinlock_t *)lock)->lock = 0;
+}
+
 void rt_hw_spin_lock(rt_hw_spinlock_t *lock)
 {
     spinlock_lock((spinlock_t *)lock);

+ 113 - 0
src/cpu.c

@@ -11,6 +11,119 @@
 #include <rtthread.h>
 #include <rthw.h>
 
+#ifdef RT_USING_SMP
+/***********************************
+ * disable preemption (lock scheduler on local cpu)
+ ***********************************/
+static void rt_preempt_disable(void)
+{
+    register rt_base_t level;
+    struct rt_thread *current_thread;
+
+    /* disable interrupt */
+    level = rt_hw_local_irq_disable();
+
+    current_thread = rt_cpu_self()->current_thread;
+    if (!current_thread)
+    {
+        rt_hw_local_irq_enable(level);
+        return;
+    }
+
+    /* lock scheduler for local cpu */
+    current_thread->scheduler_lock_nest ++;
+
+    /* enable interrupt */
+    rt_hw_local_irq_enable(level);
+}
+
+/***********************************
+ * enable preemption (unlock scheduler on local cpu)
+ ***********************************/
+static void rt_preempt_enable(void)
+{
+    register rt_base_t level;
+    struct rt_thread *current_thread;
+
+    /* disable interrupt */
+    level = rt_hw_local_irq_disable();
+
+    current_thread = rt_cpu_self()->current_thread;
+    if (!current_thread)
+    {
+        rt_hw_local_irq_enable(level);
+        return;
+    }
+
+    /* unlock scheduler for local cpu */
+    current_thread->scheduler_lock_nest --;
+
+    rt_schedule();
+    /* enable interrupt */
+    rt_hw_local_irq_enable(level);
+}
+#endif
+
+void rt_spin_lock_init(rt_spinlock_t *lock)
+{
+#ifdef RT_USING_SMP
+    rt_hw_spin_lock_init(&lock->lock);
+#endif
+}
+RTM_EXPORT(rt_spin_lock_init)
+
+void rt_spin_lock(rt_spinlock_t *lock)
+{
+#ifdef RT_USING_SMP
+    rt_preempt_disable();
+    rt_hw_spin_lock(&lock->lock);
+#else
+    rt_enter_critical();
+#endif
+}
+RTM_EXPORT(rt_spin_lock)
+
+void rt_spin_unlock(rt_spinlock_t *lock)
+{
+#ifdef RT_USING_SMP
+    rt_hw_spin_unlock(&lock->lock);
+    rt_preempt_enable();
+#else
+    rt_exit_critical();
+#endif
+}
+RTM_EXPORT(rt_spin_unlock)
+
+rt_base_t rt_spin_lock_irqsave(rt_spinlock_t *lock)
+{
+#ifdef RT_USING_SMP
+    rt_base_t level;
+
+    rt_preempt_disable();
+
+    level = rt_hw_local_irq_disable();
+    rt_hw_spin_lock(&lock->lock);
+
+    return level;
+#else
+    return rt_hw_interrupt_disable();
+#endif
+}
+RTM_EXPORT(rt_spin_lock_irqsave)
+
+void rt_spin_unlock_irqrestore(rt_spinlock_t *lock, rt_base_t level)
+{
+#ifdef RT_USING_SMP
+    rt_hw_spin_unlock(&lock->lock);
+    rt_hw_local_irq_enable(level);
+
+    rt_preempt_enable();
+#else
+    rt_hw_interrupt_enable(level);
+#endif
+}
+RTM_EXPORT(rt_spin_unlock_irqrestore)
+
 #ifdef RT_USING_SMP
 
 static struct rt_cpu rt_cpus[RT_CPUS_NR];
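
When the protected data is also touched from an interrupt handler, the irqsave variant is the appropriate choice, since it masks local interrupts for as long as the lock is held. A sketch under that assumption (the FIFO names and size are hypothetical, not part of this commit; an ISR sharing the buffer would take the same lock/level pair):

    static rt_spinlock_t fifo_lock;
    static rt_uint8_t    fifo_buf[16];
    static rt_size_t     fifo_len;

    void fifo_push(rt_uint8_t byte)
    {
        rt_base_t level;

        /* SMP: disable preemption, mask local interrupts, then spin for the lock;
           non-SMP: reduces to rt_hw_interrupt_disable()/rt_hw_interrupt_enable() */
        level = rt_spin_lock_irqsave(&fifo_lock);
        if (fifo_len < sizeof(fifo_buf))
        {
            fifo_buf[fifo_len++] = byte;
        }
        rt_spin_unlock_irqrestore(&fifo_lock, level);
    }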

+ 8 - 3
src/scheduler.c

@@ -840,11 +840,14 @@ void rt_enter_critical(void)
      */
 
     /* lock scheduler for all cpus */
-    if (current_thread->scheduler_lock_nest == !!current_thread->cpus_lock_nest)
+    if (current_thread->critical_lock_nest == 0)
     {
         rt_hw_spin_lock(&_rt_critical_lock);
     }
 
+    /* critical for local cpu */
+    current_thread->critical_lock_nest ++;
+
     /* lock scheduler for local cpu */
     current_thread->scheduler_lock_nest ++;
 
@@ -892,7 +895,9 @@ void rt_exit_critical(void)
 
     current_thread->scheduler_lock_nest --;
 
-    if (current_thread->scheduler_lock_nest == !!current_thread->cpus_lock_nest)
+    current_thread->critical_lock_nest --;
+
+    if (current_thread->critical_lock_nest == 0)
     {
         rt_hw_spin_unlock(&_rt_critical_lock);
     }
@@ -951,7 +956,7 @@ rt_uint16_t rt_critical_level(void)
 #ifdef RT_USING_SMP
     struct rt_thread *current_thread = rt_cpu_self()->current_thread;
 
-    return current_thread->scheduler_lock_nest;
+    return current_thread->critical_lock_nest;
 #else
 	return rt_scheduler_lock_nest;
 #endif /*RT_USING_SMP*/
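
The dedicated critical_lock_nest counter makes the nesting behaviour explicit: the global _rt_critical_lock is taken only by the outermost rt_enter_critical() and released by the matching outermost rt_exit_critical(), and rt_critical_level() now reports the critical-section depth rather than the scheduler lock depth. A small sketch of the expected counts on the current thread (SMP build, assuming no other locks are held):

    rt_enter_critical();    /* critical_lock_nest 0 -> 1, _rt_critical_lock taken    */
    rt_enter_critical();    /* critical_lock_nest 1 -> 2, lock already held          */
    RT_ASSERT(rt_critical_level() == 2);
    rt_exit_critical();     /* critical_lock_nest 2 -> 1                              */
    rt_exit_critical();     /* critical_lock_nest 1 -> 0, _rt_critical_lock released  */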

+ 1 - 0
src/thread.c

@@ -172,6 +172,7 @@ static rt_err_t _rt_thread_init(struct rt_thread *thread,
     /* lock init */
     thread->scheduler_lock_nest = 0;
     thread->cpus_lock_nest = 0;
+    thread->critical_lock_nest = 0;
 #endif /*RT_USING_SMP*/
 
     /* initialize cleanup function and user data */