
[kernel] Add hook for malloc services

Includes the memory allocation hook patch, the accompanying test suites,
and fixes for issues found by static analysis.

Signed-off-by: Shell <smokewood@qq.com>
Shell · 1 year ago · commit 3a7903b799
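
The reworked heap hooks below (see include/rtthread.h and src/kservice.c) now take a pointer to the pointer, so a hook can observe, and in principle adjust, the address as it passes through rt_malloc/rt_realloc/rt_free. A minimal registration sketch against the new signatures; the hook names and trace output are illustrative, not part of the patch:

#include <rtthread.h>

/* Hypothetical tracing hooks -- note the double-pointer signature introduced
 * by this patch. */
static void malloc_trace(void **ptr, rt_size_t size)
{
    rt_kprintf("malloc %d bytes -> %p\n", (int)size, *ptr);
}

static void realloc_entry_trace(void **ptr, rt_size_t size)
{
    rt_kprintf("realloc %p to %d bytes\n", *ptr, (int)size);
}

static void realloc_exit_trace(void **ptr, rt_size_t size)
{
    rt_kprintf("realloc done -> %p\n", *ptr);
}

static void free_trace(void **ptr)
{
    rt_kprintf("free %p\n", *ptr);
}

static int heap_trace_init(void)
{
#ifdef RT_USING_HOOK
    rt_malloc_sethook(malloc_trace);
    rt_realloc_set_entry_hook(realloc_entry_trace);
    rt_realloc_set_exit_hook(realloc_exit_trace);
    rt_free_sethook(free_trace);
#endif
    return 0;
}
INIT_APP_EXPORT(heap_trace_init);
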

+ 4 - 5
components/finsh/msh.c

@@ -53,7 +53,7 @@ int msh_help(int argc, char **argv)
 
     return 0;
 }
-MSH_CMD_EXPORT_ALIAS(msh_help, help, RT-Thread shell help.);
+MSH_CMD_EXPORT_ALIAS(msh_help, help, RT-Thread shell help);
 
 #ifdef MSH_USING_BUILT_IN_COMMANDS
 int cmd_ps(int argc, char **argv)
@@ -69,7 +69,7 @@ int cmd_ps(int argc, char **argv)
     list_thread();
     return 0;
 }
-MSH_CMD_EXPORT_ALIAS(cmd_ps, ps, List threads in the system.);
+MSH_CMD_EXPORT_ALIAS(cmd_ps, ps, List threads in the system);
 
 #ifdef RT_USING_HEAP
 int cmd_free(int argc, char **argv)
@@ -88,7 +88,7 @@ int cmd_free(int argc, char **argv)
 #endif
     return 0;
 }
-MSH_CMD_EXPORT_ALIAS(cmd_free, free, Show the memory usage in the system.);
+MSH_CMD_EXPORT_ALIAS(cmd_free, free, Show the memory usage in the system);
 #endif /* RT_USING_HEAP */
 #endif /* MSH_USING_BUILT_IN_COMMANDS */
 
@@ -296,8 +296,7 @@ static int _msh_exec_cmd(char *cmd, rt_size_t length, int *retp)
 }
 
 #if defined(RT_USING_SMART) && defined(DFS_USING_POSIX)
-pid_t exec(char*, int, int, char**);
-
+#include <lwp.h>
 /* check whether a file of the given path exists */
 static rt_bool_t _msh_lwp_cmd_exists(const char *path)
 {

+ 2 - 0
components/lwp/lwp.h

@@ -208,6 +208,8 @@ void lwp_uthread_ctx_restore(void);
 
 int lwp_setaffinity(pid_t pid, int cpu);
 
+pid_t exec(char *filename, int debug, int argc, char **argv);
+
 /* ctime lwp API */
 int timer_list_free(rt_list_t *timer_list);
 

+ 2 - 0
components/lwp/lwp_user_mm.c

@@ -148,7 +148,9 @@ void lwp_aspace_switch(struct rt_thread *thread)
         aspace = lwp->aspace;
     }
     else
+    {
         aspace = &rt_kernel_space;
+    }
 
     from_tbl = rt_hw_mmu_tbl_get();
     if (aspace->page_table != from_tbl)

+ 12 - 0
include/rtdef.h

@@ -198,6 +198,7 @@ typedef rt_base_t                       rt_off_t;       /**< Type for offset */
 #define rt_used                     __attribute__((used))
 #define rt_align(n)                 __attribute__((aligned(n)))
 #define rt_weak                     __attribute__((weak))
+#define rt_noreturn
 #define rt_inline                   static __inline
 #define rt_always_inline            rt_inline
 #elif defined (__IAR_SYSTEMS_ICC__)     /* for IAR Compiler */
@@ -206,6 +207,7 @@ typedef rt_base_t                       rt_off_t;       /**< Type for offset */
 #define PRAGMA(x)                   _Pragma(#x)
 #define rt_align(n)                    PRAGMA(data_alignment=n)
 #define rt_weak                     __weak
+#define rt_noreturn
 #define rt_inline                   static inline
 #define rt_always_inline            rt_inline
 #elif defined (__GNUC__)                /* GNU GCC Compiler */
@@ -223,6 +225,7 @@ typedef rt_base_t                       rt_off_t;       /**< Type for offset */
 #define rt_used                     __attribute__((used))
 #define rt_align(n)                 __attribute__((aligned(n)))
 #define rt_weak                     __attribute__((weak))
+#define rt_noreturn
 #define rt_inline                   static inline
 #define rt_always_inline            rt_inline
 #elif defined (_MSC_VER)
@@ -230,6 +233,7 @@ typedef rt_base_t                       rt_off_t;       /**< Type for offset */
 #define rt_used
 #define rt_align(n)                 __declspec(align(n))
 #define rt_weak
+#define rt_noreturn
 #define rt_inline                   static __inline
 #define rt_always_inline            rt_inline
 #elif defined (__TI_COMPILER_VERSION__)
@@ -400,6 +404,7 @@ typedef int (*init_fn_t)(void);
 #define RT_ENOSPC                       ENOSPC          /**< No space left */
 #define RT_EPERM                        EPERM           /**< Operation not permitted */
 #define RT_EFAULT                       EFAULT          /**< Bad address */
+#define RT_ENOBUFS                      ENOBUFS         /**< No buffer space is available */
 #define RT_ETRAP                        254             /**< Trap event */
 #else
 #define RT_EOK                          0               /**< There is no error */
@@ -418,6 +423,7 @@ typedef int (*init_fn_t)(void);
 #define RT_EPERM                        13              /**< Operation not permitted */
 #define RT_ETRAP                        14              /**< Trap event */
 #define RT_EFAULT                       15              /**< Bad address */
+#define RT_ENOBUFS                      16              /**< No buffer space is available */
 #endif /* defined(RT_USING_LIBC) && !defined(RT_USING_NANO) */
 
 /**@}*/
@@ -604,6 +610,12 @@ struct rt_object_information
 #ifndef __on_rt_malloc_hook
     #define __on_rt_malloc_hook(addr, size)         __ON_HOOK_ARGS(rt_malloc_hook, (addr, size))
 #endif
+#ifndef __on_rt_realloc_entry_hook
+    #define __on_rt_realloc_entry_hook(addr, size)  __ON_HOOK_ARGS(rt_realloc_entry_hook, (addr, size))
+#endif
+#ifndef __on_rt_realloc_exit_hook
+    #define __on_rt_realloc_exit_hook(addr, size)   __ON_HOOK_ARGS(rt_realloc_exit_hook, (addr, size))
+#endif
 #ifndef __on_rt_free_hook
     #define __on_rt_free_hook(rmem)                 __ON_HOOK_ARGS(rt_free_hook, (rmem))
 #endif

+ 4 - 2
include/rtthread.h

@@ -287,8 +287,10 @@ void rt_page_free(void *addr, rt_size_t npages);
 #endif /* defined(RT_USING_SLAB) && defined(RT_USING_SLAB_AS_HEAP) */
 
 #ifdef RT_USING_HOOK
-void rt_malloc_sethook(void (*hook)(void *ptr, rt_size_t size));
-void rt_free_sethook(void (*hook)(void *ptr));
+void rt_malloc_sethook(void (*hook)(void **ptr, rt_size_t size));
+void rt_realloc_set_entry_hook(void (*hook)(void **ptr, rt_size_t size));
+void rt_realloc_set_exit_hook(void (*hook)(void **ptr, rt_size_t size));
+void rt_free_sethook(void (*hook)(void **ptr));
 #endif /* RT_USING_HOOK */
 
 #endif /* RT_USING_HEAP */

+ 9 - 0
libcpu/aarch64/common/backtrace.c

@@ -24,10 +24,19 @@
 #define INST_WORD_BYTES                 4
 #define WORD                            sizeof(rt_base_t)
 #define ARCH_CONTEXT_FETCH(pctx, id)    (*(((unsigned long *)pctx) + (id)))
+#define PTR_NORMALIZE(ptr)              (ptr = rt_backtrace_ptr_normalize(ptr))
+
+rt_weak void *rt_backtrace_ptr_normalize(void *ptr)
+{
+    return ptr;
+}
 
 rt_inline rt_err_t _bt_kaddr(rt_ubase_t *fp, struct rt_hw_backtrace_frame *frame)
 {
     rt_err_t rc;
+
+    PTR_NORMALIZE(fp);
+
     frame->fp = *fp;
     frame->pc = *(fp + 1) - INST_WORD_BYTES;
 

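The weak rt_backtrace_ptr_normalize() added above is an identity function by default. A port that tags kernel pointers (for example when TBI is enabled, see the mmu.c change further down) could override it roughly as follows; the tag layout and mask are assumptions, not part of this commit:

#include <rtthread.h>

/* Hypothetical override: strip a software tag kept in pointer bits 63:56 before
 * the backtrace walker dereferences the frame pointer. Only meaningful on ports
 * that enable top-byte-ignore and actually tag their pointers. */
void *rt_backtrace_ptr_normalize(void *ptr)
{
    return (void *)((rt_ubase_t)ptr & ~(0xffUL << 56));
}
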
+ 2 - 18
libcpu/aarch64/common/cpu.c

@@ -152,7 +152,7 @@ static int _cpus_init(int num_cpus, rt_uint64_t *cpu_hw_ids, struct cpu_ops_t *c
     // assuming that cpuid 0 has already init
     for (int i = 1; i < RT_CPUS_NR; i++)
     {
-        if (cpuid_to_hwid(i) == ID_ERROR)
+        if (rt_cpu_mpidr_early[i] == ID_ERROR)
         {
             LOG_E("Failed to find hardware id of CPU %d", i);
             continue;
@@ -166,7 +166,7 @@ static int _cpus_init(int num_cpus, rt_uint64_t *cpu_hw_ids, struct cpu_ops_t *c
         else
         {
             LOG_E("Failed to find cpu_init for cpu %d with cpu_ops[%p], cpu_ops->cpu_init[%p]"
-                , cpuid_to_hwid(i), cpu_ops_tbl[i], cpu_ops_tbl[i] ? cpu_ops_tbl[i]->cpu_init : NULL);
+                , rt_cpu_mpidr_early[i], cpu_ops_tbl[i], cpu_ops_tbl[i] ? cpu_ops_tbl[i]->cpu_init : NULL);
         }
     }
     return 0;
@@ -204,20 +204,4 @@ const char *rt_hw_cpu_arch(void)
     return "aarch64";
 }
 
-#ifdef RT_USING_CPU_FFS
-/**
- * This function finds the first bit set (beginning with the least significant bit)
- * in value and return the index of that bit.
- *
- * Bits are numbered starting at 1 (the least significant bit).  A return value of
- * zero from any of these functions means that the argument was zero.
- *
- * @return return the index of the first bit set. If value is 0, then this function
- * shall return 0.
- */
-int __rt_ffs(int value)
-{
-    return __builtin_ffs(value);
-}
-#endif
 /*@}*/

+ 33 - 0
libcpu/aarch64/common/cpuport.h

@@ -5,6 +5,8 @@
  *
  * Change Logs:
  * Date           Author       Notes
+ * 2023-10-25     Shell        Move ffs to cpuport, add general implementation
+ *                             by inline assembly
  */
 
 #ifndef  CPUPORT_H__
@@ -35,4 +37,35 @@ typedef struct {
 #define rt_hw_cpu_relax() rt_hw_barrier(yield)
 
 void _thread_start(void);
+
+#ifdef RT_USING_CPU_FFS
+/**
+ * This function finds the first bit set (beginning with the least significant bit)
+ * in value and return the index of that bit.
+ *
+ * Bits are numbered starting at 1 (the least significant bit).  A return value of
+ * zero from any of these functions means that the argument was zero.
+ *
+ * @return return the index of the first bit set. If value is 0, then this function
+ * shall return 0.
+ */
+rt_inline int __rt_ffs(int value)
+{
+#ifdef __GNUC__
+    return __builtin_ffs(value);
+#else
+    __asm__ volatile (
+        "rbit w1, %w0\n"
+        "cmp %w0, 0\n"
+        "clz w1, w1\n"
+        "csinc %w0, wzr, w1, eq\n"
+        : "=r"(value)
+        : "0"(value)
+    );
+    return value;
+#endif
+}
+
+#endif /* RT_USING_CPU_FFS */
+
 #endif  /*CPUPORT_H__*/
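
Since __rt_ffs() is now a header inline, callers get the 1-based bit index directly from cpuport.h. A small usage sketch; the wrapper function is illustrative only:

#include <rtthread.h>
/* cpuport.h provides __rt_ffs(); its presence on the include path is assumed */

/* Return the lowest set bit position (0-based) in a ready-priority bitmap,
 * or -1 if the bitmap is empty. __rt_ffs() is 1-based and returns 0 for 0. */
static int lowest_ready_priority(rt_uint32_t bitmap)
{
    int bit = __rt_ffs((int)bitmap);
    return (bit == 0) ? -1 : (bit - 1);
}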

+ 31 - 22
libcpu/aarch64/common/mmu.c

@@ -9,6 +9,11 @@
  * 2021-11-28     GuEe-GUI     first version
  * 2022-12-10     WangXiaoyao  porting to MM
  */
+
+#define DBG_TAG "hw.mmu"
+#define DBG_LVL DBG_LOG
+#include <rtdbg.h>
+
 #include <rthw.h>
 #include <rtthread.h>
 #include <stddef.h>
@@ -27,9 +32,8 @@
 #include <lwp_mm.h>
 #endif
 
-#define DBG_TAG "hw.mmu"
-#define DBG_LVL DBG_LOG
-#include <rtdbg.h>
+#define TCR_CONFIG_TBI0     rt_hw_mmu_config_tbi(0)
+#define TCR_CONFIG_TBI1     rt_hw_mmu_config_tbi(1)
 
 #define MMU_LEVEL_MASK   0x1ffUL
 #define MMU_LEVEL_SHIFT  9
@@ -482,6 +486,11 @@ int rt_hw_mmu_map_init(rt_aspace_t aspace, void *v_address, size_t size,
     return 0;
 }
 
+rt_weak long rt_hw_mmu_config_tbi(int tbi_index)
+{
+    return 0;
+}
+
 /************ setting el1 mmu register**************
   MAIR_EL1
   index 0 : memory outer writeback, write/read alloc
@@ -500,25 +509,25 @@ void mmu_tcr_init(void)
     pa_range = val64 & 0xf; /* PARange */
 
     /* TCR_EL1 */
-    val64 = (16UL << 0)      /* t0sz 48bit */
-            | (0x0UL << 6)   /* reserved */
-            | (0x0UL << 7)   /* epd0 */
-            | (0x3UL << 8)   /* t0 wb cacheable */
-            | (0x3UL << 10)  /* inner shareable */
-            | (0x2UL << 12)  /* t0 outer shareable */
-            | (0x0UL << 14)  /* t0 4K */
-            | (16UL << 16)   /* t1sz 48bit */
-            | (0x0UL << 22)  /* define asid use ttbr0.asid */
-            | (0x0UL << 23)  /* epd1 */
-            | (0x3UL << 24)  /* t1 inner wb cacheable */
-            | (0x3UL << 26)  /* t1 outer wb cacheable */
-            | (0x2UL << 28)  /* t1 outer shareable */
-            | (0x2UL << 30)  /* t1 4k */
-            | (pa_range << 32)  /* PA range */
-            | (0x0UL << 35)  /* reserved */
-            | (0x1UL << 36)  /* as: 0:8bit 1:16bit */
-            | (0x0UL << 37)  /* tbi0 */
-            | (0x0UL << 38); /* tbi1 */
+    val64 = (16UL << 0)                /* t0sz 48bit */
+            | (0x0UL << 6)             /* reserved */
+            | (0x0UL << 7)             /* epd0 */
+            | (0x3UL << 8)             /* t0 wb cacheable */
+            | (0x3UL << 10)            /* inner shareable */
+            | (0x2UL << 12)            /* t0 outer shareable */
+            | (0x0UL << 14)            /* t0 4K */
+            | (16UL << 16)             /* t1sz 48bit */
+            | (0x0UL << 22)            /* define asid use ttbr0.asid */
+            | (0x0UL << 23)            /* epd1 */
+            | (0x3UL << 24)            /* t1 inner wb cacheable */
+            | (0x3UL << 26)            /* t1 outer wb cacheable */
+            | (0x2UL << 28)            /* t1 outer shareable */
+            | (0x2UL << 30)            /* t1 4k */
+            | (pa_range << 32)         /* PA range */
+            | (0x0UL << 35)            /* reserved */
+            | (0x1UL << 36)            /* as: 0:8bit 1:16bit */
+            | (TCR_CONFIG_TBI0 << 37)  /* tbi0 */
+            | (TCR_CONFIG_TBI1 << 38); /* tbi1 */
     __asm__ volatile("msr TCR_EL1, %0\n" ::"r"(val64));
 }
 

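rt_hw_mmu_config_tbi() defaults to 0, keeping both TBI bits clear; its return value is shifted into TCR_EL1 bit 37 (TBI0) or bit 38 (TBI1) by mmu_tcr_init(). A board that wants tagged low-half addresses could override the weak default along these lines; this override is a sketch, not part of the commit:

#include <rtthread.h>

/* Hypothetical board-level override: enable top-byte-ignore for TTBR0
 * translations only, leaving TTBR1 (kernel) addresses untagged. */
long rt_hw_mmu_config_tbi(int tbi_index)
{
    return (tbi_index == 0) ? 1 : 0;
}
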
+ 2 - 2
libcpu/aarch64/common/mmu.h

@@ -110,11 +110,11 @@ void rt_hw_mmu_kernel_map_init(struct rt_aspace *aspace, rt_size_t vaddr_start,
 void *rt_hw_mmu_pgtbl_create(void);
 void rt_hw_mmu_pgtbl_delete(void *pgtbl);
 
-static inline void *rt_hw_mmu_tbl_get()
+rt_inline void *rt_hw_mmu_tbl_get()
 {
     uintptr_t tbl;
     __asm__ volatile("MRS %0, TTBR0_EL1" : "=r"(tbl));
-    return (void *)(tbl & ((1ul << 48) - 2));
+    return rt_kmem_p2v((void *)(tbl & ((1ul << 48) - 2)));
 }
 
 static inline void *rt_hw_mmu_kernel_v2p(void *v_addr)

+ 48 - 14
src/kservice.c

@@ -24,6 +24,7 @@
  * 2022-08-24     Yunjie       make rt_memset word-independent to adapt to ti c28x (16bit word)
  * 2022-08-30     Yunjie       make rt_vsnprintf adapt to ti c28x (16bit int)
  * 2023-02-02     Bernard      add Smart ID for logo version show
+ * 2023-10-16     Shell        Add hook point for rt_malloc services
  * 2023-12-10     xqyjlj       perf rt_hw_interrupt_disable/enable, fix memheap lock
  */
 
@@ -1660,8 +1661,10 @@ MSH_CMD_EXPORT_ALIAS(cmd_backtrace, backtrace, print backtrace of a thread);
 
 #if defined(RT_USING_HEAP) && !defined(RT_USING_USERHEAP)
 #ifdef RT_USING_HOOK
-static void (*rt_malloc_hook)(void *ptr, rt_size_t size);
-static void (*rt_free_hook)(void *ptr);
+static void (*rt_malloc_hook)(void **ptr, rt_size_t size);
+static void (*rt_realloc_entry_hook)(void **ptr, rt_size_t size);
+static void (*rt_realloc_exit_hook)(void **ptr, rt_size_t size);
+static void (*rt_free_hook)(void **ptr);
 
 /**
  * @addtogroup Hook
@@ -1674,18 +1677,40 @@ static void (*rt_free_hook)(void *ptr);
  *
  * @param hook the hook function.
  */
-void rt_malloc_sethook(void (*hook)(void *ptr, rt_size_t size))
+void rt_malloc_sethook(void (*hook)(void **ptr, rt_size_t size))
 {
     rt_malloc_hook = hook;
 }
 
+/**
+ * @brief This function will set a hook function, which will be invoked on entry to
+ *        rt_realloc(), before a memory block is reallocated in heap memory.
+ *
+ * @param hook the hook function.
+ */
+void rt_realloc_set_entry_hook(void (*hook)(void **ptr, rt_size_t size))
+{
+    rt_realloc_entry_hook = hook;
+}
+
+/**
+ * @brief This function will set a hook function, which will be invoked on exit from
+ *        rt_realloc(), after a memory block has been reallocated in heap memory.
+ *
+ * @param hook the hook function.
+ */
+void rt_realloc_set_exit_hook(void (*hook)(void **ptr, rt_size_t size))
+{
+    rt_realloc_exit_hook = hook;
+}
+
 /**
  * @brief This function will set a hook function, which will be invoked when a memory
  *        block is released to heap memory.
  *
  * @param hook the hook function
  */
-void rt_free_sethook(void (*hook)(void *ptr))
+void rt_free_sethook(void (*hook)(void **ptr))
 {
     rt_free_hook = hook;
 }
@@ -1817,14 +1842,7 @@ rt_inline void _slab_info(rt_size_t *total,
 #define _MEM_INFO(...)
 #endif
 
-/**
- * @brief This function will init system heap.
- *
- * @param begin_addr the beginning address of system page.
- *
- * @param end_addr the end address of system page.
- */
-rt_weak void rt_system_heap_init(void *begin_addr, void *end_addr)
+void _rt_system_heap_init(void *begin_addr, void *end_addr)
 {
     rt_ubase_t begin_align = RT_ALIGN((rt_ubase_t)begin_addr, RT_ALIGN_SIZE);
     rt_ubase_t end_align   = RT_ALIGN_DOWN((rt_ubase_t)end_addr, RT_ALIGN_SIZE);
@@ -1837,6 +1855,18 @@ rt_weak void rt_system_heap_init(void *begin_addr, void *end_addr)
     _heap_lock_init();
 }
 
+/**
+ * @brief This function will init system heap.
+ *
+ * @param begin_addr the beginning address of system page.
+ *
+ * @param end_addr the end address of system page.
+ */
+rt_weak void rt_system_heap_init(void *begin_addr, void *end_addr)
+{
+    _rt_system_heap_init(begin_addr, end_addr);
+}
+
 /**
  * @brief Allocate a block of memory with a minimum of 'size' bytes.
  *
@@ -1856,7 +1886,7 @@ rt_weak void *rt_malloc(rt_size_t size)
     /* Exit critical zone */
     _heap_unlock(level);
     /* call 'rt_malloc' hook */
-    RT_OBJECT_HOOK_CALL(rt_malloc_hook, (ptr, size));
+    RT_OBJECT_HOOK_CALL(rt_malloc_hook, (&ptr, size));
     return ptr;
 }
 RTM_EXPORT(rt_malloc);
@@ -1875,12 +1905,16 @@ rt_weak void *rt_realloc(void *ptr, rt_size_t newsize)
     rt_base_t level;
     void *nptr;
 
+    /* Entry hook */
+    RT_OBJECT_HOOK_CALL(rt_realloc_entry_hook, (&ptr, newsize));
     /* Enter critical zone */
     level = _heap_lock();
     /* Change the size of previously allocated memory block */
     nptr = _MEM_REALLOC(ptr, newsize);
     /* Exit critical zone */
     _heap_unlock(level);
+    /* Exit hook */
+    RT_OBJECT_HOOK_CALL(rt_realloc_exit_hook, (&nptr, newsize));
     return nptr;
 }
 RTM_EXPORT(rt_realloc);
@@ -1924,7 +1958,7 @@ rt_weak void rt_free(void *ptr)
     rt_base_t level;
 
     /* call 'rt_free' hook */
-    RT_OBJECT_HOOK_CALL(rt_free_hook, (ptr));
+    RT_OBJECT_HOOK_CALL(rt_free_hook, (&ptr));
     /* NULL check */
     if (ptr == RT_NULL) return;
     /* Enter critical zone */