Browse Source

[mm] precise & readable mm fault type (#9047)

* [smart] fixup: precise mm fault type

Also, fixup arm64 read access fault

* arm64: using meaningful macro on trap

* fixup: renaming macro
Shell 1 year ago
parent
commit
f00db6381a

+ 4 - 4
components/lwp/lwp_user_mm.h

@@ -206,13 +206,16 @@ rt_inline rt_size_t lwp_user_mm_flag_to_kernel(int flags)
     return k_flags;
 }
 
+#ifndef MMU_MAP_U_ROCB
+#define MMU_MAP_U_ROCB MMU_MAP_U_RWCB
+#endif /* MMU_MAP_U_ROCB */
+
 rt_inline rt_size_t lwp_user_mm_attr_to_kernel(int prot)
 {
     RT_UNUSED(prot);
 
     rt_size_t k_attr = 0;
 
-#ifdef IMPL_MPROTECT
     if ((prot & PROT_EXEC) || (prot & PROT_WRITE) ||
         ((prot & PROT_READ) && (prot & PROT_WRITE)))
         k_attr = MMU_MAP_U_RWCB;
@@ -220,9 +223,6 @@ rt_inline rt_size_t lwp_user_mm_attr_to_kernel(int prot)
         k_attr = MMU_MAP_K_RWCB;
     else
         k_attr = MMU_MAP_U_ROCB;
-#else
-    k_attr = MMU_MAP_U_RWCB;
-#endif /* IMPL_MPROTECT */
 
     return k_attr;
 }

+ 1 - 1
components/mm/mm_aspace.c

@@ -1743,7 +1743,7 @@ rt_err_t rt_aspace_page_put(rt_aspace_t aspace, void *page_va, void *buffer)
                     RDWR_LOCK(aspace);
                     struct rt_aspace_fault_msg msg;
                     msg.fault_op = MM_FAULT_OP_WRITE;
-                    msg.fault_type = MM_FAULT_TYPE_ACCESS_FAULT;
+                    msg.fault_type = MM_FAULT_TYPE_GENERIC_MMU;
                     msg.fault_vaddr = page_va;
                     rc = rt_varea_fix_private_locked(varea, rt_hw_mmu_v2p(aspace, page_va),
                                                     &msg, RT_TRUE);

+ 41 - 1
components/mm/mm_fault.c

@@ -60,7 +60,7 @@ static int _write_fault(rt_varea_t varea, void *pa, struct rt_aspace_fault_msg *
     if (rt_varea_is_private_locked(varea))
     {
         if (VAREA_IS_WRITABLE(varea) && (
-            msg->fault_type == MM_FAULT_TYPE_ACCESS_FAULT ||
+            msg->fault_type == MM_FAULT_TYPE_RWX_PERM ||
             msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT))
         {
             RDWR_LOCK(aspace);
@@ -102,6 +102,44 @@ static int _exec_fault(rt_varea_t varea, void *pa, struct rt_aspace_fault_msg *m
     return err;
 }
 
+static void _determine_precise_fault_type(struct rt_aspace_fault_msg *msg, rt_ubase_t pa, rt_varea_t varea)
+{
+    if (msg->fault_type == MM_FAULT_TYPE_GENERIC_MMU)
+    {
+        rt_base_t requesting_perm;
+        switch (msg->fault_op)
+        {
+        case MM_FAULT_OP_READ:
+            requesting_perm = RT_HW_MMU_PROT_READ | RT_HW_MMU_PROT_USER;
+            break;
+        case MM_FAULT_OP_WRITE:
+            requesting_perm = RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER;
+            break;
+        case MM_FAULT_OP_EXECUTE:
+            requesting_perm = RT_HW_MMU_PROT_EXECUTE | RT_HW_MMU_PROT_USER;
+            break;
+        }
+
+        /**
+         * Always check the user privilege bit, since dynamic permission is
+         * not supported in kernel space; kernel-side faults are never
+         * fixable. Hence adding the user-permission check never changes the
+         * outcome of the test. In other words, { 0 && (expr) } is always
+         * false.
+         */
+        if (rt_hw_mmu_attr_test_perm(varea->attr, requesting_perm))
+        {
+            if (pa == (rt_ubase_t)ARCH_MAP_FAILED)
+            {
+                msg->fault_type = MM_FAULT_TYPE_PAGE_FAULT;
+            }
+            else
+            {
+                msg->fault_type = MM_FAULT_TYPE_RWX_PERM;
+            }
+        }
+    }
+}
+
 int rt_aspace_fault_try_fix(rt_aspace_t aspace, struct rt_aspace_fault_msg *msg)
 {
     int err = MM_FAULT_FIXABLE_FALSE;
@@ -121,6 +159,8 @@ int rt_aspace_fault_try_fix(rt_aspace_t aspace, struct rt_aspace_fault_msg *msg)
         if (varea)
         {
             void *pa = rt_hw_mmu_v2p(aspace, msg->fault_vaddr);
+            _determine_precise_fault_type(msg, (rt_ubase_t)pa, varea);
+
             if (pa != ARCH_MAP_FAILED && msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT)
             {
                 LOG_D("%s(fault=%p) has already fixed", __func__, msg->fault_vaddr);

+ 11 - 2
components/mm/mm_fault.h

@@ -34,9 +34,12 @@ enum rt_mm_fault_type
 {
     /**
      * Occurs when an instruction attempts to access a memory address that it
-     * does not have permission to access
+     * does not have R/W/X permission to access
      */
-    MM_FAULT_TYPE_ACCESS_FAULT,
+    MM_FAULT_TYPE_RWX_PERM,
+
+    /* Occurs when the access lacks the required privilege (e.g. user mode accessing kernel memory) */
+    MM_FAULT_TYPE_NO_PRIVILEGES,
 
     /**
      * Occurs when a load or store instruction accesses a virtual memory
@@ -49,6 +52,12 @@ enum rt_mm_fault_type
      */
     MM_FAULT_TYPE_BUS_ERROR,
 
+    /**
+     * Occurs when a page-table walk fails, a permission check fails, or a
+     * write hits a non-dirty page.
+     */
+    MM_FAULT_TYPE_GENERIC_MMU,
+
     MM_FAULT_TYPE_GENERIC,
     __PRIVATE_PAGE_INSERT,
 };

+ 2 - 0
components/mm/mm_private.h

@@ -38,6 +38,8 @@
   ((!varea->mem_obj || !varea->mem_obj->get_name)                              \
        ? "unknow"                                                              \
        : varea->mem_obj->get_name(varea))
+
+/* only user addresses use the COW technique, so user permission is always checked */
 #define VAREA_IS_WRITABLE(varea)                                               \
   (rt_hw_mmu_attr_test_perm(varea->attr,                                       \
                             RT_HW_MMU_PROT_USER | RT_HW_MMU_PROT_WRITE))

+ 1 - 1
examples/utest/testcases/mm/lwp_mmap_fix_private.c

@@ -56,7 +56,7 @@ static void test_mmap_fix_private(void)
     char *next_va;
     struct rt_aspace_fault_msg msg;
     msg.fault_op = MM_FAULT_OP_WRITE;
-    msg.fault_type = MM_FAULT_TYPE_ACCESS_FAULT;
+    msg.fault_type = MM_FAULT_TYPE_RWX_PERM;
 
     /* map new pages at ex_vaddr to anonymous */
     next_va = ex_vaddr;

+ 25 - 0
libcpu/aarch64/common/armv8.h

@@ -149,6 +149,31 @@ rt_ubase_t rt_hw_get_current_el(void);
 void rt_hw_set_elx_env(void);
 void rt_hw_set_current_vbar(rt_ubase_t addr);
 
+/* ESR:generic */
+#define ARM64_ABORT_WNR(esr)        ((esr) & 0x40)
+#define ARM64_ESR_EXTRACT_EC(esr)   ((((esr) >> 26) & 0x3fU))
+#define ARM64_ESR_EXTRACT_FSC(esr)  ((esr) & 0x3f)
+
+/* ESR:EC */
+#define ARM64_EC_INST_ABORT_FROM_LO_EXCEPTION   (0b100000)
+#define ARM64_EC_INST_ABORT_WITHOUT_A_CHANGE    (0b100001)
+#define ARM64_EC_DATA_ABORT_FROM_LO_EXCEPTION   (0b100100)
+#define ARM64_EC_DATA_ABORT_WITHOUT_A_CHANGE    (0b100101)
+
+/* ESR:FSC */
+#define ARM64_FSC_TRANSLATION_FAULT_LEVEL_0     (0b000100)
+#define ARM64_FSC_TRANSLATION_FAULT_LEVEL_1     (0b000101)
+#define ARM64_FSC_TRANSLATION_FAULT_LEVEL_2     (0b000110)
+#define ARM64_FSC_TRANSLATION_FAULT_LEVEL_3     (0b000111)
+#define ARM64_FSC_PERMISSION_FAULT_LEVEL_0      (0b001100)
+#define ARM64_FSC_PERMISSION_FAULT_LEVEL_1      (0b001101)
+#define ARM64_FSC_PERMISSION_FAULT_LEVEL_2      (0b001110)
+#define ARM64_FSC_PERMISSION_FAULT_LEVEL_3      (0b001111)
+#define ARM64_FSC_ACCESS_FLAG_FAULT_LEVEL_0     (0b001000)
+#define ARM64_FSC_ACCESS_FLAG_FAULT_LEVEL_1     (0b001001)
+#define ARM64_FSC_ACCESS_FLAG_FAULT_LEVEL_2     (0b001010)
+#define ARM64_FSC_ACCESS_FLAG_FAULT_LEVEL_3     (0b001011)
+
 #endif /* __ASSEMBLY__ */
 
 #endif

+ 25 - 25
libcpu/aarch64/common/trap.c

@@ -62,26 +62,26 @@ static void _check_fault(struct rt_hw_exp_stack *regs, uint32_t pc_adj, char *in
 rt_inline int _get_type(unsigned long esr)
 {
     int ret;
-    int fsc = esr & 0x3f;
+    int fsc = ARM64_ESR_EXTRACT_FSC(esr);
     switch (fsc)
     {
-        case 0x4:
-        case 0x5:
-        case 0x6:
-        case 0x7:
+        case ARM64_FSC_TRANSLATION_FAULT_LEVEL_0:
+        case ARM64_FSC_TRANSLATION_FAULT_LEVEL_1:
+        case ARM64_FSC_TRANSLATION_FAULT_LEVEL_2:
+        case ARM64_FSC_TRANSLATION_FAULT_LEVEL_3:
             ret = MM_FAULT_TYPE_PAGE_FAULT;
             break;
-        case 0xc:
-        case 0xd:
-        case 0xe:
-        case 0xf:
-            ret = MM_FAULT_TYPE_ACCESS_FAULT;
+        case ARM64_FSC_PERMISSION_FAULT_LEVEL_0:
+        case ARM64_FSC_PERMISSION_FAULT_LEVEL_1:
+        case ARM64_FSC_PERMISSION_FAULT_LEVEL_2:
+        case ARM64_FSC_PERMISSION_FAULT_LEVEL_3:
+            ret = MM_FAULT_TYPE_RWX_PERM;
             break;
-        case 0x8:
-        case 0x9:
-        case 0xa:
-        case 0xb:
-            /* access flag fault */
+        case ARM64_FSC_ACCESS_FLAG_FAULT_LEVEL_0:
+        case ARM64_FSC_ACCESS_FLAG_FAULT_LEVEL_1:
+        case ARM64_FSC_ACCESS_FLAG_FAULT_LEVEL_2:
+        case ARM64_FSC_ACCESS_FLAG_FAULT_LEVEL_3:
+            /* access flag fault, not handled currently */
         default:
             ret = MM_FAULT_TYPE_GENERIC;
     }
@@ -96,28 +96,28 @@ rt_inline long _irq_is_disable(long cpsr)
 static int user_fault_fixable(unsigned long esr, struct rt_hw_exp_stack *regs)
 {
     rt_ubase_t level;
-    unsigned char ec;
-    void *dfar;
-    int ret = 0;
-
-    ec = (unsigned char)((esr >> 26) & 0x3fU);
     enum rt_mm_fault_op fault_op;
     enum rt_mm_fault_type fault_type;
     struct rt_lwp *lwp;
+    void *dfar;
+    int ret = 0;
+    unsigned char ec = ARM64_ESR_EXTRACT_EC(esr);
+    rt_bool_t is_write = ARM64_ABORT_WNR(esr);
 
     switch (ec)
     {
-    case 0x20:
+    case ARM64_EC_INST_ABORT_FROM_LO_EXCEPTION:
         fault_op = MM_FAULT_OP_EXECUTE;
         fault_type = _get_type(esr);
         break;
-    case 0x21:
-    case 0x24:
-    case 0x25:
-        fault_op = MM_FAULT_OP_WRITE;
+    case ARM64_EC_INST_ABORT_WITHOUT_A_CHANGE:
+    case ARM64_EC_DATA_ABORT_FROM_LO_EXCEPTION:
+    case ARM64_EC_DATA_ABORT_WITHOUT_A_CHANGE:
+        fault_op = is_write ? MM_FAULT_OP_WRITE : MM_FAULT_OP_READ;
         fault_type = _get_type(esr);
         break;
     default:
+        /* non-fixable */
         fault_op = 0;
         break;
     }

+ 3 - 3
libcpu/risc-v/t-head/c906/trap.c

@@ -169,7 +169,7 @@ void handle_user(rt_size_t scause, rt_size_t stval, rt_size_t sepc, struct rt_hw
             break;
         case EP_LOAD_ACCESS_FAULT:
             fault_op = MM_FAULT_OP_READ;
-            fault_type = MM_FAULT_TYPE_ACCESS_FAULT;
+            fault_type = MM_FAULT_TYPE_GENERIC;
             break;
         case EP_LOAD_ADDRESS_MISALIGNED:
             fault_op = MM_FAULT_OP_READ;
@@ -181,7 +181,7 @@ void handle_user(rt_size_t scause, rt_size_t stval, rt_size_t sepc, struct rt_hw
             break;
         case EP_STORE_ACCESS_FAULT:
             fault_op = MM_FAULT_OP_WRITE;
-            fault_type = MM_FAULT_TYPE_ACCESS_FAULT;
+            fault_type = MM_FAULT_TYPE_GENERIC;
             break;
         case EP_STORE_ADDRESS_MISALIGNED:
             fault_op = MM_FAULT_OP_WRITE;
@@ -193,7 +193,7 @@ void handle_user(rt_size_t scause, rt_size_t stval, rt_size_t sepc, struct rt_hw
             break;
         case EP_INSTRUCTION_ACCESS_FAULT:
             fault_op = MM_FAULT_OP_EXECUTE;
-            fault_type = MM_FAULT_TYPE_ACCESS_FAULT;
+            fault_type = MM_FAULT_TYPE_GENERIC;
             break;
         case EP_INSTRUCTION_ADDRESS_MISALIGNED:
             fault_op = MM_FAULT_OP_EXECUTE;

+ 19 - 4
libcpu/risc-v/virt64/riscv_mmu.h

@@ -116,6 +116,10 @@ rt_inline size_t rt_hw_mmu_attr_rm_perm(size_t attr, rt_base_t prot)
         case RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER:
             attr &= ~PTE_W;
             break;
+        /* remove write permission for kernel */
+        case RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_KERNEL:
+            attr &= ~PTE_W;
+            break;
         default:
             RT_ASSERT(0);
     }
@@ -135,7 +139,7 @@ rt_inline size_t rt_hw_mmu_attr_add_perm(size_t attr, rt_base_t prot)
     {
         /* add write permission for user */
         case RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER:
-            attr |= PTE_W;
+            attr |= (PTE_R | PTE_W | PTE_U);
             break;
         default:
             RT_ASSERT(0);
@@ -153,15 +157,26 @@ rt_inline size_t rt_hw_mmu_attr_add_perm(size_t attr, rt_base_t prot)
 rt_inline rt_bool_t rt_hw_mmu_attr_test_perm(size_t attr, rt_base_t prot)
 {
     rt_bool_t rc = 0;
-    switch (prot)
+    switch (prot & ~RT_HW_MMU_PROT_USER)
     {
         /* test write permission for user */
-        case RT_HW_MMU_PROT_WRITE | RT_HW_MMU_PROT_USER:
-            rc = !!(attr & PTE_W);
+        case RT_HW_MMU_PROT_WRITE:
+            rc = ((attr & PTE_W) && (attr & PTE_R));
+            break;
+        case RT_HW_MMU_PROT_READ:
+            rc = !!(attr & PTE_R);
+            break;
+        case RT_HW_MMU_PROT_EXECUTE:
+            rc = !!(attr & PTE_X);
             break;
         default:
             RT_ASSERT(0);
     }
+
+    if (rc && (prot & RT_HW_MMU_PROT_USER))
+    {
+        rc = !!(attr & PTE_U);
+    }
     return rc;
 }
 

+ 6 - 6
libcpu/risc-v/virt64/trap.c

@@ -162,11 +162,11 @@ void handle_user(rt_size_t scause, rt_size_t stval, rt_size_t sepc, struct rt_hw
     {
         case EP_LOAD_PAGE_FAULT:
             fault_op = MM_FAULT_OP_READ;
-            fault_type = MM_FAULT_TYPE_PAGE_FAULT;
+            fault_type = MM_FAULT_TYPE_GENERIC_MMU;
             break;
         case EP_LOAD_ACCESS_FAULT:
             fault_op = MM_FAULT_OP_READ;
-            fault_type = MM_FAULT_TYPE_ACCESS_FAULT;
+            fault_type = MM_FAULT_TYPE_BUS_ERROR;
             break;
         case EP_LOAD_ADDRESS_MISALIGNED:
             fault_op = MM_FAULT_OP_READ;
@@ -174,11 +174,11 @@ void handle_user(rt_size_t scause, rt_size_t stval, rt_size_t sepc, struct rt_hw
             break;
         case EP_STORE_PAGE_FAULT:
             fault_op = MM_FAULT_OP_WRITE;
-            fault_type = MM_FAULT_TYPE_PAGE_FAULT;
+            fault_type = MM_FAULT_TYPE_GENERIC_MMU;
             break;
         case EP_STORE_ACCESS_FAULT:
             fault_op = MM_FAULT_OP_WRITE;
-            fault_type = MM_FAULT_TYPE_ACCESS_FAULT;
+            fault_type = MM_FAULT_TYPE_BUS_ERROR;
             break;
         case EP_STORE_ADDRESS_MISALIGNED:
             fault_op = MM_FAULT_OP_WRITE;
@@ -186,11 +186,11 @@ void handle_user(rt_size_t scause, rt_size_t stval, rt_size_t sepc, struct rt_hw
             break;
         case EP_INSTRUCTION_PAGE_FAULT:
             fault_op = MM_FAULT_OP_EXECUTE;
-            fault_type = MM_FAULT_TYPE_PAGE_FAULT;
+            fault_type = MM_FAULT_TYPE_GENERIC_MMU;
             break;
         case EP_INSTRUCTION_ACCESS_FAULT:
             fault_op = MM_FAULT_OP_EXECUTE;
-            fault_type = MM_FAULT_TYPE_ACCESS_FAULT;
+            fault_type = MM_FAULT_TYPE_BUS_ERROR;
             break;
         case EP_INSTRUCTION_ADDRESS_MISALIGNED:
             fault_op = MM_FAULT_OP_EXECUTE;