
Implement MPU abstraction layer (#8080)

- Design an MPU abstraction layer for RT-Thread supporting the ARMv7-M and ARMv8-M architectures, so users can use the MPU to detect memory problems such as stack overflow and to isolate thread memory
- Provide generic APIs under the components/mprotect directory, with architecture-specific implementations under the libcpu directory
- Tested on the STM32U575 NUCLEO and STM32H75 NUCLEO boards
tangzz98 1 year ago
parent
commit
acc66c5479

+ 38 - 62
bsp/stm32/stm32u575-st-nucleo/.config

@@ -11,6 +11,7 @@ CONFIG_RT_NAME_MAX=8
 # CONFIG_RT_USING_SMART is not set
 # CONFIG_RT_USING_AMP is not set
 # CONFIG_RT_USING_SMP is not set
+CONFIG_RT_CPUS_NR=1
 CONFIG_RT_ALIGN_SIZE=8
 # CONFIG_RT_THREAD_PRIORITY_8 is not set
 CONFIG_RT_THREAD_PRIORITY_32=y
@@ -65,19 +66,15 @@ CONFIG_RT_USING_SMALL_MEM_AS_HEAP=y
 # CONFIG_RT_USING_MEMTRACE is not set
 # CONFIG_RT_USING_HEAP_ISR is not set
 CONFIG_RT_USING_HEAP=y
-
-#
-# Kernel Device Object
-#
 CONFIG_RT_USING_DEVICE=y
 # CONFIG_RT_USING_DEVICE_OPS is not set
-# CONFIG_RT_USING_DM is not set
 # CONFIG_RT_USING_INTERRUPT_INFO is not set
 CONFIG_RT_USING_CONSOLE=y
 CONFIG_RT_CONSOLEBUF_SIZE=256
 CONFIG_RT_CONSOLE_DEVICE_NAME="uart1"
-CONFIG_RT_VER_NUM=0x50002
+CONFIG_RT_VER_NUM=0x50100
 # CONFIG_RT_USING_STDC_ATOMIC is not set
+CONFIG_RT_BACKTRACE_LEVEL_MAX_NR=32
 # CONFIG_RT_USING_CACHE is not set
 CONFIG_RT_USING_HW_ATOMIC=y
 # CONFIG_ARCH_ARM_BOOTWITH_FLUSH_CACHE is not set
@@ -121,6 +118,7 @@ CONFIG_FINSH_USING_OPTION_COMPLETION=y
 #
 # Device Drivers
 #
+# CONFIG_RT_USING_DM is not set
 CONFIG_RT_USING_DEVICE_IPC=y
 CONFIG_RT_UNAMED_PIPE_NUMBER=64
 CONFIG_RT_USING_SYSTEM_WORKQUEUE=y
@@ -138,6 +136,7 @@ CONFIG_RT_USING_I2C=y
 # CONFIG_RT_I2C_DEBUG is not set
 CONFIG_RT_USING_I2C_BITOPS=y
 # CONFIG_RT_I2C_BITOPS_DEBUG is not set
+# CONFIG_RT_USING_SOFT_I2C is not set
 # CONFIG_RT_USING_PHY is not set
 CONFIG_RT_USING_PIN=y
 CONFIG_RT_USING_ADC=y
@@ -149,7 +148,6 @@ CONFIG_RT_USING_PWM=y
 # CONFIG_RT_USING_MTD_NOR is not set
 # CONFIG_RT_USING_MTD_NAND is not set
 # CONFIG_RT_USING_PM is not set
-# CONFIG_RT_USING_FDT is not set
 # CONFIG_RT_USING_RTC is not set
 # CONFIG_RT_USING_SDIO is not set
 CONFIG_RT_USING_SPI=y
@@ -238,6 +236,21 @@ CONFIG_RT_LIBC_TZ_DEFAULT_SEC=0
 # CONFIG_RT_USING_RT_LINK is not set
 # CONFIG_RT_USING_VBUS is not set
 
+#
+# Memory management
+#
+# CONFIG_RT_USING_MEMBLOCK is not set
+
+#
+# Memory protection
+#
+CONFIG_RT_USING_MEM_PROTECTION=y
+CONFIG_RT_USING_HW_STACK_GUARD=y
+CONFIG_USE_MEM_PROTECTION_EXAMPLES=y
+CONFIG_NUM_MEM_REGIONS=8
+CONFIG_NUM_EXCLUSIVE_REGIONS=2
+CONFIG_NUM_CONFIGURABLE_REGIONS=3
+
 #
 # RT-Thread Utestcases
 #
@@ -261,6 +274,7 @@ CONFIG_RT_LIBC_TZ_DEFAULT_SEC=0
 # CONFIG_PKG_USING_KAWAII_MQTT is not set
 # CONFIG_PKG_USING_BC28_MQTT is not set
 # CONFIG_PKG_USING_WEBTERMINAL is not set
+# CONFIG_PKG_USING_LIBMODBUS is not set
 # CONFIG_PKG_USING_FREEMODBUS is not set
 # CONFIG_PKG_USING_NANOPB is not set
 
@@ -328,8 +342,6 @@ CONFIG_RT_LIBC_TZ_DEFAULT_SEC=0
 # CONFIG_PKG_USING_AGILE_FTP is not set
 # CONFIG_PKG_USING_EMBEDDEDPROTO is not set
 # CONFIG_PKG_USING_RT_LINK_HW is not set
-# CONFIG_PKG_USING_RYANMQTT is not set
-# CONFIG_PKG_USING_RYANW5500 is not set
 # CONFIG_PKG_USING_LORA_PKT_FWD is not set
 # CONFIG_PKG_USING_LORA_GW_DRIVER_LIB is not set
 # CONFIG_PKG_USING_LORA_PKT_SNIFFER is not set
@@ -337,8 +349,6 @@ CONFIG_RT_LIBC_TZ_DEFAULT_SEC=0
 # CONFIG_PKG_USING_SMALL_MODBUS is not set
 # CONFIG_PKG_USING_NET_SERVER is not set
 # CONFIG_PKG_USING_ZFTP is not set
-# CONFIG_PKG_USING_WOL is not set
-# CONFIG_PKG_USING_ZEPHYR_POLLING is not set
 
 #
 # security packages
@@ -407,12 +417,17 @@ CONFIG_RT_LIBC_TZ_DEFAULT_SEC=0
 # CONFIG_PKG_USING_MP3PLAYER is not set
 # CONFIG_PKG_USING_TINYJPEG is not set
 # CONFIG_PKG_USING_UGUI is not set
+
+#
+# PainterEngine: A cross-platform graphics application framework written in C language
+#
+# CONFIG_PKG_USING_PAINTERENGINE is not set
+# CONFIG_PKG_USING_PAINTERENGINE_AUX is not set
 # CONFIG_PKG_USING_MCURSES is not set
 # CONFIG_PKG_USING_TERMBOX is not set
 # CONFIG_PKG_USING_VT100 is not set
 # CONFIG_PKG_USING_QRCODE is not set
 # CONFIG_PKG_USING_GUIENGINE is not set
-# CONFIG_PKG_USING_3GPP_AMRNB is not set
 
 #
 # tools packages
@@ -422,7 +437,6 @@ CONFIG_RT_LIBC_TZ_DEFAULT_SEC=0
 # CONFIG_PKG_USING_EASYLOGGER is not set
 # CONFIG_PKG_USING_SYSTEMVIEW is not set
 # CONFIG_PKG_USING_SEGGER_RTT is not set
-# CONFIG_PKG_USING_RTT_AUTO_EXE_CMD is not set
 # CONFIG_PKG_USING_RDB is not set
 # CONFIG_PKG_USING_ULOG_EASYFLASH is not set
 # CONFIG_PKG_USING_LOGMGR is not set
@@ -458,6 +472,7 @@ CONFIG_RT_LIBC_TZ_DEFAULT_SEC=0
 # CONFIG_PKG_USING_CBOX is not set
 # CONFIG_PKG_USING_SNOWFLAKE is not set
 # CONFIG_PKG_USING_HASH_MATCH is not set
+# CONFIG_PKG_USING_FIRE_PID_CURVE is not set
 # CONFIG_PKG_USING_ARMV7M_DWT_TOOL is not set
 # CONFIG_PKG_USING_VOFA_PLUS is not set
 
@@ -530,8 +545,6 @@ CONFIG_RT_LIBC_TZ_DEFAULT_SEC=0
 # CONFIG_PKG_USING_TFDB is not set
 # CONFIG_PKG_USING_QPC is not set
 # CONFIG_PKG_USING_AGILE_UPGRADE is not set
-# CONFIG_PKG_USING_FLASH_BLOB is not set
-# CONFIG_PKG_USING_MLIBC is not set
 
 #
 # peripheral libraries and drivers
@@ -540,6 +553,7 @@ CONFIG_RT_LIBC_TZ_DEFAULT_SEC=0
 #
 # sensors drivers
 #
+# CONFIG_PKG_USING_FINGERPRINT is not set
 # CONFIG_PKG_USING_LSM6DSM is not set
 # CONFIG_PKG_USING_LSM6DSL is not set
 # CONFIG_PKG_USING_LPS22HB is not set
@@ -599,11 +613,6 @@ CONFIG_RT_LIBC_TZ_DEFAULT_SEC=0
 # CONFIG_PKG_USING_AD7746 is not set
 # CONFIG_PKG_USING_ADT74XX is not set
 # CONFIG_PKG_USING_MAX17048 is not set
-# CONFIG_PKG_USING_AS7341 is not set
-# CONFIG_PKG_USING_CW2015 is not set
-# CONFIG_PKG_USING_ICM20608 is not set
-# CONFIG_PKG_USING_PAJ7620 is not set
-# CONFIG_PKG_USING_STHS34PF80 is not set
 
 #
 # touch drivers
@@ -616,10 +625,11 @@ CONFIG_RT_LIBC_TZ_DEFAULT_SEC=0
 # CONFIG_PKG_USING_FT5426 is not set
 # CONFIG_PKG_USING_FT6236 is not set
 # CONFIG_PKG_USING_XPT2046_TOUCH is not set
-# CONFIG_PKG_USING_CST816X is not set
 # CONFIG_PKG_USING_REALTEK_AMEBA is not set
+# CONFIG_PKG_USING_AS7341 is not set
 # CONFIG_PKG_USING_STM32_SDIO is not set
 # CONFIG_PKG_USING_ESP_IDF is not set
+# CONFIG_PKG_USING_ICM20608 is not set
 # CONFIG_PKG_USING_BUTTON is not set
 # CONFIG_PKG_USING_PCF8574 is not set
 # CONFIG_PKG_USING_SX12XX is not set
@@ -629,6 +639,7 @@ CONFIG_RT_LIBC_TZ_DEFAULT_SEC=0
 # CONFIG_PKG_USING_LKDGUI is not set
 # CONFIG_PKG_USING_NRF5X_SDK is not set
 # CONFIG_PKG_USING_NRFX is not set
+# CONFIG_PKG_USING_WM_LIBRARIES is not set
 
 #
 # Kendryte SDK
@@ -642,7 +653,6 @@ CONFIG_RT_LIBC_TZ_DEFAULT_SEC=0
 # CONFIG_PKG_USING_AT24CXX is not set
 # CONFIG_PKG_USING_MOTIONDRIVER2RTT is not set
 # CONFIG_PKG_USING_PCA9685 is not set
-# CONFIG_PKG_USING_ILI9341 is not set
 # CONFIG_PKG_USING_I2C_TOOLS is not set
 # CONFIG_PKG_USING_NRF24L01 is not set
 # CONFIG_PKG_USING_RPLIDAR is not set
@@ -659,6 +669,7 @@ CONFIG_RT_LIBC_TZ_DEFAULT_SEC=0
 # CONFIG_PKG_USING_CAN_YMODEM is not set
 # CONFIG_PKG_USING_LORA_RADIO_DRIVER is not set
 # CONFIG_PKG_USING_QLED is not set
+# CONFIG_PKG_USING_PAJ7620 is not set
 # CONFIG_PKG_USING_AGILE_CONSOLE is not set
 # CONFIG_PKG_USING_LD3320 is not set
 # CONFIG_PKG_USING_WK2124 is not set
@@ -686,17 +697,14 @@ CONFIG_RT_LIBC_TZ_DEFAULT_SEC=0
 # CONFIG_PKG_USING_MISAKA_AT24CXX is not set
 # CONFIG_PKG_USING_MISAKA_RGB_BLING is not set
 # CONFIG_PKG_USING_LORA_MODEM_DRIVER is not set
+# CONFIG_PKG_USING_BL_MCU_SDK is not set
 # CONFIG_PKG_USING_SOFT_SERIAL is not set
 # CONFIG_PKG_USING_MB85RS16 is not set
+# CONFIG_PKG_USING_CW2015 is not set
 # CONFIG_PKG_USING_RFM300 is not set
 # CONFIG_PKG_USING_IO_INPUT_FILTER is not set
 # CONFIG_PKG_USING_RASPBERRYPI_PICO_SDK is not set
 # CONFIG_PKG_USING_LRF_NV7LIDAR is not set
-# CONFIG_PKG_USING_AIP650 is not set
-# CONFIG_PKG_USING_FINGERPRINT is not set
-# CONFIG_PKG_USING_BT_ECB02C is not set
-# CONFIG_PKG_USING_UAT is not set
-# CONFIG_PKG_USING_SPI_TOOLS is not set
 
 #
 # AI packages
@@ -711,15 +719,6 @@ CONFIG_RT_LIBC_TZ_DEFAULT_SEC=0
 # CONFIG_PKG_USING_QUEST is not set
 # CONFIG_PKG_USING_NAXOS is not set
 
-#
-# Signal Processing and Control Algorithm Packages
-#
-# CONFIG_PKG_USING_FIRE_PID_CURVE is not set
-# CONFIG_PKG_USING_QPID is not set
-# CONFIG_PKG_USING_UKAL is not set
-# CONFIG_PKG_USING_DIGITALCTRL is not set
-# CONFIG_PKG_USING_KISSFFT is not set
-
 #
 # miscellaneous packages
 #
@@ -749,7 +748,6 @@ CONFIG_RT_LIBC_TZ_DEFAULT_SEC=0
 # CONFIG_PKG_USING_TETRIS is not set
 # CONFIG_PKG_USING_DONUT is not set
 # CONFIG_PKG_USING_COWSAY is not set
-# CONFIG_PKG_USING_MORSE is not set
 # CONFIG_PKG_USING_LIBCSV is not set
 # CONFIG_PKG_USING_OPTPARSE is not set
 # CONFIG_PKG_USING_FASTLZ is not set
@@ -765,12 +763,14 @@ CONFIG_RT_LIBC_TZ_DEFAULT_SEC=0
 # CONFIG_PKG_USING_DSTR is not set
 # CONFIG_PKG_USING_TINYFRAME is not set
 # CONFIG_PKG_USING_KENDRYTE_DEMO is not set
+# CONFIG_PKG_USING_DIGITALCTRL is not set
 # CONFIG_PKG_USING_UPACKER is not set
 # CONFIG_PKG_USING_UPARAM is not set
 # CONFIG_PKG_USING_HELLO is not set
 # CONFIG_PKG_USING_VI is not set
 # CONFIG_PKG_USING_KI is not set
 # CONFIG_PKG_USING_ARMv7M_DWT is not set
+# CONFIG_PKG_USING_UKAL is not set
 # CONFIG_PKG_USING_CRCLIB is not set
 # CONFIG_PKG_USING_LWGPS is not set
 # CONFIG_PKG_USING_STATE_MACHINE is not set
@@ -781,8 +781,6 @@ CONFIG_RT_LIBC_TZ_DEFAULT_SEC=0
 # CONFIG_PKG_USING_SLCAN2RTT is not set
 # CONFIG_PKG_USING_SOEM is not set
 # CONFIG_PKG_USING_QPARAM is not set
-# CONFIG_PKG_USING_CorevMCU_CLI is not set
-# CONFIG_PKG_USING_GET_IRQ_PRIORITY is not set
 
 #
 # Arduino libraries
@@ -790,9 +788,8 @@ CONFIG_RT_LIBC_TZ_DEFAULT_SEC=0
 # CONFIG_PKG_USING_RTDUINO is not set
 
 #
-# Projects and Demos
+# Projects
 #
-# CONFIG_PKG_USING_ARDUINO_MSGQ_C_CPP_DEMO is not set
 # CONFIG_PKG_USING_ARDUINO_ULTRASOUND_RADAR is not set
 # CONFIG_PKG_USING_ARDUINO_SENSOR_KIT is not set
 # CONFIG_PKG_USING_ARDUINO_MATLAB_SUPPORT is not set
@@ -800,17 +797,13 @@ CONFIG_RT_LIBC_TZ_DEFAULT_SEC=0
 #
 # Sensors
 #
-# CONFIG_PKG_USING_ARDUINO_SENSOR_DEVICE_DRIVERS is not set
 # CONFIG_PKG_USING_ARDUINO_CAPACITIVESENSOR is not set
 # CONFIG_PKG_USING_ARDUINO_ADAFRUIT_ADXL375 is not set
 # CONFIG_PKG_USING_ARDUINO_ADAFRUIT_VL53L0X is not set
-# CONFIG_PKG_USING_ARDUINO_ADAFRUIT_VL53L1X is not set
 # CONFIG_PKG_USING_ARDUINO_ADAFRUIT_SENSOR is not set
-# CONFIG_PKG_USING_ARDUINO_ADAFRUIT_VL6180X is not set
 # CONFIG_PKG_USING_ADAFRUIT_MAX31855 is not set
 # CONFIG_PKG_USING_ARDUINO_ADAFRUIT_MAX31865 is not set
 # CONFIG_PKG_USING_ARDUINO_ADAFRUIT_MAX31856 is not set
-# CONFIG_PKG_USING_ARDUINO_ADAFRUIT_MAX6675 is not set
 # CONFIG_PKG_USING_ARDUINO_ADAFRUIT_MLX90614 is not set
 # CONFIG_PKG_USING_ARDUINO_ADAFRUIT_LSM9DS1 is not set
 # CONFIG_PKG_USING_ARDUINO_ADAFRUIT_AHTX0 is not set
@@ -931,27 +924,17 @@ CONFIG_RT_LIBC_TZ_DEFAULT_SEC=0
 # CONFIG_PKG_USING_ARDUINO_SEEED_AT42QT1070 is not set
 # CONFIG_PKG_USING_ARDUINO_SEEED_LSM6DS3 is not set
 # CONFIG_PKG_USING_ARDUINO_SEEED_HDC1000 is not set
-# CONFIG_PKG_USING_ARDUINO_SEEED_HM3301 is not set
-# CONFIG_PKG_USING_ARDUINO_SEEED_MCP9600 is not set
-# CONFIG_PKG_USING_ARDUINO_SEEED_LTC2941 is not set
-# CONFIG_PKG_USING_ARDUINO_SEEED_LDC1612 is not set
 
 #
 # Display
 #
-# CONFIG_PKG_USING_ARDUINO_ADAFRUIT_GFX_LIBRARY is not set
 # CONFIG_PKG_USING_ARDUINO_U8G2 is not set
-# CONFIG_PKG_USING_ARDUINO_ADAFRUIT_ST7735 is not set
-# CONFIG_PKG_USING_ARDUINO_ADAFRUIT_SSD1306 is not set
-# CONFIG_PKG_USING_ARDUINO_ADAFRUIT_ILI9341 is not set
 # CONFIG_PKG_USING_SEEED_TM1637 is not set
 
 #
 # Timing
 #
 # CONFIG_PKG_USING_ARDUINO_MSTIMER2 is not set
-# CONFIG_PKG_USING_ARDUINO_TICKER is not set
-# CONFIG_PKG_USING_ARDUINO_TASKSCHEDULER is not set
 
 #
 # Data Processing
@@ -975,17 +958,10 @@ CONFIG_RT_LIBC_TZ_DEFAULT_SEC=0
 # CONFIG_PKG_USING_ARDUINO_ADAFRUIT_PCF8574 is not set
 # CONFIG_PKG_USING_ARDUINO_ADAFRUIT_PCA9685 is not set
 # CONFIG_PKG_USING_ARDUINO_SEEED_PCF85063TP is not set
-# CONFIG_PKG_USING_ARDUINO_ADAFRUIT_TPA2016 is not set
-# CONFIG_PKG_USING_ARDUINO_ADAFRUIT_DRV2605 is not set
-# CONFIG_PKG_USING_ARDUINO_ADAFRUIT_DS1841 is not set
-# CONFIG_PKG_USING_ARDUINO_ADAFRUIT_DS3502 is not set
 
 #
 # Other
 #
-# CONFIG_PKG_USING_ARDUINO_ADAFRUIT_MFRC630 is not set
-# CONFIG_PKG_USING_ARDUINO_ADAFRUIT_SI5351 is not set
-# CONFIG_PKG_USING_ARDUINO_RTCLIB is not set
 
 #
 # Signal IO

+ 13 - 0
bsp/stm32/stm32u575-st-nucleo/board/board.c

@@ -10,6 +10,19 @@
 
 #include "board.h"
 
+#ifdef RT_USING_MEM_PROTECTION
+#include "mprotect.h"
+
+rt_mem_region_t static_regions[NUM_STATIC_REGIONS] = {
+  /* Flash region, read only */
+  {
+    .start = (void *)STM32_FLASH_START_ADRESS,
+    .size = (rt_size_t)STM32_FLASH_SIZE,
+    .attr = RT_MEM_REGION_P_RX_U_RX,
+  },
+};
+#endif
+
 void SystemClock_Config(void)
 {
   RCC_ClkInitTypeDef RCC_ClkInitStruct = {0};

+ 4 - 0
bsp/stm32/stm32u575-st-nucleo/board/board.h

@@ -42,6 +42,10 @@ extern int __bss_end;
 
 #define HEAP_END                       STM32_SRAM1_END
 
+#ifdef RT_USING_MEM_PROTECTION
+#define NUM_STATIC_REGIONS 1
+#endif
+
 void SystemClock_Config(void);
 
 #ifdef __cplusplus

+ 19 - 9
bsp/stm32/stm32u575-st-nucleo/rtconfig.h

@@ -7,6 +7,7 @@
 /* RT-Thread Kernel */
 
 #define RT_NAME_MAX 8
+#define RT_CPUS_NR 1
 #define RT_ALIGN_SIZE 8
 #define RT_THREAD_PRIORITY_32
 #define RT_THREAD_PRIORITY_MAX 32
@@ -42,14 +43,12 @@
 #define RT_USING_SMALL_MEM
 #define RT_USING_SMALL_MEM_AS_HEAP
 #define RT_USING_HEAP
-
-/* Kernel Device Object */
-
 #define RT_USING_DEVICE
 #define RT_USING_CONSOLE
 #define RT_CONSOLEBUF_SIZE 256
 #define RT_CONSOLE_DEVICE_NAME "uart1"
-#define RT_VER_NUM 0x50002
+#define RT_VER_NUM 0x50100
+#define RT_BACKTRACE_LEVEL_MAX_NR 32
 #define RT_USING_HW_ATOMIC
 #define RT_USING_CPU_FFS
 #define ARCH_ARM
@@ -126,6 +125,18 @@
 /* Utilities */
 
 
+/* Memory management */
+
+
+/* Memory protection */
+
+#define RT_USING_MEM_PROTECTION
+#define RT_USING_HW_STACK_GUARD
+#define USE_MEM_PROTECTION_EXAMPLES
+#define NUM_MEM_REGIONS 8
+#define NUM_EXCLUSIVE_REGIONS 2
+#define NUM_CONFIGURABLE_REGIONS 3
+
 /* RT-Thread Utestcases */
 
 
@@ -164,6 +175,9 @@
 /* u8g2: a monochrome graphic library */
 
 
+/* PainterEngine: A cross-platform graphics application framework written in C language */
+
+
 /* tools packages */
 
 
@@ -195,9 +209,6 @@
 /* AI packages */
 
 
-/* Signal Processing and Control Algorithm Packages */
-
-
 /* miscellaneous packages */
 
 /* project laboratory */
@@ -211,7 +222,7 @@
 /* Arduino libraries */
 
 
-/* Projects and Demos */
+/* Projects */
 
 
 /* Sensors */
@@ -236,7 +247,6 @@
 
 /* Other */
 
-
 /* Signal IO */
 
 

+ 1 - 0
components/Kconfig

@@ -35,5 +35,6 @@ source "$RTT_DIR/components/net/Kconfig"
 source "$RTT_DIR/components/utilities/Kconfig"
 source "$RTT_DIR/components/vbus/Kconfig"
 source "$RTT_DIR/components/mm/Kconfig"
+source "$RTT_DIR/components/mprotect/Kconfig"
 
 endmenu

+ 28 - 0
components/mprotect/Kconfig

@@ -0,0 +1,28 @@
+menu "Memory protection"
+
+config RT_USING_MEM_PROTECTION
+    bool "Enable memory protection"
+    default n
+    select RT_USING_HEAP
+
+config RT_USING_HW_STACK_GUARD
+    bool "Enable hardware stack guard"
+    default n
+    select RT_USING_MEM_PROTECTION
+
+if RT_USING_MEM_PROTECTION
+    config USE_MEM_PROTECTION_EXAMPLES
+    bool "Use memory protection examples"
+    default y
+
+    config NUM_MEM_REGIONS
+    int "Total number of memory protection regions supported by hardware"
+
+    config NUM_EXCLUSIVE_REGIONS
+    int "Total number of exclusive memory regions added using rt_mprotect_add_exclusive_region API"
+
+    config NUM_CONFIGURABLE_REGIONS
+    int "Maximum number of configurable memory regions for each thread, excluding stack guard and exclusive regions added using rt_mprotect_add_exclusive_region API"
+endif
+
+endmenu

+ 155 - 0
components/mprotect/README.md

@@ -0,0 +1,155 @@
+# RT-Thread MPU Abstraction Layer
+The mprotect (memory protection) component is a generic framework over the memory protection units of different processor architectures, giving users a single set of APIs for solving common memory problems.
+
+# Memory Protection Unit
+A memory protection unit is a programmable device that specifies the access permissions, such as read, write, and execute, of a given memory region. It improves system robustness and helps defend against attacks. Both ARMv7-M and ARMv8-M provide such a unit, the Memory Protection Unit (MPU). [This forum article](https://club.rt-thread.org/ask/article/610305c1379b9e5e.html) gives a more detailed introduction to the ARM MPU. RISC-V provides similar functionality, known as Physical Memory Protection (PMP); see the [RISC-V privileged architecture manual](https://riscv.org/wp-content/uploads/2017/05/riscv-privileged-v1.10.pdf) for details.
+
+# Hardware Support
+The ARMv7-M and ARMv8-M architectures are currently supported. This directory holds the generic framework code and two simple examples; the hardware-specific code lives in the `libcpu` directory.
+
+# Overview
+RT-Thread tasks and the kernel share a single address space and all run in privileged mode. By default, all code has read, write, and execute permission for any memory. The MPU framework lets you assign lower permissions, such as read-only, to specific memory regions. It can be used to:
+
+- Mark critical data or code as read-only so it cannot be corrupted
+- Isolate tasks, so that particular addresses can only be accessed by particular tasks
+- Detect stack overflow
+- Mark data regions as non-executable to defend against stack overflow attacks
+
+# Usage
+## Menuconfig Configuration
+Run `menuconfig` and open `RT-Thread Components->Memory Protection` to configure the options:
+
+- `RT_USING_MEM_PROTECTION`: enable the MPU abstraction layer
+- `RT_USING_HW_STACK_GUARD`: use the MPU to detect stack overflow. This works by placing an MPU region at the top and the bottom of each thread stack and marking it inaccessible; if a stack overflow occurs and code touches a protected address, an exception is raised
+- `NUM_MEM_REGIONS`: number of MPU regions supported by the hardware
+- `NUM_EXCLUSIVE_REGIONS`: number of memory regions configured through the `rt_mprotect_add_exclusive_region` function
+- `NUM_CONFIGURABLE_REGIONS`: number of memory regions each thread may configure through the `rt_mprotect_add_region` function
+
+## Configuring Memory Regions
+The MPU abstraction layer provides the following APIs to configure a thread's permissions for a memory region:
+
+- `rt_err_t rt_mprotect_add_region(rt_thread_t thread, rt_mem_region_t *region)`: add a memory region
+- `rt_err_t rt_mprotect_delete_region(rt_thread_t thread, rt_mem_region_t *region)`: delete a memory region
+- `rt_err_t rt_mprotect_update_region(rt_thread_t thread, rt_mem_region_t *region)`: update the configuration of a memory region
+
+A memory region is described by the `rt_mem_region_t` structure:
+```
+typedef struct {
+    void *start;        /* start address */
+    rt_size_t size;     /* region size */
+    rt_mem_attr_t attr; /* region attributes */
+} rt_mem_region_t;
+```
+The `attr` field can be set with the following macros, which keep the configuration portable across processor architectures (a usage sketch follows the list):
+
+- `RT_MEM_REGION_P_RW_U_RW`: read/write
+- `RT_MEM_REGION_P_RO_U_RO`: read-only
+- `RT_MEM_REGION_P_NA_U_NA`: no access
+- `RT_MEM_REGION_P_RWX_U_RWX`: read/write/execute
+- `RT_MEM_REGION_P_RX_U_RX`: read-only, executable
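+
+For example, here is a minimal sketch (modelled on the bundled `mprotect_example_ro_data.c`) that makes a buffer read-only for the calling thread; the buffer and function names are illustrative:
+```
+#include <rtthread.h>
+#include <mprotect.h>
+
+/* Regions must be aligned to and sized in multiples of MPU_MIN_REGION_SIZE. */
+rt_align(MPU_MIN_REGION_SIZE) static rt_uint8_t buf[MPU_MIN_REGION_SIZE];
+
+static void make_buf_read_only(void)
+{
+    rt_mem_region_t region =
+    {
+        .start = (void *)buf,
+        .size  = MPU_MIN_REGION_SIZE,
+        .attr  = RT_MEM_REGION_P_RO_U_RO,
+    };
+    /* Passing RT_NULL applies the region to the calling thread. */
+    rt_mprotect_add_region(RT_NULL, &region);
+}
+```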
+
+A program often needs a memory region that only one particular task may access. The task that is allowed to access the region can call:
+
+- `rt_err_t rt_mprotect_add_exclusive_region(void *start, rt_size_t size)`: add an exclusive memory region
+- `rt_err_t rt_mprotect_delete_exclusive_region(void *start, rt_size_t size)`: delete an exclusive memory region
+
+A task that called `rt_mprotect_add_exclusive_region` must call `rt_mprotect_delete_exclusive_region` to remove the region before it exits (see the sketch below).
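+
+A minimal sketch of that add/delete pairing inside a thread entry function (the buffer and function names are illustrative):
+```
+#include <rtthread.h>
+#include <mprotect.h>
+
+rt_align(MPU_MIN_REGION_SIZE) static rt_uint8_t private_data[MPU_MIN_REGION_SIZE];
+
+static void worker_entry(void *parameter)
+{
+    (void)parameter;
+    /* Claim exclusive access: other threads now fault when touching this buffer. */
+    rt_mprotect_add_exclusive_region((void *)private_data, MPU_MIN_REGION_SIZE);
+
+    /* ... work on private_data ... */
+
+    /* Release the region before the thread exits. */
+    rt_mprotect_delete_exclusive_region((void *)private_data, MPU_MIN_REGION_SIZE);
+}
+```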
+
+## Initialization
+Before using the MPU abstraction layer, define the number of static MPU regions in `board.h`:
+```
+#define NUM_STATIC_REGIONS 2
+```
+and define the attributes of the static MPU regions in `board.c`:
+```
+rt_mem_region_t static_regions[NUM_STATIC_REGIONS] = {
+  /* Flash region, read only */
+  {
+    .start = (void *)STM32_FLASH_START_ADRESS,
+    .size = (rt_size_t)STM32_FLASH_SIZE,
+    .attr = RT_MEM_REGION_P_RX_U_RX,
+  },
+  /* SRAM region, no execute */
+  {
+    .start = (void *)STM32_SRAM_START_ADDRESS,
+    .size = (rt_size_t)STM32_SRAM_SIZE,
+    .attr = RT_MEM_REGION_P_RW_U_RW,
+  },
+};
+```
+All memory accesses, by any code, are subject to these static region settings. They can be used, for example, to make the code segment read-only and executable and the data segment readable/writable but non-executable.
+
+The configured region counts must also satisfy the following relations (a worked example follows):
+- With `RT_USING_HW_STACK_GUARD` enabled: `NUM_STATIC_REGIONS` + `NUM_CONFIGURABLE_REGIONS` + `NUM_EXCLUSIVE_REGIONS` + 2 <= `NUM_MEM_REGIONS`
+- Without `RT_USING_HW_STACK_GUARD`: `NUM_STATIC_REGIONS` + `NUM_CONFIGURABLE_REGIONS` + `NUM_EXCLUSIVE_REGIONS` <= `NUM_MEM_REGIONS`
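+
+For example, the `stm32u575-st-nucleo` BSP in this patch sets `NUM_STATIC_REGIONS` = 1, `NUM_CONFIGURABLE_REGIONS` = 3 and `NUM_EXCLUSIVE_REGIONS` = 2 with `RT_USING_HW_STACK_GUARD` enabled, requiring 1 + 3 + 2 + 2 = 8 regions, which exactly fits `NUM_MEM_REGIONS` = 8.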
+
+## Exception Detection
+A program can register a hook function to detect memory access violations:
+```
+rt_err_t rt_hw_mpu_exception_set_hook(rt_hw_mpu_exception_hook_t hook)
+```
+The `hook` function is called when a memory access violation occurs. It is declared as:
+```
+typedef void (*rt_hw_mpu_exception_hook_t)(rt_mem_exception_info_t *)
+```
+The `rt_mem_exception_info_t` structure is defined per processor architecture. For ARM it provides the following information for diagnosing the fault:
+```
+typedef struct {
+    rt_thread_t thread;     /* thread that triggered the exception */
+    void *addr;             /* faulting address */
+    rt_mem_region_t region; /* memory region containing the faulting address */
+    rt_uint8_t mmfsr;       /* value of the MemManage Status Register */
+} rt_mem_exception_info_t;
+```
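+
+Registering the hook is a single call; a minimal sketch using the hook shipped in the `examples` directory:
+```
+#include <mprotect.h>
+
+extern void mprotect_example_exception_hook(rt_mem_exception_info_t *info);
+
+static int register_mpu_hook(void)
+{
+    /* The hook is invoked from MemManage_Handler() on an MPU violation. */
+    return (int)rt_hw_mpu_exception_set_hook(mprotect_example_exception_hook);
+}
+```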
+
+# Impact on the RT-Thread Kernel
+## Storing Thread Memory Regions
+The mprotect component adds a `mem_regions` member to the `rt_thread` structure to store the thread's memory region configuration.
+```
+struct rt_thread
+{
+  ......
+#ifdef RT_USING_MEM_PROTECTION
+    void *mem_regions;
+#endif
+  ......
+}
+```
+The memory for `mem_regions` is allocated dynamically and released when the thread is deleted.
+On a thread switch, `rt_hw_mpu_table_switch` is called to switch to the incoming thread's memory region configuration:
+```
+#if defined (RT_USING_MEM_PROTECTION)
+    PUSH    {r0-r3, r12, lr}
+    LDR     r1, =rt_current_thread
+    LDR     r0, [r1]
+    BL      rt_hw_mpu_table_switch
+    POP     {r0-r3, r12, lr}
+#endif
+```
+
+## How Stack Overflow Detection Works
+When a thread is created, the kernel allocates stack memory according to the user-supplied parameters and then calls `rt_hw_stack_guard_init` to set up stack overflow detection. Detection works by placing two inaccessible guard regions at the bottom and the top of the thread stack; if code accesses either of them, an exception is triggered.
+![stack guard](image/stack_guard.png)
+This approach changes the start address and size of the stack that kernel code may operate on. `rt_hw_stack_guard_init` therefore adjusts `rt_thread_t->stack_addr` to point to the start of the accessible stack memory, adjusts `rt_thread_t->stack_size` to reflect the usable size, and a `stack_buf` member added to `rt_thread_t` records the start address of the memory originally allocated for the stack. With these adjustments, kernel code can operate on the stack as usual without modification.
+
+Applications should note that with stack overflow detection enabled, the stack space a thread can actually use is smaller than the allocated memory, so consider increasing the `stack_size` parameter when creating threads.
+
+When a thread is deleted, `stack_buf` must be used to correctly free the memory that was allocated for the stack:
+```
+static void rt_defunct_execute(void)
+{
+  ......
+  if (object_is_systemobject == RT_FALSE)
+    {
+        /* release thread's stack */
+#ifdef RT_USING_HW_STACK_GUARD
+        RT_KERNEL_FREE(thread->stack_buf);
+#else
+        RT_KERNEL_FREE(thread->stack_addr);
+#endif
+        /* delete thread object */
+        rt_object_delete((rt_object_t)thread);
+    }
+  ......
+}
+```

+ 11 - 0
components/mprotect/SConscript

@@ -0,0 +1,11 @@
+from building import *
+
+cwd  = GetCurrentDir()
+src = Glob('*.c')
+if GetDepend("USE_MEM_PROTECTION_EXAMPLES"):
+    src += Glob('examples/*.c')
+CPPPATH = [cwd]
+
+group = DefineGroup('mprotect', src, depend = ['RT_USING_MEM_PROTECTION'], CPPPATH = CPPPATH)
+
+Return('group')

+ 20 - 0
components/mprotect/examples/mprotect_example_exception_hook.c

@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2006-2023, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2023-09-25     tangzz98     the first version
+ */
+
+#include <rtthread.h>
+#include <mprotect.h>
+
+void mprotect_example_exception_hook(rt_mem_exception_info_t *info)
+{
+    rt_kprintf("Memory manage exception\n");
+    rt_kprintf("Faulting thread: %s\n", info->thread->parent.name);
+    rt_kprintf("Faulting address: %p\n", info->addr);
+    rt_kprintf("Faulting region: %p - %p", info->region.start, (void *)((rt_size_t)info->region.start + info->region.size));
+}

+ 75 - 0
components/mprotect/examples/mprotect_example_exclusive_region.c

@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2006-2023, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2023-09-25     tangzz98     the first version
+ */
+
+#include <rtthread.h>
+#include <mprotect.h>
+
+#define THREAD_PRIORITY      25
+#define THREAD_STACK_SIZE    512
+#define THREAD_TIMESLICE     5
+
+rt_align(MPU_MIN_REGION_SIZE) rt_uint8_t thread1_private_data[MPU_MIN_REGION_SIZE];
+
+static void thread1_entry(void *parameter)
+{
+    (void)parameter;
+    /* Thread 1 configures thread1_private_data for exclusive access */
+    rt_kprintf("Thread 1 configures private data\n");
+    rt_mprotect_add_exclusive_region((void *)thread1_private_data, MPU_MIN_REGION_SIZE);
+    rt_kprintf("Thread 1 private data address: %p - %p\n", &thread1_private_data[0], &thread1_private_data[MPU_MIN_REGION_SIZE]);
+    rt_kprintf("Thread 1 reads and writes to its private data\n");
+    for (int i = 0; i < MPU_MIN_REGION_SIZE; i++)
+    {
+        /* Thread 1 has access to its private data */
+        thread1_private_data[i] = i;
+        rt_kprintf("thread1_private_data[%d] = %d\n", i, thread1_private_data[i]);
+    }
+}
+
+static void thread2_entry(void *parameter)
+{
+    (void)parameter;
+    rt_kprintf("Thread 2 writes to thread 1's private data\n");
+    for (int i = 0; i < MPU_MIN_REGION_SIZE; i++)
+    {
+        /*
+         * Thread 2 does not have access to thread 1's private data.
+         * Access generates an exception.
+         */
+        thread1_private_data[i] = i;
+    }
+}
+
+int mprotect_example_exclusive_region()
+{
+    extern void mprotect_example_exception_hook(rt_mem_exception_info_t *info);
+    rt_hw_mpu_exception_set_hook(mprotect_example_exception_hook);
+    rt_thread_t tid1 = RT_NULL;
+    tid1 = rt_thread_create("thread1",
+                           thread1_entry, RT_NULL,
+                           THREAD_STACK_SIZE,
+                           THREAD_PRIORITY - 1,
+                           THREAD_TIMESLICE);
+    if (tid1 != RT_NULL)
+        rt_thread_startup(tid1);
+
+    rt_thread_t tid2 = RT_NULL;
+    tid2 = rt_thread_create("thread2",
+                           thread2_entry, RT_NULL,
+                           THREAD_STACK_SIZE,
+                           THREAD_PRIORITY,
+                           THREAD_TIMESLICE);
+    if (tid2 != RT_NULL)
+        rt_thread_startup(tid2);
+
+    return 0;
+}
+
+MSH_CMD_EXPORT(mprotect_example_exclusive_region, Memory protection example (exclusive_region));

+ 88 - 0
components/mprotect/examples/mprotect_example_ro_data.c

@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2006-2023, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2023-09-25     tangzz98     the first version
+ */
+
+#include <rtthread.h>
+#include <mprotect.h>
+
+#define THREAD_PRIORITY      25
+#define THREAD_STACK_SIZE    512
+#define THREAD_TIMESLICE     5
+
+rt_align(MPU_MIN_REGION_SIZE) rt_uint8_t ro_data[MPU_MIN_REGION_SIZE];
+
+static void thread1_entry(void *parameter)
+{
+    (void)parameter;
+    rt_kprintf("ro_data address: %p - %p\n", &ro_data[0], &ro_data[MPU_MIN_REGION_SIZE]);
+    /* Thread 1 can write ro_data before configuring memory protection. */
+    rt_kprintf("Thread 1 writes to ro_data before configuring memory protection\n");
+    for (int i = 0; i < MPU_MIN_REGION_SIZE; i++)
+    {
+        ro_data[i] = i;
+        rt_kprintf("ro_data[%d] = %d\n", i, ro_data[i]);
+    }
+    rt_mem_region_t ro_region =
+    {
+        .start = (void *)ro_data,
+        .size = MPU_MIN_REGION_SIZE,
+        .attr = RT_MEM_REGION_P_RO_U_RO,
+    };
+    /* Thread 1 configures ro_data as read only. */
+    rt_kprintf("Thread 1 configures ro_data for read-only access for thread 1\n");
+    rt_mprotect_add_region(RT_NULL, &ro_region);
+    rt_kprintf("Thread 1 reads ro_data\n");
+    for (int i = 0; i < MPU_MIN_REGION_SIZE; i++)
+    {
+        rt_kprintf("ro_data[%d] = %d\n", i, ro_data[i]);
+    }
+    rt_thread_delay(RT_TICK_PER_SECOND * 1);
+    /* Thread 1 cannot write ro_data. */
+    rt_kprintf("Thread 1 writes to ro_data\n");
+    for (int i = 0; i < MPU_MIN_REGION_SIZE; i++)
+    {
+        ro_data[i] = i;
+    }
+}
+
+static void thread2_entry(void *parameter)
+{
+    (void)parameter;
+    rt_kprintf("Thread 2 writes to ro_data\n");
+    for (int i = 0; i < MPU_MIN_REGION_SIZE; i++)
+    {
+        /* Thread 2 can write ro_data. */
+        ro_data[i] = i;
+        rt_kprintf("ro_data[%d] = %d\n", i, ro_data[i]);
+    }
+}
+
+int mprotect_example_ro_data()
+{
+    extern void mprotect_example_exception_hook(rt_mem_exception_info_t *info);
+    rt_hw_mpu_exception_set_hook(mprotect_example_exception_hook);
+    rt_thread_t tid1 = RT_NULL;
+    tid1 = rt_thread_create("thread1",
+                           thread1_entry, RT_NULL,
+                           THREAD_STACK_SIZE,
+                           THREAD_PRIORITY - 1,
+                           THREAD_TIMESLICE);
+    rt_thread_startup(tid1);
+    rt_thread_t tid2 = RT_NULL;
+    tid2 = rt_thread_create("thread2",
+                           thread2_entry, RT_NULL,
+                           THREAD_STACK_SIZE,
+                           THREAD_PRIORITY,
+                           THREAD_TIMESLICE);
+    rt_thread_startup(tid2);
+
+    return 0;
+}
+
+MSH_CMD_EXPORT(mprotect_example_ro_data, Memory protection example (read-only data));

BIN
components/mprotect/image/stack_guard.png


+ 201 - 0
components/mprotect/mprotect.c

@@ -0,0 +1,201 @@
+/*
+ * Copyright (c) 2006-2023, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2023-09-25     tangzz98     the first version
+ */
+
+#include "mprotect.h"
+
+#define DBG_ENABLE
+#define DBG_SECTION_NAME "MEMORY PROTECTION"
+#define DBG_LEVEL DBG_ERROR
+#include <rtdbg.h>
+
+rt_mem_exclusive_region_t exclusive_regions[NUM_EXCLUSIVE_REGIONS] = {};
+
+rt_mem_region_t *rt_mprotect_find_free_region(rt_thread_t thread)
+{
+    rt_uint8_t i;
+    rt_mem_region_t *free_region = RT_NULL;
+    if (thread->mem_regions != RT_NULL)
+    {
+        for (i = 0U; i < NUM_DYNAMIC_REGIONS; i++)
+        {
+            if (((rt_mem_region_t *)thread->mem_regions)[i].size == 0)
+            {
+                free_region = &(((rt_mem_region_t *)thread->mem_regions)[i]);
+                break;
+            }
+        }
+    }
+
+    return free_region;
+}
+
+rt_mem_region_t *rt_mprotect_find_region(rt_thread_t thread, rt_mem_region_t *region)
+{
+    rt_uint8_t i;
+    rt_mem_region_t *found_region = RT_NULL;
+    if (thread->mem_regions != RT_NULL)
+    {
+        for (i = 0U; i < NUM_DYNAMIC_REGIONS; i++)
+        {
+            if ((((rt_mem_region_t *)thread->mem_regions)[i].start == region->start) && (((rt_mem_region_t *)thread->mem_regions)[i].size == region->size))
+            {
+                found_region = &(((rt_mem_region_t *)thread->mem_regions)[i]);
+                break;
+            }
+        }
+    }
+
+    return found_region;
+}
+
+/**
+ * @brief    This function will initialize memory protection.
+ *
+ * @return   Return the operation status. When the return value is RT_EOK, the initialization is successful.
+ *           When the return value is any other values, it means the initialization failed.
+ */
+int rt_mprotect_init(void)
+{
+    return (int)rt_hw_mpu_init();
+}
+
+/**
+ * @brief    The function will add a memory region configuration for a thread.
+ *
+ * @param    thread is the thread that the memory region configuration will apply to.
+ *
+ * @param    region is the configuration for the memory region to add.
+ *
+ * @return   Return the operation status. When the return value is RT_EOK, the operation is successful.
+ *           If the return value is any other values, it represents the operation failed.
+ */
+rt_err_t rt_mprotect_add_region(rt_thread_t thread, rt_mem_region_t *region)
+{
+    if (thread == RT_NULL)
+    {
+        thread = rt_thread_self();
+    }
+    if (thread->mem_regions == RT_NULL)
+    {
+        thread->mem_regions = RT_KERNEL_MALLOC(NUM_DYNAMIC_REGIONS * sizeof(rt_mem_region_t));
+        if (thread->mem_regions == RT_NULL)
+        {
+            return RT_ERROR;
+        }
+        rt_memset(thread->mem_regions, 0U, sizeof(rt_mem_region_t) * NUM_DYNAMIC_REGIONS);
+    }
+    return rt_hw_mpu_add_region(thread, region);
+}
+
+/**
+ * @brief    The function will delete an existing memory region configuration for a thread.
+ *
+ * @param    thread is the thread that the memory region configuration will apply to.
+ *
+ * @param    region is the configuration for the memory region to delete.
+ *
+ * @return   Return the operation status. When the return value is RT_EOK, the operation is successful.
+ *           If the return value is any other values, it represents the operation failed.
+ */
+rt_err_t rt_mprotect_delete_region(rt_thread_t thread, rt_mem_region_t *region)
+{
+    if (thread == RT_NULL)
+    {
+        thread = rt_thread_self();
+    }
+    return rt_hw_mpu_delete_region(thread, region);
+}
+
+/**
+ * @brief    The function will update an existing memory region configuration for a thread.
+ *
+ * @param    thread is the thread that the memory region configuration will apply to.
+ *
+ * @param    region is the new configuration for the memory region.
+ *
+ * @return   Return the operation status. When the return value is RT_EOK, the operation is successful.
+ *           If the return value is any other values, it represents the operation failed.
+ */
+rt_err_t rt_mprotect_update_region(rt_thread_t thread, rt_mem_region_t *region)
+{
+    if (thread == RT_NULL)
+    {
+        thread = rt_thread_self();
+    }
+    return rt_hw_mpu_update_region(thread, region);
+}
+
+/**
+ * @brief    The function will add a memory region that is only accessible by the calling thread.
+ *
+ * @param    start is the start address of the memory region.
+ *
+ * @param    size is the size of the memory region.
+ *
+ * @return   Return the operation status. When the return value is RT_EOK, the operation is successful.
+ *           If the return value is any other values, it represents the operation failed.
+ */
+rt_err_t rt_mprotect_add_exclusive_region(void *start, rt_size_t size)
+{
+    rt_uint8_t i;
+    rt_mem_exclusive_region_t region;
+    region.owner = rt_thread_self();
+    region.region.start = start;
+    region.region.size = size;
+    region.region.attr = RT_MEM_REGION_P_NA_U_NA;
+    if (rt_hw_mpu_add_region(RT_NULL, (rt_mem_region_t *)(&(region.region))) != RT_EOK)
+    {
+        return RT_ERROR;
+    }
+    rt_enter_critical();
+    for (i = 0; i < NUM_EXCLUSIVE_REGIONS; i++)
+    {
+        if (exclusive_regions[i].owner == RT_NULL)
+        {
+            rt_memcpy(&(exclusive_regions[i]), &region, sizeof(rt_mem_exclusive_region_t));
+            rt_exit_critical();
+            return RT_EOK;
+        }
+    }
+    rt_exit_critical();
+    LOG_E("Insufficient regions");
+    return RT_ERROR;
+}
+
+/**
+ * @brief    The function will delete a memory region that is only accessible by the calling thread.
+ *           The deleted region will be accessible by other threads.
+ *
+ * @param    start is the start address of the memory region.
+ *
+ * @param    size is the size of the memory region.
+ *
+ * @return   Return the operation status. When the return value is RT_EOK, the operation is successful.
+ *           If the return value is any other values, it represents the operation failed.
+ */
+rt_err_t rt_mprotect_delete_exclusive_region(void *start, rt_size_t size)
+{
+    rt_uint8_t i;
+    rt_enter_critical();
+    for (i = 0; i < NUM_EXCLUSIVE_REGIONS; i++)
+    {
+        if (exclusive_regions[i].owner == rt_thread_self() && exclusive_regions[i].region.start == start && exclusive_regions[i].region.size == size)
+        {
+            exclusive_regions[i].owner = RT_NULL;
+            rt_exit_critical();
+            return RT_EOK;
+        }
+    }
+    rt_exit_critical();
+    LOG_E("Region not found");
+    return RT_ERROR;
+}
+
+INIT_BOARD_EXPORT(rt_mprotect_init);

+ 41 - 0
components/mprotect/mprotect.h

@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2006-2023, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2023-09-25     tangzz98     the first version
+ */
+
+#ifndef __MPROTECT_H__
+#define __MPROTECT_H__
+
+#include <rtdef.h>
+#include <mputype.h>
+
+#define ADDR_IN_REGION(addr, region) (((rt_size_t)(addr) >= (rt_size_t)((region)->start)) && ((rt_size_t)(addr) < (rt_size_t)((region)->start) + (region)->size))
+
+typedef struct
+{
+    void *start;
+    rt_size_t size;
+    rt_mem_attr_t attr;
+} rt_mem_region_t;
+
+#include <mpu.h>
+
+typedef struct
+{
+    rt_mem_region_t region;
+    rt_thread_t owner;
+} rt_mem_exclusive_region_t;
+
+int rt_mprotect_init(void);
+rt_err_t rt_mprotect_add_region(rt_thread_t thread, rt_mem_region_t *region);
+rt_err_t rt_mprotect_delete_region(rt_thread_t thread, rt_mem_region_t *region);
+rt_err_t rt_mprotect_update_region(rt_thread_t thread, rt_mem_region_t *region);
+rt_err_t rt_mprotect_add_exclusive_region(void *start, rt_size_t size);
+rt_err_t rt_mprotect_delete_exclusive_region(void *start, rt_size_t size);
+
+#endif /* __MPROTECT_H__ */

+ 8 - 0
include/rtdef.h

@@ -997,6 +997,14 @@ struct rt_thread
     int                         *clear_child_tid;
 #endif /* ARCH_MM_MMU */
 #endif /* RT_USING_SMART */
+
+#ifdef RT_USING_MEM_PROTECTION
+    void *mem_regions;
+#ifdef RT_USING_HW_STACK_GUARD
+    void *stack_buf;
+#endif
+#endif
+
     rt_atomic_t                 ref_count;
     struct rt_spinlock          spinlock;
     rt_ubase_t                  user_data;              /**< private user data beyond this thread */

+ 4 - 0
include/rthw.h

@@ -99,6 +99,10 @@ rt_uint8_t *rt_hw_stack_init(void       *entry,
                              rt_uint8_t *stack_addr,
                              void       *exit);
 
+#ifdef RT_USING_HW_STACK_GUARD
+void rt_hw_stack_guard_init(rt_thread_t thread);
+#endif
+
 /*
  * Interrupt handler definition
  */

+ 3 - 0
libcpu/arm/cortex-m33/SConscript

@@ -18,6 +18,9 @@ if rtconfig.PLATFORM in ['gcc']:
 if rtconfig.PLATFORM in ['iccarm']:
     src += Glob('*_iar.S')
 
+if not GetDepend('RT_USING_MEM_PROTECTION') and not GetDepend('RT_USING_HW_STACK_GUARD'):
+    SrcRemove(src, 'mpu.c')
+
 group = DefineGroup('CPU', src, depend = [''], CPPPATH = CPPPATH)
 
 Return('group')

+ 10 - 0
libcpu/arm/cortex-m33/context_gcc.S

@@ -17,6 +17,8 @@
  */
 /*@{*/
 
+#include <rtconfig.h>
+
 .cpu cortex-m4
 .syntax unified
 .thumb
@@ -192,6 +194,14 @@ contex_ns_load:
     VLDMIAEQ  r1!, {d8 - d15}                       /* pop FPU register s16~s31 */
 #endif
 
+#if defined (RT_USING_MEM_PROTECTION)
+    PUSH    {r0-r3, r12, lr}
+    LDR     r1, =rt_current_thread
+    LDR     r0, [r1]
+    BL      rt_hw_mpu_table_switch
+    POP     {r0-r3, r12, lr}
+#endif
+
 pendsv_exit:
     MSR     psp, r1                                 /* update stack pointer */
     /* restore interrupt */

+ 25 - 0
libcpu/arm/cortex-m33/cpuport.c

@@ -18,6 +18,9 @@
  */
 
 #include <rtthread.h>
+#ifdef RT_USING_HW_STACK_GUARD
+#include <mprotect.h>
+#endif
 
 #if               /* ARMCC */ (  (defined ( __CC_ARM ) && defined ( __TARGET_FPU_VFP ))    \
                   /* Clang */ || (defined ( __clang__ ) && defined ( __VFP_FP__ ) && !defined(__SOFTFP__)) \
@@ -233,6 +236,28 @@ rt_uint8_t *rt_hw_stack_init(void       *tentry,
     return stk;
 }
 
+#ifdef RT_USING_HW_STACK_GUARD
+void rt_hw_stack_guard_init(rt_thread_t thread)
+{
+    rt_mem_region_t stack_top_region, stack_bottom_region;
+    rt_ubase_t stack_bottom = (rt_ubase_t)thread->stack_addr;
+    rt_ubase_t stack_top = (rt_ubase_t)((rt_uint8_t *)thread->stack_addr + thread->stack_size);
+    rt_ubase_t stack_bottom_region_start = RT_ALIGN(stack_bottom, MPU_MIN_REGION_SIZE);
+    rt_ubase_t stack_top_region_start = RT_ALIGN_DOWN(stack_top - MPU_MIN_REGION_SIZE, MPU_MIN_REGION_SIZE);
+    stack_top_region.start = (void *)stack_top_region_start;
+    stack_top_region.size = MPU_MIN_REGION_SIZE;
+    stack_top_region.attr = RT_MEM_REGION_P_RO_U_NA;
+    stack_bottom_region.start = (void *)stack_bottom_region_start;
+    stack_bottom_region.size = MPU_MIN_REGION_SIZE;
+    stack_bottom_region.attr = RT_MEM_REGION_P_RO_U_NA;
+    rt_mprotect_add_region(thread, &stack_top_region);
+    rt_mprotect_add_region(thread, &stack_bottom_region);
+    thread->stack_buf = thread->stack_addr;
+    thread->stack_addr = (void *)(stack_bottom_region_start + MPU_MIN_REGION_SIZE);
+    thread->stack_size = (rt_uint32_t)(stack_top_region_start - (rt_ubase_t)thread->stack_addr);
+}
+#endif
+
 /**
  * This function set the hook, which is invoked on fault exception handling.
  *

+ 355 - 0
libcpu/arm/cortex-m33/mpu.c

@@ -0,0 +1,355 @@
+/*
+ * Copyright (c) 2006-2023, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2023-09-25     tangzz98     the first version
+ */
+
+#include <rtdef.h>
+#include <mprotect.h>
+
+#define DBG_ENABLE
+#define DBG_SECTION_NAME "MEMORY PROTECTION"
+#define DBG_LEVEL DBG_ERROR
+#include <rtdbg.h>
+
+#define MEM_REGION_TO_MPU_INDEX(thread, region) ((((rt_size_t)region - (rt_size_t)(thread->mem_regions)) / sizeof(rt_mem_region_t)) + NUM_STATIC_REGIONS)
+
+extern rt_mem_region_t *rt_mprotect_find_free_region(rt_thread_t thread);
+extern rt_mem_region_t *rt_mprotect_find_region(rt_thread_t thread, rt_mem_region_t *region);
+
+static rt_hw_mpu_exception_hook_t mem_manage_hook = RT_NULL;
+static rt_uint8_t mpu_mair[8U];
+
+rt_weak rt_uint8_t rt_hw_mpu_region_default_attr(rt_mem_region_t *region)
+{
+    static rt_uint8_t default_mem_attr[] =
+    {
+        ARM_MPU_ATTR(ARM_MPU_ATTR_MEMORY_(1U, 0U, 1U, 0U), ARM_MPU_ATTR_MEMORY_(1U, 0U, 1U, 0U)),
+        ARM_MPU_ATTR(ARM_MPU_ATTR_MEMORY_(1U, 1U, 1U, 1U), ARM_MPU_ATTR_MEMORY_(1U, 1U, 1U, 1U)),
+        ARM_MPU_ATTR_DEVICE_nGnRE,
+        ARM_MPU_ATTR(ARM_MPU_ATTR_MEMORY_(1U, 1U, 1U, 1U), ARM_MPU_ATTR_MEMORY_(1U, 1U, 1U, 1U)),
+        ARM_MPU_ATTR(ARM_MPU_ATTR_MEMORY_(1U, 0U, 1U, 0U), ARM_MPU_ATTR_MEMORY_(1U, 0U, 1U, 0U)),
+        ARM_MPU_ATTR_DEVICE_nGnRE,
+        ARM_MPU_ATTR_DEVICE_nGnRE
+    };
+    rt_uint8_t attr = 0U;
+    if ((rt_uint32_t)region->start >= 0xE0000000U)
+    {
+        attr = ((rt_uint32_t)region->start >= 0xE0100000U) ? ARM_MPU_ATTR_DEVICE_nGnRE : ARM_MPU_ATTR_DEVICE_nGnRnE;
+    }
+    else
+    {
+        attr = default_mem_attr[((rt_uint32_t)region->start & ~0xFFFFFFFU) >> 29U];
+    }
+    return attr;
+}
+
+static rt_err_t _mpu_rbar_rlar(rt_mem_region_t *region)
+{
+    rt_uint32_t rlar = 0U;
+    rt_uint8_t mair_attr;
+    rt_uint8_t index;
+    rt_uint8_t attr_indx = 0xFFU;
+    region->attr.rbar = (rt_uint32_t)region->start | (region->attr.rbar & (~MPU_RBAR_BASE_Msk));
+    rlar |= ((rt_uint32_t)region->start + region->size - 1U) & MPU_RLAR_LIMIT_Msk;
+    if (region->attr.mair_attr == RT_ARM_DEFAULT_MAIR_ATTR)
+    {
+        mair_attr = rt_hw_mpu_region_default_attr(region);
+    }
+    else
+    {
+        mair_attr = (rt_uint8_t)region->attr.mair_attr;
+    }
+    for (index = 0U; index < 8U; index++)
+    {
+        if (mpu_mair[index] == RT_ARM_DEFAULT_MAIR_ATTR)
+        {
+            break;
+        }
+        else if (mpu_mair[index] == mair_attr)
+        {
+            attr_indx = index;
+            break;
+        }
+    }
+    /*
+     * Current region's mair_attr does not match any existing region.
+     * All entries in MPU_MAIR are configured.
+     */
+    if (index == 8U)
+    {
+        return RT_ERROR;
+    }
+    /* An existing region has the same mair_attr. */
+    if (attr_indx != 0xFFU)
+    {
+        rlar |= attr_indx & MPU_RLAR_AttrIndx_Msk;
+    }
+    /* Current region's mair_attr does not match any existing region. */
+    else
+    {
+        ARM_MPU_SetMemAttr(index, mair_attr);
+        rlar |= index & MPU_RLAR_AttrIndx_Msk;
+    }
+    rlar |= MPU_RLAR_EN_Msk;
+    region->attr.rlar = rlar;
+
+    return RT_EOK;
+}
+
+rt_bool_t rt_hw_mpu_region_valid(rt_mem_region_t *region)
+{
+    if (region->size < MPU_MIN_REGION_SIZE)
+    {
+        LOG_E("Region size is too small");
+        return RT_FALSE;
+    }
+    if ((region->size & ~(MPU_MIN_REGION_SIZE - 1U)) != region->size)
+    {
+        LOG_E("Region size is not a multiple of 32 bytes");
+        return RT_FALSE;
+    }
+    if (((rt_uint32_t)region->start & (MPU_MIN_REGION_SIZE - 1U)) != 0U)
+    {
+        LOG_E("Region is not aligned by 32 bytes");
+        return RT_FALSE;
+    }
+    return RT_TRUE;
+}
+
+rt_err_t rt_hw_mpu_init(void)
+{
+    extern rt_mem_region_t static_regions[NUM_STATIC_REGIONS];
+    rt_uint8_t num_mpu_regions;
+    rt_uint8_t num_dynamic_regions;
+    rt_uint8_t index;
+    num_mpu_regions = (rt_uint8_t)((MPU->TYPE & MPU_TYPE_DREGION_Msk) >> MPU_TYPE_DREGION_Pos);
+    if (num_mpu_regions == 0U)
+    {
+        LOG_E("Hardware does not support MPU");
+        return RT_ERROR;
+    }
+    if (num_mpu_regions != NUM_MEM_REGIONS)
+    {
+        LOG_E("Incorrect setting of NUM_MEM_REGIONS");
+        LOG_E("NUM_MEM_REGIONS = %d, hardware support %d MPU regions", NUM_MEM_REGIONS, num_mpu_regions);
+        return RT_ERROR;
+    }
+
+    num_dynamic_regions = NUM_DYNAMIC_REGIONS + NUM_EXCLUSIVE_REGIONS;
+    if (num_dynamic_regions + NUM_STATIC_REGIONS > num_mpu_regions)
+    {
+        LOG_E("Insufficient MPU regions: %d hardware MPU regions", num_mpu_regions);
+#ifdef RT_USING_HW_STACK_GUARD
+        LOG_E("Current configuration requires %d static regions + %d configurable regions + %d exclusive regions + %d stack guard regions", NUM_STATIC_REGIONS, NUM_CONFIGURABLE_REGIONS, NUM_EXCLUSIVE_REGIONS, 1);
+#else
+        LOG_E("Current configuration requires %d static regions + %d configurable regions + %d exclusive regions", NUM_STATIC_REGIONS, NUM_CONFIGURABLE_REGIONS, NUM_EXCLUSIVE_REGIONS);
+#endif
+        return RT_ERROR;
+    }
+    for (index = 0U; index < 8U; index++)
+    {
+        mpu_mair[index] = RT_ARM_DEFAULT_MAIR_ATTR;
+    }
+
+    ARM_MPU_Disable();
+    for (index = 0U; index < NUM_STATIC_REGIONS; index++)
+    {
+        if (rt_hw_mpu_region_valid(&(static_regions[index])) == RT_FALSE)
+        {
+            return RT_ERROR;
+        }
+        if (_mpu_rbar_rlar(&(static_regions[index])) == RT_ERROR)
+        {
+            LOG_E("Number of different mair_attr configurations exceeds 8");
+            return RT_ERROR;
+        }
+        ARM_MPU_SetRegion(index, static_regions[index].attr.rbar, static_regions[index].attr.rlar);
+    }
+    /* Enable background region. */
+    ARM_MPU_Enable(MPU_CTRL_PRIVDEFENA_Msk);
+
+    return RT_EOK;
+}
+
+rt_err_t rt_hw_mpu_add_region(rt_thread_t thread, rt_mem_region_t *region)
+{
+    rt_uint8_t index;
+    rt_mem_region_t *free_region;
+    if (rt_hw_mpu_region_valid(region) == RT_FALSE)
+    {
+        return RT_ERROR;
+    }
+    rt_enter_critical();
+    if (_mpu_rbar_rlar(region) == RT_ERROR)
+    {
+        rt_exit_critical();
+        LOG_E("Number of different mair_attr configurations exceeds 8");
+        return RT_ERROR;
+    }
+    if (thread == RT_NULL)
+    {
+        rt_exit_critical();
+        return RT_EOK;
+    }
+    free_region = rt_mprotect_find_free_region(thread);
+    if (free_region == RT_NULL)
+    {
+        rt_exit_critical();
+        LOG_E("Insufficient regions");
+        return RT_ERROR;
+    }
+    rt_memcpy(free_region, region, sizeof(rt_mem_region_t));
+    if (thread == rt_thread_self())
+    {
+        index = MEM_REGION_TO_MPU_INDEX(thread, free_region);
+        ARM_MPU_SetRegion(index, region->attr.rbar, region->attr.rlar);
+    }
+    rt_exit_critical();
+    return RT_EOK;
+}
+
+rt_err_t rt_hw_mpu_delete_region(rt_thread_t thread, rt_mem_region_t *region)
+{
+    rt_uint8_t index;
+    rt_enter_critical();
+    rt_mem_region_t *found_region = rt_mprotect_find_region(thread, region);
+    if (found_region == RT_NULL)
+    {
+        rt_exit_critical();
+        LOG_E("Region not found");
+        return RT_ERROR;
+    }
+    rt_memset(found_region, 0, sizeof(rt_mem_region_t));
+    if (thread == rt_thread_self())
+    {
+        index = MEM_REGION_TO_MPU_INDEX(thread, found_region);
+        ARM_MPU_ClrRegion(index);
+    }
+    rt_exit_critical();
+    return RT_EOK;
+}
+
+rt_err_t rt_hw_mpu_update_region(rt_thread_t thread, rt_mem_region_t *region)
+{
+    rt_uint8_t index;
+    if (rt_hw_mpu_region_valid(region) == RT_FALSE)
+    {
+        return RT_ERROR;
+    }
+    rt_enter_critical();
+    if (_mpu_rbar_rlar(region) == RT_ERROR)
+    {
+        rt_exit_critical();
+        LOG_E("Number of different mair_attr configurations exceeds 8");
+        return RT_ERROR;
+    }
+    rt_mem_region_t *old_region = rt_mprotect_find_region(thread, region);
+    if (old_region == RT_NULL)
+    {
+        rt_exit_critical();
+        LOG_E("Region not found");
+        return RT_ERROR;
+    }
+    rt_memcpy(old_region, region, sizeof(rt_mem_region_t));
+    if (thread == rt_thread_self())
+    {
+        index = MEM_REGION_TO_MPU_INDEX(thread, old_region);
+        ARM_MPU_SetRegion(index, region->attr.rbar, region->attr.rlar);
+    }
+    rt_exit_critical();
+    return RT_EOK;
+}
+
+rt_err_t rt_hw_mpu_exception_set_hook(rt_hw_mpu_exception_hook_t hook)
+{
+    mem_manage_hook = hook;
+    return RT_EOK;
+}
+
+void rt_hw_mpu_table_switch(rt_thread_t thread)
+{
+    extern rt_mem_exclusive_region_t exclusive_regions[NUM_EXCLUSIVE_REGIONS];
+    rt_uint8_t i;
+    rt_uint8_t index = NUM_STATIC_REGIONS;
+    if (thread->mem_regions != RT_NULL)
+    {
+        for (i = 0U; i < NUM_DYNAMIC_REGIONS; i++)
+        {
+            if (((rt_mem_region_t *)thread->mem_regions)[i].size != 0U)
+            {
+                ARM_MPU_SetRegion(index, ((rt_mem_region_t *)thread->mem_regions)[i].attr.rbar, ((rt_mem_region_t *)thread->mem_regions)[i].attr.rlar);
+                index += 1U;
+            }
+        }
+    }
+    for (i = 0U; i < NUM_EXCLUSIVE_REGIONS; i++)
+    {
+        if ((exclusive_regions[i].owner != RT_NULL) && (exclusive_regions[i].owner != thread))
+        {
+            ARM_MPU_SetRegion(index, exclusive_regions[i].region.attr.rbar, exclusive_regions[i].region.attr.rlar);
+            index += 1U;
+        }
+    }
+    for ( ; index < NUM_MEM_REGIONS; index++)
+    {
+        ARM_MPU_ClrRegion(index);
+    }
+}
+
+void MemManage_Handler(void)
+{
+    extern rt_mem_region_t static_regions[NUM_STATIC_REGIONS];
+    extern rt_mem_exclusive_region_t exclusive_regions[NUM_EXCLUSIVE_REGIONS];
+    rt_mem_exception_info_t info;
+    rt_int8_t i;
+    rt_memset(&info, 0, sizeof(rt_mem_exception_info_t));
+    info.thread = rt_thread_self();
+    if (SCB->CFSR & SCB_CFSR_MMARVALID_Msk)
+    {
+        info.addr = (void *)(SCB->MMFAR);
+        for (i = NUM_EXCLUSIVE_REGIONS - 1; i >= 0; i--)
+        {
+            if ((exclusive_regions[i].owner != RT_NULL) && (exclusive_regions[i].owner != rt_thread_self()) && ADDR_IN_REGION(info.addr, (rt_mem_region_t *)&(exclusive_regions[i])))
+            {
+                rt_memcpy(&(info.region), &(exclusive_regions[i]), sizeof(rt_mem_region_t));
+                break;
+            }
+        }
+        if (info.region.size == 0U)
+        {
+            if (info.thread->mem_regions != RT_NULL)
+            {
+                for (i = NUM_DYNAMIC_REGIONS - 1; i >= 0; i--)
+                {
+                    if ((((rt_mem_region_t *)info.thread->mem_regions)[i].size != 0U) && ADDR_IN_REGION(info.addr, &(((rt_mem_region_t *)info.thread->mem_regions)[i])))
+                    {
+                        rt_memcpy(&(info.region), &(((rt_mem_region_t *)info.thread->mem_regions)[i]), sizeof(rt_mem_region_t));
+                        break;
+                    }
+                }
+            }
+            if (info.region.size == 0U)
+            {
+                for (i = NUM_STATIC_REGIONS - 1; i >= 0; i--)
+                {
+                    if (ADDR_IN_REGION(info.addr, &(static_regions[i])))
+                    {
+                        rt_memcpy(&(info.region), &(static_regions[i]), sizeof(rt_mem_region_t));
+                        break;
+                    }
+                }
+            }
+        }
+    }
+    info.mmfsr = (SCB->CFSR & SCB_CFSR_MEMFAULTSR_Msk) >> SCB_CFSR_MEMFAULTSR_Pos;
+    if (mem_manage_hook != RT_NULL)
+    {
+        mem_manage_hook(&info);
+    }
+    while (1);
+}

+ 99 - 0
libcpu/arm/cortex-m33/mpu.h

@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2006-2023, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2023-09-25     tangzz98     the first version
+ */
+
+#ifndef __MPU_H__
+#define __MPU_H__
+
+#ifdef RT_USING_MEM_PROTECTION
+
+#include <board.h>
+#include <mprotect.h>
+
+#define MPU_MIN_REGION_SIZE 32U
+
+#define RT_ARM_DEFAULT_MAIR_ATTR   (0xF0U)
+
+/* MPU attributes for configuring data region permission */
+/* Privileged Read Write, Unprivileged No Access */
+#define P_RW_U_NA_NON_SHAREABLE       (((0x0 << MPU_RBAR_SH_Pos) & MPU_RBAR_SH_Msk) | ((0x0 << MPU_RBAR_AP_Pos) & MPU_RBAR_AP_Msk) | MPU_RBAR_XN_Msk)
+#define P_RW_U_NA_OUTER_SHAREABLE     (((0x2 << MPU_RBAR_SH_Pos) & MPU_RBAR_SH_Msk) | ((0x0 << MPU_RBAR_AP_Pos) & MPU_RBAR_AP_Msk) | MPU_RBAR_XN_Msk)
+#define P_RW_U_NA_INNER_SHAREABLE     (((0x3 << MPU_RBAR_SH_Pos) & MPU_RBAR_SH_Msk) | ((0x0 << MPU_RBAR_AP_Pos) & MPU_RBAR_AP_Msk) | MPU_RBAR_XN_Msk)
+/* Privileged Read Write, Unprivileged Read Write */
+#define P_RW_U_RW_NON_SHAREABLE       (((0x0 << MPU_RBAR_SH_Pos) & MPU_RBAR_SH_Msk) | ((0x1 << MPU_RBAR_AP_Pos) & MPU_RBAR_AP_Msk) | MPU_RBAR_XN_Msk)
+#define P_RW_U_RW_OUTER_SHAREABLE     (((0x2 << MPU_RBAR_SH_Pos) & MPU_RBAR_SH_Msk) | ((0x1 << MPU_RBAR_AP_Pos) & MPU_RBAR_AP_Msk) | MPU_RBAR_XN_Msk)
+#define P_RW_U_RW_INNER_SHAREABLE     (((0x3 << MPU_RBAR_SH_Pos) & MPU_RBAR_SH_Msk) | ((0x1 << MPU_RBAR_AP_Pos) & MPU_RBAR_AP_Msk) | MPU_RBAR_XN_Msk)
+/* Privileged Read Only, Unprivileged No Access */
+#define P_RO_U_NA_NON_SHAREABLE       (((0x0 << MPU_RBAR_SH_Pos) & MPU_RBAR_SH_Msk) | ((0x2 << MPU_RBAR_AP_Pos) & MPU_RBAR_AP_Msk) | MPU_RBAR_XN_Msk)
+#define P_RO_U_NA_OUTER_SHAREABLE     (((0x2 << MPU_RBAR_SH_Pos) & MPU_RBAR_SH_Msk) | ((0x2 << MPU_RBAR_AP_Pos) & MPU_RBAR_AP_Msk) | MPU_RBAR_XN_Msk)
+#define P_RO_U_NA_INNER_SHAREABLE     (((0x3 << MPU_RBAR_SH_Pos) & MPU_RBAR_SH_Msk) | ((0x2 << MPU_RBAR_AP_Pos) & MPU_RBAR_AP_Msk) | MPU_RBAR_XN_Msk)
+/* Privileged Read Only, Unprivileged Read Only */
+#define P_RO_U_RO_NON_SHAREABLE       (((0x0 << MPU_RBAR_SH_Pos) & MPU_RBAR_SH_Msk) | ((0x3 << MPU_RBAR_AP_Pos) & MPU_RBAR_AP_Msk) | MPU_RBAR_XN_Msk)
+#define P_RO_U_RO_OUTER_SHAREABLE     (((0x2 << MPU_RBAR_SH_Pos) & MPU_RBAR_SH_Msk) | ((0x3 << MPU_RBAR_AP_Pos) & MPU_RBAR_AP_Msk) | MPU_RBAR_XN_Msk)
+#define P_RO_U_RO_INNER_SHAREABLE     (((0x3 << MPU_RBAR_SH_Pos) & MPU_RBAR_SH_Msk) | ((0x3 << MPU_RBAR_AP_Pos) & MPU_RBAR_AP_Msk) | MPU_RBAR_XN_Msk)
+
+/* MPU attributes for configuring code region permission */
+#define P_RWX_U_NA_NON_SHAREABLE      (((0x0 << MPU_RBAR_SH_Pos) & MPU_RBAR_SH_Msk) | ((0x0 << MPU_RBAR_AP_Pos) & MPU_RBAR_AP_Msk))
+#define P_RWX_U_NA_OUTER_SHAREABLE    (((0x2 << MPU_RBAR_SH_Pos) & MPU_RBAR_SH_Msk) | ((0x0 << MPU_RBAR_AP_Pos) & MPU_RBAR_AP_Msk))
+#define P_RWX_U_NA_INNER_SHAREABLE    (((0x3 << MPU_RBAR_SH_Pos) & MPU_RBAR_SH_Msk) | ((0x0 << MPU_RBAR_AP_Pos) & MPU_RBAR_AP_Msk))
+
+/* Privileged Read Write Execute, Unprivileged Read Write Execute */
+#define P_RWX_U_RWX_NON_SHAREABLE     (((0x0 << MPU_RBAR_SH_Pos) & MPU_RBAR_SH_Msk) | ((0x1 << MPU_RBAR_AP_Pos) & MPU_RBAR_AP_Msk))
+#define P_RWX_U_RWX_OUTER_SHAREABLE   (((0x2 << MPU_RBAR_SH_Pos) & MPU_RBAR_SH_Msk) | ((0x1 << MPU_RBAR_AP_Pos) & MPU_RBAR_AP_Msk))
+#define P_RWX_U_RWX_INNER_SHAREABLE   (((0x3 << MPU_RBAR_SH_Pos) & MPU_RBAR_SH_Msk) | ((0x1 << MPU_RBAR_AP_Pos) & MPU_RBAR_AP_Msk))
+
+/* Privileged Read Execute, Unprivileged No Access */
+#define P_RX_U_NA_NON_SHAREABLE       (((0x0 << MPU_RBAR_SH_Pos) & MPU_RBAR_SH_Msk) | ((0x2 << MPU_RBAR_AP_Pos) & MPU_RBAR_AP_Msk))
+#define P_RX_U_NA_OUTER_SHAREABLE     (((0x2 << MPU_RBAR_SH_Pos) & MPU_RBAR_SH_Msk) | ((0x2 << MPU_RBAR_AP_Pos) & MPU_RBAR_AP_Msk))
+#define P_RX_U_NA_INNER_SHAREABLE     (((0x3 << MPU_RBAR_SH_Pos) & MPU_RBAR_SH_Msk) | ((0x2 << MPU_RBAR_AP_Pos) & MPU_RBAR_AP_Msk))
+
+/* Privileged Read Execute, Unprivileged Read Execute */
+#define P_RX_U_RX_NON_SHAREABLE       (((0x0 << MPU_RBAR_SH_Pos) & MPU_RBAR_SH_Msk) | ((0x3 << MPU_RBAR_AP_Pos) & MPU_RBAR_AP_Msk))
+#define P_RX_U_RX_OUTER_SHAREABLE     (((0x2 << MPU_RBAR_SH_Pos) & MPU_RBAR_SH_Msk) | ((0x3 << MPU_RBAR_AP_Pos) & MPU_RBAR_AP_Msk))
+#define P_RX_U_RX_INNER_SHAREABLE     (((0x3 << MPU_RBAR_SH_Pos) & MPU_RBAR_SH_Msk) | ((0x3 << MPU_RBAR_AP_Pos) & MPU_RBAR_AP_Msk))
+
+typedef struct
+{
+    rt_thread_t thread;     /* Thread that triggered exception */
+    void *addr;             /* Address of faulting memory access */
+    rt_mem_region_t region; /* Configurations of the memory region containing the address */
+    rt_uint8_t mmfsr;       /* Content of MemManage Status Register */
+} rt_mem_exception_info_t;
+
+typedef void (*rt_hw_mpu_exception_hook_t)(rt_mem_exception_info_t *);
+
+#define RT_ARM_MEM_ATTR(perm, type) ((rt_mem_attr_t){ .rbar = (perm), .mair_attr = (type) })
+
+/* Convenient macros for configuring data region attributes with default memory type */
+#define RT_MEM_REGION_P_RW_U_NA     RT_ARM_MEM_ATTR(P_RW_U_NA_NON_SHAREABLE, RT_ARM_DEFAULT_MAIR_ATTR)
+#define RT_MEM_REGION_P_RW_U_RW     RT_ARM_MEM_ATTR(P_RW_U_RW_NON_SHAREABLE, RT_ARM_DEFAULT_MAIR_ATTR)
+#define RT_MEM_REGION_P_RO_U_NA     RT_ARM_MEM_ATTR(P_RO_U_NA_NON_SHAREABLE, RT_ARM_DEFAULT_MAIR_ATTR)
+#define RT_MEM_REGION_P_RO_U_RO     RT_ARM_MEM_ATTR(P_RO_U_RO_NON_SHAREABLE, RT_ARM_DEFAULT_MAIR_ATTR)
+/* ARM-V8M does not support P_NA_U_NA.
+   For compatibility with rt_mprotect_add_exclusive_region,
+   define RT_MEM_REGION_P_NA_U_NA as the lowest privilege supported.
+*/
+#define RT_MEM_REGION_P_NA_U_NA     RT_MEM_REGION_P_RO_U_NA
+
+/* Convenient macros for configuring code region attributes with default memory type and shareability */
+#define RT_MEM_REGION_P_RWX_U_NA    RT_ARM_MEM_ATTR(P_RWX_U_NA_NON_SHAREABLE, RT_ARM_DEFAULT_MAIR_ATTR)
+#define RT_MEM_REGION_P_RWX_U_RWX   RT_ARM_MEM_ATTR(P_RWX_U_RWX_NON_SHAREABLE, RT_ARM_DEFAULT_MAIR_ATTR)
+#define RT_MEM_REGION_P_RX_U_NA     RT_ARM_MEM_ATTR(P_RX_U_NA_NON_SHAREABLE, RT_ARM_DEFAULT_MAIR_ATTR)
+#define RT_MEM_REGION_P_RX_U_RX     RT_ARM_MEM_ATTR(P_RX_U_RX_NON_SHAREABLE, RT_ARM_DEFAULT_MAIR_ATTR)
+
+rt_bool_t rt_hw_mpu_region_valid(rt_mem_region_t *region);
+rt_err_t rt_hw_mpu_init(void);
+rt_err_t rt_hw_mpu_add_region(rt_thread_t thread, rt_mem_region_t *region);
+rt_err_t rt_hw_mpu_delete_region(rt_thread_t thread, rt_mem_region_t *region);
+rt_err_t rt_hw_mpu_update_region(rt_thread_t thread, rt_mem_region_t *region);
+rt_err_t rt_hw_mpu_exception_set_hook(rt_hw_mpu_exception_hook_t hook);
+
+#endif /* RT_USING_MEM_PROTECTION */
+
+#endif /* __MPU_H__ */
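A minimal usage sketch for the API above (illustrative only, not part of this patch): shared_buf, on_mem_fault and protect_buf are hypothetical names, and the buffer is sized and aligned so that it passes the architecture-specific checks in rt_hw_mpu_region_valid(). The region is added through rt_mprotect_add_region(), the generic wrapper this patch also uses for the stack guard, and the fault hook is installed with rt_hw_mpu_exception_set_hook().

#include <rtthread.h>
#include <mprotect.h>

/* 1 KiB buffer, naturally aligned so it is acceptable as an MPU region on both
 * ARMv7-M (power-of-2 size, naturally aligned) and ARMv8-M (32-byte granularity). */
rt_align(1024) static rt_uint8_t shared_buf[1024];

/* Called by the MemManage handler through the registered hook. */
static void on_mem_fault(rt_mem_exception_info_t *info)
{
    rt_kprintf("MemManage fault in %s at %p (MMFSR 0x%x)\n",
               info->thread->parent.name, info->addr, info->mmfsr);
}

static void protect_buf(void)
{
    rt_mem_region_t region;

    region.start = shared_buf;
    region.size  = sizeof(shared_buf);
    region.attr  = RT_MEM_REGION_P_RW_U_RW;   /* privileged and unprivileged read/write */

    rt_hw_mpu_exception_set_hook(on_mem_fault);
    rt_mprotect_add_region(rt_thread_self(), &region);
}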

+ 34 - 0
libcpu/arm/cortex-m33/mputype.h

@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2006-2023, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2023-09-25     tangzz98     the first version
+ */
+
+#ifndef __MPUTYPE_H__
+#define __MPUTYPE_H__
+
+#ifdef RT_USING_MEM_PROTECTION
+
+#ifdef RT_USING_HW_STACK_GUARD
+#define NUM_DYNAMIC_REGIONS (2 + NUM_CONFIGURABLE_REGIONS)
+#else
+#define NUM_DYNAMIC_REGIONS (NUM_CONFIGURABLE_REGIONS)
+#endif
+
+typedef struct
+{
+    rt_uint32_t rbar;
+    union
+    {
+        rt_uint32_t mair_attr;
+        rt_uint32_t rlar;
+    };
+} rt_mem_attr_t;
+
+#endif /* RT_USING_MEM_PROTECTION */
+
+#endif /* __MPUTYPE_H__ */
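On Armv8-M an MPU region's attributes are split across two registers, which is why rt_mem_attr_t above pairs an RBAR word (access permission and shareability) with a MAIR attribute word. The RT_MEM_REGION_* defaults in the cortex-m33 mpu.h use the non-shareable encodings; a different shareability can still be combined with the default memory type, for example (illustrative local variable):

    rt_mem_attr_t attr = RT_ARM_MEM_ATTR(P_RX_U_RX_OUTER_SHAREABLE, RT_ARM_DEFAULT_MAIR_ATTR);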

+ 3 - 0
libcpu/arm/cortex-m7/SConscript

@@ -18,6 +18,9 @@ if rtconfig.PLATFORM in ['gcc']:
 if rtconfig.PLATFORM in ['iccarm']:
     src += Glob('*_iar.S')
 
+if not GetDepend('RT_USING_MEM_PROTECTION') and not GetDepend('RT_USING_HW_STACK_GUARD'):
+    SrcRemove(src, 'mpu.c')
+
 group = DefineGroup('CPU', src, depend = [''], CPPPATH = CPPPATH)
 
 Return('group')

+ 10 - 0
libcpu/arm/cortex-m7/context_gcc.S

@@ -17,6 +17,8 @@
  */
 /*@{*/
 
+#include <rtconfig.h>
+
 .cpu cortex-m4
 .syntax unified
 .thumb
@@ -152,6 +154,14 @@ switch_to_thread:
     BICNE   lr, lr, #0x10       /* lr &= ~(1 << 4), set FPCA. */
 #endif
 
+#if defined (RT_USING_MEM_PROTECTION)
+    PUSH    {r0-r3, r12, lr}
+    LDR     r1, =rt_current_thread
+    LDR     r0, [r1]
+    BL      rt_hw_mpu_table_switch
+    POP     {r0-r3, r12, lr}
+#endif
+
 pendsv_exit:
     /* restore interrupt */
     MSR PRIMASK, r2

+ 25 - 0
libcpu/arm/cortex-m7/cpuport.c

@@ -18,6 +18,9 @@
  */
 
 #include <rtthread.h>
+#ifdef RT_USING_HW_STACK_GUARD
+#include <mprotect.h>
+#endif
 
 #if               /* ARMCC */ (  (defined ( __CC_ARM ) && defined ( __TARGET_FPU_VFP ))    \
                   /* Clang */ || (defined ( __clang__ ) && defined ( __VFP_FP__ ) && !defined(__SOFTFP__)) \
@@ -175,6 +178,28 @@ rt_uint8_t *rt_hw_stack_init(void       *tentry,
     return stk;
 }
 
+#ifdef RT_USING_HW_STACK_GUARD
+void rt_hw_stack_guard_init(rt_thread_t thread)
+{
+    rt_mem_region_t stack_top_region, stack_bottom_region;
+    rt_ubase_t stack_bottom = (rt_ubase_t)thread->stack_addr;
+    rt_ubase_t stack_top = (rt_ubase_t)((rt_uint8_t *)thread->stack_addr + thread->stack_size);
+    rt_ubase_t stack_bottom_region_start = RT_ALIGN(stack_bottom, MPU_MIN_REGION_SIZE);
+    rt_ubase_t stack_top_region_start = RT_ALIGN_DOWN(stack_top - MPU_MIN_REGION_SIZE, MPU_MIN_REGION_SIZE);
+    stack_top_region.start = (void *)stack_top_region_start;
+    stack_top_region.size = MPU_MIN_REGION_SIZE;
+    stack_top_region.attr = RT_MEM_REGION_P_NA_U_NA;
+    stack_bottom_region.start = (void *)stack_bottom_region_start;
+    stack_bottom_region.size = MPU_MIN_REGION_SIZE;
+    stack_bottom_region.attr = RT_MEM_REGION_P_NA_U_NA;
+    rt_mprotect_add_region(thread, &stack_top_region);
+    rt_mprotect_add_region(thread, &stack_bottom_region);
+    thread->stack_buf = thread->stack_addr;
+    thread->stack_addr = (void *)(stack_bottom_region_start + MPU_MIN_REGION_SIZE);
+    thread->stack_size = (rt_uint32_t)(stack_top_region_start - stack_bottom_region_start - MPU_MIN_REGION_SIZE);
+}
+#endif
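As a worked example (the address is hypothetical): with MPU_MIN_REGION_SIZE = 32 and a 1024-byte stack allocated at 0x20000400, the two guard regions cover 0x20000400-0x2000041F and 0x200007E0-0x200007FF. The thread then runs on the 960 bytes in between: stack_addr moves up to 0x20000420, stack_size becomes 0x3C0, and the original allocation is remembered in stack_buf so that idle-thread cleanup can still free it.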
+
 /**
  * This function set the hook, which is invoked on fault exception handling.
  *

+ 299 - 0
libcpu/arm/cortex-m7/mpu.c

@@ -0,0 +1,299 @@
+/*
+ * Copyright (c) 2006-2023, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2023-09-25     tangzz98     the first version
+ */
+
+#include <rtdef.h>
+#include <mprotect.h>
+
+#define DBG_ENABLE
+#define DBG_SECTION_NAME "MEMORY PROTECTION"
+#define DBG_LEVEL DBG_ERROR
+#include <rtdbg.h>
+
+#define MEM_REGION_TO_MPU_INDEX(thread, region) ((((rt_size_t)region - (rt_size_t)(thread->mem_regions)) / sizeof(rt_mem_region_t)) + NUM_STATIC_REGIONS)
+
+extern rt_mem_region_t *rt_mprotect_find_free_region(rt_thread_t thread);
+extern rt_mem_region_t *rt_mprotect_find_region(rt_thread_t thread, rt_mem_region_t *region);
+
+static rt_hw_mpu_exception_hook_t mem_manage_hook = RT_NULL;
+
+rt_weak rt_uint32_t rt_hw_mpu_region_default_attr(rt_mem_region_t *region)
+{
+    static rt_uint32_t default_mem_attr[] =
+    {
+        NORMAL_OUTER_INNER_WRITE_THROUGH_NON_SHAREABLE,
+        NORMAL_OUTER_INNER_WRITE_BACK_WRITE_READ_ALLOCATE_NON_SHAREABLE,
+        DEVICE_NON_SHAREABLE,
+        NORMAL_OUTER_INNER_WRITE_BACK_WRITE_READ_ALLOCATE_NON_SHAREABLE,
+        NORMAL_OUTER_INNER_WRITE_THROUGH_NON_SHAREABLE,
+        DEVICE_SHAREABLE,
+        DEVICE_NON_SHAREABLE
+    };
+    rt_uint32_t attr = 0U;
+    if ((rt_uint32_t)region->start >= 0xE0000000U)
+    {
+        attr = ((rt_uint32_t)region->start >= 0xE0100000U) ? STRONGLY_ORDERED_SHAREABLE : DEVICE_SHAREABLE;
+    }
+    else
+    {
+        attr = default_mem_attr[((rt_uint32_t)region->start & ~0xFFFFFFFU) >> 29U];
+    }
+    return attr;
+}
+
+static rt_uint32_t _mpu_rasr(rt_mem_region_t *region)
+{
+    rt_uint32_t rasr = 0U;
+    if ((region->attr.rasr & RESERVED) == RESERVED)
+    {
+        rasr |= rt_hw_mpu_region_default_attr(region);
+        rasr |= region->attr.rasr & (MPU_RASR_XN_Msk | MPU_RASR_AP_Msk);
+    }
+    else
+    {
+        rasr |= region->attr.rasr & MPU_RASR_ATTRS_Msk;
+    }
+    rasr |= ((32U - __builtin_clz(region->size - 1U) - 2U + 1U) << MPU_RASR_SIZE_Pos) & MPU_RASR_SIZE_Msk;
+    rasr |= MPU_RASR_ENABLE_Msk;
+    return rasr;
+}
+
+rt_bool_t rt_hw_mpu_region_valid(rt_mem_region_t *region)
+{
+    if (region->size < MPU_MIN_REGION_SIZE)
+    {
+        LOG_E("Region size is too small");
+        return RT_FALSE;
+    }
+    if ((region->size & (region->size - 1U)) != 0U)
+    {
+        LOG_E("Region size is not a power of 2");
+        return RT_FALSE;
+    }
+    if (((rt_uint32_t)region->start & (region->size - 1U)) != 0U)
+    {
+        LOG_E("Region is not naturally aligned");
+        return RT_FALSE;
+    }
+    return RT_TRUE;
+}
+
+rt_err_t rt_hw_mpu_init(void)
+{
+    extern rt_mem_region_t static_regions[NUM_STATIC_REGIONS];
+    rt_uint8_t num_mpu_regions;
+    rt_uint8_t num_dynamic_regions;
+    rt_uint8_t index;
+    num_mpu_regions = (rt_uint8_t)((MPU->TYPE & MPU_TYPE_DREGION_Msk) >> MPU_TYPE_DREGION_Pos);
+    if (num_mpu_regions == 0U)
+    {
+        LOG_E("Hardware does not support MPU");
+        return RT_ERROR;
+    }
+    if (num_mpu_regions != NUM_MEM_REGIONS)
+    {
+        LOG_E("Incorrect setting of NUM_MEM_REGIONS");
+        LOG_E("NUM_MEM_REGIONS = %d, hardware support %d MPU regions", NUM_MEM_REGIONS, num_mpu_regions);
+        return RT_ERROR;
+    }
+
+    num_dynamic_regions = NUM_DYNAMIC_REGIONS + NUM_EXCLUSIVE_REGIONS;
+    if (num_dynamic_regions + NUM_STATIC_REGIONS > num_mpu_regions)
+    {
+        LOG_E("Insufficient MPU regions: %d hardware MPU regions", num_mpu_regions);
+#ifdef RT_USING_HW_STACK_GUARD
+        LOG_E("Current configuration requires %d static regions + %d configurable regions + %d exclusive regions + %d stack guard regions", NUM_STATIC_REGIONS, NUM_CONFIGURABLE_REGIONS, NUM_EXCLUSIVE_REGIONS, 2);
+#else
+        LOG_E("Current configuration requires %d static regions + %d configurable regions + %d exclusive regions", NUM_STATIC_REGIONS, NUM_CONFIGURABLE_REGIONS, NUM_EXCLUSIVE_REGIONS);
+#endif
+        return RT_ERROR;
+    }
+
+    ARM_MPU_Disable();
+    for (index = 0U; index < NUM_STATIC_REGIONS; index++)
+    {
+        if (rt_hw_mpu_region_valid(&(static_regions[index])) == RT_FALSE)
+        {
+            return RT_ERROR;
+        }
+        static_regions[index].attr.rasr = _mpu_rasr(&(static_regions[index]));
+        ARM_MPU_SetRegion(ARM_MPU_RBAR(index, (rt_uint32_t)static_regions[index].start), static_regions[index].attr.rasr);
+    }
+    /* Enable background region. */
+    ARM_MPU_Enable(MPU_CTRL_PRIVDEFENA_Msk);
+
+    return RT_EOK;
+}
+
+rt_err_t rt_hw_mpu_add_region(rt_thread_t thread, rt_mem_region_t *region)
+{
+    rt_uint8_t index;
+    rt_mem_region_t *free_region;
+    if (rt_hw_mpu_region_valid(region) == RT_FALSE)
+    {
+        return RT_ERROR;
+    }
+    region->attr.rasr = _mpu_rasr(region);
+    if (thread == RT_NULL)
+    {
+        return RT_EOK;
+    }
+    rt_enter_critical();
+    free_region = rt_mprotect_find_free_region(thread);
+    if (free_region == RT_NULL)
+    {
+        rt_exit_critical();
+        LOG_E("Insufficient regions");
+        return RT_ERROR;
+    }
+    rt_memcpy(free_region, region, sizeof(rt_mem_region_t));
+    if (thread == rt_thread_self())
+    {
+        index = MEM_REGION_TO_MPU_INDEX(thread, free_region);
+        ARM_MPU_SetRegion(ARM_MPU_RBAR(index, (rt_uint32_t)region->start), region->attr.rasr);
+    }
+    rt_exit_critical();
+    return RT_EOK;
+}
+
+rt_err_t rt_hw_mpu_delete_region(rt_thread_t thread, rt_mem_region_t *region)
+{
+    rt_uint8_t index;
+    rt_enter_critical();
+    rt_mem_region_t *found_region = rt_mprotect_find_region(thread, region);
+    if (found_region == RT_NULL)
+    {
+        rt_exit_critical();
+        LOG_E("Region not found");
+        return RT_ERROR;
+    }
+    rt_memset(found_region, 0, sizeof(rt_mem_region_t));
+    if (thread == rt_thread_self())
+    {
+        index = MEM_REGION_TO_MPU_INDEX(thread, found_region);
+        ARM_MPU_ClrRegion(index);
+    }
+    rt_exit_critical();
+    return RT_EOK;
+}
+
+rt_err_t rt_hw_mpu_update_region(rt_thread_t thread, rt_mem_region_t *region)
+{
+    rt_uint8_t index;
+    if (rt_hw_mpu_region_valid(region) == RT_FALSE)
+    {
+        return RT_ERROR;
+    }
+    region->attr.rasr = _mpu_rasr(region);
+    rt_enter_critical();
+    rt_mem_region_t *old_region = rt_mprotect_find_region(thread, region);
+    if (old_region == RT_NULL)
+    {
+        rt_exit_critical();
+        LOG_E("Region not found");
+        return RT_ERROR;
+    }
+    rt_memcpy(old_region, region, sizeof(rt_mem_region_t));
+    if (thread == rt_thread_self())
+    {
+        index = MEM_REGION_TO_MPU_INDEX(thread, old_region);
+        ARM_MPU_SetRegion(ARM_MPU_RBAR(index, (rt_uint32_t)region->start), region->attr.rasr);
+    }
+    rt_exit_critical();
+    return RT_EOK;
+}
+
+rt_err_t rt_hw_mpu_exception_set_hook(rt_hw_mpu_exception_hook_t hook)
+{
+    mem_manage_hook = hook;
+    return RT_EOK;
+}
+
+void rt_hw_mpu_table_switch(rt_thread_t thread)
+{
+    extern rt_mem_exclusive_region_t exclusive_regions[NUM_EXCLUSIVE_REGIONS];
+    rt_uint8_t i;
+    rt_uint8_t index = NUM_STATIC_REGIONS;
+    if (thread->mem_regions != RT_NULL)
+    {
+        for (i = 0U; i < NUM_DYNAMIC_REGIONS; i++)
+        {
+            if (((rt_mem_region_t *)thread->mem_regions)[i].size != 0U)
+            {
+                ARM_MPU_SetRegion(ARM_MPU_RBAR(index, (rt_uint32_t)(((rt_mem_region_t *)thread->mem_regions)[i].start)), ((rt_mem_region_t *)thread->mem_regions)[i].attr.rasr);
+                index += 1U;
+            }
+        }
+    }
+    for (i = 0U; i < NUM_EXCLUSIVE_REGIONS; i++)
+    {
+        if ((exclusive_regions[i].owner != RT_NULL) && (exclusive_regions[i].owner != thread))
+        {
+            ARM_MPU_SetRegion(ARM_MPU_RBAR(index, (rt_uint32_t)(exclusive_regions[i].region.start)), exclusive_regions[i].region.attr.rasr);
+            index += 1U;
+        }
+    }
+    for ( ; index < NUM_MEM_REGIONS; index++)
+    {
+        ARM_MPU_ClrRegion(index);
+    }
+}
+
+void MemManage_Handler(void)
+{
+    extern rt_mem_region_t static_regions[NUM_STATIC_REGIONS];
+    extern rt_mem_exclusive_region_t exclusive_regions[NUM_EXCLUSIVE_REGIONS];
+    rt_mem_exception_info_t info;
+    rt_int8_t i;
+    rt_memset(&info, 0, sizeof(rt_mem_exception_info_t));
+    info.thread = rt_thread_self();
+    if (SCB->CFSR & SCB_CFSR_MMARVALID_Msk)
+    {
+        info.addr = (void *)(SCB->MMFAR);
+        for (i = NUM_EXCLUSIVE_REGIONS - 1; i >= 0; i--)
+        {
+            if ((exclusive_regions[i].owner != RT_NULL) && (exclusive_regions[i].owner != rt_thread_self()) && ADDR_IN_REGION(info.addr, &(exclusive_regions[i].region)))
+            {
+                rt_memcpy(&(info.region), &(exclusive_regions[i].region), sizeof(rt_mem_region_t));
+                break;
+            }
+        }
+        if (info.region.size == 0U)
+        {
+            if (info.thread->mem_regions != RT_NULL)
+            {
+                for (i = NUM_DYNAMIC_REGIONS - 1; i >= 0; i--)
+                {
+                    if ((((rt_mem_region_t *)info.thread->mem_regions)[i].size != 0U) && ADDR_IN_REGION(info.addr, &(((rt_mem_region_t *)info.thread->mem_regions)[i])))
+                    {
+                        rt_memcpy(&(info.region), &(((rt_mem_region_t *)info.thread->mem_regions)[i]), sizeof(rt_mem_region_t));
+                        break;
+                    }
+                }
+            }
+            if (info.region.size == 0U)
+            {
+                for (i = NUM_STATIC_REGIONS - 1; i >= 0; i--)
+                {
+                    if (ADDR_IN_REGION(info.addr, &(static_regions[i])))
+                    {
+                        rt_memcpy(&(info.region), &(static_regions[i]), sizeof(rt_mem_region_t));
+                        break;
+                    }
+                }
+            }
+        }
+    }
+    info.mmfsr = (SCB->CFSR & SCB_CFSR_MEMFAULTSR_Msk) >> SCB_CFSR_MEMFAULTSR_Pos;
+    if (mem_manage_hook != RT_NULL)
+    {
+        mem_manage_hook(&info);
+    }
+    while (1);
+}
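MemManage_Handler() above only collects the fault context and hands it to the registered hook before spinning, so any reporting or recovery policy lives in the hook itself. A possible hook, for illustration only (mem_fault_report is a hypothetical name; the bit positions follow the ARMv7-M MemManage Fault Status Register, IACCVIOL = bit 0, DACCVIOL = bit 1):

static void mem_fault_report(rt_mem_exception_info_t *info)
{
    if (info->mmfsr & 0x01)         /* IACCVIOL: instruction access violation */
    {
        rt_kprintf("instruction access violation in %s\n", info->thread->parent.name);
    }
    if (info->mmfsr & 0x02)         /* DACCVIOL: data access violation */
    {
        rt_kprintf("data access violation in %s at %p\n",
                   info->thread->parent.name, info->addr);
    }
    if (info->region.size != 0U)    /* region resolved by the handler above */
    {
        rt_kprintf("faulting region: start %p, size %d bytes\n",
                   info->region.start, (int)info->region.size);
    }
}

It would be registered once at startup with rt_hw_mpu_exception_set_hook(mem_fault_report).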

+ 104 - 0
libcpu/arm/cortex-m7/mpu.h

@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2006-2023, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2023-09-25     tangzz98     the first version
+ */
+
+#ifndef __MPU_H__
+#define __MPU_H__
+
+#ifdef RT_USING_MEM_PROTECTION
+
+#include <board.h>
+
+#define MPU_MIN_REGION_SIZE 32U
+
+/* MPU attributes for configuring data region permission */
+/* Privileged No Access, Unprivileged No Access */
+#define P_NA_U_NA       (((0x0 << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk) | MPU_RASR_XN_Msk)
+/* Privileged Read Write, Unprivileged No Access */
+#define P_RW_U_NA       (((0x1 << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk) | MPU_RASR_XN_Msk)
+/* Privileged Read Write, Unprivileged Read Only */
+#define P_RW_U_RO       (((0x2 << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk) | MPU_RASR_XN_Msk)
+/* Privileged Read Write, Unprivileged Read Write */
+#define P_RW_U_RW       (((0x3 << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk) | MPU_RASR_XN_Msk)
+/* Privileged Read Only, Unprivileged No Access */
+#define P_RO_U_NA       (((0x5 << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk) | MPU_RASR_XN_Msk)
+/* Privileged Read Only, Unprivileged Read Only */
+#define P_RO_U_RO       (((0x6 << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk) | MPU_RASR_XN_Msk)
+
+/* MPU attributes for configuring code region permission */
+/* Privileged Read Write Execute, Unprivileged Read Write Execute */
+#define P_RWX_U_RWX     ((0x3 << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk)
+/* Privileged Read Write Execute, Unprivileged Read Execute */
+#define P_RWX_U_RX      ((0x2 << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk)
+/* Privileged Read Write Execute, Unprivileged No Access */
+#define P_RWX_U_NA      ((0x1 << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk)
+/* Privileged Read Execute, Unprivileged Read Execute */
+#define P_RX_U_RX       ((0x6 << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk)
+/* Privileged Read Execute, Unprivileged No Access */
+#define P_RX_U_NA       ((0x5 << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk)
+
+/* MPU attributes for configuring memory type, cacheability and shareability */
+#define STRONGLY_ORDERED_SHAREABLE      MPU_RASR_S_Msk
+#define DEVICE_SHAREABLE                (MPU_RASR_B_Msk | MPU_RASR_S_Msk)
+#define NORMAL_OUTER_INNER_WRITE_THROUGH_SHAREABLE \
+        (MPU_RASR_C_Msk | MPU_RASR_S_Msk)
+#define NORMAL_OUTER_INNER_WRITE_THROUGH_NON_SHAREABLE  MPU_RASR_C_Msk
+#define NORMAL_OUTER_INNER_WRITE_BACK_SHAREABLE \
+        (MPU_RASR_C_Msk | MPU_RASR_B_Msk | MPU_RASR_S_Msk)
+#define NORMAL_OUTER_INNER_WRITE_BACK_NON_SHAREABLE \
+        (MPU_RASR_C_Msk | MPU_RASR_B_Msk)
+#define NORMAL_OUTER_INNER_NON_CACHEABLE_SHAREABLE \
+        ((1 << MPU_RASR_TEX_Pos) | MPU_RASR_S_Msk)
+#define NORMAL_OUTER_INNER_NON_CACHEABLE_NON_SHAREABLE \
+        (1 << MPU_RASR_TEX_Pos)
+#define NORMAL_OUTER_INNER_WRITE_BACK_WRITE_READ_ALLOCATE_SHAREABLE \
+    ((1 << MPU_RASR_TEX_Pos) |\
+     MPU_RASR_C_Msk | MPU_RASR_B_Msk | MPU_RASR_S_Msk)
+#define NORMAL_OUTER_INNER_WRITE_BACK_WRITE_READ_ALLOCATE_NON_SHAREABLE \
+    ((1 << MPU_RASR_TEX_Pos) | MPU_RASR_C_Msk | MPU_RASR_B_Msk)
+#define DEVICE_NON_SHAREABLE            (2 << MPU_RASR_TEX_Pos)
+#define RESERVED                        ((2 << MPU_RASR_TEX_Pos) | MPU_RASR_B_Msk)
+
+typedef struct
+{
+    rt_thread_t thread;     /* Thread that triggered exception */
+    void *addr;             /* Address of faulting memory access */
+    rt_mem_region_t region; /* Configurations of the memory region containing the address */
+    rt_uint8_t mmfsr;       /* Content of MemManage Status Register */
+} rt_mem_exception_info_t;
+
+typedef void (*rt_hw_mpu_exception_hook_t)(rt_mem_exception_info_t *);
+
+#define RT_ARM_MEM_ATTR(perm, type) ((rt_mem_attr_t){ (perm) | (type)})
+
+/* Convenient macros for configuring data region attributes with default memory type */
+#define RT_MEM_REGION_P_NA_U_NA RT_ARM_MEM_ATTR(P_NA_U_NA, RESERVED)
+#define RT_MEM_REGION_P_RW_U_RW RT_ARM_MEM_ATTR(P_RW_U_RW, RESERVED)
+#define RT_MEM_REGION_P_RW_U_RO RT_ARM_MEM_ATTR(P_RW_U_RO, RESERVED)
+#define RT_MEM_REGION_P_RW_U_NA RT_ARM_MEM_ATTR(P_RW_U_NA, RESERVED)
+#define RT_MEM_REGION_P_RO_U_RO RT_ARM_MEM_ATTR(P_RO_U_RO, RESERVED)
+#define RT_MEM_REGION_P_RO_U_NA RT_ARM_MEM_ATTR(P_RO_U_NA, RESERVED)
+
+/* Convenient macros for configuring code region attributes with default memory type */
+#define RT_MEM_REGION_P_RWX_U_RWX   RT_ARM_MEM_ATTR(P_RWX_U_RWX, RESERVED)
+#define RT_MEM_REGION_P_RWX_U_RX    RT_ARM_MEM_ATTR(P_RWX_U_RX, RESERVED)
+#define RT_MEM_REGION_P_RWX_U_NA    RT_ARM_MEM_ATTR(P_RWX_U_NA, RESERVED)
+#define RT_MEM_REGION_P_RX_U_RX     RT_ARM_MEM_ATTR(P_RX_U_RX, RESERVED)
+#define RT_MEM_REGION_P_RX_U_NA     RT_ARM_MEM_ATTR(P_RX_U_NA, RESERVED)
+
+rt_bool_t rt_hw_mpu_region_valid(rt_mem_region_t *region);
+rt_err_t rt_hw_mpu_init(void);
+rt_err_t rt_hw_mpu_add_region(rt_thread_t thread, rt_mem_region_t *region);
+rt_err_t rt_hw_mpu_delete_region(rt_thread_t thread, rt_mem_region_t *region);
+rt_err_t rt_hw_mpu_update_region(rt_thread_t thread, rt_mem_region_t *region);
+rt_err_t rt_hw_mpu_exception_set_hook(rt_hw_mpu_exception_hook_t hook);
+
+#endif /* RT_USING_MEM_PROTECTION */
+
+#endif /* __MPU_H__ */
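rt_hw_mpu_init() in mpu.c expects the board to supply static_regions[NUM_STATIC_REGIONS], the regions programmed once at boot and shared by all threads. A possible table, for illustration only: the addresses and sizes are placeholders, NUM_STATIC_REGIONS is assumed to be configured as 2, and on ARMv7-M each entry must be a power-of-2 size and naturally aligned (enforced by rt_hw_mpu_region_valid()). ORing a permission with RESERVED tells _mpu_rasr() to substitute the default memory type for that address range, which is what the RT_MEM_REGION_* convenience macros do; the values are spelled out here so the table can use plain brace initializers.

#include <mprotect.h>

rt_mem_region_t static_regions[NUM_STATIC_REGIONS] =
{
    /* On-chip flash: read + execute at both privilege levels, default memory type */
    { .start = (void *)0x08000000, .size = 0x00200000, .attr = { P_RX_U_RX | RESERVED } },
    /* SRAM: privileged and unprivileged read/write, execute-never, default memory type */
    { .start = (void *)0x20000000, .size = 0x00080000, .attr = { P_RW_U_RW | RESERVED } },
};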

+ 29 - 0
libcpu/arm/cortex-m7/mputype.h

@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2006-2023, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2023-09-25     tangzz98     the first version
+ */
+
+#ifndef __MPUTYPE_H__
+#define __MPUTYPE_H__
+
+#ifdef RT_USING_MEM_PROTECTION
+
+#ifdef RT_USING_HW_STACK_GUARD
+#define NUM_DYNAMIC_REGIONS (2 + NUM_CONFIGURABLE_REGIONS)
+#else
+#define NUM_DYNAMIC_REGIONS (NUM_CONFIGURABLE_REGIONS)
+#endif
+
+typedef struct
+{
+    rt_uint32_t rasr;
+} rt_mem_attr_t;
+
+#endif /* RT_USING_MEM_PROTECTION */
+
+#endif /* __MPUTYPE_H__ */

+ 10 - 0
src/idle.c

@@ -233,11 +233,21 @@ static void rt_defunct_execute(void)
         }
 
 #ifdef RT_USING_HEAP
+#ifdef RT_USING_MEM_PROTECTION
+        if (thread->mem_regions != RT_NULL)
+        {
+            RT_KERNEL_FREE(thread->mem_regions);
+        }
+#endif
         /* if need free, delete it */
         if (object_is_systemobject == RT_FALSE)
         {
             /* release thread's stack */
+#ifdef RT_USING_HW_STACK_GUARD
+            RT_KERNEL_FREE(thread->stack_buf);
+#else
             RT_KERNEL_FREE(thread->stack_addr);
+#endif
             /* delete thread object */
             rt_object_delete((rt_object_t)thread);
         }

+ 10 - 0
src/scheduler_mp.c

@@ -107,6 +107,7 @@ static void _scheduler_stack_check(struct rt_thread *thread)
 #endif /* not defined ARCH_MM_MMU */
 #endif /* RT_USING_SMART */
 
+#ifndef RT_USING_HW_STACK_GUARD
 #ifdef ARCH_CPU_STACK_GROWS_UPWARD
     if (*((rt_uint8_t *)((rt_ubase_t)thread->stack_addr + thread->stack_size - 1)) != '#' ||
 #else
@@ -124,14 +125,23 @@ static void _scheduler_stack_check(struct rt_thread *thread)
         rt_spin_lock(&_spinlock);
         while (level);
     }
+#endif
 #ifdef ARCH_CPU_STACK_GROWS_UPWARD
+#ifndef RT_USING_HW_STACK_GUARD
     else if ((rt_ubase_t)thread->sp > ((rt_ubase_t)thread->stack_addr + thread->stack_size))
+#else
+    if ((rt_ubase_t)thread->sp > ((rt_ubase_t)thread->stack_addr + thread->stack_size))
+#endif
     {
         rt_kprintf("warning: %s stack is close to the top of stack address.\n",
                    thread->parent.name);
     }
 #else
+#ifndef RT_USING_HW_STACK_GUARD
     else if ((rt_ubase_t)thread->sp <= ((rt_ubase_t)thread->stack_addr + 32))
+#else
+    if ((rt_ubase_t)thread->sp <= ((rt_ubase_t)thread->stack_addr + 32))
+#endif
     {
         rt_kprintf("warning: %s stack is close to end of stack address.\n",
                    thread->parent.name);

+ 10 - 0
src/scheduler_up.c

@@ -110,6 +110,7 @@ static void _scheduler_stack_check(struct rt_thread *thread)
 #endif /* not defined ARCH_MM_MMU */
 #endif /* RT_USING_SMART */
 
+#ifndef RT_USING_HW_STACK_GUARD
 #ifdef ARCH_CPU_STACK_GROWS_UPWARD
     if (*((rt_uint8_t *)((rt_ubase_t)thread->stack_addr + thread->stack_size - 1)) != '#' ||
 #else
@@ -126,14 +127,23 @@ static void _scheduler_stack_check(struct rt_thread *thread)
         level = rt_hw_interrupt_disable();
         while (level);
     }
+#endif
 #ifdef ARCH_CPU_STACK_GROWS_UPWARD
+#ifndef RT_USING_HW_STACK_GUARD
     else if ((rt_ubase_t)thread->sp > ((rt_ubase_t)thread->stack_addr + thread->stack_size))
+#else
+    if ((rt_ubase_t)thread->sp > ((rt_ubase_t)thread->stack_addr + thread->stack_size))
+#endif
     {
         rt_kprintf("warning: %s stack is close to the top of stack address.\n",
                    thread->parent.name);
     }
 #else
+#ifndef RT_USING_HW_STACK_GUARD
     else if ((rt_ubase_t)thread->sp <= ((rt_ubase_t)thread->stack_addr + 32))
+#else
+    if ((rt_ubase_t)thread->sp <= ((rt_ubase_t)thread->stack_addr + 32))
+#endif
     {
         rt_kprintf("warning: %s stack is close to end of stack address.\n",
                    thread->parent.name);

+ 7 - 0
src/thread.c

@@ -157,6 +157,10 @@ static rt_err_t _thread_init(struct rt_thread *thread,
     rt_list_init(&(thread->tlist));
     rt_list_init(&(thread->tlist_schedule));
 
+#ifdef RT_USING_MEM_PROTECTION
+    thread->mem_regions = RT_NULL;
+#endif
+
 #ifdef RT_USING_SMART
     thread->wakeup.func = RT_NULL;
 #endif
@@ -170,6 +174,9 @@ static rt_err_t _thread_init(struct rt_thread *thread,
 
     /* init thread stack */
     rt_memset(thread->stack_addr, '#', thread->stack_size);
+#ifdef RT_USING_HW_STACK_GUARD
+    rt_hw_stack_guard_init(thread);
+#endif
 #ifdef ARCH_CPU_STACK_GROWS_UPWARD
     thread->sp = (void *)rt_hw_stack_init(thread->entry, thread->parameter,
                                           (void *)((char *)thread->stack_addr),