bsp: k230: add pdma driver support

This commit introduces the PDMA (Peripheral DMA) driver for the K230 SoC,
providing essential DMA capabilities for peripheral data transfers.

Key features implemented:
1. PDMA channel request/release management
2. Channel start/stop control
3. Interrupt callback registration
4. Data transfer between device ports and DDR memory

The driver includes:
- Core driver implementation (drv_pdma.c/h)
- Build system support (SConscript)
- Basic test cases (test_pdma.c)

Tested with:
- PDMA channel request/release
- DDR to UART TX FIFO transfers
- UART RX FIFO to DDR transfers
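
A typical call sequence for the new API (illustrative sketch only; my_done_cb
and tx_pa are placeholder names for a user-supplied callback and a DMA-able
physical buffer address, everything else is taken from drv_pdma.h and
test_pdma.c):

    rt_uint8_t ch;
    usr_pdma_cfg_t cfg = {0};

    k230_pdma_request_channel(&ch);               /* grab a free channel         */
    k230_pdma_set_callback(ch, my_done_cb);       /* done/timeout notification   */

    cfg.device    = UART0_TX;                     /* DDR -> UART0 TX FIFO        */
    cfg.src_addr  = tx_pa;                        /* physical, 4-byte aligned    */
    cfg.dst_addr  = (rt_uint8_t *)UART0_BASE_ADDR;
    cfg.line_size = 192;                          /* multiple of 4 bytes         */
    cfg.pdma_ch_cfg.ch_src_type   = CONTINUE;     /* memory side increments      */
    cfg.pdma_ch_cfg.ch_dev_hsize  = PSBYTE1;      /* 1-byte peripheral accesses  */
    cfg.pdma_ch_cfg.ch_dat_endian = PDEFAULT;
    cfg.pdma_ch_cfg.ch_dev_blen   = PBURST_LEN_16;
    cfg.pdma_ch_cfg.ch_priority   = 7;
    cfg.pdma_ch_cfg.ch_dev_tout   = 0xFFF;

    k230_pdma_config(ch, &cfg);                   /* builds the LLT chain        */
    k230_pdma_start(ch);                          /* callback fires on done/tout */
    /* ... wait for the callback ... */
    k230_pdma_stop(ch);
    k230_pdma_release_channel(ch);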

Signed-off-by: eatvector <2302147681@qq.com>
eatvector committed 1 month ago
commit 7db99433e5

+ 23 - 0
bsp/k230/.config

@@ -228,6 +228,7 @@ CONFIG_FINSH_THREAD_PRIORITY=20
 CONFIG_FINSH_THREAD_STACK_SIZE=8192
 CONFIG_FINSH_USING_HISTORY=y
 CONFIG_FINSH_HISTORY_LINES=5
+# CONFIG_FINSH_USING_WORD_OPERATION is not set
 CONFIG_FINSH_USING_SYMTAB=y
 CONFIG_FINSH_CMD_SIZE=80
 CONFIG_MSH_USING_BUILT_IN_COMMANDS=y
@@ -710,6 +711,7 @@ CONFIG_RT_USING_VDSO=y
 # CONFIG_PKG_USING_QMODBUS is not set
 # CONFIG_PKG_USING_PNET is not set
 # CONFIG_PKG_USING_OPENER is not set
+# CONFIG_PKG_USING_FREEMQTT is not set
 # end of IoT - internet of things
 
 #
@@ -937,6 +939,7 @@ CONFIG_RT_USING_VDSO=y
 # CONFIG_PKG_USING_RMP is not set
 # CONFIG_PKG_USING_R_RHEALSTONE is not set
 # CONFIG_PKG_USING_HEARTBEAT is not set
+# CONFIG_PKG_USING_MICRO_ROS_RTTHREAD_PACKAGE is not set
 # end of system packages
 
 #
@@ -1061,6 +1064,8 @@ CONFIG_RT_USING_VDSO=y
 #
 # HC32 DDL Drivers
 #
+# CONFIG_PKG_USING_HC32F4_CMSIS_DRIVER is not set
+# CONFIG_PKG_USING_HC32F4_SERIES_DRIVER is not set
 # end of HC32 DDL Drivers
 
 #
@@ -1074,6 +1079,21 @@ CONFIG_RT_USING_VDSO=y
 # CONFIG_PKG_USING_NXP_IMX6UL_DRIVER is not set
 # CONFIG_PKG_USING_NXP_IMXRT_DRIVER is not set
 # end of NXP HAL & SDK Drivers
+
+#
+# NUVOTON Drivers
+#
+# CONFIG_PKG_USING_NUVOTON_CMSIS_DRIVER is not set
+# CONFIG_PKG_USING_NUVOTON_SERIES_DRIVER is not set
+# CONFIG_PKG_USING_NUVOTON_ARM926_LIB is not set
+# end of NUVOTON Drivers
+
+#
+# GD32 Drivers
+#
+# CONFIG_PKG_USING_GD32_ARM_CMSIS_DRIVER is not set
+# CONFIG_PKG_USING_GD32_ARM_SERIES_DRIVER is not set
+# end of GD32 Drivers
 # end of HAL & SDK Drivers
 
 #
@@ -1149,6 +1169,7 @@ CONFIG_RT_USING_VDSO=y
 # CONFIG_PKG_USING_STHS34PF80 is not set
 # CONFIG_PKG_USING_P3T1755 is not set
 # CONFIG_PKG_USING_QMI8658 is not set
+# CONFIG_PKG_USING_ICM20948 is not set
 # end of sensors drivers
 
 #
@@ -1243,6 +1264,7 @@ CONFIG_RT_USING_VDSO=y
 # CONFIG_PKG_USING_SERVO is not set
 # CONFIG_PKG_USING_SEAN_WS2812B is not set
 # CONFIG_PKG_USING_IC74HC165 is not set
+# CONFIG_PKG_USING_IST8310 is not set
 # CONFIG_PKG_USING_SPI_TOOLS is not set
 # end of peripheral libraries and drivers
 
@@ -1601,6 +1623,7 @@ CONFIG_BSP_USING_SDIO0=y
 CONFIG_BSP_SD_MNT_DEVNAME="sd0p1"
 # CONFIG_BSP_USING_TIMERS is not set
 # CONFIG_BSP_USING_WDT is not set
+# CONFIG_BSP_USING_PDMA is not set
 # CONFIG_BSP_UTEST_DRIVERS is not set
 # end of Drivers Configuration
 

+ 39 - 0
bsp/k230/board/Kconfig

@@ -107,6 +107,45 @@ menu "Drivers Configuration"
 
         endif
 
+    menuconfig BSP_USING_PDMA
+        bool "Enable PDMA"
+        select RT_USING_PDMA
+        default n 
+         
+        if BSP_USING_PDMA
+            config BSP_USING_PDMA_CHANNEL0
+                bool "Enable PDMA Channel 0"
+                default n
+                
+            config BSP_USING_PDMA_CHANNEL1
+                bool "Enable PDMA Channel 1"
+                default n
+                
+            config BSP_USING_PDMA_CHANNEL2
+                bool "Enable PDMA Channel 2"
+                default n
+               
+            config BSP_USING_PDMA_CHANNEL3
+                bool "Enable PDMA Channel 3"
+                default n
+            
+            config BSP_USING_PDMA_CHANNEL4
+                bool "Enable PDMA Channel 4"
+                default n
+                
+            config BSP_USING_PDMA_CHANNEL5
+                bool "Enable PDMA Channel 5"
+                default n
+                
+            config BSP_USING_PDMA_CHANNEL6
+                bool "Enable PDMA Channel 6"
+                default n
+                
+            config BSP_USING_PDMA_CHANNEL7
+                bool "Enable PDMA Channel 7"
+                default n
+        endif
+
     config BSP_UTEST_DRIVERS
         bool "Enable drivers utest"
         select RT_USING_UTEST

+ 12 - 0
bsp/k230/drivers/interdrv/pdma/SConscript

@@ -0,0 +1,12 @@
+# RT-Thread building script for component
+
+from building import *
+
+cwd     = GetCurrentDir()
+src     = Glob('*.c')
+CPPPATH = [cwd]
+
+group = DefineGroup('PDMA', src, depend = [''], CPPPATH = CPPPATH)
+
+Return('group')
+

+ 887 - 0
bsp/k230/drivers/interdrv/pdma/drv_pdma.c

@@ -0,0 +1,887 @@
+/* Copyright (c) 2023, Canaan Bright Sight Co., Ltd
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2006-2025 RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#include <rtthread.h>
+#include <rthw.h>
+#include <rtdevice.h>
+#include <rtdef.h>
+#include <rtatomic.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <riscv_io.h>
+#include <mmu.h>
+#include <cache.h>
+#include <page.h>
+#include "board.h"
+#include "ioremap.h"
+#include "drv_hardlock.h"
+#include "drv_pdma.h"
+#include <rtdbg.h>
+
+#define DBG_TAG "drv_pdma"
+#ifdef RT_DEBUG
+#define DBG_LVL DBG_LOG
+#else
+#define DBG_LVL DBG_WARNING
+#endif
+#define DBG_COLOR
+
+/**
+ * @brief PDMA controller instance initialization
+ */
+static pdma_controller_t pdma_ctrl = {0};
+
+#define PDMA_CH_MENUCONFIG_ENABLED(ch) \
+    (((ch) >= 0 && (ch) < PDMA_CH_MAX) ? \
+        (pdma_ctrl.chan[(ch)].menuconfig_enabled) : \
+        (RT_FALSE))
+
+/**
+ * @brief Acquire PDMA hardware lock
+ * @note Busy-waits until lock is acquired
+ */
+#define PDMA_LOCK()   while (kd_hardlock_lock(pdma_ctrl.hardlock) != 0)
+
+/**
+ * @brief Release PDMA hardware lock
+ */
+#define PDMA_UNLOCK() kd_hardlock_unlock(pdma_ctrl.hardlock)
+
+/*--------------------- Channel Enable Control ---------------------*/
+/**
+ * @brief Enable specific PDMA channel
+ */
+#define PDMA_CH_ENABLE(ch) \
+    (pdma_write32(&pdma_ctrl.reg->pdma_ch_en, pdma_read32(&pdma_ctrl.reg->pdma_ch_en) | (1U << (ch))))
+
+/**
+ * @brief Disable specific PDMA channel
+ */
+#define PDMA_CH_DISABLE(ch) \
+    (pdma_write32(&pdma_ctrl.reg->pdma_ch_en, pdma_read32(&pdma_ctrl.reg->pdma_ch_en) & ~(1U << (ch))))
+
+/**
+ * @brief Check if PDMA channel is enabled
+ */
+#define PDMA_CH_IS_ENABLED(ch) \
+    (pdma_read32(&pdma_ctrl.reg->pdma_ch_en) & (1U << (ch)))
+
+
+/*--------------------- Interrupt Control ---------------------*/
+/**
+ * @brief Enable interrupts for specific PDMA channel
+ */
+#define PDMA_CH_INT_ENABLE(ch, mask) \
+    (pdma_write32(&pdma_ctrl.reg->dma_int_mask, pdma_read32(&pdma_ctrl.reg->dma_int_mask) & ~((mask) << (ch))))
+
+/**
+ * @brief Disable interrupts for specific PDMA channel
+ */
+#define PDMA_CH_INT_DISABLE(ch, mask) \
+    (pdma_write32(&pdma_ctrl.reg->dma_int_mask, pdma_read32(&pdma_ctrl.reg->dma_int_mask) | ((mask) << (ch))))
+
+/**
+ * @brief Disable all interrupts for specific PDMA channel
+ */
+#define PDMA_CH_INT_DISABLE_ALL(ch) \
+    PDMA_CH_INT_DISABLE(ch, PDMA_ALL_INTS)
+
+/**
+ * @brief Clear interrupt status for specific PDMA channel
+ */
+#define PDMA_CH_INT_CLEAR(ch, intr) \
+    (pdma_write32(&pdma_ctrl.reg->dma_int_stat, (intr) << (ch)))
+
+/**
+ * @brief Clear all interrupt status for specific PDMA channel
+ */
+#define PDMA_CH_INT_CLEAR_ALL(ch) \
+    PDMA_CH_INT_CLEAR(ch, PDMA_ALL_INTS)
+
+/**
+ * @brief Check if interrupt is triggered for specific PDMA channel
+ */
+#define PDMA_CH_INT_IS_TRIGGERED(ch, intr) \
+    (pdma_read32(&pdma_ctrl.reg->dma_int_stat) & ((intr) << (ch)))
+
+
+/*--------------------- Status Check ---------------------*/
+/**
+ * @brief Check if PDMA channel is busy
+ */
+#define PDMA_CH_IS_BUSY(ch) \
+    (pdma_read32(&pdma_ctrl.reg->pdma_ch_reg[ch].ch_status) & PDMA_STATE_BUSY)
+
+/**
+ * @brief Check if PDMA channel is paused
+ */
+#define PDMA_CH_IS_PAUSED(ch) \
+    (pdma_read32(&pdma_ctrl.reg->pdma_ch_reg[ch].ch_status) & PDMA_STATE_PAUSE)
+
+
+/*--------------------- Data Transfer Control ---------------------*/
+/**
+ * @brief Start PDMA transfer on specific channel
+ */
+#define PDMA_CH_START(ch) \
+    (pdma_write32(&pdma_ctrl.reg->pdma_ch_reg[ch].ch_ctl, PDMA_CMD_START))
+
+/**
+ * @brief Stop PDMA transfer on specific channel
+ */
+#define PDMA_CH_STOP(ch) \
+    (pdma_write32(&pdma_ctrl.reg->pdma_ch_reg[ch].ch_ctl, PDMA_CMD_STOP))
+
+/**
+ * @brief Resume paused PDMA transfer on specific channel
+ */
+#define PDMA_CH_RESUME(ch) \
+    (pdma_write32(&pdma_ctrl.reg->pdma_ch_reg[ch].ch_ctl, PDMA_CMD_RESUME))
+
+
+static void  _k230_pdma_llt_free(rt_uint8_t ch);
+static rt_uint32_t *_k230_pdma_llt_cal(rt_uint8_t ch, usr_pdma_cfg_t *pdma_cfg);
+static rt_err_t _k230_pdma_safe_stop(rt_uint8_t ch, rt_uint32_t timeout_ms);
+
+/**
+ * @brief Set callback function for specified PDMA channel
+ * @param ch PDMA channel number
+ * @param func Callback function pointer
+ * @return RT_EOK on success, -RT_EINVAL on invalid parameters
+ */
+rt_err_t k230_pdma_set_callback(rt_uint8_t ch, k230_pdma_callback_t func)
+{
+    /* Validate channel and callback function */
+    if (!PDMA_CH_MENUCONFIG_ENABLED(ch) || func == RT_NULL)
+    {
+        return -RT_EINVAL;
+    }
+
+    /*
+     * Safely set callback function by masking interrupts during update
+     * This prevents potential race conditions with DMA interrupts
+     */
+    rt_hw_interrupt_mask(pdma_ctrl.chan[ch].irq_num);
+    pdma_ctrl.chan[ch].cb.callback = func;
+    rt_hw_interrupt_umask(pdma_ctrl.chan[ch].irq_num);
+
+    return RT_EOK;
+}
+
+/**
+ * @brief Request an available PDMA channel
+ * @param ch [out] Pointer to store the allocated channel number
+ * @return rt_err_t RT_EOK if success, error code otherwise
+ */
+rt_err_t k230_pdma_request_channel(rt_uint8_t *ch)
+{
+    if (ch == RT_NULL)
+    {
+        LOG_E("PDMA: Invalid channel pointer");
+        return -RT_EINVAL;
+    }
+
+    rt_base_t level;
+    level = rt_hw_interrupt_disable();
+    PDMA_LOCK();
+
+    for (rt_uint8_t i = 0; i < PDMA_CH_MAX; i++)
+    {
+        if (!PDMA_CH_MENUCONFIG_ENABLED(i))
+        {
+            LOG_D("PDMA: Channel %d not enabled in menuconfig", i);
+            continue;
+        }
+
+        if (PDMA_CH_IS_ENABLED(i))
+        {
+            LOG_D("PDMA: Channel %d already enabled", i);
+            continue;
+        }
+
+        PDMA_CH_ENABLE(i);
+        LOG_D("PDMA: Trying channel %d", i);
+
+        if (!PDMA_CH_IS_ENABLED(i))
+        {
+            LOG_W("PDMA: Channel %d failed to enable - possible hardware issue", i);
+            continue;
+        }
+
+        if (PDMA_CH_IS_BUSY(i))
+        {
+            LOG_W("PDMA: Channel %d is busy, disabling", i);
+            PDMA_CH_DISABLE(i);
+            continue;
+        }
+
+        *ch = i;
+        PDMA_CH_INT_DISABLE_ALL(i);
+
+        PDMA_UNLOCK();
+        rt_hw_interrupt_enable(level);
+
+        pdma_ctrl.chan[i].cb.callback = RT_NULL;
+        pdma_ctrl.chan[i].is_hw_configured = RT_FALSE;
+    pdma_ctrl.chan[i].llt_va = RT_NULL;
+        pdma_ctrl.chan[i].page_size = 0;
+        rt_hw_interrupt_umask(pdma_ctrl.chan[i].irq_num);
+
+        LOG_I("PDMA: Allocated channel %d", i);
+        return RT_EOK;
+    }
+
+    *ch = PDMA_CH_MAX;
+    PDMA_UNLOCK();
+    rt_hw_interrupt_enable(level);
+
+    LOG_E("PDMA: No available channel found");
+    return -RT_EBUSY;
+}
+
+/**
+ * @brief Release an allocated PDMA channel
+ * @param ch Channel number to release
+ * @return rt_err_t RT_EOK if success, error code otherwise
+ */
+rt_err_t k230_pdma_release_channel(rt_uint8_t ch)
+{
+    rt_base_t level;
+    level = rt_hw_interrupt_disable();
+    PDMA_LOCK();
+
+    /* Validate channel configuration and status */
+    if (!PDMA_CH_MENUCONFIG_ENABLED(ch) || !PDMA_CH_IS_ENABLED(ch))
+    {
+        PDMA_UNLOCK();
+        rt_hw_interrupt_enable(level);
+        LOG_E("PDMA: Invalid channel %d to release", ch);
+        return -RT_EINVAL;
+    }
+
+    PDMA_UNLOCK();
+    rt_hw_interrupt_enable(level);
+
+    rt_hw_interrupt_mask(pdma_ctrl.chan[ch].irq_num);
+
+    /* Clear any registered callback */
+    pdma_ctrl.chan[ch].cb.callback = RT_NULL;
+
+    /* Safely stop DMA operation and release resources */
+    rt_err_t err = _k230_pdma_safe_stop(ch, PDMA_MAX_WAIT_MS);
+    if (err != RT_EOK)
+    {
+        LOG_E("PDMA: Failed to safely stop channel %d (err:%d)", ch, err);
+        return err;
+    }
+
+    pdma_ctrl.chan[ch].is_hw_configured = RT_FALSE;
+
+    /* Disable the channel */
+    level = rt_hw_interrupt_disable();
+    PDMA_LOCK();
+    PDMA_CH_DISABLE(ch);
+    PDMA_UNLOCK();
+    rt_hw_interrupt_enable(level);
+
+    LOG_I("PDMA: Channel %d released successfully", ch);
+    return RT_EOK;
+}
+
+/**
+ * @brief Start a PDMA channel operation
+ * @param ch The channel number to start
+ * @return RT_EOK on success, error code on failure
+ */
+rt_err_t k230_pdma_start(rt_uint8_t ch)
+{
+    rt_base_t level;
+    level = rt_hw_interrupt_disable();
+    PDMA_LOCK();
+
+    LOG_D("Starting PDMA channel %d", ch);
+
+    /* Basic channel validation */
+    if (!PDMA_CH_MENUCONFIG_ENABLED(ch) || !PDMA_CH_IS_ENABLED(ch))
+    {
+        LOG_E("Channel %d not enabled in menuconfig or not enabled", ch);
+        PDMA_UNLOCK();
+        rt_hw_interrupt_enable(level);
+        return -RT_EINVAL;
+    }
+
+    /* Only start DMA if channel is properly configured to prevent unclosable channel */
+    if (pdma_ctrl.chan[ch].is_hw_configured == RT_FALSE)
+    {
+        LOG_E("Channel %d not properly configured", ch);
+        PDMA_UNLOCK();
+        rt_hw_interrupt_enable(level);
+        return -RT_ERROR;
+    }
+
+    /* Enable completion, pause and timeout interrupts */
+    PDMA_CH_INT_ENABLE(ch, PDMA_PDONE_INT | PDMA_PPAUSE_INT | PDMA_PTOUT_INT);
+
+    PDMA_UNLOCK();
+    rt_hw_interrupt_enable(level);
+
+    /* Start the channel operation */
+    PDMA_CH_START(ch);
+    LOG_I("Successfully started PDMA channel %d", ch);
+
+    /* Clear configuration flag */
+    pdma_ctrl.chan[ch].is_hw_configured = RT_FALSE;
+
+    return RT_EOK;
+}
+
+/**
+ * @brief Stop an active PDMA channel operation
+ * @param ch The channel number to stop
+ * @return RT_EOK on success, error code on failure
+ */
+rt_err_t k230_pdma_stop(rt_uint8_t ch)
+{
+    rt_base_t level;
+    level = rt_hw_interrupt_disable();
+    PDMA_LOCK();
+
+    LOG_D("Attempting to stop PDMA channel %d", ch);
+
+    /* Basic channel validation */
+    if (!PDMA_CH_MENUCONFIG_ENABLED(ch) || !PDMA_CH_IS_ENABLED(ch))
+    {
+        LOG_E("Channel %d not enabled in menuconfig or not enabled", ch);
+        PDMA_UNLOCK();
+        rt_hw_interrupt_enable(level);
+        return -RT_EINVAL;
+    }
+
+    PDMA_UNLOCK();
+    rt_hw_interrupt_enable(level);
+
+    /* Safely stop the channel operation */
+    rt_err_t ret = _k230_pdma_safe_stop(ch, PDMA_MAX_WAIT_MS);
+    if (ret == RT_EOK)
+    {
+        LOG_I("Successfully stopped PDMA channel %d", ch);
+    }
+    else
+    {
+        LOG_E("Failed to stop PDMA channel %d (error: %d)", ch, ret);
+    }
+
+    return ret;
+}
+
+/**
+ * @brief Convert PDMA channel configuration structure to register value
+ * @param cfg Pointer to the channel configuration structure
+ * @return 32-bit register value representing the configuration
+ */
+static rt_uint32_t _k230_pdma_ch_cfg_to_reg(const pdma_ch_cfg_t *cfg)
+{
+    rt_uint32_t reg = 0;
+
+    /* Source type configuration */
+    reg |= (cfg->ch_src_type & 0x1) << 0;
+
+    /* Device horizontal size */
+    reg |= (cfg->ch_dev_hsize & 0x3) << 1;
+
+    /* Data endianness configuration */
+    reg |= (cfg->ch_dat_endian & 0x3) << 4;
+
+    /* Device burst length */
+    reg |= (cfg->ch_dev_blen & 0xF) << 8;
+
+    /* Channel priority */
+    reg |= (cfg->ch_priority & 0xF) << 12;
+
+    /* Device timeout */
+    reg |= (cfg->ch_dev_tout & 0xFFF) << 16;
+
+    return reg;
+}
+
+/**
+ * @brief Configure PDMA channel with user settings
+ * @param ch Channel number to configure
+ * @param ucfg Pointer to user configuration structure
+ * @return RT_EOK on success, error code on failure
+ */
+static rt_err_t _k230_pdma_config(rt_uint8_t ch, usr_pdma_cfg_t *ucfg)
+{
+    volatile rt_uint32_t *ch_cfg = (volatile rt_uint32_t*)(&(pdma_ctrl.reg->pdma_ch_reg[ch].ch_cfg));
+
+    LOG_D("Configuring PDMA channel %d", ch);
+
+    /* Convert configuration to register format */
+    rt_uint32_t reg_val = _k230_pdma_ch_cfg_to_reg(&ucfg->pdma_ch_cfg);
+
+    /* Write configuration to hardware registers */
+    pdma_write32(ch_cfg, reg_val);
+    pdma_write32(&(pdma_ctrl.reg->ch_peri_dev_sel[ch]), ucfg->device);
+
+    LOG_I("PDMA channel %d configured successfully", ch);
+    return RT_EOK;
+}
+
+/**
+ * @brief Validate user configuration parameters
+ * @param ucfg Pointer to user configuration structure
+ * @return RT_EOK if valid, error code if invalid
+ */
+static rt_err_t _k230_ucfg_check(usr_pdma_cfg_t *ucfg)
+{
+    /* Parameter NULL check */
+    if (ucfg == RT_NULL)
+    {
+        LOG_E("Configuration pointer is NULL");
+        return -RT_EINVAL;
+    }
+
+    /* Device range validation */
+    if ((ucfg->device > PDM_IN) || (ucfg->device < UART0_TX))
+    {
+        LOG_E("Invalid device selection: %d", ucfg->device);
+        return -RT_EINVAL;
+    }
+
+    /* Validate peripheral data word width */
+    if ((ucfg->pdma_ch_cfg.ch_dev_hsize > PSBYTE4) ||
+        (ucfg->pdma_ch_cfg.ch_dev_hsize < PSBYTE1))
+    {
+        LOG_E("Invalid peripheral data width: %d (1-4 bytes supported)",
+            ucfg->pdma_ch_cfg.ch_dev_hsize);
+        return -RT_EINVAL;
+    }
+
+    /* Address and size alignment check */
+    if (((rt_uintptr_t)ucfg->src_addr % 4) ||
+        ((rt_uintptr_t)ucfg->dst_addr % 4) ||
+        (ucfg->line_size % 4))
+    {
+        LOG_E("Alignment error - src: 0x%08X, dst: 0x%08X, size: %d",
+              ucfg->src_addr, ucfg->dst_addr, ucfg->line_size);
+        return -RT_EINVAL;
+    }
+
+    LOG_D("User configuration validation passed");
+    return RT_EOK;
+}
+
+/**
+ * @brief Configure a PDMA channel with user settings
+ * @param ch Channel number to configure (0 to PDMA_CH_MAX-1)
+ * @param ucfg Pointer to user configuration structure
+ * @return RT_EOK on success, error code on failure
+ */
+rt_err_t k230_pdma_config(rt_uint8_t ch, usr_pdma_cfg_t *ucfg)
+{
+    rt_err_t err;
+    rt_base_t level;
+
+    LOG_D("[CH%d] Starting PDMA configuration", ch);
+
+    /* Enter critical section */
+    level = rt_hw_interrupt_disable();
+    PDMA_LOCK();
+
+    /* Channel availability check */
+    if (!PDMA_CH_MENUCONFIG_ENABLED(ch) || !PDMA_CH_IS_ENABLED(ch))
+    {
+        LOG_E("[CH%d] Channel not enabled in menuconfig or hardware", ch);
+        PDMA_UNLOCK();
+        rt_hw_interrupt_enable(level);
+        return -RT_EINVAL;
+    }
+
+    PDMA_UNLOCK();
+    rt_hw_interrupt_enable(level);
+
+    /* Validate user configuration */
+    err = _k230_ucfg_check(ucfg);
+    if (err != RT_EOK)
+    {
+        LOG_E("[CH%d] Configuration validation failed", ch);
+        return err;
+    }
+
+    /* Safely stop channel if active */
+    err = _k230_pdma_safe_stop(ch, PDMA_MAX_WAIT_MS);
+    if (err != RT_EOK)
+    {
+        LOG_E("[CH%d] Failed to stop channel (err: %d)", ch, err);
+        return err;
+    }
+
+    /* Apply hardware configuration */
+    _k230_pdma_config(ch, ucfg);
+    LOG_D("[CH%d] Hardware registers configured", ch);
+
+    /* Build DMA transfer linked list */
+    rt_uint32_t* llt_saddr = _k230_pdma_llt_cal(ch, ucfg);
+    if (llt_saddr == RT_NULL)
+    {
+        LOG_E("[CH%d] Failed to allocate memory for linked list", ch);
+        return -RT_ENOMEM;
+    }
+
+    /* Program linked list starting address */
+    pdma_write32(&(pdma_ctrl.reg->pdma_ch_reg[ch].ch_llt_saddr), (rt_uint32_t)(rt_uintptr_t)llt_saddr);
+    LOG_D("[CH%d] Linked list programmed (addr: 0x%p)", ch, llt_saddr);
+
+    /* Mark channel as configured */
+    pdma_ctrl.chan[ch].is_hw_configured = RT_TRUE;
+    LOG_I("[CH%d] Configuration completed successfully", ch);
+
+    return RT_EOK;
+}
+
+/**
+ * @brief Safely stop a PDMA channel operation
+ * @param ch Channel number to stop (0 to PDMA_CH_MAX-1)
+ * @param timeout_ms Maximum wait time in milliseconds (0 for no timeout)
+ * @return RT_EOK on success, -RT_ETIMEOUT on timeout, other errors
+ */
+static rt_err_t _k230_pdma_safe_stop(rt_uint8_t ch, rt_uint32_t timeout_ms)
+{
+    rt_err_t err = RT_EOK;
+    rt_tick_t start_tick;
+
+    LOG_D("[CH%d] Attempting safe stop (timeout: %dms)", ch, timeout_ms);
+
+    /* Immediately request channel stop */
+    PDMA_CH_STOP(ch);
+
+    /* Wait for channel to become inactive */
+    start_tick = rt_tick_get();
+    while (PDMA_CH_IS_BUSY(ch))
+    {
+        /* Check for timeout if specified */
+        if (timeout_ms > 0 &&
+            (rt_tick_get_delta(start_tick) >= rt_tick_from_millisecond(timeout_ms)))
+        {
+            LOG_E("[CH%d] Stop operation timed out", ch);
+            return -RT_ETIMEOUT;
+        }
+
+        rt_thread_mdelay(1);
+    }
+
+    /* Enter critical section for register cleanup */
+    rt_base_t level = rt_hw_interrupt_disable();
+    PDMA_LOCK();
+
+    /* Clear and disable all interrupts */
+    PDMA_CH_INT_CLEAR_ALL(ch);
+    PDMA_CH_INT_DISABLE_ALL(ch);
+    LOG_D("[CH%d] Interrupts cleared and disabled", ch);
+
+    PDMA_UNLOCK();
+    rt_hw_interrupt_enable(level);
+
+    /* Free linked list memory */
+    _k230_pdma_llt_free(ch);
+    LOG_D("[CH%d] Linked list memory freed", ch);
+
+    pdma_ctrl.chan[ch].is_hw_configured = RT_FALSE;
+
+    LOG_I("[CH%d] Successfully stopped", ch);
+    return RT_EOK;
+}
+
+/**
+ * @brief Calculate and allocate PDMA linked list table (LLT)
+ * @param ch Channel number (0 to PDMA_CH_MAX-1)
+ * @param pdma_cfg Pointer to PDMA configuration structure
+ * @return Physical address of LLT on success, RT_NULL on failure
+ */
+static rt_uint32_t *_k230_pdma_llt_cal(rt_uint8_t ch, usr_pdma_cfg_t *pdma_cfg)
+{
+    rt_int32_t i;
+    rt_uint32_t list_num;
+    pdma_llt_t *llt_list;
+    rt_bool_t mem_to_dev;
+
+    LOG_D("[CH%d] Calculating LLT parameters", ch);
+
+    /* Calculate number of LLT entries needed */
+    list_num = (pdma_cfg->line_size - 1) / PDMA_MAX_LINE_SIZE + 1;
+    LOG_D("[CH%d] Line size: %d, requires %d LLT entries",
+          ch, pdma_cfg->line_size, list_num);
+
+    /* Determine transfer direction */
+    mem_to_dev = (pdma_cfg->pdma_ch_cfg.ch_src_type == CONTINUE) ? RT_TRUE : RT_FALSE;
+    LOG_D("[CH%d] Transfer direction: %s", ch, mem_to_dev ? "Memory->Device" : "Device->Memory");
+
+    /* Allocate memory for LLT */
+    pdma_ctrl.chan[ch].page_size = rt_page_bits(sizeof(pdma_llt_t) * list_num);
+    llt_list = (pdma_llt_t *)rt_pages_alloc(pdma_ctrl.chan[ch].page_size);
+
+    if (llt_list == RT_NULL)
+    {
+        pdma_ctrl.chan[ch].page_size = 0;
+
+        LOG_E("[CH%d] Failed to allocate memory for LLT", ch);
+        return RT_NULL;
+    }
+    LOG_D("[CH%d] Allocated %d bytes for LLT", ch, sizeof(pdma_llt_t) * list_num);
+
+    pdma_ctrl.chan[ch].llt_va = llt_list;
+
+    /* Initialize LLT entries */
+    for (i = 0; i < list_num; i++)
+    {
+        /* Set source and destination addresses */
+        if (mem_to_dev)
+        {
+            llt_list[i].src_addr = ((rt_uint32_t)(intptr_t)pdma_cfg->src_addr + PDMA_MAX_LINE_SIZE * i);
+            llt_list[i].dst_addr = ((rt_uint32_t)(intptr_t)pdma_cfg->dst_addr);  /* Device address remains fixed */
+        }
+        else
+        {
+            llt_list[i].src_addr = ((rt_uint32_t)(intptr_t)pdma_cfg->src_addr);  /* Device address remains fixed */
+            llt_list[i].dst_addr = ((rt_uint32_t)(intptr_t)pdma_cfg->dst_addr + PDMA_MAX_LINE_SIZE * i);
+        }
+
+        /* Set transfer size and next pointer */
+        if (i == list_num - 1)
+        {
+            /* Last entry uses remaining size */
+            llt_list[i].line_size = (pdma_cfg->line_size % PDMA_MAX_LINE_SIZE) ?
+                                   (pdma_cfg->line_size % PDMA_MAX_LINE_SIZE) :
+                                   PDMA_MAX_LINE_SIZE;
+            llt_list[i].next_llt_addr = 0;  /* Terminate list */
+            LOG_D("[CH%d] Last LLT entry: size=%d", ch, llt_list[i].line_size);
+        }
+        else
+        {
+            llt_list[i].line_size = PDMA_MAX_LINE_SIZE;
+            /* Convert virtual address of next entry to physical address */
+            void *next_llt_va = &llt_list[i+1];
+            llt_list[i].next_llt_addr = (rt_uint32_t)(intptr_t)rt_kmem_v2p(next_llt_va);
+        }
+        llt_list[i].pause = 0;
+    }
+
+    /* Handle cache coherency based on transfer direction */
+    if (mem_to_dev)
+    {
+        /* Memory to Device: clean source data cache */
+        void *src_va = rt_kmem_p2v(pdma_cfg->src_addr);
+        rt_hw_cpu_dcache_clean(src_va, pdma_cfg->line_size);
+        LOG_D("[CH%d] Cleaned source cache (va: %p, size: %d)",
+              ch, src_va, pdma_cfg->line_size);
+    }
+    else
+    {
+        /* Device to Memory: invalidate destination cache */
+        void *dst_va = rt_kmem_p2v(pdma_cfg->dst_addr);
+        rt_hw_cpu_dcache_invalidate(dst_va, pdma_cfg->line_size);
+        LOG_D("[CH%d] Invalidated destination cache (va: %p, size: %d)",
+              ch, dst_va, pdma_cfg->line_size);
+    }
+
+    /* Ensure LLT is visible to DMA */
+    rt_hw_cpu_dcache_clean((void*)llt_list, sizeof(pdma_llt_t) * list_num);
+    LOG_D("[CH%d] Cleaned LLT cache (va: %p, size: %d)",
+          ch, llt_list, sizeof(pdma_llt_t) * list_num);
+
+    /* Return physical address of LLT */
+    void *llt_list_pa = rt_kmem_v2p(llt_list);
+    LOG_I("[CH%d] LLT calculation complete (pa: %p)", ch, llt_list_pa);
+
+    return (rt_uint32_t *)llt_list_pa;
+}
+
+/**
+ * @brief Free allocated PDMA linked list table (LLT) memory
+ * @param ch Channel number (0 to PDMA_CH_MAX-1) to free
+ */
+static void _k230_pdma_llt_free(rt_uint8_t ch)
+{
+    rt_uint32_t *llt_list_pa;
+    void *llt_list_va;
+
+    LOG_D("[CH%d] Freeing LLT memory", ch);
+
+    if (pdma_ctrl.chan[ch].llt_va != RT_NULL)
+    {
+        /* Free the allocated pages (page_size holds the allocation order from rt_page_bits) */
+        rt_pages_free(pdma_ctrl.chan[ch].llt_va, pdma_ctrl.chan[ch].page_size);
+        LOG_D("[CH%d] Freed LLT pages (order: %d)", ch, pdma_ctrl.chan[ch].page_size);
+        pdma_ctrl.chan[ch].llt_va = RT_NULL;
+        pdma_ctrl.chan[ch].page_size = 0;
+    }
+}
+
+/**
+ * @brief PDMA interrupt service routine
+ * @param irq Interrupt number (unused)
+ * @param param Channel number passed as void pointer
+ */
+static void k230_pdma_isr(int irq, void *param)
+{
+    rt_uint8_t ch = (rt_uintptr_t)param;  /* Convert channel parameter */
+    rt_bool_t success = RT_FALSE; /* Transfer result flag */
+    k230_pdma_callback_t callback = RT_NULL; /* Callback function pointer */
+
+    LOG_D("[CH%d] PDMA interrupt triggered", ch);
+
+    PDMA_LOCK();
+
+    /* Only process interrupts for enabled channels */
+    if (PDMA_CH_MENUCONFIG_ENABLED(ch) && PDMA_CH_IS_ENABLED(ch))
+    {
+        /* Check for transfer complete interrupt */
+        if (PDMA_CH_INT_IS_TRIGGERED(ch, PDMA_PDONE_INT))
+        {
+            success = RT_TRUE;
+            callback = pdma_ctrl.chan[ch].cb.callback;
+            LOG_D("[CH%d] Transfer complete", ch);
+        }
+        /* Check for timeout interrupt */
+        else if (PDMA_CH_INT_IS_TRIGGERED(ch, PDMA_PTOUT_INT))
+        {
+            success = RT_FALSE;
+            callback = pdma_ctrl.chan[ch].cb.callback;
+            LOG_E("[CH%d] Transfer timeout", ch);
+        }
+        /* Check for pause interrupt */
+        else if (PDMA_CH_INT_IS_TRIGGERED(ch, PDMA_PPAUSE_INT))
+        {
+            PDMA_CH_RESUME(ch);
+            LOG_D("[CH%d] Transfer resumed", ch);
+        }
+
+        /* Clear all interrupt flags for this channel */
+        PDMA_CH_INT_CLEAR_ALL(ch);
+        LOG_D("[CH%d] Interrupts cleared", ch);
+    }
+
+    PDMA_UNLOCK();
+
+    if (callback)
+    {
+        callback(ch, success);
+    }
+}
+
+/**
+ * @brief Initialize PDMA hardware device
+ * @return RT_EOK on success, error code on failure
+ */
+int rt_hw_pdma_device_init(void)
+{
+    LOG_I("Initializing PDMA controller");
+
+    /* Map PDMA registers */
+    pdma_ctrl.reg = rt_ioremap((void *)DMA_BASE_ADDR, DMA_IO_SIZE);
+    if (RT_NULL == pdma_ctrl.reg)
+    {
+        LOG_E("Failed to map PDMA registers");
+        return -RT_ERROR;
+    }
+    LOG_D("Mapped PDMA registers at 0x%08X", DMA_BASE_ADDR);
+
+    if (kd_request_lock(HARDLOCK_PDMA))
+    {
+        pdma_ctrl.hardlock = -1;
+        rt_iounmap(pdma_ctrl.reg);
+        LOG_E("Failed to acquire PDMA hardware lock");
+        return -RT_ERROR;
+    }
+    pdma_ctrl.hardlock = HARDLOCK_PDMA;
+    LOG_D("Acquired PDMA hardware lock");
+
+    /* Install and enable interrupts for configured channels */
+#if defined(BSP_USING_PDMA_CHANNEL0)
+    pdma_ctrl.chan[PDMA_CH_0].menuconfig_enabled = RT_TRUE;
+    pdma_ctrl.chan[PDMA_CH_0].irq_num = PDMA_CHANNEL0_IRQn;
+    rt_hw_interrupt_install(PDMA_CHANNEL0_IRQn, k230_pdma_isr, (void *)PDMA_CH_0, "pdma_ch0");
+    LOG_D("Enabled interrupts for channel 0");
+#endif
+
+#if defined(BSP_USING_PDMA_CHANNEL1)
+    pdma_ctrl.chan[PDMA_CH_1].menuconfig_enabled = RT_TRUE;
+    pdma_ctrl.chan[PDMA_CH_1].irq_num = PDMA_CHANNEL1_IRQn;
+    rt_hw_interrupt_install(PDMA_CHANNEL1_IRQn, k230_pdma_isr, (void *)PDMA_CH_1, "pdma_ch1");
+    LOG_D("Enabled interrupts for channel 1");
+#endif
+
+#if defined(BSP_USING_PDMA_CHANNEL2)
+    pdma_ctrl.chan[PDMA_CH_2].menuconfig_enabled = RT_TRUE;
+    pdma_ctrl.chan[PDMA_CH_2].irq_num = PDMA_CHANNEL2_IRQn;
+    rt_hw_interrupt_install(PDMA_CHANNEL2_IRQn, k230_pdma_isr, (void *)PDMA_CH_2, "pdma_ch2");
+    LOG_D("Enabled interrupts for channel 2");
+#endif
+
+#if defined(BSP_USING_PDMA_CHANNEL3)
+    pdma_ctrl.chan[PDMA_CH_3].menuconfig_enabled = RT_TRUE;
+    pdma_ctrl.chan[PDMA_CH_3].irq_num = PDMA_CHANNEL3_IRQn;
+    rt_hw_interrupt_install(PDMA_CHANNEL3_IRQn, k230_pdma_isr, (void *)PDMA_CH_3, "pdma_ch3");
+    LOG_D("Enabled interrupts for channel 3");
+#endif
+
+#if defined(BSP_USING_PDMA_CHANNEL4)
+    pdma_ctrl.chan[PDMA_CH_4].menuconfig_enabled = RT_TRUE;
+    pdma_ctrl.chan[PDMA_CH_4].irq_num = PDMA_CHANNEL4_IRQn;
+    rt_hw_interrupt_install(PDMA_CHANNEL4_IRQn, k230_pdma_isr, (void *)PDMA_CH_4, "pdma_ch4");
+    LOG_D("Enabled interrupts for channel 4");
+#endif
+
+#if defined(BSP_USING_PDMA_CHANNEL5)
+    pdma_ctrl.chan[PDMA_CH_5].menuconfig_enabled = RT_TRUE;
+    pdma_ctrl.chan[PDMA_CH_5].irq_num = PDMA_CHANNEL5_IRQn;
+    rt_hw_interrupt_install(PDMA_CHANNEL5_IRQn, k230_pdma_isr, (void *)PDMA_CH_5, "pdma_ch5");
+    LOG_D("Enabled interrupts for channel 5");
+#endif
+
+#if defined(BSP_USING_PDMA_CHANNEL6)
+    pdma_ctrl.chan[PDMA_CH_6].menuconfig_enabled = RT_TRUE;
+    pdma_ctrl.chan[PDMA_CH_6].irq_num = PDMA_CHANNEL6_IRQn;
+    rt_hw_interrupt_install(PDMA_CHANNEL6_IRQn, k230_pdma_isr, (void *)PDMA_CH_6, "pdma_ch6");
+    LOG_D("Enabled interrupts for channel 6");
+#endif
+
+#if defined(BSP_USING_PDMA_CHANNEL7)
+    pdma_ctrl.chan[PDMA_CH_7].menuconfig_enabled = RT_TRUE;
+    pdma_ctrl.chan[PDMA_CH_7].irq_num = PDMA_CHANNEL7_IRQn;
+    rt_hw_interrupt_install(PDMA_CHANNEL7_IRQn, k230_pdma_isr, (void *)PDMA_CH_7, "pdma_ch7");
+    LOG_D("Enabled interrupts for channel 7");
+#endif
+
+    return RT_EOK;
+}
+INIT_BOARD_EXPORT(rt_hw_pdma_device_init);

+ 334 - 0
bsp/k230/drivers/interdrv/pdma/drv_pdma.h

@@ -0,0 +1,334 @@
+/* Copyright (c) 2023, Canaan Bright Sight Co., Ltd
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2006-2025 RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#ifndef DRV_PDMA_H_
+#define DRV_PDMA_H_
+
+#include <rtdef.h>
+
+/**
+ * @brief PDMA channel enumeration
+ */
+typedef enum pdma_ch {
+    PDMA_CH_0 = 0,  /**< Channel 0 */
+    PDMA_CH_1 = 1,  /**< Channel 1 */
+    PDMA_CH_2 = 2,  /**< Channel 2 */
+    PDMA_CH_3 = 3,  /**< Channel 3 */
+    PDMA_CH_4 = 4,  /**< Channel 4 */
+    PDMA_CH_5 = 5,  /**< Channel 5 */
+    PDMA_CH_6 = 6,  /**< Channel 6 */
+    PDMA_CH_7 = 7,  /**< Channel 7 */
+    PDMA_CH_MAX,    /**< Maximum channel count */
+} pdma_ch_e;
+
+/* PDMA Register Access Macros */
+#define pdma_write32(addr, val)    writel((val), (volatile void *)(addr))
+#define pdma_read32(addr)          readl((volatile void *)(addr))
+
+/* PDMA Configuration Constants */
+#define PDMA_MAX_WAIT_MS           2000    /* Maximum wait time in milliseconds */
+#define PDMA_MAX_LINE_SIZE         64      /* Maximum transfer size (bytes) per LLT entry */
+
+/**
+ * @brief PDMA Interrupt Mask Definitions
+ *
+ * These macros define the bitmask values for different PDMA interrupt types.
+ * Each interrupt type is represented by a unique bit in the interrupt status register.
+ */
+#define PDMA_PDONE_INT          0x00000001  /**< Transfer Done interrupt mask */
+#define PDMA_PITEM_INT          0x00000100  /**< Item Done interrupt mask */
+#define PDMA_PPAUSE_INT         0x00010000  /**< Transfer Paused interrupt mask */
+#define PDMA_PTOUT_INT          0x01000000  /**< Transfer Timeout interrupt mask */
+
+/* Combined interrupt masks */
+#define PDMA_ALL_INTS       (PDMA_PDONE_INT | PDMA_PITEM_INT | PDMA_PPAUSE_INT | PDMA_PTOUT_INT) /**< All PDMA interrupts mask */
+
+#define PDMA_IRQ_OFFSET         16
+
+/**
+ * @brief PDMA channel interrupt numbers
+ * @note Each value is the IRQ number from the SoC manual plus PDMA_IRQ_OFFSET
+ */
+#define PDMA_CHANNEL0_IRQn     (123 + PDMA_IRQ_OFFSET)  /**< Channel 0 IRQ number */
+#define PDMA_CHANNEL1_IRQn     (180 + PDMA_IRQ_OFFSET)  /**< Channel 1 IRQ number */
+#define PDMA_CHANNEL2_IRQn     (181 + PDMA_IRQ_OFFSET)  /**< Channel 2 IRQ number */
+#define PDMA_CHANNEL3_IRQn     (182 + PDMA_IRQ_OFFSET)  /**< Channel 3 IRQ number */
+#define PDMA_CHANNEL4_IRQn     (183 + PDMA_IRQ_OFFSET)  /**< Channel 4 IRQ number */
+#define PDMA_CHANNEL5_IRQn     (184 + PDMA_IRQ_OFFSET)  /**< Channel 5 IRQ number */
+#define PDMA_CHANNEL6_IRQn     (185 + PDMA_IRQ_OFFSET)  /**< Channel 6 IRQ number */
+#define PDMA_CHANNEL7_IRQn     (186 + PDMA_IRQ_OFFSET)  /**< Channel 7 IRQ number */
+
+/**
+ * @brief PDMA channel state enumeration
+ */
+typedef enum {
+    PDMA_STATE_BUSY    = 0x1,    /**< Channel is busy transferring */
+    PDMA_STATE_PAUSE   = 0x2,    /**< Channel is paused */
+} pdma_state_e;
+
+/**
+ * @brief PDMA control command enumeration
+ */
+typedef enum {
+    PDMA_CMD_START     = 0x1,    /**< Start DMA transfer command */
+    PDMA_CMD_STOP      = 0x2,    /**< Stop DMA transfer command */
+    PDMA_CMD_RESUME    = 0x4,    /**< Resume paused transfer command */
+} pdma_cmd_e;
+
+/**
+ * @brief PDMA data transfer direction enumeration
+ *
+ * @param TX Transmit direction: Data flows from system memory to peripheral device
+ * @param RX Receive direction: Data flows from peripheral device to system memory
+ */
+typedef enum pdma_rxtx {
+    TX = 0,
+    RX = 1,
+} pdma_rxtx_e;
+
+/**
+ * @brief PDMA burst length configuration
+ */
+typedef enum pdma_burst_len {
+    PBURST_LEN_1 = 0,
+    PBURST_LEN_2 = 1,
+    PBURST_LEN_3 = 2,
+    PBURST_LEN_4 = 3,
+    PBURST_LEN_5 = 4,
+    PBURST_LEN_6 = 5,
+    PBURST_LEN_7 = 6,
+    PBURST_LEN_8 = 7,
+    PBURST_LEN_9 = 8,
+    PBURST_LEN_10 = 9,
+    PBURST_LEN_11 = 10,
+    PBURST_LEN_12 = 11,
+    PBURST_LEN_13 = 12,
+    PBURST_LEN_14 = 13,
+    PBURST_LEN_15 = 14,
+    PBURST_LEN_16 = 15,
+} pdma_burst_len_e;
+
+/**
+ * @brief Peripheral device selection for PDMA
+ */
+typedef enum device_sel {
+    UART0_TX = 0,
+    UART0_RX = 1,
+    UART1_TX = 2,
+    UART1_RX = 3,
+    UART2_TX = 4,
+    UART2_RX = 5,
+    UART3_TX = 6,
+    UART3_RX = 7,
+    UART4_TX = 8,
+    UART4_RX = 9,
+    I2C0_TX = 10,
+    I2C0_RX = 11,
+    I2C1_TX = 12,
+    I2C1_RX = 13,
+    I2C2_TX = 14,
+    I2C2_RX = 15,
+    I2C3_TX = 16,
+    I2C3_RX = 17,
+    I2C4_TX = 18,
+    I2C4_RX = 19,
+    AUDIO_TX = 20,
+    AUDIO_RX = 21,
+    JAMLINK0_TX = 22,
+    JAMLINK0_RX = 23,
+    JAMLINK1_TX = 24,
+    JAMLINK1_RX = 25,
+    JAMLINK2_TX = 26,
+    JAMLINK2_RX = 27,
+    JAMLINK3_TX = 28,
+    JAMLINK3_RX = 29,
+    ADC0 = 30,
+    ADC1 = 31,
+    ADC2 = 32,
+    PDM_IN = 33,
+} device_sel_e;
+
+/**
+ * @brief Data endianness configuration
+ */
+typedef enum pendian {
+    PDEFAULT,
+    PBYTE2,
+    PBYTE4,
+    PBYTE8,
+} pendian_e;
+
+/**
+ * @brief Data transfer size configuration
+ */
+typedef enum hsize {
+    PSBYTE1,
+    PSBYTE2,
+    PSBYTE4,
+} hsize_e;
+
+/**
+ * @brief Source address behavior configuration
+ */
+typedef enum src_type {
+    CONTINUE,
+    FIXED,
+} src_type_e;
+
+/**
+ * @brief PDMA channel configuration structure
+ */
+typedef struct pdma_ch_cfg {
+    volatile rt_uint32_t ch_src_type : 1;
+    volatile rt_uint32_t ch_dev_hsize : 2;
+    volatile rt_uint32_t reserved0 : 1;
+    volatile rt_uint32_t ch_dat_endian : 2;
+    volatile rt_uint32_t reserved1 : 2;
+    volatile rt_uint32_t ch_dev_blen : 4;
+    volatile rt_uint32_t ch_priority : 4;
+    volatile rt_uint32_t ch_dev_tout : 12;
+    volatile rt_uint32_t reserved2 : 4;
+} pdma_ch_cfg_t;
+
+/**
+ * @brief PDMA channel register structure
+ */
+typedef struct pdma_ch_reg {
+    volatile rt_uint32_t ch_ctl;
+    volatile rt_uint32_t ch_status;
+    volatile pdma_ch_cfg_t ch_cfg;
+    volatile rt_uint32_t ch_llt_saddr;
+    volatile rt_uint32_t reserved[4];
+} pdma_ch_reg_t;
+
+/**
+ * @brief PDMA controller register structure
+ */
+typedef struct pdma_reg {
+    volatile rt_uint32_t pdma_ch_en;
+    volatile rt_uint32_t dma_int_mask;
+    volatile rt_uint32_t dma_int_stat;
+    volatile rt_uint32_t reserved[5];
+    volatile pdma_ch_reg_t pdma_ch_reg[8];
+    volatile rt_uint32_t ch_peri_dev_sel[8];
+} pdma_reg_t;
+
+/**
+ * @brief PDMA Linked List Transfer (LLT) node structure
+ */
+typedef struct pdma_llt {
+    rt_uint32_t line_size : 30;    /**< Transfer length in bytes */
+    rt_uint32_t pause : 1;         /**< Pause control flag */
+    rt_uint32_t node_intr : 1;     /**< Node completion interrupt enable */
+    rt_uint32_t src_addr;          /**< Source address */
+    rt_uint32_t dst_addr;          /**< Destination address */
+    rt_uint32_t next_llt_addr;     /**< Next LLT node address */
+} pdma_llt_t;
+
+/**
+ * @brief User PDMA configuration structure
+ */
+typedef struct usr_pdma_cfg {
+    device_sel_e device;           /**< Peripheral device selection */
+    rt_uint8_t *src_addr;          /**< Source address pointer */
+    rt_uint8_t *dst_addr;          /**< Destination address pointer */
+    rt_uint32_t line_size;         /**< Transfer block size */
+    pdma_ch_cfg_t pdma_ch_cfg;     /**< Channel configuration */
+} usr_pdma_cfg_t;
+
+/**
+ * @brief DMA transfer event callback function type
+ * @param ch DMA channel number where event occurred
+ * @param is_done Event type indicator:
+ *               - RT_TRUE: Transfer completed successfully
+ *               - RT_FALSE: Transfer terminated due to timeout
+ * @note This callback will be invoked for both successful completion
+ *       and timeout conditions. The is_done parameter distinguishes
+ *       between these cases.
+ */
+typedef void (*k230_pdma_callback_t)(rt_uint8_t ch, rt_bool_t is_done);
+
+/**
+ * @brief DMA channel callback structure
+ * Contains callback function for DMA channel events
+ */
+typedef struct {
+    k230_pdma_callback_t callback;  /* Callback function pointer */
+} k230_pdma_chan_cb_t;
+
+/**
+ * @brief PDMA controller structure
+ */
+typedef struct {
+    rt_int32_t hardlock;             /**< Hardware lock for critical sections */
+    volatile pdma_reg_t *reg;        /**< PDMA register base address */
+
+    struct {
+        rt_uint32_t irq_num;         /**< Interrupt number for this channel */
+        void *llt_va;                /**< Virtual address of LLT memory (NULL if not allocated) */
+        rt_size_t page_size;         /**< Page size for each channel */
+        k230_pdma_chan_cb_t cb;      /**< Channel callback functions */
+        rt_bool_t is_hw_configured;  /**< Hardware config flag (must set via k230_pdma_config() before start to avoid BUSY lock) */
+        rt_bool_t menuconfig_enabled;/**< Channel enabled in menuconfig */
+    } chan[PDMA_CH_MAX];             /**< Channel configuration array */
+} pdma_controller_t;
+
+/**
+ * @brief Set PDMA callback function for specified channel
+ */
+rt_err_t k230_pdma_set_callback(rt_uint8_t ch, k230_pdma_callback_t func);
+
+/**
+ * @brief Request an available PDMA channel
+ */
+rt_err_t k230_pdma_request_channel(rt_uint8_t *ch);
+
+/**
+ * @brief Release allocated PDMA channel
+ */
+rt_err_t k230_pdma_release_channel(rt_uint8_t ch);
+
+/**
+ * @brief Start PDMA transfer on specified channel
+ */
+rt_err_t k230_pdma_start(rt_uint8_t ch);
+
+/**
+ * @brief Stop PDMA transfer on specified channel
+ */
+rt_err_t k230_pdma_stop(rt_uint8_t ch);
+
+/**
+ * @brief Configure PDMA channel parameters
+ */
+rt_err_t k230_pdma_config(rt_uint8_t ch, usr_pdma_cfg_t *ucfg);
+
+#endif /* DRV_PDMA_H_ */

+ 4 - 1
bsp/k230/drivers/utest/SConscript

@@ -17,7 +17,10 @@ if GetDepend('RT_UTEST_USING_ALL_CASES') or GetDepend('BSP_UTEST_DRIVERS'):
 
     if GetDepend('BSP_USING_PWM'):
         src += ['test_pwm.c']
-        
+
+    if GetDepend('BSP_USING_PDMA'):
+        src += ['test_pdma.c']
+     
 group = DefineGroup('utestcases', src, depend = [''])
 
 Return('group')

+ 299 - 0
bsp/k230/drivers/utest/test_pdma.c

@@ -0,0 +1,299 @@
+/*
+ * Copyright (c) 2006-2025, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+/*
+ * Refer to PDMA driver implementation and hardware specifications
+ * This is a PDMA (Peripheral DMA) device test routine. The program tests:
+ * - Channel allocation/release functionality
+ * - DMA transfer from DDR to UART (TX)
+ * - DMA transfer from UART to DDR (RX)
+ *
+ * "test_pdma_tx" tests DDR-to-UART transmission:
+ * - Should display 3 identical rows of data on screen
+ * - Each row consists of 62 'x' characters
+ * - Total output should contain exactly 186 'x' characters
+ * - Data is transferred from DDR to UART0 TX FIFO via PDMA
+ *
+ * "test_pdma_rx" tests UART-to-DDR reception:
+ * - After "Send test data by keyboard input within 2 seconds" prompt appears:
+ *   - User has 2 seconds to input data via keyboard
+ *   - Input will be captured via UART0 RX FIFO to DDR through PDMA
+ * - Test will display first 16 bytes of received DDR data
+ * - Verify output matches partial input data
+ */
+
+#include <rtthread.h>
+#include <rtdevice.h>
+#include "utest.h"
+#include <mmu.h>
+#include "board.h"
+#include "drv_pdma.h"
+
+#define UART0_IRQ 0x10
+
+#define CACHE_LINE_SIZE 64
+
+typedef enum
+{
+    TEST_PDMA_EVENT_NONE,
+    TEST_PDMA_EVENT_COMPLETE,
+    TEST_PDMA_EVENT_TIMEOUT
+} test_pdma_event_t;
+
+static rt_event_t test_pdma_event = RT_NULL;
+
+void test_pdma_call_back(rt_uint8_t ch, rt_bool_t is_done)
+{
+    /* Send completion or timeout event based on callback status */
+    test_pdma_event_t event_type = is_done ? TEST_PDMA_EVENT_COMPLETE : TEST_PDMA_EVENT_TIMEOUT;
+    rt_event_send(test_pdma_event, event_type);
+}
+
+void test_pdma_request()
+{
+    rt_uint8_t ch;
+    rt_err_t err;
+
+    /* Test channel allocation for all available channels */
+    for (rt_uint8_t i = 0; i < PDMA_CH_MAX; i++)
+    {
+        err = k230_pdma_request_channel(&ch);
+        uassert_int_equal(err, RT_EOK);
+    }
+
+    /* Should fail when all channels are allocated */
+    err = k230_pdma_request_channel(&ch);
+    uassert_int_equal(err, -RT_EBUSY);
+
+    /* Release channel 0 and test re-allocation */
+    err = k230_pdma_release_channel(0);
+    uassert_int_equal(err, RT_EOK);
+
+    err = k230_pdma_request_channel(&ch);
+    uassert_int_equal(err, RT_EOK);
+
+    /* Cleanup: release all channels */
+    for (rt_uint8_t i = 0; i < PDMA_CH_MAX; i++)
+    {
+        err = k230_pdma_release_channel(i);
+        uassert_int_equal(err, RT_EOK);
+    }
+}
+
+/* Test DMA transfer from DDR to UART output */
+void test_pdma_tx()
+{
+    rt_uint8_t ch;
+    rt_err_t err;
+    rt_uint32_t recv_event;
+    rt_uint32_t len = 192;
+
+    /* For software-managed DMA cache coherency, ensure buffer start address and size are cache-line aligned */
+    uint8_t *buf = rt_malloc_align(len, CACHE_LINE_SIZE);
+    uassert_not_null(buf);
+    void *buf_pa = rt_kmem_v2p(buf);
+
+    for (int i = 0; i < 192; i++)
+    {
+        if ((i + 2) % 64 == 0)
+        {
+            buf[i] = '\r';
+        }
+        else if ((i + 1) % 64 == 0)
+        {
+            buf[i] = '\n';
+        }
+        else
+        {
+            buf[i] = 'x';
+        }
+    }
+
+    rt_event_control(test_pdma_event, RT_IPC_CMD_RESET, NULL);
+
+    /* Configure DMA transfer */
+    err = k230_pdma_request_channel(&ch);
+    uassert_int_equal(err, RT_EOK);
+
+    usr_pdma_cfg_t pdma_cfg;
+    /* Configure DMA parameters */
+    pdma_cfg.device = UART0_TX;
+    pdma_cfg.src_addr = buf_pa;
+    pdma_cfg.dst_addr = (rt_uint8_t *)UART0_BASE_ADDR;
+    pdma_cfg.line_size = len;
+
+    /* Set channel configuration */
+    pdma_cfg.pdma_ch_cfg.ch_src_type = CONTINUE;
+    pdma_cfg.pdma_ch_cfg.ch_dev_hsize = PSBYTE1;
+    pdma_cfg.pdma_ch_cfg.ch_dat_endian = PDEFAULT;
+    pdma_cfg.pdma_ch_cfg.ch_dev_blen = PBURST_LEN_16;
+    pdma_cfg.pdma_ch_cfg.ch_priority = 7;
+    pdma_cfg.pdma_ch_cfg.ch_dev_tout = 0xFFF;
+
+    err = k230_pdma_set_callback(ch, test_pdma_call_back);
+    uassert_int_equal(err, RT_EOK);
+
+    err = k230_pdma_config(ch, &pdma_cfg);
+    uassert_int_equal(err, RT_EOK);
+
+    /* Start transfer and wait for completion */
+    err = k230_pdma_start(ch);
+    uassert_int_equal(err, RT_EOK);
+
+    err = rt_event_recv(test_pdma_event,
+                       TEST_PDMA_EVENT_COMPLETE | TEST_PDMA_EVENT_TIMEOUT,
+                       RT_EVENT_FLAG_OR | RT_EVENT_FLAG_CLEAR,
+                       RT_WAITING_FOREVER,
+                       &recv_event);
+    uassert_int_equal(err, RT_EOK);
+    uassert_int_equal(recv_event, TEST_PDMA_EVENT_COMPLETE);
+
+    /* Cleanup */
+    err = k230_pdma_stop(ch);
+    uassert_int_equal(err, RT_EOK);
+
+    err = k230_pdma_release_channel(ch);
+    uassert_int_equal(err, RT_EOK);
+
+    rt_free_align(buf);
+    LOG_I("PDMA TX test completed successfully");
+}
+
+/* Test DMA transfer from UART RX FIFO to DDR */
+void test_pdma_rx()
+{
+    rt_uint8_t ch;
+    rt_err_t err;
+    rt_uint32_t recv_event;
+    rt_uint32_t len = 16;
+
+    uint8_t *buf = rt_malloc_align(64, CACHE_LINE_SIZE);
+    uassert_not_null(buf);
+    void *buf_pa = rt_kmem_v2p(buf);
+
+    /* Reset event before starting test */
+    rt_event_control(test_pdma_event, RT_IPC_CMD_RESET, NULL);
+
+    /* Request DMA channel */
+    err = k230_pdma_request_channel(&ch);
+    uassert_int_equal(err, RT_EOK);
+
+    /* Configure DMA parameters */
+    usr_pdma_cfg_t pdma_cfg;
+    pdma_cfg.device = UART0_RX;
+    pdma_cfg.src_addr = (rt_uint8_t *)UART0_BASE_ADDR;
+    pdma_cfg.dst_addr = buf_pa;
+    pdma_cfg.line_size = len;
+
+    /* Set channel configuration */
+    pdma_cfg.pdma_ch_cfg.ch_src_type = FIXED;
+    pdma_cfg.pdma_ch_cfg.ch_dev_hsize = PSBYTE1;
+    pdma_cfg.pdma_ch_cfg.ch_dat_endian = PDEFAULT;
+    pdma_cfg.pdma_ch_cfg.ch_dev_blen = PBURST_LEN_16;
+    pdma_cfg.pdma_ch_cfg.ch_priority = 7;
+    pdma_cfg.pdma_ch_cfg.ch_dev_tout = 0xFFF;
+
+    /* Set callback and configure DMA */
+    err = k230_pdma_set_callback(ch, test_pdma_call_back);
+    uassert_int_equal(err, RT_EOK);
+
+    err = k230_pdma_config(ch, &pdma_cfg);
+    uassert_int_equal(err, RT_EOK);
+
+    LOG_I("Send test data by keyboard input within 2 seconds (to UART receive buffer)");
+
+    /* Setup 2 second timeout */
+    rt_tick_t timeout = RT_TICK_PER_SECOND * 2;
+    rt_tick_t start_tick = rt_tick_get();
+
+    /* Mask UART0 interrupt to prevent FIFO access by ISR */
+    rt_hw_interrupt_mask(UART0_IRQ);
+
+    /* Wait for timeout period */
+    while (RT_TRUE)
+    {
+        if (rt_tick_get_delta(start_tick) >= timeout)
+        {
+            break;
+        }
+    }
+
+    /* Start DMA transfer */
+    k230_pdma_start(ch);
+
+    /* Wait for transfer completion event */
+    err = rt_event_recv(test_pdma_event,
+                       TEST_PDMA_EVENT_COMPLETE | TEST_PDMA_EVENT_TIMEOUT,
+                       RT_EVENT_FLAG_OR | RT_EVENT_FLAG_CLEAR,
+                       RT_WAITING_FOREVER,
+                       &recv_event);
+    uassert_int_equal(err, RT_EOK);
+    uassert_int_equal(recv_event, TEST_PDMA_EVENT_COMPLETE);
+
+    rt_hw_interrupt_umask(UART0_IRQ);
+
+    err = k230_pdma_stop(ch);
+    uassert_int_equal(err, RT_EOK);
+
+    err = k230_pdma_release_channel(ch);
+    uassert_int_equal(err, RT_EOK);
+
+    LOG_I("Got: %.*s", len, buf);
+
+    rt_free_align(buf);
+}
+
+static rt_err_t utest_tc_init(void)
+{
+    test_pdma_event = (rt_event_t)rt_malloc(sizeof(struct rt_event));
+    if (test_pdma_event == RT_NULL)
+    {
+        LOG_E("Failed to allocate memory for pdma_event!");
+        return -RT_ENOMEM;
+    }
+
+    if (rt_event_init(test_pdma_event, "pdma_event", RT_IPC_FLAG_FIFO) != RT_EOK)
+    {
+        LOG_E("Failed to init pdma_event!");
+        rt_free(test_pdma_event);
+        return -RT_ERROR;
+    }
+
+    LOG_I("PDMA event initialized successfully!");
+    return RT_EOK;
+}
+
+static rt_err_t utest_tc_cleanup(void)
+{
+    rt_event_detach(test_pdma_event);
+    rt_free(test_pdma_event);
+
+    /* Check and release all DMA channels */
+    for (rt_uint8_t ch = 0; ch < PDMA_CH_MAX; ch++)
+    {
+        rt_err_t err = k230_pdma_release_channel(ch);
+
+        /* A successful release here means the test case left the channel allocated */
+        if (err == RT_EOK)
+        {
+            LOG_W("PDMA channel %d was not released in test case!", ch);
+        }
+        /* Channel release failed with error other than -RT_EINVAL (unexpected error) */
+        else if (err != -RT_EINVAL)
+        {
+            LOG_I("PDMA channel %d release failed: %d", ch, err);
+        }
+        /* -RT_EINVAL means channel was already released (normal case) - no action needed */
+    }
+
+    return RT_EOK;
+}
+
+void test_pdma()
+{
+    UTEST_UNIT_RUN(test_pdma_request);
+    UTEST_UNIT_RUN(test_pdma_tx);
+    UTEST_UNIT_RUN(test_pdma_rx);
+}
+
+UTEST_TC_EXPORT(test_pdma, "pdma", utest_tc_init, utest_tc_cleanup, 10);

+ 8 - 0
bsp/k230/rtconfig.h

@@ -494,6 +494,14 @@
 /* NXP HAL & SDK Drivers */
 
 /* end of NXP HAL & SDK Drivers */
+
+/* NUVOTON Drivers */
+
+/* end of NUVOTON Drivers */
+
+/* GD32 Drivers */
+
+/* end of GD32 Drivers */
 /* end of HAL & SDK Drivers */
 
 /* sensors drivers */