
[component][drivers]add clk framework (#8213)

zms123456 · 1 year ago · commit e1fdc13288

+ 1 - 0
components/drivers/Kconfig

@@ -971,6 +971,7 @@ menuconfig RT_USING_VIRTIO
 source "$RTT_DIR/components/drivers/ofw/Kconfig"
 source "$RTT_DIR/components/drivers/pic/Kconfig"
 source "$RTT_DIR/components/drivers/ktime/Kconfig"
+source "$RTT_DIR/components/drivers/clk/Kconfig"

 menu "Using USB"
     config RT_USING_USB

+ 5 - 0
components/drivers/clk/Kconfig

@@ -0,0 +1,5 @@
+menuconfig RT_USING_CLK
+    bool "Using Common Clock Framework (CLK)"
+    depends on RT_USING_DM
+    select RT_USING_ADT_REF
+    default y

+ 26 - 0
components/drivers/clk/SConscript

@@ -0,0 +1,26 @@
+from building import *
+
+group = []
+objs = []
+
+if not GetDepend(['RT_USING_CLK']):
+    Return('group')
+
+cwd = GetCurrentDir()
+list = os.listdir(cwd)
+CPPPATH = [cwd + '/../include']
+
+src = ['clk.c']
+
+if GetDepend(['RT_USING_OFW']):
+    src += ['clk-fixed-rate.c']
+
+group = DefineGroup('DeviceDrivers', src, depend = [''], CPPPATH = CPPPATH)
+
+for d in list:
+    path = os.path.join(cwd, d)
+    if os.path.isfile(os.path.join(path, 'SConscript')):
+        objs = objs + SConscript(os.path.join(d, 'SConscript'))
+objs = objs + group
+
+Return('objs')

+ 92 - 0
components/drivers/clk/clk-fixed-rate.c

@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2006-2022, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2022-11-26     GuEe-GUI     first version
+ */
+
+#include <rtthread.h>
+#include <rtdevice.h>
+
+#include <drivers/platform.h>
+
+static rt_err_t fixed_clk_ofw_init(struct rt_platform_device *pdev, struct rt_clk_fixed_rate *clk_fixed)
+{
+    rt_err_t err = RT_EOK;
+    rt_uint32_t rate, accuracy = 0;
+    struct rt_ofw_node *np = pdev->parent.ofw_node;
+    const char *clk_name = np->name;
+
+    if (!rt_ofw_prop_read_u32(np, "clock-frequency", &rate))
+    {
+        rt_ofw_prop_read_u32(np, "clock-accuracy", &accuracy);
+        rt_ofw_prop_read_string(np, "clock-output-names", &clk_name);
+
+        clk_fixed->clk.name = clk_name;
+        clk_fixed->clk.rate = rate;
+        clk_fixed->clk.min_rate = rate;
+        clk_fixed->clk.max_rate = rate;
+        clk_fixed->fixed_rate = rate;
+        clk_fixed->fixed_accuracy = accuracy;
+
+        rt_ofw_data(np) = &clk_fixed->clk;
+    }
+    else
+    {
+        err = -RT_EIO;
+    }
+
+    return err;
+}
+
+static rt_err_t fixed_clk_probe(struct rt_platform_device *pdev)
+{
+    rt_err_t err = RT_EOK;
+    struct rt_clk_fixed_rate *clk_fixed = rt_calloc(1, sizeof(*clk_fixed));
+
+    if (clk_fixed)
+    {
+        err = fixed_clk_ofw_init(pdev, clk_fixed);
+    }
+    else
+    {
+        err = -RT_ENOMEM;
+    }
+
+    if (!err)
+    {
+        err = rt_clk_register(&clk_fixed->clk, RT_NULL);
+    }
+
+    if (err && clk_fixed)
+    {
+        rt_free(clk_fixed);
+    }
+
+    return err;
+}
+
+static const struct rt_ofw_node_id fixed_clk_ofw_ids[] =
+{
+    { .compatible = "fixed-clock" },
+    { /* sentinel */ }
+};
+
+static struct rt_platform_driver fixed_clk_driver =
+{
+    .name = "clk-fixed-rate",
+    .ids = fixed_clk_ofw_ids,
+
+    .probe = fixed_clk_probe,
+};
+
+static int fixed_clk_drv_register(void)
+{
+    rt_platform_driver_register(&fixed_clk_driver);
+
+    return 0;
+}
+INIT_SUBSYS_EXPORT(fixed_clk_drv_register);
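
For reference, the registration API above also works without a devicetree node. Below is a minimal sketch (not part of this commit; the 24 MHz oscillator and the "osc24m" name are assumed for illustration) that registers a fixed-rate clock by hand, mirroring what fixed_clk_probe() fills in from the "clock-frequency" property:

/* Illustrative only: a hand-registered 24 MHz fixed-rate clock. */
static struct rt_clk_fixed_rate osc24m_clk =
{
    .clk =
    {
        .name = "osc24m",
        .rate = 24000000,
        .min_rate = 24000000,
        .max_rate = 24000000,
    },
    .fixed_rate = 24000000,
};

static int osc24m_clk_register(void)
{
    /* No parent: the node is linked onto the global clock list */
    return rt_clk_register(&osc24m_clk.clk, RT_NULL);
}
INIT_SUBSYS_EXPORT(osc24m_clk_register);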

+ 1078 - 0
components/drivers/clk/clk.c

@@ -0,0 +1,1078 @@
+/*
+ * Copyright (c) 2006-2022, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2022-11-26     GuEe-GUI     first version
+ */
+
+#include <rtthread.h>
+#include <rtservice.h>
+#include <rtdevice.h>
+
+#define DBG_TAG "rtdm.clk"
+#define DBG_LVL DBG_INFO
+#include <rtdbg.h>
+
+static struct rt_spinlock _clk_lock = { 0 };
+static rt_list_t _clk_nodes = RT_LIST_OBJECT_INIT(_clk_nodes);
+static rt_list_t _clk_notifier_nodes = RT_LIST_OBJECT_INIT(_clk_notifier_nodes);
+
+static void clk_release(struct rt_ref *r)
+{
+    struct rt_clk_node *clk_np = rt_container_of(r, struct rt_clk_node, ref);
+
+    LOG_E("%s is released", clk_np->name);
+    (void)clk_np;
+
+    RT_ASSERT(0);
+}
+
+rt_inline struct rt_clk_node *clk_get(struct rt_clk_node *clk_np)
+{
+    rt_ref_get(&clk_np->ref);
+
+    return clk_np;
+}
+
+rt_inline void clk_put(struct rt_clk_node *clk_np)
+{
+    rt_ref_put(&clk_np->ref, &clk_release);
+}
+
+static struct rt_clk *clk_alloc(struct rt_clk_node *clk_np, const char *dev_id,
+        const char *con_id, void *fw_node)
+{
+    struct rt_clk *clk = rt_calloc(1, sizeof(*clk));
+
+    if (clk)
+    {
+        clk->clk_np = clk_np;
+        clk->dev_id = dev_id;
+        clk->con_id = con_id;
+
+        clk->fw_node = fw_node;
+    }
+
+    return clk;
+}
+
+static void clk_free(struct rt_clk *clk)
+{
+    struct rt_clk_node *clk_np = clk->clk_np;
+
+    if (clk_np && clk_np->ops->finit)
+    {
+        clk_np->ops->finit(clk);
+    }
+
+    rt_free(clk);
+}
+
+static struct rt_clk *clk_create(struct rt_clk_node *clk_np, const char *dev_id,
+        const char *con_id, void *fw_data, void *fw_node)
+{
+    struct rt_clk *clk = clk_alloc(clk_np, dev_id, con_id, fw_node);
+
+    if (clk)
+    {
+        clk_get(clk_np);
+
+        if (clk_np->ops->init && clk_np->ops->init(clk, fw_data))
+        {
+            LOG_E("Dev[%s] Con[%s] init fail", dev_id, con_id);
+
+            clk_free(clk);
+            clk = RT_NULL;
+        }
+    }
+
+    return clk;
+}
+
+static rt_err_t clk_notify(struct rt_clk_node *clk_np, rt_ubase_t msg, rt_ubase_t old_rate, rt_ubase_t new_rate)
+{
+    rt_err_t err = RT_EOK;
+    struct rt_clk_notifier *notifier;
+
+    rt_list_for_each_entry(notifier, &_clk_notifier_nodes, list)
+    {
+        if (notifier->clk->clk_np == clk_np)
+        {
+            err = notifier->callback(notifier, msg, old_rate, new_rate);
+
+            /* Only abort on hardware errors */
+            if (err == -RT_EIO)
+            {
+                break;
+            }
+        }
+    }
+
+    return err;
+}
+
+static void clk_set_parent(struct rt_clk_node *clk_np, struct rt_clk_node *parent_np)
+{
+    rt_hw_spin_lock(&_clk_lock.lock);
+
+    clk_np->parent = parent_np;
+
+    rt_list_insert_after(&parent_np->children_nodes, &clk_np->list);
+
+    rt_hw_spin_unlock(&_clk_lock.lock);
+}
+
+static const struct rt_clk_ops unused_clk_ops =
+{
+};
+
+rt_err_t rt_clk_register(struct rt_clk_node *clk_np, struct rt_clk_node *parent_np)
+{
+    rt_err_t err = RT_EOK;
+    struct rt_clk *clk = RT_NULL;
+
+    if (clk_np)
+    {
+        clk = clk_alloc(clk_np, RT_NULL, RT_NULL, RT_NULL);
+    }
+    else
+    {
+        err = -RT_EINVAL;
+    }
+
+    if (!err && clk)
+    {
+        clk_np->clk = clk;
+
+        if (!clk_np->ops)
+        {
+            clk_np->ops = &unused_clk_ops;
+        }
+
+        rt_ref_init(&clk_np->ref);
+        rt_list_init(&clk_np->list);
+        rt_list_init(&clk_np->children_nodes);
+        clk_np->multi_clk = 0;
+
+        if (parent_np)
+        {
+            clk_set_parent(clk_np, parent_np);
+        }
+        else
+        {
+            clk_np->parent = RT_NULL;
+
+            rt_hw_spin_lock(&_clk_lock.lock);
+
+            rt_list_insert_after(&_clk_nodes, &clk_np->list);
+
+            rt_hw_spin_unlock(&_clk_lock.lock);
+        }
+    }
+    else if (!err)
+    {
+        err = -RT_ENOMEM;
+    }
+
+    return err;
+}
+
+rt_err_t rt_clk_unregister(struct rt_clk_node *clk_np)
+{
+    rt_err_t err = RT_EOK;
+
+    if (clk_np)
+    {
+        err = -RT_EBUSY;
+
+        rt_hw_spin_lock(&_clk_lock.lock);
+
+        if (rt_list_isempty(&clk_np->children_nodes))
+        {
+            if (rt_ref_read(&clk_np->ref) <= 1)
+            {
+                rt_list_remove(&clk_np->list);
+                clk_free(clk_np->clk);
+
+                err = RT_EOK;
+            }
+        }
+
+        rt_hw_spin_unlock(&_clk_lock.lock);
+    }
+    else
+    {
+        err = -RT_EINVAL;
+    }
+
+    return err;
+}
+
+rt_err_t rt_clk_notifier_register(struct rt_clk *clk, struct rt_clk_notifier *notifier)
+{
+    if (!clk || !clk->clk_np || !notifier)
+    {
+        return -RT_EINVAL;
+    }
+
+    rt_hw_spin_lock(&_clk_lock.lock);
+
+    ++clk->clk_np->notifier_count;
+    rt_list_init(&notifier->list);
+    rt_list_insert_after(&_clk_notifier_nodes, &notifier->list);
+
+    rt_hw_spin_unlock(&_clk_lock.lock);
+
+    return RT_EOK;
+}
+
+rt_err_t rt_clk_notifier_unregister(struct rt_clk *clk, struct rt_clk_notifier *notifier)
+{
+    struct rt_clk_notifier *notifier_find;
+
+    if (!clk || !notifier)
+    {
+        return -RT_EINVAL;
+    }
+
+    rt_hw_spin_lock(&_clk_lock.lock);
+
+    rt_list_for_each_entry(notifier_find, &_clk_notifier_nodes, list)
+    {
+        if (notifier_find->clk->clk_np == notifier->clk->clk_np)
+        {
+            --clk->clk_np->notifier_count;
+            rt_list_remove(&notifier->list);
+
+            break;
+        }
+    }
+
+    rt_hw_spin_unlock(&_clk_lock.lock);
+
+    return RT_EOK;
+}
+
+static rt_err_t clk_prepare(struct rt_clk *clk, struct rt_clk_node *clk_np)
+{
+    rt_err_t err = RT_EOK;
+
+    if (clk_np->parent)
+    {
+        clk_prepare(clk_np->clk, clk_np->parent);
+    }
+
+    if (clk_np->ops->prepare)
+    {
+        err = clk_np->ops->prepare(clk);
+    }
+
+    return err;
+}
+
+rt_err_t rt_clk_prepare(struct rt_clk *clk)
+{
+    rt_err_t err = RT_EOK;
+
+    RT_DEBUG_NOT_IN_INTERRUPT;
+
+    if (clk && clk->clk_np)
+    {
+        rt_hw_spin_lock(&_clk_lock.lock);
+
+        err = clk_prepare(clk, clk->clk_np);
+
+        rt_hw_spin_unlock(&_clk_lock.lock);
+    }
+    else
+    {
+        err = -RT_EINVAL;
+    }
+
+    return err;
+}
+
+static void clk_unprepare(struct rt_clk *clk, struct rt_clk_node *clk_np)
+{
+    if (clk_np->parent)
+    {
+        clk_unprepare(clk_np->clk, clk_np->parent);
+    }
+
+    if (clk_np->ops->unprepare)
+    {
+        clk_np->ops->unprepare(clk);
+    }
+}
+
+rt_err_t rt_clk_unprepare(struct rt_clk *clk)
+{
+    rt_err_t err = RT_EOK;
+
+    RT_DEBUG_NOT_IN_INTERRUPT;
+
+    if (clk && clk->clk_np)
+    {
+        rt_hw_spin_lock(&_clk_lock.lock);
+
+        clk_unprepare(clk, clk->clk_np);
+
+        rt_hw_spin_unlock(&_clk_lock.lock);
+    }
+    else
+    {
+        err = -RT_EINVAL;
+    }
+
+    return err;
+}
+
+static rt_err_t clk_enable(struct rt_clk *clk, struct rt_clk_node *clk_np)
+{
+    rt_err_t err = RT_EOK;
+
+    if (clk_np->parent)
+    {
+        clk_enable(clk_np->clk, clk_np->parent);
+    }
+
+    if (clk_np->ops->enable)
+    {
+        err = clk_np->ops->enable(clk);
+    }
+
+    return err;
+}
+
+rt_err_t rt_clk_enable(struct rt_clk *clk)
+{
+    rt_err_t err = RT_EOK;
+
+    if (clk && clk->clk_np)
+    {
+        rt_hw_spin_lock(&_clk_lock.lock);
+
+        err = clk_enable(clk, clk->clk_np);
+
+        rt_hw_spin_unlock(&_clk_lock.lock);
+    }
+    else
+    {
+        err = -RT_EINVAL;
+    }
+
+    return err;
+}
+
+static void clk_disable(struct rt_clk *clk, struct rt_clk_node *clk_np)
+{
+    if (clk_np->parent)
+    {
+        clk_disable(clk_np->clk, clk_np->parent);
+    }
+
+    if (clk_np->ops->disable)
+    {
+        clk_np->ops->disable(clk);
+    }
+}
+
+void rt_clk_disable(struct rt_clk *clk)
+{
+    if (clk && clk->clk_np)
+    {
+        rt_hw_spin_lock(&_clk_lock.lock);
+
+        clk_disable(clk, clk->clk_np);
+
+        rt_hw_spin_unlock(&_clk_lock.lock);
+    }
+}
+
+rt_err_t rt_clk_prepare_enable(struct rt_clk *clk)
+{
+    rt_err_t err;
+
+    RT_DEBUG_NOT_IN_INTERRUPT;
+
+    if (clk)
+    {
+        err = rt_clk_prepare(clk);
+
+        if (!err)
+        {
+            err = rt_clk_enable(clk);
+
+            if (err)
+            {
+                rt_clk_unprepare(clk);
+            }
+        }
+    }
+    else
+    {
+        err = -RT_EINVAL;
+    }
+
+    return err;
+}
+
+void rt_clk_disable_unprepare(struct rt_clk *clk)
+{
+    RT_DEBUG_NOT_IN_INTERRUPT;
+
+    if (clk)
+    {
+        rt_clk_disable(clk);
+        rt_clk_unprepare(clk);
+    }
+}
+
+rt_err_t rt_clk_array_prepare(struct rt_clk_array *clk_arr)
+{
+    rt_err_t err = RT_EOK;
+
+    if (clk_arr)
+    {
+        for (int i = 0; i < clk_arr->count; ++i)
+        {
+            if ((err = rt_clk_prepare(clk_arr->clks[i])))
+            {
+                LOG_E("CLK Array[%d] %s failed error = %s", i,
+                        "prepare", rt_strerror(err));
+
+                while (i --> 0)
+                {
+                    rt_clk_unprepare(clk_arr->clks[i]);
+                }
+
+                break;
+            }
+        }
+    }
+    else
+    {
+        err = -RT_EINVAL;
+    }
+
+    return err;
+}
+
+rt_err_t rt_clk_array_unprepare(struct rt_clk_array *clk_arr)
+{
+    rt_err_t err = RT_EOK;
+
+    if (clk_arr)
+    {
+        for (int i = 0; i < clk_arr->count; ++i)
+        {
+            if ((err = rt_clk_unprepare(clk_arr->clks[i])))
+            {
+                LOG_E("CLK Array[%d] %s failed error = %s", i,
+                        "unprepare", rt_strerror(err));
+
+                break;
+            }
+        }
+    }
+    else
+    {
+        err = -RT_EINVAL;
+    }
+
+    return err;
+}
+
+rt_err_t rt_clk_array_enable(struct rt_clk_array *clk_arr)
+{
+    rt_err_t err = RT_EOK;
+
+    if (clk_arr)
+    {
+        for (int i = 0; i < clk_arr->count; ++i)
+        {
+            if ((err = rt_clk_enable(clk_arr->clks[i])))
+            {
+                LOG_E("CLK Array[%d] %s failed error = %s", i,
+                        "enable", rt_strerror(err));
+
+                while (i --> 0)
+                {
+                    rt_clk_disable(clk_arr->clks[i]);
+                }
+
+                break;
+            }
+        }
+    }
+    else
+    {
+        err = -RT_EINVAL;
+    }
+
+    return err;
+}
+
+void rt_clk_array_disable(struct rt_clk_array *clk_arr)
+{
+    if (clk_arr)
+    {
+        for (int i = 0; i < clk_arr->count; ++i)
+        {
+            rt_clk_disable(clk_arr->clks[i]);
+        }
+    }
+}
+
+rt_err_t rt_clk_array_prepare_enable(struct rt_clk_array *clk_arr)
+{
+    rt_err_t err = RT_EOK;
+
+    if (clk_arr)
+    {
+        for (int i = 0; i < clk_arr->count; ++i)
+        {
+            if ((err = rt_clk_prepare_enable(clk_arr->clks[i])))
+            {
+                LOG_E("CLK Array[%d] %s failed error = %s", i,
+                        "prepare_enable", rt_strerror(err));
+
+                while (i --> 0)
+                {
+                    rt_clk_disable_unprepare(clk_arr->clks[i]);
+                }
+
+                break;
+            }
+        }
+    }
+    else
+    {
+        err = -RT_EINVAL;
+    }
+
+    return err;
+}
+
+void rt_clk_array_disable_unprepare(struct rt_clk_array *clk_arr)
+{
+    if (clk_arr)
+    {
+        for (int i = 0; i < clk_arr->count; ++i)
+        {
+            rt_clk_disable_unprepare(clk_arr->clks[i]);
+        }
+    }
+}
+
+rt_err_t rt_clk_set_rate_range(struct rt_clk *clk, rt_ubase_t min, rt_ubase_t max)
+{
+    rt_err_t err = RT_EOK;
+
+    if (clk && clk->clk_np)
+    {
+        struct rt_clk_node *clk_np = clk->clk_np;
+
+        rt_hw_spin_lock(&_clk_lock.lock);
+
+        if (clk_np->ops->set_rate)
+        {
+            rt_ubase_t rate = clk_np->rate;
+            rt_ubase_t old_min = clk_np->min_rate;
+            rt_ubase_t old_max = clk_np->max_rate;
+
+            clk_np->min_rate = min;
+            clk_np->max_rate = max;
+
+            rate = rt_clamp(rate, min, max);
+            err = clk_np->ops->set_rate(clk, rate,
+                rt_clk_get_rate(clk_np->parent ? clk_np->parent->clk : RT_NULL));
+
+            if (err)
+            {
+                clk_np->min_rate = old_min;
+                clk_np->max_rate = old_max;
+            }
+        }
+        else
+        {
+            err = -RT_ENOSYS;
+        }
+
+        rt_hw_spin_unlock(&_clk_lock.lock);
+    }
+    else
+    {
+        err = -RT_EINVAL;
+    }
+
+    return err;
+}
+
+rt_err_t rt_clk_set_min_rate(struct rt_clk *clk, rt_ubase_t rate)
+{
+    rt_err_t err = RT_EOK;
+
+    if (clk && clk->clk_np)
+    {
+        struct rt_clk_node *clk_np = clk->clk_np;
+
+        err = rt_clk_set_rate_range(clk, rate, clk_np->max_rate);
+    }
+    else
+    {
+        err = -RT_EINVAL;
+    }
+
+    return err;
+}
+
+rt_err_t rt_clk_set_max_rate(struct rt_clk *clk, rt_ubase_t rate)
+{
+    rt_err_t err = RT_EOK;
+
+    if (clk && clk->clk_np)
+    {
+        struct rt_clk_node *clk_np = clk->clk_np;
+
+        err = rt_clk_set_rate_range(clk, clk_np->min_rate, rate);
+    }
+    else
+    {
+        err = -RT_EINVAL;
+    }
+
+    return err;
+}
+
+rt_err_t rt_clk_set_rate(struct rt_clk *clk, rt_ubase_t rate)
+{
+    rt_err_t err = RT_EOK;
+
+    if (clk && clk->clk_np)
+    {
+        struct rt_clk_node *clk_np = clk->clk_np;
+
+        rt_hw_spin_lock(&_clk_lock.lock);
+
+        if (clk_np->min_rate && rate < clk_np->min_rate)
+        {
+            err = -RT_EINVAL;
+        }
+
+        if (clk_np->max_rate && rate > clk_np->max_rate)
+        {
+            err = -RT_EINVAL;
+        }
+
+        if (!err)
+        {
+            if (clk_np->ops->set_rate)
+            {
+                rt_ubase_t old_rate = clk_np->rate;
+
+                err = clk_np->ops->set_rate(clk, rate,
+                    rt_clk_get_rate(clk_np->parent ? clk_np->parent->clk : RT_NULL));
+
+                if (clk_np->rate != old_rate)
+                {
+                    clk_notify(clk_np, RT_CLK_MSG_POST_RATE_CHANGE, old_rate, clk_np->rate);
+                }
+            }
+            else
+            {
+                err = -RT_ENOSYS;
+            }
+        }
+
+        rt_hw_spin_unlock(&_clk_lock.lock);
+    }
+    else
+    {
+        err = -RT_EINVAL;
+    }
+
+    return err;
+}
+
+rt_ubase_t rt_clk_get_rate(struct rt_clk *clk)
+{
+    rt_ubase_t rate = -1UL;
+
+    if (clk)
+    {
+        if (clk->rate)
+        {
+            rate = clk->rate;
+        }
+        else if (clk->clk_np)
+        {
+            rate = clk->clk_np->rate;
+        }
+    }
+
+    return rate;
+}
+
+rt_err_t rt_clk_set_phase(struct rt_clk *clk, int degrees)
+{
+    rt_err_t err = RT_EOK;
+
+    if (clk && clk->clk_np && clk->clk_np->ops->set_phase)
+    {
+        rt_hw_spin_lock(&_clk_lock.lock);
+
+        err = clk->clk_np->ops->set_phase(clk, degrees);
+
+        rt_hw_spin_unlock(&_clk_lock.lock);
+    }
+    else
+    {
+        err = -RT_EINVAL;
+    }
+
+    return err;
+}
+
+rt_base_t rt_clk_get_phase(struct rt_clk *clk)
+{
+    rt_base_t res = RT_EOK;
+
+    if (clk && clk->clk_np && clk->clk_np->ops->get_phase)
+    {
+        rt_hw_spin_lock(&_clk_lock.lock);
+
+        res = clk->clk_np->ops->get_phase(clk);
+
+        rt_hw_spin_unlock(&_clk_lock.lock);
+    }
+    else
+    {
+        res = -RT_EINVAL;
+    }
+
+    return res;
+}
+
+rt_base_t rt_clk_round_rate(struct rt_clk *clk, rt_ubase_t rate)
+{
+    rt_base_t res = RT_EOK;
+
+    if (clk && clk->clk_np && clk->clk_np->ops->round_rate)
+    {
+        rt_ubase_t best_parent_rate;
+        struct rt_clk_node *clk_np = clk->clk_np;
+
+        rt_hw_spin_lock(&_clk_lock.lock);
+
+        if (clk_np->min_rate && clk_np->max_rate)
+        {
+            rate = rt_clamp(rate, clk_np->min_rate, clk_np->max_rate);
+        }
+
+        res = clk->clk_np->ops->round_rate(clk, rate, &best_parent_rate);
+        (void)best_parent_rate;
+
+        rt_hw_spin_unlock(&_clk_lock.lock);
+    }
+    else
+    {
+        res = -RT_EINVAL;
+    }
+
+    return res;
+}
+
+rt_err_t rt_clk_set_parent(struct rt_clk *clk, struct rt_clk *clk_parent)
+{
+    rt_err_t err = RT_EOK;
+
+    if (clk && clk->clk_np && clk->clk_np->ops->set_parent)
+    {
+        rt_hw_spin_lock(&_clk_lock.lock);
+
+        err = clk->clk_np->ops->set_parent(clk, clk_parent);
+
+        rt_hw_spin_unlock(&_clk_lock.lock);
+    }
+    else
+    {
+        err = -RT_EINVAL;
+    }
+
+    return err;
+}
+
+struct rt_clk *rt_clk_get_parent(struct rt_clk *clk)
+{
+    struct rt_clk *parent = RT_NULL;
+
+    if (clk)
+    {
+        struct rt_clk_node *clk_np = clk->clk_np;
+
+        rt_hw_spin_lock(&_clk_lock.lock);
+
+        parent = clk_np->parent ? clk_np->parent->clk : RT_NULL;
+
+        rt_hw_spin_unlock(&_clk_lock.lock);
+    }
+
+    return parent;
+}
+
+struct rt_clk_array *rt_clk_get_array(struct rt_device *dev)
+{
+    struct rt_clk_array *clk_arr = RT_NULL;
+
+#ifdef RT_USING_OFW
+    clk_arr = rt_ofw_get_clk_array(dev->ofw_node);
+#endif
+
+    return clk_arr;
+}
+
+struct rt_clk *rt_clk_get_by_index(struct rt_device *dev, int index)
+{
+    struct rt_clk *clk = RT_NULL;
+
+#ifdef RT_USING_OFW
+    clk = rt_ofw_get_clk(dev->ofw_node, index);
+#endif
+
+    return clk;
+}
+
+struct rt_clk *rt_clk_get_by_name(struct rt_device *dev, const char *name)
+{
+    struct rt_clk *clk = RT_NULL;
+
+#ifdef RT_USING_OFW
+    clk = rt_ofw_get_clk_by_name(dev->ofw_node, name);
+#endif
+
+    return clk;
+}
+
+void rt_clk_array_put(struct rt_clk_array *clk_arr)
+{
+    if (clk_arr)
+    {
+        for (int i = 0; i < clk_arr->count; ++i)
+        {
+            if (clk_arr->clks[i])
+            {
+                rt_clk_put(clk_arr->clks[i]);
+            }
+            else
+            {
+                break;
+            }
+        }
+
+        rt_free(clk_arr);
+    }
+}
+
+void rt_clk_put(struct rt_clk *clk)
+{
+    if (clk)
+    {
+        clk_put(clk->clk_np);
+        clk_free(clk);
+    }
+}
+
+#ifdef RT_USING_OFW
+static struct rt_clk *ofw_get_clk_no_lock(struct rt_ofw_node *np, int index, const char *name)
+{
+    struct rt_clk *clk = RT_NULL;
+    struct rt_ofw_cell_args clk_args;
+
+    if (!rt_ofw_parse_phandle_cells(np, "clocks", "#clock-cells", index, &clk_args))
+    {
+        int count;
+        struct rt_ofw_node *clk_ofw_np = clk_args.data;
+        struct rt_clk_node *clk_np = rt_ofw_data(clk_ofw_np);
+
+        count = rt_ofw_count_of_clk(clk_ofw_np);
+
+        rt_ofw_node_put(clk_ofw_np);
+
+        if (clk_np)
+        {
+            if (count > 1)
+            {
+                /* args[0] must be the index of CLK */
+                clk_np = &clk_np[clk_args.args[0]];
+            }
+
+            clk = clk_create(clk_np, np->full_name, name, &clk_args, np);
+        }
+    }
+
+    return clk;
+}
+
+static struct rt_clk *ofw_get_clk(struct rt_ofw_node *np, int index, const char *name)
+{
+    struct rt_clk *clk;
+
+    rt_hw_spin_lock(&_clk_lock.lock);
+
+    clk = ofw_get_clk_no_lock(np, index, name);
+
+    rt_hw_spin_unlock(&_clk_lock.lock);
+
+    return clk;
+}
+
+struct rt_clk_array *rt_ofw_get_clk_array(struct rt_ofw_node *np)
+{
+    int count;
+    struct rt_clk_array *clk_arr = RT_NULL;
+
+    if ((count = rt_ofw_count_phandle_cells(np, "clocks", "#clock-cells")) > 0)
+    {
+        clk_arr = rt_calloc(1, sizeof(*clk_arr) + sizeof(clk_arr->clks[0]) * count);
+
+        if (clk_arr)
+        {
+            int i;
+            rt_bool_t has_name = rt_ofw_prop_read_bool(np, "clock-names");
+
+            clk_arr->count = count;
+
+            rt_hw_spin_lock(&_clk_lock.lock);
+
+            for (i = 0; i < count; ++i)
+            {
+                const char *name = RT_NULL;
+
+                if (has_name)
+                {
+                    rt_ofw_prop_read_string_index(np, "clock-names", i, &name);
+                }
+
+                clk_arr->clks[i] = ofw_get_clk_no_lock(np, i, name);
+
+                if (!clk_arr->clks[i])
+                {
+                    --i;
+                    break;
+                }
+            }
+
+            rt_hw_spin_unlock(&_clk_lock.lock);
+
+            if (i < count)
+            {
+                rt_clk_array_put(clk_arr);
+                clk_arr = RT_NULL;
+            }
+        }
+    }
+
+    return clk_arr;
+}
+
+struct rt_clk *rt_ofw_get_clk(struct rt_ofw_node *np, int index)
+{
+    struct rt_clk *clk = RT_NULL;
+
+    if (np && index >= 0)
+    {
+        clk = ofw_get_clk(np, index, RT_NULL);
+    }
+
+    return clk;
+}
+
+struct rt_clk *rt_ofw_get_clk_by_name(struct rt_ofw_node *np, const char *name)
+{
+    struct rt_clk *clk = RT_NULL;
+
+    if (np && name)
+    {
+        int index = rt_ofw_prop_index_of_string(np, "clock-names", name);
+
+        if (index >= 0)
+        {
+            clk = ofw_get_clk(np, index, name);
+        }
+    }
+
+    return clk;
+}
+
+rt_ssize_t rt_ofw_count_of_clk(struct rt_ofw_node *clk_ofw_np)
+{
+    if (clk_ofw_np)
+    {
+        struct rt_clk_node *clk_np = rt_ofw_data(clk_ofw_np);
+
+        if (clk_np && clk_np->multi_clk)
+        {
+            return clk_np->multi_clk;
+        }
+        else
+        {
+            const fdt32_t *cell;
+            rt_uint32_t count = 0;
+            struct rt_ofw_prop *prop;
+
+            prop = rt_ofw_get_prop(clk_ofw_np, "clock-indices", RT_NULL);
+
+            if (prop)
+            {
+                rt_uint32_t max_idx = 0, idx;
+
+                for (cell = rt_ofw_prop_next_u32(prop, RT_NULL, &idx);
+                    cell;
+                    cell = rt_ofw_prop_next_u32(prop, cell, &idx))
+                {
+                    if (idx > max_idx)
+                    {
+                        max_idx = idx;
+                    }
+                }
+
+                count = max_idx + 1;
+            }
+            else
+            {
+                rt_ssize_t len;
+
+                if ((prop = rt_ofw_get_prop(clk_ofw_np, "clock-output-names", &len)))
+                {
+                    char *value = prop->value;
+
+                    for (int i = 0; i < len; ++i, ++value)
+                    {
+                        if (*value == '\0')
+                        {
+                            ++count;
+                        }
+                    }
+                }
+                else
+                {
+                    count = 1;
+                }
+            }
+
+            if (clk_np)
+            {
+                clk_np->multi_clk = count;
+            }
+
+            return count;
+        }
+    }
+
+    return -RT_EINVAL;
+}
+#endif /* RT_USING_OFW */
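
As a usage note, rt_clk_notifier_register() expects the caller to fill in the rt_clk_notifier structure before registering it, since clk_notify() matches on notifier->clk->clk_np. A minimal sketch (not part of this commit; the "baud" clock name and the callback are assumed for illustration):

/* Illustrative only: watch rate changes of a device's "baud" clock. */
static rt_err_t baud_clk_notify(struct rt_clk_notifier *notifier,
        rt_ubase_t msg, rt_ubase_t old_rate, rt_ubase_t new_rate)
{
    rt_kprintf("baud clk: %lu Hz -> %lu Hz\n",
            (unsigned long)old_rate, (unsigned long)new_rate);

    return RT_EOK;
}

static struct rt_clk_notifier baud_notifier;

static rt_err_t watch_baud_clk(struct rt_device *dev)
{
    struct rt_clk *clk = rt_clk_get_by_name(dev, "baud");

    if (!clk)
    {
        return -RT_EIO;
    }

    baud_notifier.clk = clk;
    baud_notifier.callback = baud_clk_notify;

    return rt_clk_notifier_register(clk, &baud_notifier);
}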

+ 189 - 0
components/drivers/include/drivers/clk.h

@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2006-2022, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2022-11-26     GuEe-GUI     first version
+ */
+
+#ifndef __CLK_H__
+#define __CLK_H__
+
+#include <rthw.h>
+
+#include <ref.h>
+#include <drivers/ofw.h>
+
+struct rt_clk_ops;
+struct rt_reset_control_node;
+
+struct rt_clk_node
+{
+    /*
+     * Define the node as an array like this if the CLK has multiple output clocks:
+     *
+     *  struct XYZ_single_clk
+     *  {
+     *      struct rt_clk_node parent;
+     *      ...
+     *  };
+     *
+     *  struct XYZ_multi_clk
+     *  {
+     *      struct rt_clk_node parent[N];
+     *      ...
+     *  };
+     * We assume 'N' is the maximum value in 'clock-indices' when using OFW.
+     */
+    rt_list_t list;
+    rt_list_t children_nodes;
+
+    const char *name;
+    const struct rt_clk_ops *ops;
+
+    struct rt_clk_node *parent;
+    struct rt_ref ref;
+
+    rt_ubase_t rate;
+    rt_ubase_t min_rate;
+    rt_ubase_t max_rate;
+
+    rt_size_t notifier_count;
+
+    void *priv;
+
+    struct rt_clk *clk;
+    rt_size_t multi_clk;
+};
+
+struct rt_clk_fixed_rate
+{
+    struct rt_clk_node clk;
+
+    rt_ubase_t fixed_rate;
+    rt_ubase_t fixed_accuracy;
+};
+
+struct rt_clk
+{
+    struct rt_clk_node *clk_np;
+
+    const char *dev_id;
+    const char *con_id;
+
+    rt_ubase_t rate;
+
+    void *fw_node;
+    void *priv;
+};
+
+struct rt_clk_array
+{
+    rt_size_t count;
+    struct rt_clk *clks[];
+};
+
+struct rt_clk_ops
+{
+    rt_err_t    (*init)(struct rt_clk *, void *fw_data);
+    rt_err_t    (*finit)(struct rt_clk *);
+    /* API */
+    rt_err_t    (*prepare)(struct rt_clk *);
+    void        (*unprepare)(struct rt_clk *);
+    rt_bool_t   (*is_prepared)(struct rt_clk *);
+    rt_err_t    (*enable)(struct rt_clk *);
+    void        (*disable)(struct rt_clk *);
+    rt_bool_t   (*is_enabled)(struct rt_clk *);
+    rt_err_t    (*set_rate)(struct rt_clk *, rt_ubase_t rate, rt_ubase_t parent_rate);
+    rt_err_t    (*set_parent)(struct rt_clk *, struct rt_clk *parent);
+    rt_err_t    (*set_phase)(struct rt_clk *, int degrees);
+    rt_base_t   (*get_phase)(struct rt_clk *);
+    rt_base_t   (*round_rate)(struct rt_clk *, rt_ubase_t drate, rt_ubase_t *prate);
+};
+
+struct rt_clk_notifier;
+
+#define RT_CLK_MSG_PRE_RATE_CHANGE      RT_BIT(0)
+#define RT_CLK_MSG_POST_RATE_CHANGE     RT_BIT(1)
+#define RT_CLK_MSG_ABORT_RATE_CHANGE    RT_BIT(2)
+
+typedef rt_err_t (*rt_clk_notifier_callback)(struct rt_clk_notifier *notifier,
+        rt_ubase_t msg, rt_ubase_t old_rate, rt_ubase_t new_rate);
+
+struct rt_clk_notifier
+{
+    rt_list_t list;
+
+    struct rt_clk *clk;
+    rt_clk_notifier_callback callback;
+    void *priv;
+};
+
+rt_err_t rt_clk_register(struct rt_clk_node *clk_np, struct rt_clk_node *parent_np);
+rt_err_t rt_clk_unregister(struct rt_clk_node *clk_np);
+
+rt_err_t rt_clk_notifier_register(struct rt_clk *clk, struct rt_clk_notifier *notifier);
+rt_err_t rt_clk_notifier_unregister(struct rt_clk *clk, struct rt_clk_notifier *notifier);
+
+rt_err_t rt_clk_set_parent(struct rt_clk *clk, struct rt_clk *clk_parent);
+
+rt_err_t rt_clk_prepare(struct rt_clk *clk);
+rt_err_t rt_clk_unprepare(struct rt_clk *clk);
+
+rt_err_t rt_clk_enable(struct rt_clk *clk);
+void rt_clk_disable(struct rt_clk *clk);
+
+rt_err_t rt_clk_prepare_enable(struct rt_clk *clk);
+void rt_clk_disable_unprepare(struct rt_clk *clk);
+
+rt_err_t rt_clk_array_prepare(struct rt_clk_array *clk_arr);
+rt_err_t rt_clk_array_unprepare(struct rt_clk_array *clk_arr);
+
+rt_err_t rt_clk_array_enable(struct rt_clk_array *clk_arr);
+void rt_clk_array_disable(struct rt_clk_array *clk_arr);
+
+rt_err_t rt_clk_array_prepare_enable(struct rt_clk_array *clk_arr);
+void rt_clk_array_disable_unprepare(struct rt_clk_array *clk_arr);
+
+rt_err_t rt_clk_set_rate_range(struct rt_clk *clk, rt_ubase_t min, rt_ubase_t max);
+rt_err_t rt_clk_set_min_rate(struct rt_clk *clk, rt_ubase_t rate);
+rt_err_t rt_clk_set_max_rate(struct rt_clk *clk, rt_ubase_t rate);
+rt_err_t rt_clk_set_rate(struct rt_clk *clk, rt_ubase_t rate);
+rt_ubase_t rt_clk_get_rate(struct rt_clk *clk);
+
+rt_err_t rt_clk_set_phase(struct rt_clk *clk, int degrees);
+rt_base_t rt_clk_get_phase(struct rt_clk *clk);
+
+rt_base_t rt_clk_round_rate(struct rt_clk *clk, rt_ubase_t rate);
+
+struct rt_clk *rt_clk_get_parent(struct rt_clk *clk);
+
+struct rt_clk_array *rt_clk_get_array(struct rt_device *dev);
+struct rt_clk *rt_clk_get_by_index(struct rt_device *dev, int index);
+struct rt_clk *rt_clk_get_by_name(struct rt_device *dev, const char *name);
+void rt_clk_array_put(struct rt_clk_array *clk_arr);
+void rt_clk_put(struct rt_clk *clk);
+
+#ifdef RT_USING_OFW
+struct rt_clk_array *rt_ofw_get_clk_array(struct rt_ofw_node *np);
+struct rt_clk *rt_ofw_get_clk(struct rt_ofw_node *np, int index);
+struct rt_clk *rt_ofw_get_clk_by_name(struct rt_ofw_node *np, const char *name);
+rt_ssize_t rt_ofw_count_of_clk(struct rt_ofw_node *clk_ofw_np);
+#else
+rt_inline struct rt_clk *rt_ofw_get_clk(struct rt_ofw_node *np, int index)
+{
+    return RT_NULL;
+}
+rt_inline struct rt_clk *rt_ofw_get_clk_by_name(struct rt_ofw_node *np, const char *name)
+{
+    return RT_NULL;
+}
+rt_inline rt_ssize_t rt_ofw_count_of_clk(struct rt_ofw_node *clk_ofw_np)
+{
+    return 0;
+}
+#endif /* RT_USING_OFW */
+
+#endif /* __CLK_H__ */
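
A minimal consumer sketch of the API above (illustrative only; the device handle and the "bus" entry in clock-names are assumptions, and error handling is abbreviated):

/* Illustrative only: enable a device's "bus" clock and report its rate. */
static rt_err_t demo_enable_bus_clk(struct rt_device *dev)
{
    rt_err_t err;
    struct rt_clk *clk = rt_clk_get_by_name(dev, "bus");

    if (!clk)
    {
        return -RT_EIO;
    }

    err = rt_clk_prepare_enable(clk);

    if (!err)
    {
        rt_kprintf("bus clk: %lu Hz\n", (unsigned long)rt_clk_get_rate(clk));
    }
    else
    {
        rt_clk_put(clk);
    }

    return err;
}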

+ 4 - 0
components/drivers/include/rtdevice.h

@@ -177,6 +177,10 @@ extern "C" {
 #include "drivers/lcd.h"
 #endif

+#ifdef RT_USING_CLK
+#include "drivers/clk.h"
+#endif /* RT_USING_CLK */
+
 #ifdef RT_USING_DM
 #include "drivers/core/dm.h"