clk.c 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458
  1. /*
  2. * Copyright (c) 2006-2025 RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2022-11-26 GuEe-GUI first version
  9. * 2025-01-24 wumingzi add doxygen comment
  10. */
  11. #include <rtthread.h>
  12. #include <rtservice.h>
  13. #include <rtdevice.h>
  14. /**
  15. * @defgroup group_clk clk
  16. * @brief clk driver api
  17. * @ingroup group_device_driver
  18. * @addtogroup group_clk
  19. * @{
  20. */
  21. #define DBG_TAG "rtdm.clk"
  22. #define DBG_LVL DBG_INFO
  23. #include <rtdbg.h>
  24. static RT_DEFINE_SPINLOCK(_clk_lock);
  25. static rt_list_t _clk_nodes = RT_LIST_OBJECT_INIT(_clk_nodes);
  26. static rt_list_t _clk_notifier_nodes = RT_LIST_OBJECT_INIT(_clk_notifier_nodes);
/**
 * @brief Release clock node
 *
 * Destructor hook passed to rt_ref_put(). In the current design a clock
 * node is never expected to be torn down at runtime, so reaching zero
 * references indicates an unbalanced clk_put() somewhere.
 *
 * @param r point to reference count of clock node
 * @warning The function only can print log and MORE DETAILS SHOULD BE IMPLEMENTED.
 */
static void clk_release(struct rt_ref *r)
{
    struct rt_clk_node *clk_np = rt_container_of(r, struct rt_clk_node, ref);

    LOG_E("%s is release", clk_np->name);
    (void)clk_np;

    /* Deliberately fatal: real teardown support is not implemented yet */
    RT_ASSERT(0);
}
  40. /**
  41. * @brief Increase reference count for clock node
  42. *
  43. * @param clk_np point to clock node
  44. *
  45. * @return struct rt_clk_node * point to clock node whose reference count has increased
  46. */
  47. rt_inline struct rt_clk_node *clk_get(struct rt_clk_node *clk_np)
  48. {
  49. rt_ref_get(&clk_np->ref);
  50. return clk_np;
  51. }
  52. /**
  53. * @brief Decrease reference count for clock node
  54. *
  55. * @param clk_np point to clock node
  56. *
  57. */
  58. rt_inline void clk_put(struct rt_clk_node *clk_np)
  59. {
  60. rt_ref_put(&clk_np->ref, &clk_release);
  61. }
  62. /**
  63. * @brief Allocate memory space for struct clock and return it
  64. *
  65. * @param clk_np point to clock node
  66. * @param dev_id device identifier for the clock
  67. * @param con_id connection identifier for the clock
  68. * @param fw_node point to the firmware node associated with the clock
  69. *
  70. * @return struct rt_clk* point to clock
  71. */
  72. static struct rt_clk *clk_alloc(struct rt_clk_node *clk_np, const char *dev_id,
  73. const char *con_id, void *fw_node)
  74. {
  75. struct rt_clk *clk = rt_calloc(1, sizeof(*clk));
  76. if (clk)
  77. {
  78. clk->clk_np = clk_np;
  79. clk->dev_id = dev_id;
  80. clk->con_id = con_id;
  81. clk->fw_node = fw_node;
  82. }
  83. else
  84. {
  85. clk = rt_err_ptr(-RT_ENOMEM);
  86. }
  87. return clk;
  88. }
  89. /**
  90. * @brief Free memory space of clock object
  91. *
  92. * @param clk point to clock
  93. *
  94. */
  95. static void clk_free(struct rt_clk *clk)
  96. {
  97. struct rt_clk_node *clk_np = clk->clk_np;
  98. if (clk_np && clk_np->ops->finit)
  99. {
  100. clk_np->ops->finit(clk);
  101. }
  102. rt_free(clk);
  103. }
  104. /**
  105. * @brief Allocate memory space and creat clock object
  106. *
  107. * @param clk_np point to clock node
  108. * @param dev_id device identifier for the clock
  109. * @param con_id connection identifier for the clock
  110. * @param fw_data point to the firmware data associated with the clock
  111. * @param fw_node point to the firmware node associated with the clock
  112. *
  113. * @return struct rt_clk* point to clock
  114. */
  115. static struct rt_clk *clk_create(struct rt_clk_node *clk_np, const char *dev_id,
  116. const char *con_id, void *fw_data, void *fw_node)
  117. {
  118. struct rt_clk *clk = clk_alloc(clk_np, dev_id, con_id, fw_node);
  119. if (!rt_is_err(clk))
  120. {
  121. clk_get(clk_np);
  122. if (clk_np->ops->init && clk_np->ops->init(clk, fw_data))
  123. {
  124. LOG_E("Dev[%s] Con[%s] init fail", dev_id, con_id);
  125. clk_free(clk);
  126. clk = RT_NULL;
  127. }
  128. }
  129. return clk;
  130. }
  131. /**
  132. * @brief Notify corresponding clock from all
  133. *
  134. * @param clk_np point to clock node
  135. * @param msg message identifier for the event
  136. * @param old_rate old rate of the clock before the event
  137. * @param new_rate new rate of the clock after the event
  138. *
  139. * @return rt_err_t RT_EOK on notify clock sucessfully, and other value is failed.
  140. */
  141. static rt_err_t clk_notify(struct rt_clk_node *clk_np, rt_ubase_t msg, rt_ubase_t old_rate, rt_ubase_t new_rate)
  142. {
  143. rt_err_t err = RT_EOK;
  144. struct rt_clk_notifier *notifier;
  145. rt_list_for_each_entry(notifier, &_clk_notifier_nodes, list)
  146. {
  147. if (notifier->clk->clk_np == clk_np)
  148. {
  149. err = notifier->callback(notifier, msg, old_rate, new_rate);
  150. /* Only check hareware's error */
  151. if (err == -RT_EIO)
  152. {
  153. break;
  154. }
  155. }
  156. }
  157. return err;
  158. }
  159. /**
  160. * @brief Set parent clock
  161. *
  162. * @param clk_np point to clock node
  163. * @param parent_np point to parent rt_clk
  164. *
  165. */
  166. static void clk_set_parent(struct rt_clk_node *clk_np, struct rt_clk_node *parent_np)
  167. {
  168. rt_hw_spin_lock(&_clk_lock.lock);
  169. clk_np->parent = parent_np;
  170. rt_list_insert_after(&parent_np->children_nodes, &clk_np->list);
  171. rt_hw_spin_unlock(&_clk_lock.lock);
  172. }
/* Empty ops table shared by nodes registered without driver callbacks, so
 * the rest of the file may dereference clk_np->ops unconditionally. */
static const struct rt_clk_ops unused_clk_ops =
{
};
  176. /**
  177. * @brief Register clock node into clock list
  178. *
  179. * @param clk_np point to child node that will be registered node.
  180. * @param parent_np point to parent rt_clk. If it is RT_NULL, clock node will be linked to init node.
  181. *
  182. * @retval RT_EOK
  183. * @retval -RT_ENOMEM
  184. */
  185. rt_err_t rt_clk_register(struct rt_clk_node *clk_np, struct rt_clk_node *parent_np)
  186. {
  187. rt_err_t err = RT_EOK;
  188. struct rt_clk *clk = RT_NULL;
  189. if (clk_np)
  190. {
  191. clk_np->clk = clk;
  192. if (!clk_np->ops)
  193. {
  194. clk_np->ops = &unused_clk_ops;
  195. }
  196. #if RT_NAME_MAX > 0
  197. rt_strncpy(clk_np->rt_parent.name, RT_CLK_NODE_OBJ_NAME, RT_NAME_MAX);
  198. #else
  199. clk_np->rt_parent.name = RT_CLK_NODE_OBJ_NAME;
  200. #endif
  201. rt_ref_init(&clk_np->ref);
  202. rt_list_init(&clk_np->list);
  203. rt_list_init(&clk_np->children_nodes);
  204. clk_np->multi_clk = 0;
  205. if (parent_np)
  206. {
  207. clk_np->clk = clk_alloc(clk_np, RT_NULL, RT_NULL, RT_NULL);
  208. if (clk_np->clk)
  209. {
  210. clk_set_parent(clk_np, parent_np);
  211. }
  212. else
  213. {
  214. err = -RT_ENOMEM;
  215. }
  216. }
  217. else
  218. {
  219. clk_np->parent = RT_NULL;
  220. rt_hw_spin_lock(&_clk_lock.lock);
  221. rt_list_insert_after(&_clk_nodes, &clk_np->list);
  222. rt_hw_spin_unlock(&_clk_lock.lock);
  223. }
  224. }
  225. else
  226. {
  227. err = -RT_ENOMEM;
  228. }
  229. return err;
  230. }
  231. /**
  232. * @brief Unregister clock node from clock list
  233. *
  234. * @param clk_np point to child node that will be Unregistered node.
  235. *
  236. * @retval RT_EOK
  237. * @retval -RT_EBUSY
  238. * @retval -RT_EINVAL
  239. */
  240. rt_err_t rt_clk_unregister(struct rt_clk_node *clk_np)
  241. {
  242. rt_err_t err = RT_EOK;
  243. if (clk_np)
  244. {
  245. err = -RT_EBUSY;
  246. rt_hw_spin_lock(&_clk_lock.lock);
  247. if (rt_list_isempty(&clk_np->children_nodes))
  248. {
  249. if (rt_ref_read(&clk_np->ref) <= 1)
  250. {
  251. rt_list_remove(&clk_np->list);
  252. clk_free(clk_np->clk);
  253. err = RT_EOK;
  254. }
  255. }
  256. rt_hw_spin_unlock(&_clk_lock.lock);
  257. }
  258. else
  259. {
  260. err = -RT_EINVAL;
  261. }
  262. return err;
  263. }
  264. /**
  265. * @brief Register clock notifier into notifier list
  266. *
  267. * @param clk point to clock
  268. * @param notifier point to notifier for register
  269. *
  270. * @retval RT_EOK
  271. * @retval -RT_EINVAL
  272. */
  273. rt_err_t rt_clk_notifier_register(struct rt_clk *clk, struct rt_clk_notifier *notifier)
  274. {
  275. if (!clk || !clk->clk_np || !notifier)
  276. {
  277. return -RT_EINVAL;
  278. }
  279. rt_hw_spin_lock(&_clk_lock.lock);
  280. ++clk->clk_np->notifier_count;
  281. rt_list_init(&notifier->list);
  282. rt_list_insert_after(&_clk_notifier_nodes, &notifier->list);
  283. rt_hw_spin_unlock(&_clk_lock.lock);
  284. return RT_EOK;
  285. }
/**
 * @brief Unregister clock notifier into notifier list
 *
 * Scans the global notifier list for an entry watching the same clock node
 * as @p notifier; when found, decrements the node's notifier count and
 * unlinks @p notifier itself (not the entry found by the scan).
 *
 * @param clk point to clock
 * @param notifier point to notifier for unregister
 *
 * @retval RT_EOK also returned when no matching entry exists
 * @retval -RT_EINVAL clk or notifier is RT_NULL
 *
 * @note NOTE(review): clk->clk_np is dereferenced without a NULL check,
 *       unlike rt_clk_notifier_register() — confirm callers always pass a
 *       bound clk. The match is by clock node rather than by the notifier
 *       object itself; presumably intentional, but verify against callers.
 */
rt_err_t rt_clk_notifier_unregister(struct rt_clk *clk, struct rt_clk_notifier *notifier)
{
    struct rt_clk_notifier *notifier_find;

    if (!clk || !notifier)
    {
        return -RT_EINVAL;
    }

    rt_hw_spin_lock(&_clk_lock.lock);

    rt_list_for_each_entry(notifier_find, &_clk_notifier_nodes, list)
    {
        if (notifier_find->clk->clk_np == notifier->clk->clk_np)
        {
            --clk->clk_np->notifier_count;
            /* Safe while iterating only because we break immediately */
            rt_list_remove(&notifier->list);

            break;
        }
    }

    rt_hw_spin_unlock(&_clk_lock.lock);

    return RT_EOK;
}
/**
 * @brief Recursively prepare clock
 *
 * Prepares ancestors first (walking up the parent chain), then calls this
 * node's prepare() only on the 0 -> 1 transition of the per-clk
 * prepare_count.
 *
 * @param clk point to clock that will be prepared
 * @param clk_np point to clock node that will be prepared
 *
 * @return rt_err_t RT_EOK on prepare clock successfully, and other value is failed.
 *
 * @note NOTE(review): the recursive step passes clk_np->clk, which is
 *       RT_NULL for nodes registered without a parent (see
 *       rt_clk_register) — looks like a possible NULL dereference on such
 *       chains; confirm with real topologies. The recursive call's error
 *       is also discarded.
 */
static rt_err_t clk_prepare(struct rt_clk *clk, struct rt_clk_node *clk_np)
{
    rt_err_t err = RT_EOK;

    if (clk_np->parent)
    {
        clk_prepare(clk_np->clk, clk_np->parent);
    }

    /* Only the first user actually triggers the hardware prepare */
    if (clk->prepare_count == 0 && clk_np->ops->prepare)
    {
        err = clk_np->ops->prepare(clk);
    }

    if (!err)
    {
        ++clk->prepare_count;
    }

    return err;
}
  340. /**
  341. * @brief Prepare clock
  342. *
  343. * @param clk
  344. *
  345. * @return rt_err_t RT_EOK on prepare clock sucessfully, and other value is failed.
  346. */
  347. rt_err_t rt_clk_prepare(struct rt_clk *clk)
  348. {
  349. rt_err_t err = RT_EOK;
  350. RT_DEBUG_NOT_IN_INTERRUPT;
  351. if (clk && clk->clk_np)
  352. {
  353. rt_hw_spin_lock(&_clk_lock.lock);
  354. err = clk_prepare(clk, clk->clk_np);
  355. rt_hw_spin_unlock(&_clk_lock.lock);
  356. }
  357. return err;
  358. }
/**
 * @brief Recursively unprepare clock
 *
 * Unprepares ancestors first, then calls this node's unprepare() only on
 * the 1 -> 0 transition of the per-clk prepare_count.
 *
 * @param clk point to clock that will be unprepared
 * @param clk_np point to clock node that will be unprepared
 *
 * @note NOTE(review): as with clk_prepare(), the recursive step passes
 *       clk_np->clk, which is RT_NULL for parent-less registered nodes —
 *       confirm this cannot be reached on real topologies.
 */
static void clk_unprepare(struct rt_clk *clk, struct rt_clk_node *clk_np)
{
    if (clk_np->parent)
    {
        clk_unprepare(clk_np->clk, clk_np->parent);
    }

    /* Only the last user triggers the hardware unprepare */
    if (clk->prepare_count == 1 && clk_np->ops->unprepare)
    {
        clk_np->ops->unprepare(clk);
    }

    if (clk->prepare_count)
    {
        --clk->prepare_count;
    }
}
  381. rt_err_t rt_clk_unprepare(struct rt_clk *clk)
  382. {
  383. rt_err_t err = RT_EOK;
  384. RT_DEBUG_NOT_IN_INTERRUPT;
  385. if (clk && clk->clk_np)
  386. {
  387. rt_hw_spin_lock(&_clk_lock.lock);
  388. clk_unprepare(clk, clk->clk_np);
  389. rt_hw_spin_unlock(&_clk_lock.lock);
  390. }
  391. return err;
  392. }
/**
 * @brief Recursively enable clock
 *
 * Enables ancestors first, then calls this node's enable() only on the
 * 0 -> 1 transition of the per-clk enable_count.
 *
 * @param clk point to clock
 * @param clk_np point to clock node
 *
 * @return rt_err_t RT_EOK on enable clock successfully, and other value is failed.
 *
 * @note NOTE(review): errors from the recursive parent call are ignored,
 *       and clk_np->clk may be RT_NULL for parent-less registered nodes —
 *       confirm with real topologies.
 */
static rt_err_t clk_enable(struct rt_clk *clk, struct rt_clk_node *clk_np)
{
    rt_err_t err = RT_EOK;

    if (clk_np->parent)
    {
        clk_enable(clk_np->clk, clk_np->parent);
    }

    /* Only the first user actually gates the hardware on */
    if (clk->enable_count == 0 && clk_np->ops->enable)
    {
        err = clk_np->ops->enable(clk);
    }

    if (!err)
    {
        ++clk->enable_count;
    }

    return err;
}
  417. /**
  418. * @brief Enable clock
  419. *
  420. * @param clk point to clock
  421. *
  422. * @return rt_err_t RT_EOK on enable clock sucessfully, and other value is failed.
  423. */
  424. rt_err_t rt_clk_enable(struct rt_clk *clk)
  425. {
  426. rt_err_t err = RT_EOK;
  427. if (clk && clk->clk_np)
  428. {
  429. rt_hw_spin_lock(&_clk_lock.lock);
  430. err = clk_enable(clk, clk->clk_np);
  431. rt_hw_spin_unlock(&_clk_lock.lock);
  432. }
  433. return err;
  434. }
/**
 * @brief Recursively disable clock
 *
 * Disables ancestors first, then calls this node's disable() only on the
 * 1 -> 0 transition of the per-clk enable_count.
 *
 * @param clk point to clock that will be disabled
 * @param clk_np point to clock node that will be disabled
 *
 * @note NOTE(review): disabling the parent before the child mirrors
 *       clk_enable's ordering but inverts the usual parent-last shutdown
 *       order — confirm intended. clk_np->clk may be RT_NULL for
 *       parent-less registered nodes.
 */
static void clk_disable(struct rt_clk *clk, struct rt_clk_node *clk_np)
{
    if (clk_np->parent)
    {
        clk_disable(clk_np->clk, clk_np->parent);
    }

    /* Only the last user gates the hardware off */
    if (clk->enable_count == 1 && clk_np->ops->disable)
    {
        clk_np->ops->disable(clk);
    }

    if (clk->enable_count)
    {
        --clk->enable_count;
    }
}
  457. /**
  458. * @brief Disable clock
  459. *
  460. * @param clk point to clock
  461. *
  462. */
  463. void rt_clk_disable(struct rt_clk *clk)
  464. {
  465. if (clk && clk->clk_np)
  466. {
  467. rt_hw_spin_lock(&_clk_lock.lock);
  468. clk_disable(clk, clk->clk_np);
  469. rt_hw_spin_unlock(&_clk_lock.lock);
  470. }
  471. }
  472. /**
  473. * @brief Prepare and enable clock
  474. *
  475. * @param clk point to clock
  476. *
  477. * @return rt_err_t RT_EOK on prepare and enable clock sucessfully, and other value is failed.
  478. */
  479. rt_err_t rt_clk_prepare_enable(struct rt_clk *clk)
  480. {
  481. rt_err_t err = RT_EOK;
  482. RT_DEBUG_NOT_IN_INTERRUPT;
  483. if (clk)
  484. {
  485. err = rt_clk_prepare(clk);
  486. if (!err)
  487. {
  488. err = rt_clk_enable(clk);
  489. if (err)
  490. {
  491. rt_clk_unprepare(clk);
  492. }
  493. }
  494. }
  495. return err;
  496. }
  497. /**
  498. * @brief Disable and unprepare clock
  499. *
  500. * @param clk point to clock
  501. *
  502. */
  503. void rt_clk_disable_unprepare(struct rt_clk *clk)
  504. {
  505. RT_DEBUG_NOT_IN_INTERRUPT;
  506. if (clk)
  507. {
  508. rt_clk_disable(clk);
  509. rt_clk_unprepare(clk);
  510. }
  511. }
  512. /**
  513. * @brief Prepare clock array for mutipule out clock
  514. *
  515. * @param clk_arr point to clock array
  516. *
  517. * @return rt_err_t RT_EOK on prepare clock array sucessfully, and other value is failed.
  518. */
  519. rt_err_t rt_clk_array_prepare(struct rt_clk_array *clk_arr)
  520. {
  521. rt_err_t err = RT_EOK;
  522. if (clk_arr)
  523. {
  524. for (int i = 0; i < clk_arr->count; ++i)
  525. {
  526. if ((err = rt_clk_prepare(clk_arr->clks[i])))
  527. {
  528. LOG_E("CLK Array[%d] %s failed error = %s", i,
  529. "prepare", rt_strerror(err));
  530. while (i --> 0)
  531. {
  532. rt_clk_unprepare(clk_arr->clks[i]);
  533. }
  534. break;
  535. }
  536. }
  537. }
  538. return err;
  539. }
  540. rt_err_t rt_clk_array_unprepare(struct rt_clk_array *clk_arr)
  541. {
  542. rt_err_t err = RT_EOK;
  543. if (clk_arr)
  544. {
  545. for (int i = 0; i < clk_arr->count; ++i)
  546. {
  547. if ((err = rt_clk_unprepare(clk_arr->clks[i])))
  548. {
  549. LOG_E("CLK Array[%d] %s failed error = %s", i,
  550. "unprepare", rt_strerror(err));
  551. break;
  552. }
  553. }
  554. }
  555. return err;
  556. }
  557. /**
  558. * @brief Enable clock array for mutipule out clock
  559. *
  560. * @param clk_arr point to clock array
  561. *
  562. * @return rt_err_t RT_EOK on Enable clock array sucessfully, and other value is failed.
  563. */
  564. rt_err_t rt_clk_array_enable(struct rt_clk_array *clk_arr)
  565. {
  566. rt_err_t err = RT_EOK;
  567. if (clk_arr)
  568. {
  569. for (int i = 0; i < clk_arr->count; ++i)
  570. {
  571. if ((err = rt_clk_enable(clk_arr->clks[i])))
  572. {
  573. LOG_E("CLK Array[%d] %s failed error = %s", i,
  574. "enable", rt_strerror(err));
  575. while (i --> 0)
  576. {
  577. rt_clk_disable(clk_arr->clks[i]);
  578. }
  579. break;
  580. }
  581. }
  582. }
  583. return err;
  584. }
  585. /**
  586. * @brief Enable clock array for mutipule out clock
  587. *
  588. * @param clk_arr point to clock array
  589. *
  590. */
  591. void rt_clk_array_disable(struct rt_clk_array *clk_arr)
  592. {
  593. if (clk_arr)
  594. {
  595. for (int i = 0; i < clk_arr->count; ++i)
  596. {
  597. rt_clk_disable(clk_arr->clks[i]);
  598. }
  599. }
  600. }
  601. /**
  602. * @brief Prepare and enable clock array
  603. *
  604. * @param clk_arr point to clock array
  605. *
  606. * @return rt_err_t RT_EOK on prepare and enable clock array sucessfully, and other
  607. value is failed.
  608. */
  609. rt_err_t rt_clk_array_prepare_enable(struct rt_clk_array *clk_arr)
  610. {
  611. rt_err_t err;
  612. if ((err = rt_clk_array_prepare(clk_arr)))
  613. {
  614. return err;
  615. }
  616. if ((err = rt_clk_array_enable(clk_arr)))
  617. {
  618. rt_clk_array_unprepare(clk_arr);
  619. }
  620. return err;
  621. }
  622. /**
  623. * @brief Disable and unprepare clock array
  624. *
  625. * @param clk_arr point to clock array
  626. *
  627. */
  628. void rt_clk_array_disable_unprepare(struct rt_clk_array *clk_arr)
  629. {
  630. rt_clk_array_disable(clk_arr);
  631. rt_clk_array_unprepare(clk_arr);
  632. }
/**
 * @brief Set clock rate range
 *
 * Installs the new [min, max] bounds, re-clamps the current rate into them
 * and pushes it to the hardware via ops->set_rate. On hardware failure the
 * previous bounds are restored.
 *
 * @param clk point to clock
 * @param min minimum clock rate
 * @param max maximum clock rate
 *
 * @return rt_err_t RT_EOK on set clock rate range successfully, and other value is failed.
 * @retval -RT_ENOSYS the node has no set_rate operation
 *
 * @note NOTE(review): rt_clk_get_rate() on the parent is called while
 *       holding _clk_lock — fine today because it takes no lock itself,
 *       but fragile if that ever changes.
 */
rt_err_t rt_clk_set_rate_range(struct rt_clk *clk, rt_ubase_t min, rt_ubase_t max)
{
    rt_err_t err = RT_EOK;

    if (clk && clk->clk_np)
    {
        struct rt_clk_node *clk_np = clk->clk_np;

        rt_hw_spin_lock(&_clk_lock.lock);

        if (clk_np->ops->set_rate)
        {
            rt_ubase_t rate = clk_np->rate;
            /* Remember old bounds so a hardware failure can roll back */
            rt_ubase_t old_min = clk_np->min_rate;
            rt_ubase_t old_max = clk_np->max_rate;

            clk_np->min_rate = min;
            clk_np->max_rate = max;

            rate = rt_clamp(rate, min, max);
            err = clk_np->ops->set_rate(clk, rate,
                    rt_clk_get_rate(clk_np->parent ? clk_np->parent->clk : RT_NULL));

            if (err)
            {
                clk_np->min_rate = old_min;
                clk_np->max_rate = old_max;
            }
        }
        else
        {
            err = -RT_ENOSYS;
        }

        rt_hw_spin_unlock(&_clk_lock.lock);
    }

    return err;
}
  673. /**
  674. * @brief Set minimum clock rate
  675. *
  676. * @param clk point to clock
  677. * @param rate miminum clock rate
  678. *
  679. * @return rt_err_t RT_EOK on set minimum clock rate sucessfully, and other value is failed.
  680. */
  681. rt_err_t rt_clk_set_min_rate(struct rt_clk *clk, rt_ubase_t rate)
  682. {
  683. rt_err_t err = RT_EOK;
  684. if (clk && clk->clk_np)
  685. {
  686. struct rt_clk_node *clk_np = clk->clk_np;
  687. err = rt_clk_set_rate_range(clk, rate, clk_np->max_rate);
  688. }
  689. return err;
  690. }
  691. /**
  692. * @brief Set maximum clock rate
  693. *
  694. * @param clk point to clock
  695. * @param rate maximum clock rate
  696. *
  697. * @return rt_err_t RT_EOK on set maximum clock rate sucessfully, and other value is failed.
  698. */
  699. rt_err_t rt_clk_set_max_rate(struct rt_clk *clk, rt_ubase_t rate)
  700. {
  701. rt_err_t err = RT_EOK;
  702. if (clk && clk->clk_np)
  703. {
  704. struct rt_clk_node *clk_np = clk->clk_np;
  705. err = rt_clk_set_rate_range(clk, clk_np->min_rate, rate);
  706. }
  707. return err;
  708. }
/**
 * @brief Set clock rate
 *
 * The requested rate is first rounded via rt_clk_round_rate(), validated
 * against the node's non-zero min/max bounds, then pushed to the hardware.
 * Registered notifiers are invoked when the node's rate actually changed.
 *
 * @param clk point to clock
 * @param rate target rate
 *
 * @return rt_err_t RT_EOK on set clock rate successfully, and other value is failed.
 * @retval -RT_EINVAL rounded rate falls outside the configured bounds
 * @retval -RT_ENOSYS the node has no set_rate operation
 *
 * @note NOTE(review): the notifier fires AFTER ops->set_rate yet uses
 *       RT_CLK_MSG_PRE_RATE_CHANGE — looks like it should be the
 *       post-change message; confirm against notifier consumers before
 *       changing, as they see this constant.
 */
rt_err_t rt_clk_set_rate(struct rt_clk *clk, rt_ubase_t rate)
{
    rt_err_t err = RT_EOK;

    rate = rt_clk_round_rate(clk, rate);

    if (clk && clk->clk_np && rate > 0)
    {
        struct rt_clk_node *clk_np = clk->clk_np;

        rt_hw_spin_lock(&_clk_lock.lock);

        /* A zero bound means "unset" and is not enforced */
        if (clk_np->min_rate && rate < clk_np->min_rate)
        {
            err = -RT_EINVAL;
        }

        if (clk_np->max_rate && rate > clk_np->max_rate)
        {
            err = -RT_EINVAL;
        }

        if (!err)
        {
            if (clk_np->ops->set_rate)
            {
                rt_ubase_t old_rate = clk_np->rate;

                err = clk_np->ops->set_rate(clk, rate,
                        rt_clk_get_rate(clk_np->parent ? clk_np->parent->clk : RT_NULL));

                /* ops->set_rate is expected to update clk_np->rate itself */
                if (clk_np->rate != old_rate)
                {
                    clk_notify(clk_np, RT_CLK_MSG_PRE_RATE_CHANGE, old_rate, clk_np->rate);
                }
            }
            else
            {
                err = -RT_ENOSYS;
            }
        }

        rt_hw_spin_unlock(&_clk_lock.lock);
    }

    return err;
}
  754. /**
  755. * @brief Get clock rate
  756. *
  757. * @param clk point to clock
  758. *
  759. * @return rt_ubase_t clock rate or error code
  760. */
  761. rt_ubase_t rt_clk_get_rate(struct rt_clk *clk)
  762. {
  763. rt_ubase_t rate = 0;
  764. if (clk)
  765. {
  766. if (clk->rate)
  767. {
  768. rate = clk->rate;
  769. }
  770. else if (clk->clk_np)
  771. {
  772. rate = clk->clk_np->rate;
  773. }
  774. }
  775. return rate;
  776. }
  777. /**
  778. * @brief Set clock phase
  779. *
  780. * @param clk point to clock
  781. * @param degrees target phase and the unit of phase is degree
  782. *
  783. * @return rt_err_t RT_EOK on set clock phase sucessfully, and other value is failed.
  784. */
  785. rt_err_t rt_clk_set_phase(struct rt_clk *clk, int degrees)
  786. {
  787. rt_err_t err = RT_EOK;
  788. if (clk && clk->clk_np && clk->clk_np->ops->set_phase)
  789. {
  790. rt_hw_spin_lock(&_clk_lock.lock);
  791. err = clk->clk_np->ops->set_phase(clk, degrees);
  792. rt_hw_spin_unlock(&_clk_lock.lock);
  793. }
  794. return err;
  795. }
  796. /**
  797. * @brief Get clock phase
  798. *
  799. * @param clk point to clock
  800. *
  801. * @return rt_base_t clock phase or error code
  802. */
  803. rt_base_t rt_clk_get_phase(struct rt_clk *clk)
  804. {
  805. rt_base_t res = RT_EOK;
  806. if (clk && clk->clk_np && clk->clk_np->ops->get_phase)
  807. {
  808. rt_hw_spin_lock(&_clk_lock.lock);
  809. res = clk->clk_np->ops->get_phase(clk);
  810. rt_hw_spin_unlock(&_clk_lock.lock);
  811. }
  812. return res;
  813. }
  814. /**
  815. * @brief Check if clock rate is in the minimum to maximun and get it
  816. *
  817. * @param clk point to clock
  818. * @param rate rate will be checked
  819. *
  820. * @return rt_base_t get the correct rate
  821. * @note if parameter rate less than the minimum or more than maximum, the
  822. retrun rate will be set to minimum ormaximum value
  823. */
  824. rt_base_t rt_clk_round_rate(struct rt_clk *clk, rt_ubase_t rate)
  825. {
  826. rt_base_t res = -RT_EINVAL;
  827. if (clk && clk->clk_np)
  828. {
  829. struct rt_clk_node *clk_np = clk->clk_np;
  830. if (clk_np->ops->round_rate)
  831. {
  832. rt_ubase_t best_parent_rate;
  833. rt_hw_spin_lock(&_clk_lock.lock);
  834. if (clk_np->min_rate && clk_np->max_rate)
  835. {
  836. rate = rt_clamp(rate, clk_np->min_rate, clk_np->max_rate);
  837. }
  838. res = clk_np->ops->round_rate(clk, rate, &best_parent_rate);
  839. (void)best_parent_rate;
  840. rt_hw_spin_unlock(&_clk_lock.lock);
  841. }
  842. else
  843. {
  844. if (rate < clk_np->min_rate)
  845. {
  846. res = clk_np->min_rate;
  847. }
  848. else if (rate > clk_np->max_rate)
  849. {
  850. res = clk_np->max_rate;
  851. }
  852. else
  853. {
  854. res = rate;
  855. }
  856. }
  857. }
  858. return res;
  859. }
  860. /**
  861. * @brief Set clock parent object
  862. *
  863. * @param clk point to clock
  864. * @param clk_parent point to parent clock
  865. *
  866. * @return rt_err_t RT_EOK on set clock parent sucessfully, and other value is failed.
  867. */
  868. rt_err_t rt_clk_set_parent(struct rt_clk *clk, struct rt_clk *clk_parent)
  869. {
  870. rt_err_t err = RT_EOK;
  871. if (clk && clk->clk_np && clk->clk_np->ops->set_parent)
  872. {
  873. rt_hw_spin_lock(&_clk_lock.lock);
  874. err = clk->clk_np->ops->set_parent(clk, clk_parent);
  875. rt_hw_spin_unlock(&_clk_lock.lock);
  876. }
  877. return err;
  878. }
  879. /**
  880. * @brief Get parent clock pointer
  881. *
  882. * @param clk child clock
  883. *
  884. * @return struct rt_clk* parent clock object pointer will be return, unless child
  885. clock node havn't parent node instead return RT_NULL
  886. */
  887. struct rt_clk *rt_clk_get_parent(struct rt_clk *clk)
  888. {
  889. struct rt_clk *parent = RT_NULL;
  890. if (clk)
  891. {
  892. struct rt_clk_node *clk_np = clk->clk_np;
  893. rt_hw_spin_lock(&_clk_lock.lock);
  894. parent = clk_np->parent ? clk_np->parent->clk : RT_NULL;
  895. rt_hw_spin_unlock(&_clk_lock.lock);
  896. }
  897. return parent;
  898. }
  899. /**
  900. * @brief Get clock array pointer from ofw device node
  901. *
  902. * @param dev point to dev
  903. *
  904. * @return struct rt_clk_array* if use ofw and under normal circumstance, it will return
  905. clock array pointer and other value is RT_NULL
  906. */
  907. struct rt_clk_array *rt_clk_get_array(struct rt_device *dev)
  908. {
  909. struct rt_clk_array *clk_arr = RT_NULL;
  910. #ifdef RT_USING_OFW
  911. clk_arr = rt_ofw_get_clk_array(dev->ofw_node);
  912. #endif
  913. return clk_arr;
  914. }
  915. /**
  916. * @brief Get clock pointer from ofw device node by index
  917. *
  918. * @param dev point to dev
  919. * @param index index of clock object
  920. *
  921. * @return struct rt_clk* if use ofw and under normal circumstance, it will return clock
  922. pointer and other value is RT_NULL
  923. */
  924. struct rt_clk *rt_clk_get_by_index(struct rt_device *dev, int index)
  925. {
  926. struct rt_clk *clk = RT_NULL;
  927. #ifdef RT_USING_OFW
  928. clk = rt_ofw_get_clk(dev->ofw_node, index);
  929. #endif
  930. return clk;
  931. }
  932. /**
  933. * @brief Get clock pointer from ofw device node by name
  934. *
  935. * @param dev point to dev
  936. * @param name name of clock object
  937. *
  938. * @return struct rt_clk* if use ofw and under normal circumstance, it will return clock
  939. pointer and other value is RT_NULL
  940. */
  941. struct rt_clk *rt_clk_get_by_name(struct rt_device *dev, const char *name)
  942. {
  943. struct rt_clk *clk = RT_NULL;
  944. #ifdef RT_USING_OFW
  945. clk = rt_ofw_get_clk_by_name(dev->ofw_node, name);
  946. #endif
  947. return clk;
  948. }
  949. /**
  950. * @brief Put reference count of all colock in the clock array
  951. *
  952. * @param clk_arr point to clock array
  953. *
  954. */
  955. void rt_clk_array_put(struct rt_clk_array *clk_arr)
  956. {
  957. if (clk_arr)
  958. {
  959. for (int i = 0; i < clk_arr->count; ++i)
  960. {
  961. if (clk_arr->clks[i])
  962. {
  963. rt_clk_put(clk_arr->clks[i]);
  964. }
  965. else
  966. {
  967. break;
  968. }
  969. }
  970. rt_free(clk_arr);
  971. }
  972. }
  973. /**
  974. * @brief Put reference count of clock
  975. *
  976. * @param clk point to clock
  977. *
  978. */
  979. void rt_clk_put(struct rt_clk *clk)
  980. {
  981. if (clk)
  982. {
  983. clk_put(clk->clk_np);
  984. clk_free(clk);
  985. }
  986. }
  987. #ifdef RT_USING_OFW
  988. /**
  989. * @brief Get a clock object from a device tree node without acquiring a lock
  990. *
  991. * @param np point to ofw node
  992. * @param index index of clock in ofw
  993. * @param name connection identifier for the clock
  994. * @param locked lock flag for indicating whether the caller holds the lock
  995. *
  996. * @return struct rt_clk* point to the newly created clock object, or an error pointer
  997. */
  998. static struct rt_clk *ofw_get_clk_no_lock(struct rt_ofw_node *np, int index, const char *name, rt_bool_t locked)
  999. {
  1000. struct rt_clk *clk = RT_NULL;
  1001. struct rt_ofw_cell_args clk_args;
  1002. if (!rt_ofw_parse_phandle_cells(np, "clocks", "#clock-cells", index, &clk_args))
  1003. {
  1004. int count;
  1005. struct rt_object *obj;
  1006. struct rt_clk_node *clk_np = RT_NULL;
  1007. struct rt_ofw_node *clk_ofw_np = clk_args.data;
  1008. if (!rt_ofw_data(clk_ofw_np))
  1009. {
  1010. if (locked)
  1011. {
  1012. rt_hw_spin_unlock(&_clk_lock.lock);
  1013. }
  1014. rt_platform_ofw_request(clk_ofw_np);
  1015. if (locked)
  1016. {
  1017. rt_hw_spin_lock(&_clk_lock.lock);
  1018. }
  1019. }
  1020. if (rt_ofw_data(clk_ofw_np) && (obj = rt_ofw_parse_object(clk_ofw_np,
  1021. RT_CLK_NODE_OBJ_NAME, "#clock-cells")))
  1022. {
  1023. clk_np = rt_container_of(obj, struct rt_clk_node, rt_parent);
  1024. count = rt_ofw_count_of_clk(clk_ofw_np);
  1025. }
  1026. rt_ofw_node_put(clk_ofw_np);
  1027. if (clk_np)
  1028. {
  1029. if (count > 1)
  1030. {
  1031. /* args[0] must be the index of CLK */
  1032. clk_np = &clk_np[clk_args.args[0]];
  1033. }
  1034. clk = clk_create(clk_np, np->full_name, name, &clk_args, np);
  1035. }
  1036. else
  1037. {
  1038. clk = rt_err_ptr(-RT_ERROR);
  1039. }
  1040. }
  1041. return clk;
  1042. }
  1043. /**
  1044. * @brief Get clock from ofw with acquiring a spin lock
  1045. *
  1046. * @param np point to ofw node
  1047. * @param index index of clock in ofw
  1048. * @param name connection identifier for the clock
  1049. *
  1050. * @return struct rt_clk* point to the newly created clock object, or an error pointer
  1051. */
  1052. static struct rt_clk *ofw_get_clk(struct rt_ofw_node *np, int index, const char *name)
  1053. {
  1054. struct rt_clk *clk;
  1055. rt_hw_spin_lock(&_clk_lock.lock);
  1056. clk = ofw_get_clk_no_lock(np, index, name, RT_TRUE);
  1057. rt_hw_spin_unlock(&_clk_lock.lock);
  1058. return clk;
  1059. }
  1060. /**
  1061. * @brief Get clock array from ofw
  1062. *
  1063. * @param np point to ofw node
  1064. *
  1065. * @return struct rt_clk_array* point to the newly created clock array, or an error pointer
  1066. */
  1067. struct rt_clk_array *rt_ofw_get_clk_array(struct rt_ofw_node *np)
  1068. {
  1069. int count;
  1070. struct rt_clk_array *clk_arr = RT_NULL;
  1071. if (!np)
  1072. {
  1073. return rt_err_ptr(-RT_EINVAL);
  1074. }
  1075. if ((count = rt_ofw_count_phandle_cells(np, "clocks", "#clock-cells")) > 0)
  1076. {
  1077. clk_arr = rt_calloc(1, sizeof(*clk_arr) + sizeof(clk_arr->clks[0]) * count);
  1078. if (clk_arr)
  1079. {
  1080. int i;
  1081. rt_err_t err = RT_EOK;
  1082. rt_bool_t has_name = rt_ofw_prop_read_bool(np, "clock-names");
  1083. clk_arr->count = count;
  1084. rt_hw_spin_lock(&_clk_lock.lock);
  1085. for (i = 0; i < count; ++i)
  1086. {
  1087. const char *name = RT_NULL;
  1088. if (has_name)
  1089. {
  1090. rt_ofw_prop_read_string_index(np, "clock-names", i, &name);
  1091. }
  1092. clk_arr->clks[i] = ofw_get_clk_no_lock(np, i, name, RT_FALSE);
  1093. if (rt_is_err(clk_arr->clks[i]))
  1094. {
  1095. err = rt_ptr_err(clk_arr->clks[i]);
  1096. --i;
  1097. break;
  1098. }
  1099. }
  1100. rt_hw_spin_unlock(&_clk_lock.lock);
  1101. if (i > 0 && i < count)
  1102. {
  1103. rt_clk_array_put(clk_arr);
  1104. clk_arr = rt_err_ptr(err);
  1105. }
  1106. }
  1107. }
  1108. return clk_arr;
  1109. }
  1110. /**
  1111. * @brief Get clock from ofw with acquiring a spin lock by index and node pointer
  1112. *
  1113. * @param np point to ofw node
  1114. * @param index index of clock in ofw
  1115. *
  1116. * @return struct rt_clk* point to the newly created clock object, or an error pointer
  1117. */
  1118. struct rt_clk *rt_ofw_get_clk(struct rt_ofw_node *np, int index)
  1119. {
  1120. struct rt_clk *clk = RT_NULL;
  1121. if (np && index >= 0)
  1122. {
  1123. clk = ofw_get_clk(np, index, RT_NULL);
  1124. }
  1125. return clk;
  1126. }
  1127. /**
  1128. * @brief Get clock from ofw with acquiring a spin lock by name
  1129. *
  1130. * @param np point to ofw node
  1131. * @param name name of clock will be returned
  1132. *
  1133. * @return struct rt_clk* point to the newly created clock object, or an error pointer
  1134. */
  1135. struct rt_clk *rt_ofw_get_clk_by_name(struct rt_ofw_node *np, const char *name)
  1136. {
  1137. struct rt_clk *clk = RT_NULL;
  1138. if (np && name)
  1139. {
  1140. int index = rt_ofw_prop_index_of_string(np, "clock-names", name);
  1141. if (index >= 0)
  1142. {
  1143. clk = ofw_get_clk(np, index, name);
  1144. }
  1145. }
  1146. return clk;
  1147. }
  1148. /**
  1149. * @brief Count number of clocks in ofw
  1150. *
  1151. * @param clk_ofw_np point to ofw node
  1152. *
  1153. * @return rt_ssize_t number of clocks
  1154. */
  1155. rt_ssize_t rt_ofw_count_of_clk(struct rt_ofw_node *clk_ofw_np)
  1156. {
  1157. if (clk_ofw_np)
  1158. {
  1159. struct rt_clk_node *clk_np = rt_ofw_data(clk_ofw_np);
  1160. if (clk_np && clk_np->multi_clk)
  1161. {
  1162. return clk_np->multi_clk;
  1163. }
  1164. else
  1165. {
  1166. const fdt32_t *cell;
  1167. rt_uint32_t count = 0;
  1168. struct rt_ofw_prop *prop;
  1169. prop = rt_ofw_get_prop(clk_ofw_np, "clock-indices", RT_NULL);
  1170. if (prop)
  1171. {
  1172. rt_uint32_t max_idx = 0, idx;
  1173. for (cell = rt_ofw_prop_next_u32(prop, RT_NULL, &idx);
  1174. cell;
  1175. cell = rt_ofw_prop_next_u32(prop, cell, &idx))
  1176. {
  1177. if (idx > max_idx)
  1178. {
  1179. max_idx = idx;
  1180. }
  1181. }
  1182. count = max_idx + 1;
  1183. }
  1184. else
  1185. {
  1186. rt_ssize_t len;
  1187. if ((prop = rt_ofw_get_prop(clk_ofw_np, "clock-output-names", &len)))
  1188. {
  1189. char *value = prop->value;
  1190. for (int i = 0; i < len; ++i, ++value)
  1191. {
  1192. if (*value == '\0')
  1193. {
  1194. ++count;
  1195. }
  1196. }
  1197. }
  1198. else
  1199. {
  1200. count = 1;
  1201. }
  1202. }
  1203. if (clk_np)
  1204. {
  1205. clk_np->multi_clk = count;
  1206. }
  1207. return count;
  1208. }
  1209. }
  1210. return -RT_EINVAL;
  1211. }
  1212. #endif /* RT_USING_OFW */
  1213. /**@}*/