/*
 * Copyright (c) 2006-2025 RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2022-11-26     GuEe-GUI     first version
 * 2025-01-24     wumingzi     add doxygen comment
 */
#include <rtthread.h>
#include <rtservice.h>
#include <rtdevice.h>
/**
 * @addtogroup group_Drivers RTTHREAD Driver
 * @defgroup group_clk clk
 * @brief clk driver api
 * @ingroup group_Drivers
 * @addtogroup group_clk
 * @{
 */
#define DBG_TAG "rtdm.clk"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

/* Protects the global clock tree and the notifier list below. */
static RT_DEFINE_SPINLOCK(_clk_lock);
/* List of root clock nodes (nodes registered without a parent). */
static rt_list_t _clk_nodes = RT_LIST_OBJECT_INIT(_clk_nodes);
/* Global list of registered rate-change notifiers. */
static rt_list_t _clk_notifier_nodes = RT_LIST_OBJECT_INIT(_clk_notifier_nodes);
/**
 * @brief Release clock node
 *
 * Invoked by rt_ref_put() when the last reference to a clock node is
 * dropped. Real teardown is not implemented, so reaching this point is
 * treated as fatal.
 *
 * @param r point to reference count of clock node
 * @warning The function only can print log and MORE DETAILS SHOULD BE IMPLEMENTED.
 */
static void clk_release(struct rt_ref *r)
{
    struct rt_clk_node *clk_np = rt_container_of(r, struct rt_clk_node, ref);

    LOG_E("%s is release", clk_np->name);
    (void)clk_np;

    /* No node is expected to be fully released at present. */
    RT_ASSERT(0);
}
/**
 * @brief Increase reference count for clock node
 *
 * @param clk_np point to clock node (must not be RT_NULL)
 *
 * @return struct rt_clk_node * the same clock node, with its reference
 *         count incremented
 */
rt_inline struct rt_clk_node *clk_get(struct rt_clk_node *clk_np)
{
    rt_ref_get(&clk_np->ref);
    return clk_np;
}
/**
 * @brief Decrease reference count for clock node
 *
 * When the count drops to zero, clk_release() is invoked (which
 * currently asserts).
 *
 * @param clk_np point to clock node (must not be RT_NULL)
 */
rt_inline void clk_put(struct rt_clk_node *clk_np)
{
    rt_ref_put(&clk_np->ref, &clk_release);
}
  63. /**
  64. * @brief Allocate memory space for struct clock and return it
  65. *
  66. * @param clk_np point to clock node
  67. * @param dev_id device identifier for the clock
  68. * @param con_id connection identifier for the clock
  69. * @param fw_node point to the firmware node associated with the clock
  70. *
  71. * @return struct rt_clk* point to clock
  72. */
  73. static struct rt_clk *clk_alloc(struct rt_clk_node *clk_np, const char *dev_id,
  74. const char *con_id, void *fw_node)
  75. {
  76. struct rt_clk *clk = rt_calloc(1, sizeof(*clk));
  77. if (clk)
  78. {
  79. clk->clk_np = clk_np;
  80. clk->dev_id = dev_id;
  81. clk->con_id = con_id;
  82. clk->fw_node = fw_node;
  83. }
  84. else
  85. {
  86. clk = rt_err_ptr(-RT_ENOMEM);
  87. }
  88. return clk;
  89. }
  90. /**
  91. * @brief Free memory space of clock object
  92. *
  93. * @param clk point to clock
  94. *
  95. */
  96. static void clk_free(struct rt_clk *clk)
  97. {
  98. struct rt_clk_node *clk_np = clk->clk_np;
  99. if (clk_np && clk_np->ops->finit)
  100. {
  101. clk_np->ops->finit(clk);
  102. }
  103. rt_free(clk);
  104. }
  105. /**
  106. * @brief Allocate memory space and creat clock object
  107. *
  108. * @param clk_np point to clock node
  109. * @param dev_id device identifier for the clock
  110. * @param con_id connection identifier for the clock
  111. * @param fw_data point to the firmware data associated with the clock
  112. * @param fw_node point to the firmware node associated with the clock
  113. *
  114. * @return struct rt_clk* point to clock
  115. */
  116. static struct rt_clk *clk_create(struct rt_clk_node *clk_np, const char *dev_id,
  117. const char *con_id, void *fw_data, void *fw_node)
  118. {
  119. struct rt_clk *clk = clk_alloc(clk_np, dev_id, con_id, fw_node);
  120. if (!rt_is_err(clk))
  121. {
  122. clk_get(clk_np);
  123. if (clk_np->ops->init && clk_np->ops->init(clk, fw_data))
  124. {
  125. LOG_E("Dev[%s] Con[%s] init fail", dev_id, con_id);
  126. clk_free(clk);
  127. clk = RT_NULL;
  128. }
  129. }
  130. return clk;
  131. }
  132. /**
  133. * @brief Notify corresponding clock from all
  134. *
  135. * @param clk_np point to clock node
  136. * @param msg message identifier for the event
  137. * @param old_rate old rate of the clock before the event
  138. * @param new_rate new rate of the clock after the event
  139. *
  140. * @return rt_err_t RT_EOK on notify clock sucessfully, and other value is failed.
  141. */
  142. static rt_err_t clk_notify(struct rt_clk_node *clk_np, rt_ubase_t msg, rt_ubase_t old_rate, rt_ubase_t new_rate)
  143. {
  144. rt_err_t err = RT_EOK;
  145. struct rt_clk_notifier *notifier;
  146. rt_list_for_each_entry(notifier, &_clk_notifier_nodes, list)
  147. {
  148. if (notifier->clk->clk_np == clk_np)
  149. {
  150. err = notifier->callback(notifier, msg, old_rate, new_rate);
  151. /* Only check hareware's error */
  152. if (err == -RT_EIO)
  153. {
  154. break;
  155. }
  156. }
  157. }
  158. return err;
  159. }
/**
 * @brief Set parent clock node
 *
 * Links @p clk_np under @p parent_np in the clock tree while holding the
 * global clk spinlock.
 *
 * @param clk_np point to clock node
 * @param parent_np point to the parent clock node
 */
static void clk_set_parent(struct rt_clk_node *clk_np, struct rt_clk_node *parent_np)
{
    rt_hw_spin_lock(&_clk_lock.lock);
    clk_np->parent = parent_np;
    rt_list_insert_after(&parent_np->children_nodes, &clk_np->list);
    rt_hw_spin_unlock(&_clk_lock.lock);
}
/* Fallback ops table for nodes registered without ops: every callback is
 * RT_NULL, so all optional operations become no-ops. */
static const struct rt_clk_ops unused_clk_ops =
{
};
  177. /**
  178. * @brief Register clock node into clock list
  179. *
  180. * @param clk_np point to child node that will be registered node.
  181. * @param parent_np point to parent rt_clk. If it is RT_NULL, clock node will be linked to init node.
  182. *
  183. * @retval RT_EOK
  184. * @retval -RT_ENOMEM
  185. */
  186. rt_err_t rt_clk_register(struct rt_clk_node *clk_np, struct rt_clk_node *parent_np)
  187. {
  188. rt_err_t err = RT_EOK;
  189. struct rt_clk *clk = RT_NULL;
  190. if (clk_np)
  191. {
  192. clk_np->clk = clk;
  193. if (!clk_np->ops)
  194. {
  195. clk_np->ops = &unused_clk_ops;
  196. }
  197. #if RT_NAME_MAX > 0
  198. rt_strncpy(clk_np->rt_parent.name, RT_CLK_NODE_OBJ_NAME, RT_NAME_MAX);
  199. #else
  200. clk_np->rt_parent.name = RT_CLK_NODE_OBJ_NAME;
  201. #endif
  202. rt_ref_init(&clk_np->ref);
  203. rt_list_init(&clk_np->list);
  204. rt_list_init(&clk_np->children_nodes);
  205. clk_np->multi_clk = 0;
  206. if (parent_np)
  207. {
  208. clk_np->clk = clk_alloc(clk_np, RT_NULL, RT_NULL, RT_NULL);
  209. if (clk_np->clk)
  210. {
  211. clk_set_parent(clk_np, parent_np);
  212. }
  213. else
  214. {
  215. err = -RT_ENOMEM;
  216. }
  217. }
  218. else
  219. {
  220. clk_np->parent = RT_NULL;
  221. rt_hw_spin_lock(&_clk_lock.lock);
  222. rt_list_insert_after(&_clk_nodes, &clk_np->list);
  223. rt_hw_spin_unlock(&_clk_lock.lock);
  224. }
  225. }
  226. else
  227. {
  228. err = -RT_ENOMEM;
  229. }
  230. return err;
  231. }
  232. /**
  233. * @brief Unregister clock node from clock list
  234. *
  235. * @param clk_np point to child node that will be Unregistered node.
  236. *
  237. * @retval RT_EOK
  238. * @retval -RT_EBUSY
  239. * @retval -RT_EINVAL
  240. */
  241. rt_err_t rt_clk_unregister(struct rt_clk_node *clk_np)
  242. {
  243. rt_err_t err = RT_EOK;
  244. if (clk_np)
  245. {
  246. err = -RT_EBUSY;
  247. rt_hw_spin_lock(&_clk_lock.lock);
  248. if (rt_list_isempty(&clk_np->children_nodes))
  249. {
  250. if (rt_ref_read(&clk_np->ref) <= 1)
  251. {
  252. rt_list_remove(&clk_np->list);
  253. clk_free(clk_np->clk);
  254. err = RT_EOK;
  255. }
  256. }
  257. rt_hw_spin_unlock(&_clk_lock.lock);
  258. }
  259. else
  260. {
  261. err = -RT_EINVAL;
  262. }
  263. return err;
  264. }
  265. /**
  266. * @brief Register clock notifier into notifier list
  267. *
  268. * @param clk point to clock
  269. * @param notifier point to notifier for register
  270. *
  271. * @retval RT_EOK
  272. * @retval -RT_EINVAL
  273. */
  274. rt_err_t rt_clk_notifier_register(struct rt_clk *clk, struct rt_clk_notifier *notifier)
  275. {
  276. if (!clk || !clk->clk_np || !notifier)
  277. {
  278. return -RT_EINVAL;
  279. }
  280. rt_hw_spin_lock(&_clk_lock.lock);
  281. ++clk->clk_np->notifier_count;
  282. rt_list_init(&notifier->list);
  283. rt_list_insert_after(&_clk_notifier_nodes, &notifier->list);
  284. rt_hw_spin_unlock(&_clk_lock.lock);
  285. return RT_EOK;
  286. }
  287. /**
  288. * @brief Unregister clock notifier into notifier list
  289. *
  290. * @param clk point to clock
  291. * @param notifier point to notifier for unregister
  292. *
  293. * @retval RT_EOK
  294. * @retval -RT_EINVAL
  295. */
  296. rt_err_t rt_clk_notifier_unregister(struct rt_clk *clk, struct rt_clk_notifier *notifier)
  297. {
  298. struct rt_clk_notifier *notifier_find;
  299. if (!clk || !notifier)
  300. {
  301. return -RT_EINVAL;
  302. }
  303. rt_hw_spin_lock(&_clk_lock.lock);
  304. rt_list_for_each_entry(notifier_find, &_clk_notifier_nodes, list)
  305. {
  306. if (notifier_find->clk->clk_np == notifier->clk->clk_np)
  307. {
  308. --clk->clk_np->notifier_count;
  309. rt_list_remove(&notifier->list);
  310. break;
  311. }
  312. }
  313. rt_hw_spin_unlock(&_clk_lock.lock);
  314. return RT_EOK;
  315. }
  316. /**
  317. * @brief Recursively prepare clock
  318. *
  319. * @param clk Ponit to clock that will be prepared
  320. * @param clk_np Ponit to clock node that will be prepared
  321. *
  322. * @return rt_err_t RT_EOK on prepare clock sucessfully, and other value is failed.
  323. */
  324. static rt_err_t clk_prepare(struct rt_clk *clk, struct rt_clk_node *clk_np)
  325. {
  326. rt_err_t err = RT_EOK;
  327. if (clk_np->parent)
  328. {
  329. clk_prepare(clk_np->clk, clk_np->parent);
  330. }
  331. if (clk->prepare_count == 0 && clk_np->ops->prepare)
  332. {
  333. err = clk_np->ops->prepare(clk);
  334. }
  335. if (!err)
  336. {
  337. ++clk->prepare_count;
  338. }
  339. return err;
  340. }
  341. /**
  342. * @brief Prepare clock
  343. *
  344. * @param clk
  345. *
  346. * @return rt_err_t RT_EOK on prepare clock sucessfully, and other value is failed.
  347. */
  348. rt_err_t rt_clk_prepare(struct rt_clk *clk)
  349. {
  350. rt_err_t err = RT_EOK;
  351. RT_DEBUG_NOT_IN_INTERRUPT;
  352. if (clk && clk->clk_np)
  353. {
  354. rt_hw_spin_lock(&_clk_lock.lock);
  355. err = clk_prepare(clk, clk->clk_np);
  356. rt_hw_spin_unlock(&_clk_lock.lock);
  357. }
  358. return err;
  359. }
  360. /**
  361. * @brief Recursively unprepare clock
  362. *
  363. * @param clk Ponit to clock that will be unprepared
  364. * @param clk_np Ponit to clock node that will be unprepared
  365. *
  366. */
  367. static void clk_unprepare(struct rt_clk *clk, struct rt_clk_node *clk_np)
  368. {
  369. if (clk_np->parent)
  370. {
  371. clk_unprepare(clk_np->clk, clk_np->parent);
  372. }
  373. if (clk->prepare_count == 1 && clk_np->ops->unprepare)
  374. {
  375. clk_np->ops->unprepare(clk);
  376. }
  377. if (clk->prepare_count)
  378. {
  379. --clk->prepare_count;
  380. }
  381. }
  382. rt_err_t rt_clk_unprepare(struct rt_clk *clk)
  383. {
  384. rt_err_t err = RT_EOK;
  385. RT_DEBUG_NOT_IN_INTERRUPT;
  386. if (clk && clk->clk_np)
  387. {
  388. rt_hw_spin_lock(&_clk_lock.lock);
  389. clk_unprepare(clk, clk->clk_np);
  390. rt_hw_spin_unlock(&_clk_lock.lock);
  391. }
  392. return err;
  393. }
  394. /**
  395. * @brief Enable clock
  396. *
  397. * @param clk point to clock
  398. *
  399. * @return rt_err_t RT_EOK on enable clock FOREVER.
  400. */
  401. static rt_err_t clk_enable(struct rt_clk *clk, struct rt_clk_node *clk_np)
  402. {
  403. rt_err_t err = RT_EOK;
  404. if (clk_np->parent)
  405. {
  406. clk_enable(clk_np->clk, clk_np->parent);
  407. }
  408. if (clk->enable_count == 0 && clk_np->ops->enable)
  409. {
  410. err = clk_np->ops->enable(clk);
  411. }
  412. if (!err)
  413. {
  414. ++clk->enable_count;
  415. }
  416. return err;
  417. }
  418. /**
  419. * @brief Enable clock
  420. *
  421. * @param clk point to clock
  422. *
  423. * @return rt_err_t RT_EOK on enable clock sucessfully, and other value is failed.
  424. */
  425. rt_err_t rt_clk_enable(struct rt_clk *clk)
  426. {
  427. rt_err_t err = RT_EOK;
  428. if (clk && clk->clk_np)
  429. {
  430. rt_hw_spin_lock(&_clk_lock.lock);
  431. err = clk_enable(clk, clk->clk_np);
  432. rt_hw_spin_unlock(&_clk_lock.lock);
  433. }
  434. return err;
  435. }
  436. /**
  437. * @brief Recursively disable clock
  438. *
  439. * @param clk Ponit to clock that will be disabled
  440. * @param clk_np Ponit to clock node that will be disabled
  441. *
  442. */
  443. static void clk_disable(struct rt_clk *clk, struct rt_clk_node *clk_np)
  444. {
  445. if (clk_np->parent)
  446. {
  447. clk_disable(clk_np->clk, clk_np->parent);
  448. }
  449. if (clk->enable_count == 1 && clk_np->ops->disable)
  450. {
  451. clk_np->ops->disable(clk);
  452. }
  453. if (clk->enable_count)
  454. {
  455. --clk->enable_count;
  456. }
  457. }
  458. /**
  459. * @brief Disable clock
  460. *
  461. * @param clk point to clock
  462. *
  463. */
  464. void rt_clk_disable(struct rt_clk *clk)
  465. {
  466. if (clk && clk->clk_np)
  467. {
  468. rt_hw_spin_lock(&_clk_lock.lock);
  469. clk_disable(clk, clk->clk_np);
  470. rt_hw_spin_unlock(&_clk_lock.lock);
  471. }
  472. }
  473. /**
  474. * @brief Prepare and enable clock
  475. *
  476. * @param clk point to clock
  477. *
  478. * @return rt_err_t RT_EOK on prepare and enable clock sucessfully, and other value is failed.
  479. */
  480. rt_err_t rt_clk_prepare_enable(struct rt_clk *clk)
  481. {
  482. rt_err_t err = RT_EOK;
  483. RT_DEBUG_NOT_IN_INTERRUPT;
  484. if (clk)
  485. {
  486. err = rt_clk_prepare(clk);
  487. if (!err)
  488. {
  489. err = rt_clk_enable(clk);
  490. if (err)
  491. {
  492. rt_clk_unprepare(clk);
  493. }
  494. }
  495. }
  496. return err;
  497. }
  498. /**
  499. * @brief Disable and unprepare clock
  500. *
  501. * @param clk point to clock
  502. *
  503. */
  504. void rt_clk_disable_unprepare(struct rt_clk *clk)
  505. {
  506. RT_DEBUG_NOT_IN_INTERRUPT;
  507. if (clk)
  508. {
  509. rt_clk_disable(clk);
  510. rt_clk_unprepare(clk);
  511. }
  512. }
  513. /**
  514. * @brief Prepare clock array for mutipule out clock
  515. *
  516. * @param clk_arr point to clock array
  517. *
  518. * @return rt_err_t RT_EOK on prepare clock array sucessfully, and other value is failed.
  519. */
  520. rt_err_t rt_clk_array_prepare(struct rt_clk_array *clk_arr)
  521. {
  522. rt_err_t err = RT_EOK;
  523. if (clk_arr)
  524. {
  525. for (int i = 0; i < clk_arr->count; ++i)
  526. {
  527. if ((err = rt_clk_prepare(clk_arr->clks[i])))
  528. {
  529. LOG_E("CLK Array[%d] %s failed error = %s", i,
  530. "prepare", rt_strerror(err));
  531. while (i --> 0)
  532. {
  533. rt_clk_unprepare(clk_arr->clks[i]);
  534. }
  535. break;
  536. }
  537. }
  538. }
  539. return err;
  540. }
/**
 * @brief Unprepare every clock in a clock array.
 *
 * @param clk_arr point to clock array
 *
 * @return rt_err_t RT_EOK on success, or the first error reported by
 *         rt_clk_unprepare(); remaining clocks are left untouched.
 */
rt_err_t rt_clk_array_unprepare(struct rt_clk_array *clk_arr)
{
    rt_err_t err = RT_EOK;

    if (clk_arr)
    {
        for (int i = 0; i < clk_arr->count; ++i)
        {
            if ((err = rt_clk_unprepare(clk_arr->clks[i])))
            {
                LOG_E("CLK Array[%d] %s failed error = %s", i,
                        "unprepare", rt_strerror(err));
                break;
            }
        }
    }

    return err;
}
  558. /**
  559. * @brief Enable clock array for mutipule out clock
  560. *
  561. * @param clk_arr point to clock array
  562. *
  563. * @return rt_err_t RT_EOK on Enable clock array sucessfully, and other value is failed.
  564. */
  565. rt_err_t rt_clk_array_enable(struct rt_clk_array *clk_arr)
  566. {
  567. rt_err_t err = RT_EOK;
  568. if (clk_arr)
  569. {
  570. for (int i = 0; i < clk_arr->count; ++i)
  571. {
  572. if ((err = rt_clk_enable(clk_arr->clks[i])))
  573. {
  574. LOG_E("CLK Array[%d] %s failed error = %s", i,
  575. "enable", rt_strerror(err));
  576. while (i --> 0)
  577. {
  578. rt_clk_disable(clk_arr->clks[i]);
  579. }
  580. break;
  581. }
  582. }
  583. }
  584. return err;
  585. }
  586. /**
  587. * @brief Enable clock array for mutipule out clock
  588. *
  589. * @param clk_arr point to clock array
  590. *
  591. */
  592. void rt_clk_array_disable(struct rt_clk_array *clk_arr)
  593. {
  594. if (clk_arr)
  595. {
  596. for (int i = 0; i < clk_arr->count; ++i)
  597. {
  598. rt_clk_disable(clk_arr->clks[i]);
  599. }
  600. }
  601. }
  602. /**
  603. * @brief Prepare and enable clock array
  604. *
  605. * @param clk_arr point to clock array
  606. *
  607. * @return rt_err_t RT_EOK on prepare and enable clock array sucessfully, and other
  608. value is failed.
  609. */
  610. rt_err_t rt_clk_array_prepare_enable(struct rt_clk_array *clk_arr)
  611. {
  612. rt_err_t err;
  613. if ((err = rt_clk_array_prepare(clk_arr)))
  614. {
  615. return err;
  616. }
  617. if ((err = rt_clk_array_enable(clk_arr)))
  618. {
  619. rt_clk_array_unprepare(clk_arr);
  620. }
  621. return err;
  622. }
  623. /**
  624. * @brief Disable and unprepare clock array
  625. *
  626. * @param clk_arr point to clock array
  627. *
  628. */
  629. void rt_clk_array_disable_unprepare(struct rt_clk_array *clk_arr)
  630. {
  631. rt_clk_array_disable(clk_arr);
  632. rt_clk_array_unprepare(clk_arr);
  633. }
/**
 * @brief Set clock rate range
 *
 * Installs new min/max limits, clamps the current rate into them and
 * re-applies it; the old limits are restored if set_rate fails.
 *
 * @param clk point to clock
 * @param min minimum clock rate
 * @param max maximum clock rate
 *
 * @return rt_err_t RT_EOK on success; -RT_ENOSYS if the node has no
 *         set_rate operation.
 */
rt_err_t rt_clk_set_rate_range(struct rt_clk *clk, rt_ubase_t min, rt_ubase_t max)
{
    rt_err_t err = RT_EOK;

    if (clk && clk->clk_np)
    {
        struct rt_clk_node *clk_np = clk->clk_np;

        rt_hw_spin_lock(&_clk_lock.lock);

        if (clk_np->ops->set_rate)
        {
            rt_ubase_t rate = clk_np->rate;
            /* Remember the previous limits for rollback on failure. */
            rt_ubase_t old_min = clk_np->min_rate;
            rt_ubase_t old_max = clk_np->max_rate;

            clk_np->min_rate = min;
            clk_np->max_rate = max;

            rate = rt_clamp(rate, min, max);
            /* NOTE(review): rt_clk_get_rate() of the parent is called
             * while _clk_lock is held — confirm it never re-takes it. */
            err = clk_np->ops->set_rate(clk, rate,
                    rt_clk_get_rate(clk_np->parent ? clk_np->parent->clk : RT_NULL));

            if (err)
            {
                clk_np->min_rate = old_min;
                clk_np->max_rate = old_max;
            }
        }
        else
        {
            err = -RT_ENOSYS;
        }

        rt_hw_spin_unlock(&_clk_lock.lock);
    }

    return err;
}
  674. /**
  675. * @brief Set minimum clock rate
  676. *
  677. * @param clk point to clock
  678. * @param rate miminum clock rate
  679. *
  680. * @return rt_err_t RT_EOK on set minimum clock rate sucessfully, and other value is failed.
  681. */
  682. rt_err_t rt_clk_set_min_rate(struct rt_clk *clk, rt_ubase_t rate)
  683. {
  684. rt_err_t err = RT_EOK;
  685. if (clk && clk->clk_np)
  686. {
  687. struct rt_clk_node *clk_np = clk->clk_np;
  688. err = rt_clk_set_rate_range(clk, rate, clk_np->max_rate);
  689. }
  690. return err;
  691. }
  692. /**
  693. * @brief Set maximum clock rate
  694. *
  695. * @param clk point to clock
  696. * @param rate maximum clock rate
  697. *
  698. * @return rt_err_t RT_EOK on set maximum clock rate sucessfully, and other value is failed.
  699. */
  700. rt_err_t rt_clk_set_max_rate(struct rt_clk *clk, rt_ubase_t rate)
  701. {
  702. rt_err_t err = RT_EOK;
  703. if (clk && clk->clk_np)
  704. {
  705. struct rt_clk_node *clk_np = clk->clk_np;
  706. err = rt_clk_set_rate_range(clk, clk_np->min_rate, rate);
  707. }
  708. return err;
  709. }
  710. /**
  711. * @brief Set clock rate
  712. *
  713. * @param clk point to clock
  714. * @param rate target rate
  715. *
  716. * @return rt_err_t RT_EOK on set clock rate sucessfully, and other value is failed.
  717. */
  718. rt_err_t rt_clk_set_rate(struct rt_clk *clk, rt_ubase_t rate)
  719. {
  720. rt_err_t err = RT_EOK;
  721. rate = rt_clk_round_rate(clk, rate);
  722. if (clk && clk->clk_np && rate > 0)
  723. {
  724. struct rt_clk_node *clk_np = clk->clk_np;
  725. rt_hw_spin_lock(&_clk_lock.lock);
  726. if (clk_np->min_rate && rate < clk_np->min_rate)
  727. {
  728. err = -RT_EINVAL;
  729. }
  730. if (clk_np->max_rate && rate > clk_np->max_rate)
  731. {
  732. err = -RT_EINVAL;
  733. }
  734. if (!err)
  735. {
  736. if (clk_np->ops->set_rate)
  737. {
  738. rt_ubase_t old_rate = clk_np->rate;
  739. err = clk_np->ops->set_rate(clk, rate,
  740. rt_clk_get_rate(clk_np->parent ? clk_np->parent->clk : RT_NULL));
  741. if (clk_np->rate != old_rate)
  742. {
  743. clk_notify(clk_np, RT_CLK_MSG_PRE_RATE_CHANGE, old_rate, clk_np->rate);
  744. }
  745. }
  746. else
  747. {
  748. err = -RT_ENOSYS;
  749. }
  750. }
  751. rt_hw_spin_unlock(&_clk_lock.lock);
  752. }
  753. return err;
  754. }
  755. /**
  756. * @brief Get clock rate
  757. *
  758. * @param clk point to clock
  759. *
  760. * @return rt_ubase_t clock rate or error code
  761. */
  762. rt_ubase_t rt_clk_get_rate(struct rt_clk *clk)
  763. {
  764. rt_ubase_t rate = 0;
  765. if (clk)
  766. {
  767. if (clk->rate)
  768. {
  769. rate = clk->rate;
  770. }
  771. else if (clk->clk_np)
  772. {
  773. rate = clk->clk_np->rate;
  774. }
  775. }
  776. return rate;
  777. }
  778. /**
  779. * @brief Set clock phase
  780. *
  781. * @param clk point to clock
  782. * @param degrees target phase and the unit of phase is degree
  783. *
  784. * @return rt_err_t RT_EOK on set clock phase sucessfully, and other value is failed.
  785. */
  786. rt_err_t rt_clk_set_phase(struct rt_clk *clk, int degrees)
  787. {
  788. rt_err_t err = RT_EOK;
  789. if (clk && clk->clk_np && clk->clk_np->ops->set_phase)
  790. {
  791. rt_hw_spin_lock(&_clk_lock.lock);
  792. err = clk->clk_np->ops->set_phase(clk, degrees);
  793. rt_hw_spin_unlock(&_clk_lock.lock);
  794. }
  795. return err;
  796. }
  797. /**
  798. * @brief Get clock phase
  799. *
  800. * @param clk point to clock
  801. *
  802. * @return rt_base_t clock phase or error code
  803. */
  804. rt_base_t rt_clk_get_phase(struct rt_clk *clk)
  805. {
  806. rt_base_t res = RT_EOK;
  807. if (clk && clk->clk_np && clk->clk_np->ops->get_phase)
  808. {
  809. rt_hw_spin_lock(&_clk_lock.lock);
  810. res = clk->clk_np->ops->get_phase(clk);
  811. rt_hw_spin_unlock(&_clk_lock.lock);
  812. }
  813. return res;
  814. }
  815. /**
  816. * @brief Check if clock rate is in the minimum to maximun and get it
  817. *
  818. * @param clk point to clock
  819. * @param rate rate will be checked
  820. *
  821. * @return rt_base_t get the correct rate
  822. * @note if parameter rate less than the minimum or more than maximum, the
  823. retrun rate will be set to minimum ormaximum value
  824. */
  825. rt_base_t rt_clk_round_rate(struct rt_clk *clk, rt_ubase_t rate)
  826. {
  827. rt_base_t res = -RT_EINVAL;
  828. if (clk && clk->clk_np)
  829. {
  830. struct rt_clk_node *clk_np = clk->clk_np;
  831. if (clk_np->ops->round_rate)
  832. {
  833. rt_ubase_t best_parent_rate;
  834. rt_hw_spin_lock(&_clk_lock.lock);
  835. if (clk_np->min_rate && clk_np->max_rate)
  836. {
  837. rate = rt_clamp(rate, clk_np->min_rate, clk_np->max_rate);
  838. }
  839. res = clk_np->ops->round_rate(clk, rate, &best_parent_rate);
  840. (void)best_parent_rate;
  841. rt_hw_spin_unlock(&_clk_lock.lock);
  842. }
  843. else
  844. {
  845. if (rate < clk_np->min_rate)
  846. {
  847. res = clk_np->min_rate;
  848. }
  849. else if (rate > clk_np->max_rate)
  850. {
  851. res = clk_np->max_rate;
  852. }
  853. else
  854. {
  855. res = rate;
  856. }
  857. }
  858. }
  859. return res;
  860. }
  861. /**
  862. * @brief Set clock parent object
  863. *
  864. * @param clk point to clock
  865. * @param clk_parent point to parent clock
  866. *
  867. * @return rt_err_t RT_EOK on set clock parent sucessfully, and other value is failed.
  868. */
  869. rt_err_t rt_clk_set_parent(struct rt_clk *clk, struct rt_clk *clk_parent)
  870. {
  871. rt_err_t err = RT_EOK;
  872. if (clk && clk->clk_np && clk->clk_np->ops->set_parent)
  873. {
  874. rt_hw_spin_lock(&_clk_lock.lock);
  875. err = clk->clk_np->ops->set_parent(clk, clk_parent);
  876. rt_hw_spin_unlock(&_clk_lock.lock);
  877. }
  878. return err;
  879. }
  880. /**
  881. * @brief Get parent clock pointer
  882. *
  883. * @param clk child clock
  884. *
  885. * @return struct rt_clk* parent clock object pointer will be return, unless child
  886. clock node havn't parent node instead return RT_NULL
  887. */
  888. struct rt_clk *rt_clk_get_parent(struct rt_clk *clk)
  889. {
  890. struct rt_clk *parent = RT_NULL;
  891. if (clk)
  892. {
  893. struct rt_clk_node *clk_np = clk->clk_np;
  894. rt_hw_spin_lock(&_clk_lock.lock);
  895. parent = clk_np->parent ? clk_np->parent->clk : RT_NULL;
  896. rt_hw_spin_unlock(&_clk_lock.lock);
  897. }
  898. return parent;
  899. }
  900. /**
  901. * @brief Get clock array pointer from ofw device node
  902. *
  903. * @param dev point to dev
  904. *
  905. * @return struct rt_clk_array* if use ofw and under normal circumstance, it will return
  906. clock array pointer and other value is RT_NULL
  907. */
  908. struct rt_clk_array *rt_clk_get_array(struct rt_device *dev)
  909. {
  910. struct rt_clk_array *clk_arr = RT_NULL;
  911. #ifdef RT_USING_OFW
  912. clk_arr = rt_ofw_get_clk_array(dev->ofw_node);
  913. #endif
  914. return clk_arr;
  915. }
  916. /**
  917. * @brief Get clock pointer from ofw device node by index
  918. *
  919. * @param dev point to dev
  920. * @param index index of clock object
  921. *
  922. * @return struct rt_clk* if use ofw and under normal circumstance, it will return clock
  923. pointer and other value is RT_NULL
  924. */
  925. struct rt_clk *rt_clk_get_by_index(struct rt_device *dev, int index)
  926. {
  927. struct rt_clk *clk = RT_NULL;
  928. #ifdef RT_USING_OFW
  929. clk = rt_ofw_get_clk(dev->ofw_node, index);
  930. #endif
  931. return clk;
  932. }
  933. /**
  934. * @brief Get clock pointer from ofw device node by name
  935. *
  936. * @param dev point to dev
  937. * @param name name of clock object
  938. *
  939. * @return struct rt_clk* if use ofw and under normal circumstance, it will return clock
  940. pointer and other value is RT_NULL
  941. */
  942. struct rt_clk *rt_clk_get_by_name(struct rt_device *dev, const char *name)
  943. {
  944. struct rt_clk *clk = RT_NULL;
  945. #ifdef RT_USING_OFW
  946. clk = rt_ofw_get_clk_by_name(dev->ofw_node, name);
  947. #endif
  948. return clk;
  949. }
  950. /**
  951. * @brief Put reference count of all colock in the clock array
  952. *
  953. * @param clk_arr point to clock array
  954. *
  955. */
  956. void rt_clk_array_put(struct rt_clk_array *clk_arr)
  957. {
  958. if (clk_arr)
  959. {
  960. for (int i = 0; i < clk_arr->count; ++i)
  961. {
  962. if (clk_arr->clks[i])
  963. {
  964. rt_clk_put(clk_arr->clks[i]);
  965. }
  966. else
  967. {
  968. break;
  969. }
  970. }
  971. rt_free(clk_arr);
  972. }
  973. }
  974. /**
  975. * @brief Put reference count of clock
  976. *
  977. * @param clk point to clock
  978. *
  979. */
  980. void rt_clk_put(struct rt_clk *clk)
  981. {
  982. if (clk)
  983. {
  984. clk_put(clk->clk_np);
  985. clk_free(clk);
  986. }
  987. }
  988. #ifdef RT_USING_OFW
  989. /**
  990. * @brief Get a clock object from a device tree node without acquiring a lock
  991. *
  992. * @param np point to ofw node
  993. * @param index index of clock in ofw
  994. * @param name connection identifier for the clock
  995. * @param locked lock flag for indicating whether the caller holds the lock
  996. *
  997. * @return struct rt_clk* point to the newly created clock object, or an error pointer
  998. */
static struct rt_clk *ofw_get_clk_no_lock(struct rt_ofw_node *np, int index, const char *name, rt_bool_t locked)
{
    struct rt_clk *clk = RT_NULL;
    struct rt_ofw_cell_args clk_args;

    /* Resolve the index-th phandle of the consumer's "clocks" property */
    if (!rt_ofw_parse_phandle_cells(np, "clocks", "#clock-cells", index, &clk_args))
    {
        int count;
        struct rt_object *obj;
        struct rt_clk_node *clk_np = RT_NULL;
        struct rt_ofw_node *clk_ofw_np = clk_args.data;

        /* No driver data on the provider node yet: try to get it probed */
        if (!rt_ofw_data(clk_ofw_np))
        {
            /*
             * NOTE(review): the lock is dropped around the request —
             * presumably rt_platform_ofw_request() may itself need the clock
             * lock or block while probing the provider; confirm.
             */
            if (locked)
            {
                rt_hw_spin_unlock(&_clk_lock.lock);
            }

            rt_platform_ofw_request(clk_ofw_np);

            if (locked)
            {
                rt_hw_spin_lock(&_clk_lock.lock);
            }
        }

        /* Provider is registered: recover its rt_clk_node from the ofw object */
        if (rt_ofw_data(clk_ofw_np) && (obj = rt_ofw_parse_object(clk_ofw_np,
            RT_CLK_NODE_OBJ_NAME, "#clock-cells")))
        {
            clk_np = rt_container_of(obj, struct rt_clk_node, rt_parent);
            /* `count` is only read below when clk_np != RT_NULL, i.e. when
             * this assignment has executed — no uninitialized use. */
            count = rt_ofw_count_of_clk(clk_ofw_np);
        }

        /* Drop the node reference (presumably taken by
         * rt_ofw_parse_phandle_cells — TODO confirm) */
        rt_ofw_node_put(clk_ofw_np);

        if (clk_np)
        {
            if (count > 1)
            {
                /* args[0] must be the index of CLK */
                clk_np = &clk_np[clk_args.args[0]];
            }

            clk = clk_create(clk_np, np->full_name, name, &clk_args, np);
        }
        else
        {
            /* Provider absent or not a clock-controller object */
            clk = rt_err_ptr(-RT_ERROR);
        }
    }

    return clk;
}
  1044. /**
  1045. * @brief Get clock from ofw with acquiring a spin lock
  1046. *
  1047. * @param np point to ofw node
  1048. * @param index index of clock in ofw
  1049. * @param name connection identifier for the clock
  1050. *
  1051. * @return struct rt_clk* point to the newly created clock object, or an error pointer
  1052. */
  1053. static struct rt_clk *ofw_get_clk(struct rt_ofw_node *np, int index, const char *name)
  1054. {
  1055. struct rt_clk *clk;
  1056. rt_hw_spin_lock(&_clk_lock.lock);
  1057. clk = ofw_get_clk_no_lock(np, index, name, RT_TRUE);
  1058. rt_hw_spin_unlock(&_clk_lock.lock);
  1059. return clk;
  1060. }
  1061. /**
  1062. * @brief Get clock array from ofw
  1063. *
  1064. * @param np point to ofw node
  1065. *
  1066. * @return struct rt_clk_array* point to the newly created clock array, or an error pointer
  1067. */
  1068. struct rt_clk_array *rt_ofw_get_clk_array(struct rt_ofw_node *np)
  1069. {
  1070. int count;
  1071. struct rt_clk_array *clk_arr = RT_NULL;
  1072. if (!np)
  1073. {
  1074. return rt_err_ptr(-RT_EINVAL);
  1075. }
  1076. if ((count = rt_ofw_count_phandle_cells(np, "clocks", "#clock-cells")) > 0)
  1077. {
  1078. clk_arr = rt_calloc(1, sizeof(*clk_arr) + sizeof(clk_arr->clks[0]) * count);
  1079. if (clk_arr)
  1080. {
  1081. int i;
  1082. rt_err_t err = RT_EOK;
  1083. rt_bool_t has_name = rt_ofw_prop_read_bool(np, "clock-names");
  1084. clk_arr->count = count;
  1085. rt_hw_spin_lock(&_clk_lock.lock);
  1086. for (i = 0; i < count; ++i)
  1087. {
  1088. const char *name = RT_NULL;
  1089. if (has_name)
  1090. {
  1091. rt_ofw_prop_read_string_index(np, "clock-names", i, &name);
  1092. }
  1093. clk_arr->clks[i] = ofw_get_clk_no_lock(np, i, name, RT_FALSE);
  1094. if (rt_is_err(clk_arr->clks[i]))
  1095. {
  1096. err = rt_ptr_err(clk_arr->clks[i]);
  1097. --i;
  1098. break;
  1099. }
  1100. }
  1101. rt_hw_spin_unlock(&_clk_lock.lock);
  1102. if (i > 0 && i < count)
  1103. {
  1104. rt_clk_array_put(clk_arr);
  1105. clk_arr = rt_err_ptr(err);
  1106. }
  1107. }
  1108. }
  1109. return clk_arr;
  1110. }
  1111. /**
  1112. * @brief Get clock from ofw with acquiring a spin lock by index and node pointer
  1113. *
  1114. * @param np point to ofw node
  1115. * @param index index of clock in ofw
  1116. *
  1117. * @return struct rt_clk* point to the newly created clock object, or an error pointer
  1118. */
  1119. struct rt_clk *rt_ofw_get_clk(struct rt_ofw_node *np, int index)
  1120. {
  1121. struct rt_clk *clk = RT_NULL;
  1122. if (np && index >= 0)
  1123. {
  1124. clk = ofw_get_clk(np, index, RT_NULL);
  1125. }
  1126. return clk;
  1127. }
  1128. /**
  1129. * @brief Get clock from ofw with acquiring a spin lock by name
  1130. *
  1131. * @param np point to ofw node
  1132. * @param name name of clock will be returned
  1133. *
  1134. * @return struct rt_clk* point to the newly created clock object, or an error pointer
  1135. */
  1136. struct rt_clk *rt_ofw_get_clk_by_name(struct rt_ofw_node *np, const char *name)
  1137. {
  1138. struct rt_clk *clk = RT_NULL;
  1139. if (np && name)
  1140. {
  1141. int index = rt_ofw_prop_index_of_string(np, "clock-names", name);
  1142. if (index >= 0)
  1143. {
  1144. clk = ofw_get_clk(np, index, name);
  1145. }
  1146. }
  1147. return clk;
  1148. }
  1149. /**
  1150. * @brief Count number of clocks in ofw
  1151. *
  1152. * @param clk_ofw_np point to ofw node
  1153. *
  1154. * @return rt_ssize_t number of clocks
  1155. */
rt_ssize_t rt_ofw_count_of_clk(struct rt_ofw_node *clk_ofw_np)
{
    if (clk_ofw_np)
    {
        struct rt_clk_node *clk_np = rt_ofw_data(clk_ofw_np);

        /* Fast path: return the count cached in the clock node, if any */
        if (clk_np && clk_np->multi_clk)
        {
            return clk_np->multi_clk;
        }
        else
        {
            const fdt32_t *cell;
            rt_uint32_t count = 0;
            struct rt_ofw_prop *prop;

            prop = rt_ofw_get_prop(clk_ofw_np, "clock-indices", RT_NULL);

            if (prop)
            {
                /* "clock-indices" may be sparse: the usable count is the
                 * largest listed index plus one */
                rt_uint32_t max_idx = 0, idx;

                for (cell = rt_ofw_prop_next_u32(prop, RT_NULL, &idx);
                     cell;
                     cell = rt_ofw_prop_next_u32(prop, cell, &idx))
                {
                    if (idx > max_idx)
                    {
                        max_idx = idx;
                    }
                }

                count = max_idx + 1;
            }
            else
            {
                rt_ssize_t len;

                if ((prop = rt_ofw_get_prop(clk_ofw_np, "clock-output-names", &len)))
                {
                    /* Count NUL terminators to count the strings in the
                     * "clock-output-names" string list */
                    char *value = prop->value;

                    for (int i = 0; i < len; ++i, ++value)
                    {
                        if (*value == '\0')
                        {
                            ++count;
                        }
                    }
                }
                else
                {
                    /* Neither property present: assume a single-output provider */
                    count = 1;
                }
            }

            /* Cache the computed count for subsequent calls */
            if (clk_np)
            {
                clk_np->multi_clk = count;
            }

            return count;
        }
    }

    return -RT_EINVAL;
}
  1213. #endif /* RT_USING_OFW */
  1214. /**@}*/