clk.c 21 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078
  1. /*
  2. * Copyright (c) 2006-2022, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2022-11-26 GuEe-GUI first version
  9. */
  10. #include <rtthread.h>
  11. #include <rtservice.h>
  12. #include <rtdevice.h>
  13. #define DBG_TAG "rtdm.clk"
  14. #define DBG_LVL DBG_INFO
  15. #include <rtdbg.h>
/* Guards the clock tree links, the notifier list, and hardware rate/phase ops. */
static struct rt_spinlock _clk_lock = { 0 };
/* Root clock nodes (nodes with no parent); children hang off their parent's
 * children_nodes list instead. */
static rt_list_t _clk_nodes = RT_LIST_OBJECT_INIT(_clk_nodes);
/* All registered rate-change notifiers, across every clock. */
static rt_list_t _clk_notifier_nodes = RT_LIST_OBJECT_INIT(_clk_notifier_nodes);
/*
 * rt_ref release callback for clock nodes.
 * A node's reference count reaching zero means it was over-put while still
 * registered: treated as a fatal logic error (log + assert), never a normal path.
 */
static void clk_release(struct rt_ref *r)
{
    struct rt_clk_node *clk_np = rt_container_of(r, struct rt_clk_node, ref);

    LOG_E("%s is release", clk_np->name);
    (void)clk_np;

    RT_ASSERT(0);
}
/* Take an extra reference on a clock node and return the node for chaining. */
rt_inline struct rt_clk_node *clk_get(struct rt_clk_node *clk_np)
{
    rt_ref_get(&clk_np->ref);

    return clk_np;
}
/* Drop one reference on a clock node; clk_release() asserts if it hits zero. */
rt_inline void clk_put(struct rt_clk_node *clk_np)
{
    rt_ref_put(&clk_np->ref, &clk_release);
}
  35. static struct rt_clk *clk_alloc(struct rt_clk_node *clk_np, const char *dev_id,
  36. const char *con_id, void *fw_node)
  37. {
  38. struct rt_clk *clk = rt_calloc(1, sizeof(*clk));
  39. if (clk)
  40. {
  41. clk->clk_np = clk_np;
  42. clk->dev_id = dev_id;
  43. clk->con_id = con_id;
  44. clk->fw_node = fw_node;
  45. }
  46. return clk;
  47. }
  48. static void clk_free(struct rt_clk *clk)
  49. {
  50. struct rt_clk_node *clk_np = clk->clk_np;
  51. if (clk_np && clk_np->ops->finit)
  52. {
  53. clk_np->ops->finit(clk);
  54. }
  55. rt_free(clk);
  56. }
  57. static struct rt_clk *clk_create(struct rt_clk_node *clk_np, const char *dev_id,
  58. const char *con_id, void *fw_data, void *fw_node)
  59. {
  60. struct rt_clk *clk = clk_alloc(clk_np, dev_id, con_id, fw_node);
  61. if (clk)
  62. {
  63. clk_get(clk_np);
  64. if (clk_np->ops->init && clk_np->ops->init(clk, fw_data))
  65. {
  66. LOG_E("Dev[%s] Con[%s] init fail", dev_id, con_id);
  67. clk_free(clk);
  68. clk = RT_NULL;
  69. }
  70. }
  71. return clk;
  72. }
/*
 * Broadcast a rate-change message to every notifier registered on clk_np.
 * Called with _clk_lock held (see rt_clk_set_rate). Iteration stops early
 * only when a callback reports a hardware error (-RT_EIO); other callback
 * errors are recorded but do not stop the broadcast.
 */
static rt_err_t clk_notify(struct rt_clk_node *clk_np, rt_ubase_t msg, rt_ubase_t old_rate, rt_ubase_t new_rate)
{
    rt_err_t err = RT_EOK;
    struct rt_clk_notifier *notifier;

    rt_list_for_each_entry(notifier, &_clk_notifier_nodes, list)
    {
        if (notifier->clk->clk_np == clk_np)
        {
            err = notifier->callback(notifier, msg, old_rate, new_rate);

            /* Only check hardware's error */
            if (err == -RT_EIO)
            {
                break;
            }
        }
    }

    return err;
}
/* Link clk_np under parent_np in the clock tree; the tree edit is locked. */
static void clk_set_parent(struct rt_clk_node *clk_np, struct rt_clk_node *parent_np)
{
    rt_hw_spin_lock(&_clk_lock.lock);

    clk_np->parent = parent_np;

    /* The same list node is used for the root list or a parent's child list. */
    rt_list_insert_after(&parent_np->children_nodes, &clk_np->list);

    rt_hw_spin_unlock(&_clk_lock.lock);
}
/* Fallback ops table for nodes registered without ops: every hook is RT_NULL,
 * so all optional operations (prepare/enable/set_rate/...) become no-ops. */
static const struct rt_clk_ops unused_clk_ops =
{
};
  101. rt_err_t rt_clk_register(struct rt_clk_node *clk_np, struct rt_clk_node *parent_np)
  102. {
  103. rt_err_t err = RT_EOK;
  104. struct rt_clk *clk = RT_NULL;
  105. if (clk_np)
  106. {
  107. clk = clk_alloc(clk_np, RT_NULL, RT_NULL, RT_NULL);
  108. }
  109. else
  110. {
  111. err = -RT_EINVAL;
  112. }
  113. if (!err && clk_np)
  114. {
  115. clk_np->clk = clk;
  116. if (!clk_np->ops)
  117. {
  118. clk_np->ops = &unused_clk_ops;
  119. }
  120. rt_ref_init(&clk_np->ref);
  121. rt_list_init(&clk_np->list);
  122. rt_list_init(&clk_np->children_nodes);
  123. clk_np->multi_clk = 0;
  124. if (parent_np)
  125. {
  126. clk_set_parent(clk_np, parent_np);
  127. }
  128. else
  129. {
  130. clk_np->parent = RT_NULL;
  131. rt_hw_spin_lock(&_clk_lock.lock);
  132. rt_list_insert_after(&_clk_nodes, &clk_np->list);
  133. rt_hw_spin_unlock(&_clk_lock.lock);
  134. }
  135. }
  136. else
  137. {
  138. err = -RT_ENOMEM;
  139. }
  140. return err;
  141. }
/*
 * Remove a clock node from the tree.
 *
 * Fails with -RT_EBUSY while the node still has child nodes, or while any
 * outstanding clk handle holds a reference (count above the node's own
 * register-time reference). Returns -RT_EINVAL for a NULL node.
 */
rt_err_t rt_clk_unregister(struct rt_clk_node *clk_np)
{
    rt_err_t err = RT_EOK;

    if (clk_np)
    {
        err = -RT_EBUSY;

        rt_hw_spin_lock(&_clk_lock.lock);

        if (rt_list_isempty(&clk_np->children_nodes))
        {
            /* <= 1: only the reference taken at registration remains. */
            if (rt_ref_read(&clk_np->ref) <= 1)
            {
                rt_list_remove(&clk_np->list);
                /* Free the node's internal "self" clk handle. */
                clk_free(clk_np->clk);

                err = RT_EOK;
            }
        }

        rt_hw_spin_unlock(&_clk_lock.lock);
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}
  166. rt_err_t rt_clk_notifier_register(struct rt_clk *clk, struct rt_clk_notifier *notifier)
  167. {
  168. if (!clk || !clk->clk_np || !notifier)
  169. {
  170. return -RT_EINVAL;
  171. }
  172. rt_hw_spin_lock(&_clk_lock.lock);
  173. ++clk->clk_np->notifier_count;
  174. rt_list_init(&notifier->list);
  175. rt_list_insert_after(&_clk_notifier_nodes, &notifier->list);
  176. rt_hw_spin_unlock(&_clk_lock.lock);
  177. return RT_EOK;
  178. }
  179. rt_err_t rt_clk_notifier_unregister(struct rt_clk *clk, struct rt_clk_notifier *notifier)
  180. {
  181. struct rt_clk_notifier *notifier_find;
  182. if (!clk || !notifier)
  183. {
  184. return -RT_EINVAL;
  185. }
  186. rt_hw_spin_lock(&_clk_lock.lock);
  187. rt_list_for_each_entry(notifier_find, &_clk_notifier_nodes, list)
  188. {
  189. if (notifier_find->clk->clk_np == notifier->clk->clk_np)
  190. {
  191. --clk->clk_np->notifier_count;
  192. rt_list_remove(&notifier->list);
  193. break;
  194. }
  195. }
  196. rt_hw_spin_unlock(&_clk_lock.lock);
  197. return RT_EOK;
  198. }
/*
 * Prepare clk_np and all of its ancestors, root first.
 * Called with _clk_lock held (see rt_clk_prepare).
 *
 * NOTE(review): the recursive parent call's return value is discarded, so a
 * parent prepare failure is silently ignored and only this level's error is
 * reported — confirm this best-effort behavior is intentional.
 */
static rt_err_t clk_prepare(struct rt_clk *clk, struct rt_clk_node *clk_np)
{
    rt_err_t err = RT_EOK;

    if (clk_np->parent)
    {
        /* Parents are prepared through their own internal clk handle. */
        clk_prepare(clk_np->clk, clk_np->parent);
    }

    if (clk_np->ops->prepare)
    {
        err = clk_np->ops->prepare(clk);
    }

    return err;
}
  212. rt_err_t rt_clk_prepare(struct rt_clk *clk)
  213. {
  214. rt_err_t err = RT_EOK;
  215. RT_DEBUG_NOT_IN_INTERRUPT;
  216. if (clk && clk->clk_np)
  217. {
  218. rt_hw_spin_lock(&_clk_lock.lock);
  219. err = clk_prepare(clk, clk->clk_np);
  220. rt_hw_spin_unlock(&_clk_lock.lock);
  221. }
  222. else
  223. {
  224. err = -RT_EINVAL;
  225. }
  226. return err;
  227. }
/*
 * Unprepare clk_np and all of its ancestors, root first.
 * Called with _clk_lock held (see rt_clk_unprepare).
 *
 * NOTE(review): ancestors are unprepared BEFORE this level, mirroring
 * clk_prepare()'s order rather than reversing it — confirm intentional.
 */
static void clk_unprepare(struct rt_clk *clk, struct rt_clk_node *clk_np)
{
    if (clk_np->parent)
    {
        clk_unprepare(clk_np->clk, clk_np->parent);
    }

    if (clk_np->ops->unprepare)
    {
        clk_np->ops->unprepare(clk);
    }
}
  239. rt_err_t rt_clk_unprepare(struct rt_clk *clk)
  240. {
  241. rt_err_t err = RT_EOK;
  242. RT_DEBUG_NOT_IN_INTERRUPT;
  243. if (clk && clk->clk_np)
  244. {
  245. rt_hw_spin_lock(&_clk_lock.lock);
  246. clk_unprepare(clk, clk->clk_np);
  247. rt_hw_spin_unlock(&_clk_lock.lock);
  248. }
  249. else
  250. {
  251. err = -RT_EINVAL;
  252. }
  253. return err;
  254. }
/*
 * Enable clk_np and all of its ancestors, root first.
 * Called with _clk_lock held (see rt_clk_enable).
 *
 * NOTE(review): as in clk_prepare(), a parent enable failure is discarded
 * and only this level's error is returned — confirm intentional.
 */
static rt_err_t clk_enable(struct rt_clk *clk, struct rt_clk_node *clk_np)
{
    rt_err_t err = RT_EOK;

    if (clk_np->parent)
    {
        clk_enable(clk_np->clk, clk_np->parent);
    }

    if (clk_np->ops->enable)
    {
        err = clk_np->ops->enable(clk);
    }

    return err;
}
  268. rt_err_t rt_clk_enable(struct rt_clk *clk)
  269. {
  270. rt_err_t err = RT_EOK;
  271. if (clk && clk->clk_np)
  272. {
  273. rt_hw_spin_lock(&_clk_lock.lock);
  274. err = clk_enable(clk, clk->clk_np);
  275. rt_hw_spin_unlock(&_clk_lock.lock);
  276. }
  277. else
  278. {
  279. err = -RT_EINVAL;
  280. }
  281. return err;
  282. }
/*
 * Disable clk_np and all of its ancestors, root first.
 * Called with _clk_lock held (see rt_clk_disable).
 *
 * NOTE(review): ancestors are disabled BEFORE this level (same order as
 * clk_enable(), not reversed) — confirm intentional.
 */
static void clk_disable(struct rt_clk *clk, struct rt_clk_node *clk_np)
{
    if (clk_np->parent)
    {
        clk_disable(clk_np->clk, clk_np->parent);
    }

    if (clk_np->ops->disable)
    {
        clk_np->ops->disable(clk);
    }
}
  294. void rt_clk_disable(struct rt_clk *clk)
  295. {
  296. if (clk && clk->clk_np)
  297. {
  298. rt_hw_spin_lock(&_clk_lock.lock);
  299. clk_disable(clk, clk->clk_np);
  300. rt_hw_spin_unlock(&_clk_lock.lock);
  301. }
  302. }
  303. rt_err_t rt_clk_prepare_enable(struct rt_clk *clk)
  304. {
  305. rt_err_t err;
  306. RT_DEBUG_NOT_IN_INTERRUPT;
  307. if (clk)
  308. {
  309. err = rt_clk_prepare(clk);
  310. if (!err)
  311. {
  312. err = rt_clk_enable(clk);
  313. if (err)
  314. {
  315. rt_clk_unprepare(clk);
  316. }
  317. }
  318. }
  319. else
  320. {
  321. err = -RT_EINVAL;
  322. }
  323. return err;
  324. }
  325. void rt_clk_disable_unprepare(struct rt_clk *clk)
  326. {
  327. RT_DEBUG_NOT_IN_INTERRUPT;
  328. if (clk)
  329. {
  330. rt_clk_disable(clk);
  331. rt_clk_unprepare(clk);
  332. }
  333. }
  334. rt_err_t rt_clk_array_prepare(struct rt_clk_array *clk_arr)
  335. {
  336. rt_err_t err = RT_EOK;
  337. if (clk_arr)
  338. {
  339. for (int i = 0; i < clk_arr->count; ++i)
  340. {
  341. if ((err = rt_clk_prepare(clk_arr->clks[i])))
  342. {
  343. LOG_E("CLK Array[%d] %s failed error = %s", i,
  344. "prepare", rt_strerror(err));
  345. while (i --> 0)
  346. {
  347. rt_clk_unprepare(clk_arr->clks[i]);
  348. }
  349. break;
  350. }
  351. }
  352. }
  353. else
  354. {
  355. err = -RT_EINVAL;
  356. }
  357. return err;
  358. }
  359. rt_err_t rt_clk_array_unprepare(struct rt_clk_array *clk_arr)
  360. {
  361. rt_err_t err = RT_EOK;
  362. if (clk_arr)
  363. {
  364. for (int i = 0; i < clk_arr->count; ++i)
  365. {
  366. if ((err = rt_clk_unprepare(clk_arr->clks[i])))
  367. {
  368. LOG_E("CLK Array[%d] %s failed error = %s", i,
  369. "unprepare", rt_strerror(err));
  370. break;
  371. }
  372. }
  373. }
  374. else
  375. {
  376. err = -RT_EINVAL;
  377. }
  378. return err;
  379. }
  380. rt_err_t rt_clk_array_enable(struct rt_clk_array *clk_arr)
  381. {
  382. rt_err_t err = RT_EOK;
  383. if (clk_arr)
  384. {
  385. for (int i = 0; i < clk_arr->count; ++i)
  386. {
  387. if ((err = rt_clk_enable(clk_arr->clks[i])))
  388. {
  389. LOG_E("CLK Array[%d] %s failed error = %s", i,
  390. "enable", rt_strerror(err));
  391. while (i --> 0)
  392. {
  393. rt_clk_disable(clk_arr->clks[i]);
  394. }
  395. break;
  396. }
  397. }
  398. }
  399. else
  400. {
  401. err = -RT_EINVAL;
  402. }
  403. return err;
  404. }
  405. void rt_clk_array_disable(struct rt_clk_array *clk_arr)
  406. {
  407. if (clk_arr)
  408. {
  409. for (int i = 0; i < clk_arr->count; ++i)
  410. {
  411. rt_clk_disable(clk_arr->clks[i]);
  412. }
  413. }
  414. }
  415. rt_err_t rt_clk_array_prepare_enable(struct rt_clk_array *clk_arr)
  416. {
  417. rt_err_t err = RT_EOK;
  418. if (clk_arr)
  419. {
  420. for (int i = 0; i < clk_arr->count; ++i)
  421. {
  422. if ((err = rt_clk_prepare_enable(clk_arr->clks[i])))
  423. {
  424. LOG_E("CLK Array[%d] %s failed error = %s", i,
  425. "prepare_enable", rt_strerror(err));
  426. while (i --> 0)
  427. {
  428. rt_clk_disable_unprepare(clk_arr->clks[i]);
  429. }
  430. break;
  431. }
  432. }
  433. }
  434. else
  435. {
  436. err = -RT_EINVAL;
  437. }
  438. return err;
  439. }
  440. void rt_clk_array_disable_unprepare(struct rt_clk_array *clk_arr)
  441. {
  442. if (clk_arr)
  443. {
  444. for (int i = 0; i < clk_arr->count; ++i)
  445. {
  446. rt_clk_disable_unprepare(clk_arr->clks[i]);
  447. }
  448. }
  449. }
/*
 * Constrain a clock's rate to [min, max].
 * The current rate is clamped into the new window and reprogrammed through
 * the provider's set_rate(); on hardware failure the previous bounds are
 * restored. Returns -RT_ENOSYS when the provider has no set_rate(), and
 * -RT_EINVAL for a NULL/unbound clk.
 */
rt_err_t rt_clk_set_rate_range(struct rt_clk *clk, rt_ubase_t min, rt_ubase_t max)
{
    rt_err_t err = RT_EOK;

    if (clk && clk->clk_np)
    {
        struct rt_clk_node *clk_np = clk->clk_np;

        rt_hw_spin_lock(&_clk_lock.lock);

        if (clk_np->ops->set_rate)
        {
            rt_ubase_t rate = clk_np->rate;
            /* Remember the old window so it can be restored on failure. */
            rt_ubase_t old_min = clk_np->min_rate;
            rt_ubase_t old_max = clk_np->max_rate;

            clk_np->min_rate = min;
            clk_np->max_rate = max;

            rate = rt_clamp(rate, min, max);
            err = clk_np->ops->set_rate(clk, rate,
                    rt_clk_get_rate(clk_np->parent ? clk_np->parent->clk : RT_NULL));

            if (err)
            {
                clk_np->min_rate = old_min;
                clk_np->max_rate = old_max;
            }
        }
        else
        {
            err = -RT_ENOSYS;
        }

        rt_hw_spin_unlock(&_clk_lock.lock);
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}
  485. rt_err_t rt_clk_set_min_rate(struct rt_clk *clk, rt_ubase_t rate)
  486. {
  487. rt_err_t err = RT_EOK;
  488. if (clk && clk->clk_np)
  489. {
  490. struct rt_clk_node *clk_np = clk->clk_np;
  491. err = rt_clk_set_rate_range(clk, rate, clk_np->max_rate);
  492. }
  493. else
  494. {
  495. err = -RT_EINVAL;
  496. }
  497. return err;
  498. }
  499. rt_err_t rt_clk_set_max_rate(struct rt_clk *clk, rt_ubase_t rate)
  500. {
  501. rt_err_t err = RT_EOK;
  502. if (clk && clk->clk_np)
  503. {
  504. struct rt_clk_node *clk_np = clk->clk_np;
  505. err = rt_clk_set_rate_range(clk, clk_np->min_rate, rate);
  506. }
  507. else
  508. {
  509. err = -RT_EINVAL;
  510. }
  511. return err;
  512. }
  513. rt_err_t rt_clk_set_rate(struct rt_clk *clk, rt_ubase_t rate)
  514. {
  515. rt_err_t err = RT_EOK;
  516. if (clk && clk->clk_np)
  517. {
  518. struct rt_clk_node *clk_np = clk->clk_np;
  519. rt_hw_spin_lock(&_clk_lock.lock);
  520. if (clk_np->min_rate && rate < clk_np->min_rate)
  521. {
  522. err = -RT_EINVAL;
  523. }
  524. if (clk_np->max_rate && rate > clk_np->max_rate)
  525. {
  526. err = -RT_EINVAL;
  527. }
  528. if (!err)
  529. {
  530. if (clk_np->ops->set_rate)
  531. {
  532. rt_ubase_t old_rate = clk_np->rate;
  533. err = clk_np->ops->set_rate(clk, rate,
  534. rt_clk_get_rate(clk_np->parent ? clk_np->parent->clk : RT_NULL));
  535. if (clk_np->rate != old_rate)
  536. {
  537. clk_notify(clk_np, RT_CLK_MSG_PRE_RATE_CHANGE, old_rate, clk_np->rate);
  538. }
  539. }
  540. else
  541. {
  542. err = -RT_ENOSYS;
  543. }
  544. }
  545. rt_hw_spin_unlock(&_clk_lock.lock);
  546. }
  547. else
  548. {
  549. err = -RT_EINVAL;
  550. }
  551. return err;
  552. }
  553. rt_ubase_t rt_clk_get_rate(struct rt_clk *clk)
  554. {
  555. rt_ubase_t rate = -1UL;
  556. if (clk)
  557. {
  558. if (clk->rate)
  559. {
  560. rate = clk->rate;
  561. }
  562. else if (clk->clk_np)
  563. {
  564. rate = clk->clk_np->rate;
  565. }
  566. }
  567. return rate;
  568. }
  569. rt_err_t rt_clk_set_phase(struct rt_clk *clk, int degrees)
  570. {
  571. rt_err_t err = RT_EOK;
  572. if (clk && clk->clk_np && clk->clk_np->ops->set_phase)
  573. {
  574. rt_hw_spin_lock(&_clk_lock.lock);
  575. err = clk->clk_np->ops->set_phase(clk, degrees);
  576. rt_hw_spin_unlock(&_clk_lock.lock);
  577. }
  578. else
  579. {
  580. err = -RT_EINVAL;
  581. }
  582. return err;
  583. }
  584. rt_base_t rt_clk_get_phase(struct rt_clk *clk)
  585. {
  586. rt_base_t res = RT_EOK;
  587. if (clk && clk->clk_np && clk->clk_np->ops->get_phase)
  588. {
  589. rt_hw_spin_lock(&_clk_lock.lock);
  590. res = clk->clk_np->ops->get_phase(clk);
  591. rt_hw_spin_unlock(&_clk_lock.lock);
  592. }
  593. else
  594. {
  595. res = -RT_EINVAL;
  596. }
  597. return res;
  598. }
  599. rt_base_t rt_clk_round_rate(struct rt_clk *clk, rt_ubase_t rate)
  600. {
  601. rt_base_t res = RT_EOK;
  602. if (clk && clk->clk_np && clk->clk_np->ops->round_rate)
  603. {
  604. rt_ubase_t best_parent_rate;
  605. struct rt_clk_node *clk_np = clk->clk_np;
  606. rt_hw_spin_lock(&_clk_lock.lock);
  607. if (clk_np->min_rate && clk_np->max_rate)
  608. {
  609. rate = rt_clamp(rate, clk_np->min_rate, clk_np->max_rate);
  610. }
  611. res = clk->clk_np->ops->round_rate(clk, rate, &best_parent_rate);
  612. (void)best_parent_rate;
  613. rt_hw_spin_unlock(&_clk_lock.lock);
  614. }
  615. else
  616. {
  617. res = -RT_EINVAL;
  618. }
  619. return res;
  620. }
  621. rt_err_t rt_clk_set_parent(struct rt_clk *clk, struct rt_clk *clk_parent)
  622. {
  623. rt_err_t err = RT_EOK;
  624. if (clk && clk->clk_np && clk->clk_np->ops->set_parent)
  625. {
  626. rt_hw_spin_lock(&_clk_lock.lock);
  627. err = clk->clk_np->ops->set_parent(clk, clk_parent);
  628. rt_hw_spin_unlock(&_clk_lock.lock);
  629. }
  630. else
  631. {
  632. err = -RT_EINVAL;
  633. }
  634. return err;
  635. }
  636. struct rt_clk *rt_clk_get_parent(struct rt_clk *clk)
  637. {
  638. struct rt_clk *parent = RT_NULL;
  639. if (clk)
  640. {
  641. struct rt_clk_node *clk_np = clk->clk_np;
  642. rt_hw_spin_lock(&_clk_lock.lock);
  643. parent = clk_np->parent ? clk_np->parent->clk : RT_NULL;
  644. rt_hw_spin_unlock(&_clk_lock.lock);
  645. }
  646. return parent;
  647. }
  648. struct rt_clk_array *rt_clk_get_array(struct rt_device *dev)
  649. {
  650. struct rt_clk_array *clk_arr = RT_NULL;
  651. #ifdef RT_USING_OFW
  652. clk_arr = rt_ofw_get_clk_array(dev->ofw_node);
  653. #endif
  654. return clk_arr;
  655. }
  656. struct rt_clk *rt_clk_get_by_index(struct rt_device *dev, int index)
  657. {
  658. struct rt_clk *clk = RT_NULL;
  659. #ifdef RT_USING_OFW
  660. clk = rt_ofw_get_clk(dev->ofw_node, index);
  661. #endif
  662. return clk;
  663. }
  664. struct rt_clk *rt_clk_get_by_name(struct rt_device *dev, const char *name)
  665. {
  666. struct rt_clk *clk = RT_NULL;
  667. #ifdef RT_USING_OFW
  668. clk = rt_ofw_get_clk_by_name(dev->ofw_node, name);
  669. #endif
  670. return clk;
  671. }
  672. void rt_clk_array_put(struct rt_clk_array *clk_arr)
  673. {
  674. if (clk_arr)
  675. {
  676. for (int i = 0; i < clk_arr->count; ++i)
  677. {
  678. if (clk_arr->clks[i])
  679. {
  680. rt_clk_put(clk_arr->clks[i]);
  681. }
  682. else
  683. {
  684. break;
  685. }
  686. }
  687. rt_free(clk_arr);
  688. }
  689. }
  690. void rt_clk_put(struct rt_clk *clk)
  691. {
  692. if (clk)
  693. {
  694. clk_put(clk->clk_np);
  695. clk_free(clk);
  696. }
  697. }
  698. #ifdef RT_USING_OFW
/*
 * Resolve entry `index` of np's "clocks" phandle list into a clk handle.
 * Caller must hold _clk_lock (hence the _no_lock suffix).
 * Returns RT_NULL when the phandle cannot be parsed or the provider node
 * has no rt_clk_node stored in its OFW data.
 */
static struct rt_clk *ofw_get_clk_no_lock(struct rt_ofw_node *np, int index, const char *name)
{
    struct rt_clk *clk = RT_NULL;
    struct rt_ofw_cell_args clk_args;

    if (!rt_ofw_parse_phandle_cells(np, "clocks", "#clock-cells", index, &clk_args))
    {
        int count;
        struct rt_ofw_node *clk_ofw_np = clk_args.data;
        /* The provider driver stored its rt_clk_node (or an array of nodes,
         * for multi-output providers) in the OFW node's data. */
        struct rt_clk_node *clk_np = rt_ofw_data(clk_ofw_np);

        count = rt_ofw_count_of_clk(clk_ofw_np);

        /* Drop the reference taken by the phandle parse. */
        rt_ofw_node_put(clk_ofw_np);

        if (clk_np)
        {
            if (count > 1)
            {
                /* args[0] must be the index of CLK */
                clk_np = &clk_np[clk_args.args[0]];
            }

            clk = clk_create(clk_np, np->full_name, name, &clk_args, np);
        }
    }

    return clk;
}
/* Locked wrapper around ofw_get_clk_no_lock(). */
static struct rt_clk *ofw_get_clk(struct rt_ofw_node *np, int index, const char *name)
{
    struct rt_clk *clk;

    rt_hw_spin_lock(&_clk_lock.lock);

    clk = ofw_get_clk_no_lock(np, index, name);

    rt_hw_spin_unlock(&_clk_lock.lock);

    return clk;
}
  730. struct rt_clk_array *rt_ofw_get_clk_array(struct rt_ofw_node *np)
  731. {
  732. int count;
  733. struct rt_clk_array *clk_arr = RT_NULL;
  734. if ((count = rt_ofw_count_phandle_cells(np, "clocks", "#clock-cells")) > 0)
  735. {
  736. clk_arr = rt_calloc(1, sizeof(*clk_arr) + sizeof(clk_arr->clks[0]) * count);
  737. if (clk_arr)
  738. {
  739. int i;
  740. rt_bool_t has_name = rt_ofw_prop_read_bool(np, "clock-names");
  741. clk_arr->count = count;
  742. rt_hw_spin_lock(&_clk_lock.lock);
  743. for (i = 0; i < count; ++i)
  744. {
  745. const char *name = RT_NULL;
  746. if (has_name)
  747. {
  748. rt_ofw_prop_read_string_index(np, "clock-names", i, &name);
  749. }
  750. clk_arr->clks[i] = ofw_get_clk_no_lock(np, i, name);
  751. if (!clk_arr->clks[i])
  752. {
  753. --i;
  754. break;
  755. }
  756. }
  757. rt_hw_spin_unlock(&_clk_lock.lock);
  758. if (i > 0 && i < count)
  759. {
  760. rt_clk_array_put(clk_arr);
  761. clk_arr = RT_NULL;
  762. }
  763. }
  764. }
  765. return clk_arr;
  766. }
  767. struct rt_clk *rt_ofw_get_clk(struct rt_ofw_node *np, int index)
  768. {
  769. struct rt_clk *clk = RT_NULL;
  770. if (np && index >= 0)
  771. {
  772. clk = ofw_get_clk(np, index, RT_NULL);
  773. }
  774. return clk;
  775. }
  776. struct rt_clk *rt_ofw_get_clk_by_name(struct rt_ofw_node *np, const char *name)
  777. {
  778. struct rt_clk *clk = RT_NULL;
  779. if (np && name)
  780. {
  781. int index = rt_ofw_prop_index_of_string(np, "clock-names", name);
  782. if (index >= 0)
  783. {
  784. clk = ofw_get_clk(np, index, name);
  785. }
  786. }
  787. return clk;
  788. }
/*
 * Count how many output clocks the provider node clk_ofw_np exposes.
 * The result is cached in the provider's rt_clk_node (multi_clk) after the
 * first computation. Counting strategy, in order:
 *   1. "clock-indices" present: count = highest listed index + 1;
 *   2. else "clock-output-names" present: count its NUL-terminated strings;
 *   3. else: a single output clock is assumed.
 * Returns -RT_EINVAL for a NULL node.
 */
rt_ssize_t rt_ofw_count_of_clk(struct rt_ofw_node *clk_ofw_np)
{
    if (clk_ofw_np)
    {
        struct rt_clk_node *clk_np = rt_ofw_data(clk_ofw_np);

        /* Fast path: a previous call already cached the count. */
        if (clk_np && clk_np->multi_clk)
        {
            return clk_np->multi_clk;
        }
        else
        {
            const fdt32_t *cell;
            rt_uint32_t count = 0;
            struct rt_ofw_prop *prop;

            prop = rt_ofw_get_prop(clk_ofw_np, "clock-indices", RT_NULL);

            if (prop)
            {
                rt_uint32_t max_idx = 0, idx;

                /* Indices may be sparse; the count spans up to the largest. */
                for (cell = rt_ofw_prop_next_u32(prop, RT_NULL, &idx);
                cell;
                cell = rt_ofw_prop_next_u32(prop, cell, &idx))
                {
                    if (idx > max_idx)
                    {
                        max_idx = idx;
                    }
                }

                count = max_idx + 1;
            }
            else
            {
                rt_ssize_t len;

                if ((prop = rt_ofw_get_prop(clk_ofw_np, "clock-output-names", &len)))
                {
                    char *value = prop->value;

                    /* The property is a packed list of NUL-terminated
                     * strings; each terminator marks one output name. */
                    for (int i = 0; i < len; ++i, ++value)
                    {
                        if (*value == '\0')
                        {
                            ++count;
                        }
                    }
                }
                else
                {
                    count = 1;
                }
            }

            if (clk_np)
            {
                clk_np->multi_clk = count;
            }

            return count;
        }
    }

    return -RT_EINVAL;
}
  846. #endif /* RT_USING_OFW */