/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2022-11-26     GuEe-GUI     first version
 */

#include <rtthread.h>
#include <rtservice.h>
#include <rtdevice.h>

#define DBG_TAG "rtdm.clk"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
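
/*
 * Global clock framework state: a single spinlock protects the clock tree,
 * the list of root clock nodes, and the list of registered rate-change
 * notifiers.
 */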
static struct rt_spinlock _clk_lock = { 0 };
static rt_list_t _clk_nodes = RT_LIST_OBJECT_INIT(_clk_nodes);
static rt_list_t _clk_notifier_nodes = RT_LIST_OBJECT_INIT(_clk_notifier_nodes);
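
/*
 * Reference counting for clock nodes: clk_get()/clk_put() pin a node while
 * any rt_clk handle refers to it. The release callback should never actually
 * run, because rt_clk_unregister() refuses to drop a node that still has
 * users; reaching it indicates a refcount bug, hence the assert.
 */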
static void clk_release(struct rt_ref *r)
{
    struct rt_clk_node *clk_np = rt_container_of(r, struct rt_clk_node, ref);

    LOG_E("%s is released", clk_np->name);
    (void)clk_np;

    RT_ASSERT(0);
}

rt_inline struct rt_clk_node *clk_get(struct rt_clk_node *clk_np)
{
    rt_ref_get(&clk_np->ref);

    return clk_np;
}

rt_inline void clk_put(struct rt_clk_node *clk_np)
{
    rt_ref_put(&clk_np->ref, &clk_release);
}

static struct rt_clk *clk_alloc(struct rt_clk_node *clk_np, const char *dev_id,
        const char *con_id, void *fw_node)
{
    struct rt_clk *clk = rt_calloc(1, sizeof(*clk));

    if (clk)
    {
        clk->clk_np = clk_np;
        clk->dev_id = dev_id;
        clk->con_id = con_id;
        clk->fw_node = fw_node;
    }
    else
    {
        clk = rt_err_ptr(-RT_ENOMEM);
    }

    return clk;
}

static void clk_free(struct rt_clk *clk)
{
    struct rt_clk_node *clk_np = clk->clk_np;

    if (clk_np && clk_np->ops->finit)
    {
        clk_np->ops->finit(clk);
    }

    rt_free(clk);
}

static struct rt_clk *clk_create(struct rt_clk_node *clk_np, const char *dev_id,
        const char *con_id, void *fw_data, void *fw_node)
{
    struct rt_clk *clk = clk_alloc(clk_np, dev_id, con_id, fw_node);

    if (!rt_is_err(clk))
    {
        clk_get(clk_np);

        if (clk_np->ops->init && clk_np->ops->init(clk, fw_data))
        {
            LOG_E("Dev[%s] Con[%s] init fail", dev_id, con_id);

            clk_free(clk);
            /* Drop the node reference taken by clk_get() above */
            clk_put(clk_np);
            clk = RT_NULL;
        }
    }

    return clk;
}

static rt_err_t clk_notify(struct rt_clk_node *clk_np, rt_ubase_t msg, rt_ubase_t old_rate, rt_ubase_t new_rate)
{
    rt_err_t err = RT_EOK;
    struct rt_clk_notifier *notifier;

    rt_list_for_each_entry(notifier, &_clk_notifier_nodes, list)
    {
        if (notifier->clk->clk_np == clk_np)
        {
            err = notifier->callback(notifier, msg, old_rate, new_rate);

            /* Only check hardware errors */
            if (err == -RT_EIO)
            {
                break;
            }
        }
    }

    return err;
}

static void clk_set_parent(struct rt_clk_node *clk_np, struct rt_clk_node *parent_np)
{
    rt_hw_spin_lock(&_clk_lock.lock);

    clk_np->parent = parent_np;
    rt_list_insert_after(&parent_np->children_nodes, &clk_np->list);

    rt_hw_spin_unlock(&_clk_lock.lock);
}

static const struct rt_clk_ops unused_clk_ops =
{
};
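
/*
 * Register a clock node. A node registered with a parent becomes a child in
 * the clock tree and gets a node-level rt_clk handle; a node registered
 * without a parent is linked into the global root list.
 */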
rt_err_t rt_clk_register(struct rt_clk_node *clk_np, struct rt_clk_node *parent_np)
{
    rt_err_t err = RT_EOK;
    struct rt_clk *clk = RT_NULL;

    if (clk_np)
    {
        clk_np->clk = clk;

        if (!clk_np->ops)
        {
            clk_np->ops = &unused_clk_ops;
        }

#if RT_NAME_MAX > 0
        rt_strncpy(clk_np->rt_parent.name, RT_CLK_NODE_OBJ_NAME, RT_NAME_MAX);
#else
        clk_np->rt_parent.name = RT_CLK_NODE_OBJ_NAME;
#endif

        rt_ref_init(&clk_np->ref);
        rt_list_init(&clk_np->list);
        rt_list_init(&clk_np->children_nodes);
        clk_np->multi_clk = 0;

        if (parent_np)
        {
            clk_np->clk = clk_alloc(clk_np, RT_NULL, RT_NULL, RT_NULL);

            /* clk_alloc() returns an error pointer on failure, which is
             * non-NULL, so test it with rt_is_err() instead of a NULL check */
            if (!rt_is_err(clk_np->clk))
            {
                clk_set_parent(clk_np, parent_np);
            }
            else
            {
                clk_np->clk = RT_NULL;
                err = -RT_ENOMEM;
            }
        }
        else
        {
            clk_np->parent = RT_NULL;

            rt_hw_spin_lock(&_clk_lock.lock);
            rt_list_insert_after(&_clk_nodes, &clk_np->list);
            rt_hw_spin_unlock(&_clk_lock.lock);
        }
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}

rt_err_t rt_clk_unregister(struct rt_clk_node *clk_np)
{
    rt_err_t err = RT_EOK;

    if (clk_np)
    {
        err = -RT_EBUSY;

        rt_hw_spin_lock(&_clk_lock.lock);

        if (rt_list_isempty(&clk_np->children_nodes))
        {
            if (rt_ref_read(&clk_np->ref) <= 1)
            {
                rt_list_remove(&clk_np->list);

                /* Root nodes are registered without a node-level clk */
                if (clk_np->clk)
                {
                    clk_free(clk_np->clk);
                }

                err = RT_EOK;
            }
        }

        rt_hw_spin_unlock(&_clk_lock.lock);
    }
    else
    {
        err = -RT_EINVAL;
    }

    return err;
}
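
/*
 * Rate-change notifiers: callers register a callback that clk_notify()
 * invokes for every notifier bound to the same clock node when its rate
 * changes.
 */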
rt_err_t rt_clk_notifier_register(struct rt_clk *clk, struct rt_clk_notifier *notifier)
{
    if (!clk || !clk->clk_np || !notifier)
    {
        return -RT_EINVAL;
    }

    rt_hw_spin_lock(&_clk_lock.lock);

    ++clk->clk_np->notifier_count;
    /* Bind the notifier to this clock so clk_notify() can match it */
    notifier->clk = clk;
    rt_list_init(&notifier->list);
    rt_list_insert_after(&_clk_notifier_nodes, &notifier->list);

    rt_hw_spin_unlock(&_clk_lock.lock);

    return RT_EOK;
}

rt_err_t rt_clk_notifier_unregister(struct rt_clk *clk, struct rt_clk_notifier *notifier)
{
    struct rt_clk_notifier *notifier_find;

    if (!clk || !notifier)
    {
        return -RT_EINVAL;
    }

    rt_hw_spin_lock(&_clk_lock.lock);

    rt_list_for_each_entry(notifier_find, &_clk_notifier_nodes, list)
    {
        /* Remove only the notifier that was passed in, not just any
         * notifier bound to the same clock node */
        if (notifier_find == notifier)
        {
            --clk->clk_np->notifier_count;
            rt_list_remove(&notifier->list);

            break;
        }
    }

    rt_hw_spin_unlock(&_clk_lock.lock);

    return RT_EOK;
}
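
/*
 * clk_prepare()/clk_enable() walk the parent chain recursively so a clock's
 * whole ancestry is brought up; the per-handle prepare/enable counters make
 * the ops callbacks fire only on the first prepare/enable and the last
 * unprepare/disable. The rt_clk_prepare()/rt_clk_unprepare() entry points
 * must not be called from interrupt context.
 */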
static rt_err_t clk_prepare(struct rt_clk *clk, struct rt_clk_node *clk_np)
{
    rt_err_t err = RT_EOK;

    if (clk_np->parent)
    {
        clk_prepare(clk_np->clk, clk_np->parent);
    }

    if (clk->prepare_count == 0 && clk_np->ops->prepare)
    {
        err = clk_np->ops->prepare(clk);
    }

    if (!err)
    {
        ++clk->prepare_count;
    }

    return err;
}

rt_err_t rt_clk_prepare(struct rt_clk *clk)
{
    rt_err_t err = RT_EOK;

    RT_DEBUG_NOT_IN_INTERRUPT;

    if (clk && clk->clk_np)
    {
        rt_hw_spin_lock(&_clk_lock.lock);
        err = clk_prepare(clk, clk->clk_np);
        rt_hw_spin_unlock(&_clk_lock.lock);
    }

    return err;
}

static void clk_unprepare(struct rt_clk *clk, struct rt_clk_node *clk_np)
{
    if (clk_np->parent)
    {
        clk_unprepare(clk_np->clk, clk_np->parent);
    }

    if (clk->prepare_count == 1 && clk_np->ops->unprepare)
    {
        clk_np->ops->unprepare(clk);
    }

    if (clk->prepare_count)
    {
        --clk->prepare_count;
    }
}

rt_err_t rt_clk_unprepare(struct rt_clk *clk)
{
    rt_err_t err = RT_EOK;

    RT_DEBUG_NOT_IN_INTERRUPT;

    if (clk && clk->clk_np)
    {
        rt_hw_spin_lock(&_clk_lock.lock);
        clk_unprepare(clk, clk->clk_np);
        rt_hw_spin_unlock(&_clk_lock.lock);
    }

    return err;
}

static rt_err_t clk_enable(struct rt_clk *clk, struct rt_clk_node *clk_np)
{
    rt_err_t err = RT_EOK;

    if (clk_np->parent)
    {
        clk_enable(clk_np->clk, clk_np->parent);
    }

    if (clk->enable_count == 0 && clk_np->ops->enable)
    {
        err = clk_np->ops->enable(clk);
    }

    if (!err)
    {
        ++clk->enable_count;
    }

    return err;
}

rt_err_t rt_clk_enable(struct rt_clk *clk)
{
    rt_err_t err = RT_EOK;

    if (clk && clk->clk_np)
    {
        rt_hw_spin_lock(&_clk_lock.lock);
        err = clk_enable(clk, clk->clk_np);
        rt_hw_spin_unlock(&_clk_lock.lock);
    }

    return err;
}

static void clk_disable(struct rt_clk *clk, struct rt_clk_node *clk_np)
{
    if (clk_np->parent)
    {
        clk_disable(clk_np->clk, clk_np->parent);
    }

    if (clk->enable_count == 1 && clk_np->ops->disable)
    {
        clk_np->ops->disable(clk);
    }

    if (clk->enable_count)
    {
        --clk->enable_count;
    }
}

void rt_clk_disable(struct rt_clk *clk)
{
    if (clk && clk->clk_np)
    {
        rt_hw_spin_lock(&_clk_lock.lock);
        clk_disable(clk, clk->clk_np);
        rt_hw_spin_unlock(&_clk_lock.lock);
    }
}

rt_err_t rt_clk_prepare_enable(struct rt_clk *clk)
{
    rt_err_t err = RT_EOK;

    RT_DEBUG_NOT_IN_INTERRUPT;

    if (clk)
    {
        err = rt_clk_prepare(clk);

        if (!err)
        {
            err = rt_clk_enable(clk);

            if (err)
            {
                rt_clk_unprepare(clk);
            }
        }
    }

    return err;
}

void rt_clk_disable_unprepare(struct rt_clk *clk)
{
    RT_DEBUG_NOT_IN_INTERRUPT;

    if (clk)
    {
        rt_clk_disable(clk);
        rt_clk_unprepare(clk);
    }
}
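
/*
 * A minimal consumer-side usage sketch (illustrative only; the "uart0"
 * clock name and the 48 MHz rate are hypothetical, not values defined by
 * this file):
 *
 *     struct rt_clk *clk = rt_clk_get_by_name(dev, "uart0");
 *
 *     if (clk && !rt_is_err(clk))
 *     {
 *         rt_clk_prepare_enable(clk);
 *         rt_clk_set_rate(clk, 48000000);
 *         ...
 *         rt_clk_disable_unprepare(clk);
 *         rt_clk_put(clk);
 *     }
 */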
rt_err_t rt_clk_array_prepare(struct rt_clk_array *clk_arr)
{
    rt_err_t err = RT_EOK;

    if (clk_arr)
    {
        for (int i = 0; i < clk_arr->count; ++i)
        {
            if ((err = rt_clk_prepare(clk_arr->clks[i])))
            {
                LOG_E("CLK Array[%d] %s failed error = %s", i,
                        "prepare", rt_strerror(err));

                while (i-- > 0)
                {
                    rt_clk_unprepare(clk_arr->clks[i]);
                }

                break;
            }
        }
    }

    return err;
}

rt_err_t rt_clk_array_unprepare(struct rt_clk_array *clk_arr)
{
    rt_err_t err = RT_EOK;

    if (clk_arr)
    {
        for (int i = 0; i < clk_arr->count; ++i)
        {
            if ((err = rt_clk_unprepare(clk_arr->clks[i])))
            {
                LOG_E("CLK Array[%d] %s failed error = %s", i,
                        "unprepare", rt_strerror(err));

                break;
            }
        }
    }

    return err;
}

rt_err_t rt_clk_array_enable(struct rt_clk_array *clk_arr)
{
    rt_err_t err = RT_EOK;

    if (clk_arr)
    {
        for (int i = 0; i < clk_arr->count; ++i)
        {
            if ((err = rt_clk_enable(clk_arr->clks[i])))
            {
                LOG_E("CLK Array[%d] %s failed error = %s", i,
                        "enable", rt_strerror(err));

                while (i-- > 0)
                {
                    rt_clk_disable(clk_arr->clks[i]);
                }

                break;
            }
        }
    }

    return err;
}

void rt_clk_array_disable(struct rt_clk_array *clk_arr)
{
    if (clk_arr)
    {
        for (int i = 0; i < clk_arr->count; ++i)
        {
            rt_clk_disable(clk_arr->clks[i]);
        }
    }
}

rt_err_t rt_clk_array_prepare_enable(struct rt_clk_array *clk_arr)
{
    rt_err_t err;

    if ((err = rt_clk_array_prepare(clk_arr)))
    {
        return err;
    }

    if ((err = rt_clk_array_enable(clk_arr)))
    {
        rt_clk_array_unprepare(clk_arr);
    }

    return err;
}

void rt_clk_array_disable_unprepare(struct rt_clk_array *clk_arr)
{
    rt_clk_array_disable(clk_arr);
    rt_clk_array_unprepare(clk_arr);
}
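
/*
 * Rate limits: a min/max of 0 means "no limit". Changing the range also
 * re-applies the current rate, clamped into the new range, and rolls the
 * limits back if the hardware rejects it.
 */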
rt_err_t rt_clk_set_rate_range(struct rt_clk *clk, rt_ubase_t min, rt_ubase_t max)
{
    rt_err_t err = RT_EOK;

    if (clk && clk->clk_np)
    {
        struct rt_clk_node *clk_np = clk->clk_np;

        rt_hw_spin_lock(&_clk_lock.lock);

        if (clk_np->ops->set_rate)
        {
            rt_ubase_t rate = clk_np->rate;
            rt_ubase_t old_min = clk_np->min_rate;
            rt_ubase_t old_max = clk_np->max_rate;

            clk_np->min_rate = min;
            clk_np->max_rate = max;

            rate = rt_clamp(rate, min, max);
            err = clk_np->ops->set_rate(clk, rate,
                    rt_clk_get_rate(clk_np->parent ? clk_np->parent->clk : RT_NULL));

            if (err)
            {
                clk_np->min_rate = old_min;
                clk_np->max_rate = old_max;
            }
        }
        else
        {
            err = -RT_ENOSYS;
        }

        rt_hw_spin_unlock(&_clk_lock.lock);
    }

    return err;
}

rt_err_t rt_clk_set_min_rate(struct rt_clk *clk, rt_ubase_t rate)
{
    rt_err_t err = RT_EOK;

    if (clk && clk->clk_np)
    {
        struct rt_clk_node *clk_np = clk->clk_np;

        err = rt_clk_set_rate_range(clk, rate, clk_np->max_rate);
    }

    return err;
}

rt_err_t rt_clk_set_max_rate(struct rt_clk *clk, rt_ubase_t rate)
{
    rt_err_t err = RT_EOK;

    if (clk && clk->clk_np)
    {
        struct rt_clk_node *clk_np = clk->clk_np;

        err = rt_clk_set_rate_range(clk, clk_np->min_rate, rate);
    }

    return err;
}

rt_err_t rt_clk_set_rate(struct rt_clk *clk, rt_ubase_t rate)
{
    rt_err_t err = RT_EOK;
    /* Keep the signed result: a negative value is an error code, and
     * assigning it to the unsigned rate would turn it into a huge rate */
    rt_base_t round_rate = rt_clk_round_rate(clk, rate);

    if (clk && clk->clk_np && round_rate > 0)
    {
        struct rt_clk_node *clk_np = clk->clk_np;

        rate = (rt_ubase_t)round_rate;

        rt_hw_spin_lock(&_clk_lock.lock);

        if (clk_np->min_rate && rate < clk_np->min_rate)
        {
            err = -RT_EINVAL;
        }

        if (clk_np->max_rate && rate > clk_np->max_rate)
        {
            err = -RT_EINVAL;
        }

        if (!err)
        {
            if (clk_np->ops->set_rate)
            {
                rt_ubase_t old_rate = clk_np->rate;

                err = clk_np->ops->set_rate(clk, rate,
                        rt_clk_get_rate(clk_np->parent ? clk_np->parent->clk : RT_NULL));

                if (clk_np->rate != old_rate)
                {
                    /* The hardware rate has already changed at this point */
                    clk_notify(clk_np, RT_CLK_MSG_POST_RATE_CHANGE, old_rate, clk_np->rate);
                }
            }
            else
            {
                err = -RT_ENOSYS;
            }
        }

        rt_hw_spin_unlock(&_clk_lock.lock);
    }

    return err;
}

rt_ubase_t rt_clk_get_rate(struct rt_clk *clk)
{
    rt_ubase_t rate = 0;

    if (clk)
    {
        if (clk->rate)
        {
            rate = clk->rate;
        }
        else if (clk->clk_np)
        {
            rate = clk->clk_np->rate;
        }
    }

    return rate;
}

rt_err_t rt_clk_set_phase(struct rt_clk *clk, int degrees)
{
    rt_err_t err = RT_EOK;

    if (clk && clk->clk_np && clk->clk_np->ops->set_phase)
    {
        rt_hw_spin_lock(&_clk_lock.lock);
        err = clk->clk_np->ops->set_phase(clk, degrees);
        rt_hw_spin_unlock(&_clk_lock.lock);
    }

    return err;
}

rt_base_t rt_clk_get_phase(struct rt_clk *clk)
{
    rt_base_t res = RT_EOK;

    if (clk && clk->clk_np && clk->clk_np->ops->get_phase)
    {
        rt_hw_spin_lock(&_clk_lock.lock);
        res = clk->clk_np->ops->get_phase(clk);
        rt_hw_spin_unlock(&_clk_lock.lock);
    }

    return res;
}
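
/*
 * Round a requested rate to a rate the clock can actually produce: drivers
 * that implement round_rate() decide themselves, others get the request
 * clamped into the configured [min_rate, max_rate] range.
 */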
rt_base_t rt_clk_round_rate(struct rt_clk *clk, rt_ubase_t rate)
{
    rt_base_t res = -RT_EINVAL;

    if (clk && clk->clk_np)
    {
        struct rt_clk_node *clk_np = clk->clk_np;

        if (clk_np->ops->round_rate)
        {
            rt_ubase_t best_parent_rate;

            rt_hw_spin_lock(&_clk_lock.lock);

            if (clk_np->min_rate && clk_np->max_rate)
            {
                rate = rt_clamp(rate, clk_np->min_rate, clk_np->max_rate);
            }

            res = clk_np->ops->round_rate(clk, rate, &best_parent_rate);
            (void)best_parent_rate;

            rt_hw_spin_unlock(&_clk_lock.lock);
        }
        else
        {
            /* Treat an unset (zero) limit as "no limit", as in
             * rt_clk_set_rate(); otherwise everything clamps to 0 */
            if (clk_np->min_rate && rate < clk_np->min_rate)
            {
                res = clk_np->min_rate;
            }
            else if (clk_np->max_rate && rate > clk_np->max_rate)
            {
                res = clk_np->max_rate;
            }
            else
            {
                res = rate;
            }
        }
    }

    return res;
}

rt_err_t rt_clk_set_parent(struct rt_clk *clk, struct rt_clk *clk_parent)
{
    rt_err_t err = RT_EOK;

    if (clk && clk->clk_np && clk->clk_np->ops->set_parent)
    {
        rt_hw_spin_lock(&_clk_lock.lock);
        err = clk->clk_np->ops->set_parent(clk, clk_parent);
        rt_hw_spin_unlock(&_clk_lock.lock);
    }

    return err;
}

struct rt_clk *rt_clk_get_parent(struct rt_clk *clk)
{
    struct rt_clk *parent = RT_NULL;

    /* Also check clk_np: a handle without a node would crash below */
    if (clk && clk->clk_np)
    {
        struct rt_clk_node *clk_np = clk->clk_np;

        rt_hw_spin_lock(&_clk_lock.lock);
        parent = clk_np->parent ? clk_np->parent->clk : RT_NULL;
        rt_hw_spin_unlock(&_clk_lock.lock);
    }

    return parent;
}
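
/*
 * Device-level getters: when RT_USING_OFW is enabled, clocks are resolved
 * from the device's "clocks"/"clock-names" device tree properties;
 * otherwise these return RT_NULL.
 */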
struct rt_clk_array *rt_clk_get_array(struct rt_device *dev)
{
    struct rt_clk_array *clk_arr = RT_NULL;

#ifdef RT_USING_OFW
    clk_arr = rt_ofw_get_clk_array(dev->ofw_node);
#endif

    return clk_arr;
}

struct rt_clk *rt_clk_get_by_index(struct rt_device *dev, int index)
{
    struct rt_clk *clk = RT_NULL;

#ifdef RT_USING_OFW
    clk = rt_ofw_get_clk(dev->ofw_node, index);
#endif

    return clk;
}

struct rt_clk *rt_clk_get_by_name(struct rt_device *dev, const char *name)
{
    struct rt_clk *clk = RT_NULL;

#ifdef RT_USING_OFW
    clk = rt_ofw_get_clk_by_name(dev->ofw_node, name);
#endif

    return clk;
}

void rt_clk_array_put(struct rt_clk_array *clk_arr)
{
    if (clk_arr)
    {
        for (int i = 0; i < clk_arr->count; ++i)
        {
            if (clk_arr->clks[i])
            {
                rt_clk_put(clk_arr->clks[i]);
            }
            else
            {
                break;
            }
        }

        rt_free(clk_arr);
    }
}

void rt_clk_put(struct rt_clk *clk)
{
    if (clk)
    {
        clk_put(clk->clk_np);
        clk_free(clk);
    }
}
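
/*
 * OFW (device tree) backed lookup: resolve a consumer's "clocks" phandle to
 * the provider's rt_clk_node, requesting (probing) the provider first if it
 * has not been initialized yet.
 */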
#ifdef RT_USING_OFW
static struct rt_clk *ofw_get_clk_no_lock(struct rt_ofw_node *np, int index, const char *name, rt_bool_t locked)
{
    struct rt_clk *clk = RT_NULL;
    struct rt_ofw_cell_args clk_args;

    if (!rt_ofw_parse_phandle_cells(np, "clocks", "#clock-cells", index, &clk_args))
    {
        int count;
        struct rt_object *obj;
        struct rt_clk_node *clk_np = RT_NULL;
        struct rt_ofw_node *clk_ofw_np = clk_args.data;

        if (!rt_ofw_data(clk_ofw_np))
        {
            if (locked)
            {
                rt_hw_spin_unlock(&_clk_lock.lock);
            }

            rt_platform_ofw_request(clk_ofw_np);

            if (locked)
            {
                rt_hw_spin_lock(&_clk_lock.lock);
            }
        }

        if (rt_ofw_data(clk_ofw_np) && (obj = rt_ofw_parse_object(clk_ofw_np,
                RT_CLK_NODE_OBJ_NAME, "#clock-cells")))
        {
            clk_np = rt_container_of(obj, struct rt_clk_node, rt_parent);
            count = rt_ofw_count_of_clk(clk_ofw_np);
        }

        rt_ofw_node_put(clk_ofw_np);

        if (clk_np)
        {
            if (count > 1)
            {
                /* args[0] must be the index of CLK */
                clk_np = &clk_np[clk_args.args[0]];
            }

            clk = clk_create(clk_np, np->full_name, name, &clk_args, np);
        }
        else
        {
            clk = rt_err_ptr(-RT_ERROR);
        }
    }

    return clk;
}

static struct rt_clk *ofw_get_clk(struct rt_ofw_node *np, int index, const char *name)
{
    struct rt_clk *clk;

    rt_hw_spin_lock(&_clk_lock.lock);

    clk = ofw_get_clk_no_lock(np, index, name, RT_TRUE);

    rt_hw_spin_unlock(&_clk_lock.lock);

    return clk;
}

struct rt_clk_array *rt_ofw_get_clk_array(struct rt_ofw_node *np)
{
    int count;
    struct rt_clk_array *clk_arr = RT_NULL;

    if (!np)
    {
        return rt_err_ptr(-RT_EINVAL);
    }

    if ((count = rt_ofw_count_phandle_cells(np, "clocks", "#clock-cells")) > 0)
    {
        clk_arr = rt_calloc(1, sizeof(*clk_arr) + sizeof(clk_arr->clks[0]) * count);

        if (clk_arr)
        {
            int i;
            rt_err_t err = RT_EOK;
            rt_bool_t has_name = rt_ofw_prop_read_bool(np, "clock-names");

            clk_arr->count = count;

            rt_hw_spin_lock(&_clk_lock.lock);

            for (i = 0; i < count; ++i)
            {
                const char *name = RT_NULL;

                if (has_name)
                {
                    rt_ofw_prop_read_string_index(np, "clock-names", i, &name);
                }

                clk_arr->clks[i] = ofw_get_clk_no_lock(np, i, name, RT_FALSE);

                if (rt_is_err(clk_arr->clks[i]))
                {
                    err = rt_ptr_err(clk_arr->clks[i]);
                    /* Clear the error pointer so rt_clk_array_put() stops
                     * at this slot instead of putting a bogus clk */
                    clk_arr->clks[i] = RT_NULL;

                    break;
                }
            }

            rt_hw_spin_unlock(&_clk_lock.lock);

            if (i < count)
            {
                rt_clk_array_put(clk_arr);
                clk_arr = rt_err_ptr(err);
            }
        }
    }

    return clk_arr;
}

struct rt_clk *rt_ofw_get_clk(struct rt_ofw_node *np, int index)
{
    struct rt_clk *clk = RT_NULL;

    if (np && index >= 0)
    {
        clk = ofw_get_clk(np, index, RT_NULL);
    }

    return clk;
}

struct rt_clk *rt_ofw_get_clk_by_name(struct rt_ofw_node *np, const char *name)
{
    struct rt_clk *clk = RT_NULL;

    if (np && name)
    {
        int index = rt_ofw_prop_index_of_string(np, "clock-names", name);

        if (index >= 0)
        {
            clk = ofw_get_clk(np, index, name);
        }
    }

    return clk;
}
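
/*
 * Count how many clock outputs a provider node exposes: prefer the cached
 * value, then the highest "clock-indices" entry plus one, then the number
 * of "clock-output-names" strings, and fall back to a single output.
 */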
rt_ssize_t rt_ofw_count_of_clk(struct rt_ofw_node *clk_ofw_np)
{
    if (clk_ofw_np)
    {
        struct rt_clk_node *clk_np = rt_ofw_data(clk_ofw_np);

        if (clk_np && clk_np->multi_clk)
        {
            return clk_np->multi_clk;
        }
        else
        {
            const fdt32_t *cell;
            rt_uint32_t count = 0;
            struct rt_ofw_prop *prop;

            prop = rt_ofw_get_prop(clk_ofw_np, "clock-indices", RT_NULL);

            if (prop)
            {
                rt_uint32_t max_idx = 0, idx;

                for (cell = rt_ofw_prop_next_u32(prop, RT_NULL, &idx);
                     cell;
                     cell = rt_ofw_prop_next_u32(prop, cell, &idx))
                {
                    if (idx > max_idx)
                    {
                        max_idx = idx;
                    }
                }

                count = max_idx + 1;
            }
            else
            {
                rt_ssize_t len;

                if ((prop = rt_ofw_get_prop(clk_ofw_np, "clock-output-names", &len)))
                {
                    char *value = prop->value;

                    for (int i = 0; i < len; ++i, ++value)
                    {
                        if (*value == '\0')
                        {
                            ++count;
                        }
                    }
                }
                else
                {
                    count = 1;
                }
            }

            if (clk_np)
            {
                clk_np->multi_clk = count;
            }

            return count;
        }
    }

    return -RT_EINVAL;
}
#endif /* RT_USING_OFW */