/**************************************************************************//**
*
* @copyright (C) 2020 Nuvoton Technology Corp. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date            Author       Notes
* 2020-2-7        Wayne        First version
*
******************************************************************************/
#include <rtconfig.h>

#if defined(BSP_USING_PDMA)

#include <rtdevice.h>
#include <rtthread.h>
#include <drv_pdma.h>
#include <nu_bitutil.h>

/* Private define ---------------------------------------------------------------*/
// RT_DEV_NAME_PREFIX pdma

#ifndef NU_PDMA_MEMFUN_ACTOR_MAX
    #define NU_PDMA_MEMFUN_ACTOR_MAX (4)
#endif

#define NU_PDMA_SG_TBL_MAXSIZE    (NU_PDMA_SG_LIMITED_DISTANCE / sizeof(DSCT_T))

#define NU_PDMA_CH_MAX    (PDMA_CH_MAX)    /* Specify maximum channels of PDMA */
#define NU_PDMA_CH_Pos    (0)              /* Specify first channel number of PDMA */
#define NU_PDMA_CH_Msk    (((1 << NU_PDMA_CH_MAX) - 1) << NU_PDMA_CH_Pos)
/* Private typedef --------------------------------------------------------------*/
struct nu_pdma_periph_ctl
{
    uint32_t           m_u32Peripheral;
    nu_pdma_memctrl_t  m_eMemCtl;
};
typedef struct nu_pdma_periph_ctl nu_pdma_periph_ctl_t;

struct nu_pdma_chn
{
    nu_pdma_cb_handler_t  m_pfnCBHandler;
    void                 *m_pvUserData;
    uint32_t              m_u32EventFilter;
    uint32_t              m_u32IdleTimeout_us;
    nu_pdma_periph_ctl_t  m_spPeripCtl;
};
typedef struct nu_pdma_chn nu_pdma_chn_t;

struct nu_pdma_memfun_actor
{
    int       m_i32ChannID;
    uint32_t  m_u32Result;
    rt_sem_t  m_psSemMemFun;
};
typedef struct nu_pdma_memfun_actor *nu_pdma_memfun_actor_t;
/* Private functions ------------------------------------------------------------*/
static int nu_pdma_peripheral_set(uint32_t u32PeriphType);
static void nu_pdma_init(void);
static void nu_pdma_channel_enable(int i32ChannID);
static void nu_pdma_channel_disable(int i32ChannID);
static void nu_pdma_channel_reset(int i32ChannID);
static rt_err_t nu_pdma_timeout_set(int i32ChannID, int i32Timeout_us);
static void nu_pdma_periph_ctrl_fill(int i32ChannID, int i32CtlPoolIdx);
static rt_size_t nu_pdma_memfun(void *dest, void *src, uint32_t u32DataWidth, unsigned int count, nu_pdma_memctrl_t eMemCtl);
static void nu_pdma_memfun_cb(void *pvUserData, uint32_t u32Events);
static void nu_pdma_memfun_actor_init(void);
static int nu_pdma_memfun_employ(void);
static int nu_pdma_non_transfer_count_get(int32_t i32ChannID);

/* Public functions -------------------------------------------------------------*/

/* Private variables ------------------------------------------------------------*/
static volatile int nu_pdma_inited = 0;
static volatile uint32_t nu_pdma_chn_mask = 0;
static nu_pdma_chn_t nu_pdma_chn_arr[NU_PDMA_CH_MAX];
static rt_mutex_t g_mutex_res = RT_NULL;
static volatile uint32_t nu_pdma_memfun_actor_mask = 0;
static volatile uint32_t nu_pdma_memfun_actor_maxnum = 0;
static rt_sem_t nu_pdma_memfun_actor_pool_sem = RT_NULL;
static rt_mutex_t nu_pdma_memfun_actor_pool_lock = RT_NULL;
static const nu_pdma_periph_ctl_t g_nu_pdma_peripheral_ctl_pool[ ] =
{
    // M2M
    { PDMA_MEM,          eMemCtl_SrcInc_DstInc },

    // M2P
    { PDMA_USB_TX,       eMemCtl_SrcInc_DstFix },
    { PDMA_UART0_TX,     eMemCtl_SrcInc_DstFix },
    { PDMA_UART1_TX,     eMemCtl_SrcInc_DstFix },
    { PDMA_UART2_TX,     eMemCtl_SrcInc_DstFix },
    { PDMA_UART3_TX,     eMemCtl_SrcInc_DstFix },
    { PDMA_UART4_TX,     eMemCtl_SrcInc_DstFix },
    { PDMA_UART5_TX,     eMemCtl_SrcInc_DstFix },
    { PDMA_UART6_TX,     eMemCtl_SrcInc_DstFix },
    { PDMA_UART7_TX,     eMemCtl_SrcInc_DstFix },
    { PDMA_USCI0_TX,     eMemCtl_SrcInc_DstFix },
    { PDMA_USCI1_TX,     eMemCtl_SrcInc_DstFix },
    { PDMA_QSPI0_TX,     eMemCtl_SrcInc_DstFix },
    { PDMA_QSPI1_TX,     eMemCtl_SrcInc_DstFix },
    { PDMA_SPI0_TX,      eMemCtl_SrcInc_DstFix },
    { PDMA_SPI1_TX,      eMemCtl_SrcInc_DstFix },
    { PDMA_SPI2_TX,      eMemCtl_SrcInc_DstFix },
    { PDMA_SPI3_TX,      eMemCtl_SrcInc_DstFix },
    { PDMA_I2C0_TX,      eMemCtl_SrcInc_DstFix },
    { PDMA_I2C1_TX,      eMemCtl_SrcInc_DstFix },
    { PDMA_I2C2_TX,      eMemCtl_SrcInc_DstFix },
    { PDMA_I2S0_TX,      eMemCtl_SrcInc_DstFix },
    { PDMA_DAC0_TX,      eMemCtl_SrcInc_DstFix },
    { PDMA_DAC1_TX,      eMemCtl_SrcInc_DstFix },
    { PDMA_EPWM0_CH0_TX, eMemCtl_SrcInc_DstFix },
    { PDMA_EPWM0_CH1_TX, eMemCtl_SrcInc_DstFix },
    { PDMA_EPWM0_CH2_TX, eMemCtl_SrcInc_DstFix },
    { PDMA_EPWM0_CH3_TX, eMemCtl_SrcInc_DstFix },
    { PDMA_EPWM0_CH4_TX, eMemCtl_SrcInc_DstFix },
    { PDMA_EPWM1_CH0_TX, eMemCtl_SrcInc_DstFix },
    { PDMA_EPWM1_CH1_TX, eMemCtl_SrcInc_DstFix },
    { PDMA_EPWM1_CH2_TX, eMemCtl_SrcInc_DstFix },
    { PDMA_EPWM1_CH3_TX, eMemCtl_SrcInc_DstFix },
    { PDMA_EPWM1_CH4_TX, eMemCtl_SrcInc_DstFix },

    // P2M
    { PDMA_USB_RX,       eMemCtl_SrcFix_DstInc },
    { PDMA_UART0_RX,     eMemCtl_SrcFix_DstInc },
    { PDMA_UART1_RX,     eMemCtl_SrcFix_DstInc },
    { PDMA_UART2_RX,     eMemCtl_SrcFix_DstInc },
    { PDMA_UART3_RX,     eMemCtl_SrcFix_DstInc },
    { PDMA_UART4_RX,     eMemCtl_SrcFix_DstInc },
    { PDMA_UART5_RX,     eMemCtl_SrcFix_DstInc },
    { PDMA_UART6_RX,     eMemCtl_SrcFix_DstInc },
    { PDMA_UART7_RX,     eMemCtl_SrcFix_DstInc },
    { PDMA_USCI0_RX,     eMemCtl_SrcFix_DstInc },
    { PDMA_USCI1_RX,     eMemCtl_SrcFix_DstInc },
    { PDMA_QSPI0_RX,     eMemCtl_SrcFix_DstInc },
    { PDMA_QSPI1_RX,     eMemCtl_SrcFix_DstInc },
    { PDMA_SPI0_RX,      eMemCtl_SrcFix_DstInc },
    { PDMA_SPI1_RX,      eMemCtl_SrcFix_DstInc },
    { PDMA_SPI2_RX,      eMemCtl_SrcFix_DstInc },
    { PDMA_SPI3_RX,      eMemCtl_SrcFix_DstInc },
    { PDMA_EPWM0_P1_RX,  eMemCtl_SrcFix_DstInc },
    { PDMA_EPWM0_P2_RX,  eMemCtl_SrcFix_DstInc },
    { PDMA_EPWM0_P3_RX,  eMemCtl_SrcFix_DstInc },
    { PDMA_EPWM1_P1_RX,  eMemCtl_SrcFix_DstInc },
    { PDMA_EPWM1_P2_RX,  eMemCtl_SrcFix_DstInc },
    { PDMA_EPWM1_P3_RX,  eMemCtl_SrcFix_DstInc },
    { PDMA_I2C0_RX,      eMemCtl_SrcFix_DstInc },
    { PDMA_I2C1_RX,      eMemCtl_SrcFix_DstInc },
    { PDMA_I2C2_RX,      eMemCtl_SrcFix_DstInc },
    { PDMA_I2S0_RX,      eMemCtl_SrcFix_DstInc },
    { PDMA_EADC0_RX,     eMemCtl_SrcFix_DstInc },
    { PDMA_EADC1_RX,     eMemCtl_SrcFix_DstInc },
};
#define NU_PERIPHERAL_SIZE ( sizeof(g_nu_pdma_peripheral_ctl_pool) / sizeof(g_nu_pdma_peripheral_ctl_pool[0]) )

static struct nu_pdma_memfun_actor nu_pdma_memfun_actor_arr[NU_PDMA_MEMFUN_ACTOR_MAX];

/* SG table pool */
static DSCT_T nu_pdma_sgtbl_arr[NU_PDMA_SGTBL_POOL_SIZE] = { 0 };
static uint32_t nu_pdma_sgtbl_token[RT_ALIGN(NU_PDMA_SGTBL_POOL_SIZE, 32) / 32];
static rt_mutex_t g_mutex_sg = RT_NULL;
static int nu_pdma_peripheral_set(uint32_t u32PeriphType)
{
    int idx = 0;

    while (idx < NU_PERIPHERAL_SIZE)
    {
        if (g_nu_pdma_peripheral_ctl_pool[idx].m_u32Peripheral == u32PeriphType)
            return idx;
        idx++;
    }

    // No such peripheral
    return -1;
}
static void nu_pdma_periph_ctrl_fill(int i32ChannID, int i32CtlPoolIdx)
{
    nu_pdma_chn_t *psPdmaChann = &nu_pdma_chn_arr[i32ChannID - NU_PDMA_CH_Pos];
    psPdmaChann->m_spPeripCtl.m_u32Peripheral = g_nu_pdma_peripheral_ctl_pool[i32CtlPoolIdx].m_u32Peripheral;
    psPdmaChann->m_spPeripCtl.m_eMemCtl = g_nu_pdma_peripheral_ctl_pool[i32CtlPoolIdx].m_eMemCtl;
}
static void nu_pdma_init(void)
{
    int latest = 0;
    if (nu_pdma_inited)
        return;

    g_mutex_res = rt_mutex_create("pdmalock", RT_IPC_FLAG_PRIO);
    RT_ASSERT(g_mutex_res != RT_NULL);

    g_mutex_sg = rt_mutex_create("sgtbles", RT_IPC_FLAG_PRIO);
    RT_ASSERT(g_mutex_sg != RT_NULL);

    nu_pdma_chn_mask = ~NU_PDMA_CH_Msk;
    rt_memset(nu_pdma_chn_arr, 0x00, NU_PDMA_CH_MAX * sizeof(nu_pdma_chn_t));

    NVIC_EnableIRQ(PDMA_IRQn);

    /* Initialize PDMA setting */
    PDMA_Open(PDMA, NU_PDMA_CH_Msk);
    PDMA_Close(PDMA);

    rt_memset(&nu_pdma_sgtbl_arr[0], 0x00, sizeof(nu_pdma_sgtbl_arr));

    /* Assign first SG table address as PDMA SG table base address */
    PDMA->SCATBA = (uint32_t)&nu_pdma_sgtbl_arr[0];

    /* Initialize token pool: every set bit marks an available SG table. */
    rt_memset(&nu_pdma_sgtbl_token[0], 0xff, sizeof(nu_pdma_sgtbl_token));
    if (NU_PDMA_SGTBL_POOL_SIZE % 32)
    {
        /* Clear the out-of-range bits in the last token word. Guarded so a
           pool size that is a multiple of 32 does not index past the array. */
        latest = NU_PDMA_SGTBL_POOL_SIZE / 32;
        nu_pdma_sgtbl_token[latest] ^= ~((1 << (NU_PDMA_SGTBL_POOL_SIZE % 32)) - 1);
    }

    nu_pdma_inited = 1;
}
static void nu_pdma_channel_enable(int i32ChannID)
{
    PDMA_Open(PDMA, 1 << i32ChannID);
}

static inline void nu_pdma_channel_disable(int i32ChannID)
{
    PDMA->CHCTL &= ~(1 << i32ChannID);
}

static inline void nu_pdma_channel_reset(int i32ChannID)
{
    PDMA->CHRST = (1 << i32ChannID);
}
void nu_pdma_channel_terminate(int i32ChannID)
{
    int i;
    uint32_t u32EnabledChans;
    int ch_mask = 0;
    rt_err_t result;

    if (!(nu_pdma_chn_mask & (1 << i32ChannID)))
        goto exit_pdma_channel_terminate;

    result = rt_mutex_take(g_mutex_res, RT_WAITING_FOREVER);
    RT_ASSERT(result == RT_EOK);

    // Pause all channels except the one being terminated.
    u32EnabledChans = nu_pdma_chn_mask & NU_PDMA_CH_Msk;
    while ((i = nu_ctz(u32EnabledChans)) != 32)
    {
        ch_mask = (1 << i);
        if (i == i32ChannID)
        {
            u32EnabledChans &= ~ch_mask;
            continue;
        }

        // Pause the channel
        PDMA_PAUSE(PDMA, i);

        // Wait for the channel to finish its current transfer
        while (PDMA->TACTSTS & ch_mask) { }

        u32EnabledChans &= ~ch_mask;
    } //while

    // Reset the specified channel
    nu_pdma_channel_reset(i32ChannID);

    // Clean descriptor table control register.
    PDMA->DSCT[i32ChannID].CTL = 0UL;

    // Resume all channels.
    u32EnabledChans = nu_pdma_chn_mask & NU_PDMA_CH_Msk;
    while ((i = nu_ctz(u32EnabledChans)) != 32)
    {
        ch_mask = (1 << i);

        PDMA->CHCTL |= ch_mask;
        PDMA_Trigger(PDMA, i);
        u32EnabledChans &= ~ch_mask;
    }

    result = rt_mutex_release(g_mutex_res);
    RT_ASSERT(result == RT_EOK);

exit_pdma_channel_terminate:
    return;
}
static rt_err_t nu_pdma_timeout_set(int i32ChannID, int i32Timeout_us)
{
    rt_err_t ret = RT_EINVAL;

    if (!(nu_pdma_chn_mask & (1 << i32ChannID)))
        goto exit_nu_pdma_timeout_set;

    nu_pdma_chn_arr[i32ChannID - NU_PDMA_CH_Pos].m_u32IdleTimeout_us = i32Timeout_us;

    if (i32Timeout_us && i32ChannID <= 1)   // Only channels 0 and 1 support timeout on M480.
    {
        uint32_t u32ToClk_Max = 1000000 / (CLK_GetHCLKFreq() / (1 << 8));
        uint32_t u32Divider = (i32Timeout_us / u32ToClk_Max) / (1 << 16);
        uint32_t u32TOutCnt = (i32Timeout_us / u32ToClk_Max) % (1 << 16);

        PDMA_DisableTimeout(PDMA, 1 << i32ChannID);
        PDMA_EnableInt(PDMA, i32ChannID, PDMA_INT_TIMEOUT); // Interrupt type

        if (u32Divider > 7)
        {
            u32Divider = 7;
            u32TOutCnt = (1 << 16) - 1; // Saturate to the maximum value of the 16-bit counter field.
        }
        PDMA->TOUTPSC |= (u32Divider << (PDMA_TOUTPSC_TOUTPSC1_Pos * i32ChannID));
        PDMA_SetTimeOut(PDMA, i32ChannID, 1, u32TOutCnt);

        ret = RT_EOK;
    }
    else
    {
        PDMA_DisableInt(PDMA, i32ChannID, PDMA_INT_TIMEOUT); // Interrupt type
        PDMA_DisableTimeout(PDMA, 1 << i32ChannID);
    }

exit_nu_pdma_timeout_set:
    return -(ret);
}
int nu_pdma_channel_allocate(int32_t i32PeripType)
{
    int i, i32PeripCtlIdx;

    nu_pdma_init();

    if ((i32PeripCtlIdx = nu_pdma_peripheral_set(i32PeripType)) < 0)
        goto exit_nu_pdma_channel_allocate;

    /* Find the position of first '0' in nu_pdma_chn_mask. */
    i = nu_cto(nu_pdma_chn_mask);
    if (i != 32)
    {
        nu_pdma_chn_mask |= (1 << i);
        rt_memset(nu_pdma_chn_arr + i - NU_PDMA_CH_Pos, 0x00, sizeof(nu_pdma_chn_t));

        /* Set idx number of g_nu_pdma_peripheral_ctl_pool */
        nu_pdma_periph_ctrl_fill(i, i32PeripCtlIdx);

        /* Reset channel */
        nu_pdma_channel_reset(i);

        nu_pdma_channel_enable(i);

        return i;
    }

exit_nu_pdma_channel_allocate:
    // No channel available
    return -(RT_ERROR);
}
rt_err_t nu_pdma_channel_free(int i32ChannID)
{
    rt_err_t ret = RT_EINVAL;

    if (!nu_pdma_inited)
        goto exit_nu_pdma_channel_free;

    if (i32ChannID < NU_PDMA_CH_MAX && i32ChannID >= NU_PDMA_CH_Pos)
    {
        nu_pdma_chn_mask &= ~(1 << i32ChannID);
        nu_pdma_channel_disable(i32ChannID);
        ret = RT_EOK;
    }

exit_nu_pdma_channel_free:
    return -(ret);
}
rt_err_t nu_pdma_callback_register(int i32ChannID, nu_pdma_cb_handler_t pfnHandler, void *pvUserData, uint32_t u32EventFilter)
{
    rt_err_t ret = RT_EINVAL;

    if (!(nu_pdma_chn_mask & (1 << i32ChannID)))
        goto exit_nu_pdma_callback_register;

    nu_pdma_chn_arr[i32ChannID - NU_PDMA_CH_Pos].m_pfnCBHandler = pfnHandler;
    nu_pdma_chn_arr[i32ChannID - NU_PDMA_CH_Pos].m_pvUserData = pvUserData;
    nu_pdma_chn_arr[i32ChannID - NU_PDMA_CH_Pos].m_u32EventFilter = u32EventFilter;

    ret = RT_EOK;

exit_nu_pdma_callback_register:
    return -(ret);
}
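
/* Usage sketch (illustrative only, compiled out): allocate a channel for
 * UART0 TX requests and hook a completion callback. The handler and setup
 * function names below are hypothetical; PDMA_UART0_TX and the event flags
 * come from this driver's API. */
#if 0
static void uart0_tx_done_cb(void *pvUserData, uint32_t u32Events)
{
    if (u32Events & NU_PDMA_EVENT_TRANSFER_DONE)
    {
        /* e.g. release a semaphore passed in via pvUserData. */
    }
}

static int uart0_tx_dma_setup(void)
{
    int ch = nu_pdma_channel_allocate(PDMA_UART0_TX);
    if (ch >= 0)
    {
        nu_pdma_callback_register(ch, uart0_tx_done_cb, RT_NULL,
                                  NU_PDMA_EVENT_TRANSFER_DONE | NU_PDMA_EVENT_ABORT);
    }
    return ch;
}
#endif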
nu_pdma_cb_handler_t nu_pdma_callback_hijack(int i32ChannID, nu_pdma_cb_handler_t *ppfnHandler_Hijack,
        void **ppvUserData_Hijack, uint32_t *pu32Events_Hijack)
{
    nu_pdma_cb_handler_t pfnHandler_Org = NULL;
    void *pvUserData_Org;
    uint32_t u32Events_Org;

    RT_ASSERT(ppfnHandler_Hijack != NULL);
    RT_ASSERT(ppvUserData_Hijack != NULL);
    RT_ASSERT(pu32Events_Hijack != NULL);

    if (!(nu_pdma_chn_mask & (1 << i32ChannID)))
        goto exit_nu_pdma_callback_hijack;

    pfnHandler_Org = nu_pdma_chn_arr[i32ChannID - NU_PDMA_CH_Pos].m_pfnCBHandler;
    pvUserData_Org = nu_pdma_chn_arr[i32ChannID - NU_PDMA_CH_Pos].m_pvUserData;
    u32Events_Org = nu_pdma_chn_arr[i32ChannID - NU_PDMA_CH_Pos].m_u32EventFilter;

    nu_pdma_chn_arr[i32ChannID - NU_PDMA_CH_Pos].m_pfnCBHandler = *ppfnHandler_Hijack;
    nu_pdma_chn_arr[i32ChannID - NU_PDMA_CH_Pos].m_pvUserData = *ppvUserData_Hijack;
    nu_pdma_chn_arr[i32ChannID - NU_PDMA_CH_Pos].m_u32EventFilter = *pu32Events_Hijack;

    *ppfnHandler_Hijack = pfnHandler_Org;
    *ppvUserData_Hijack = pvUserData_Org;
    *pu32Events_Hijack = u32Events_Org;

exit_nu_pdma_callback_hijack:
    return pfnHandler_Org;
}
static int nu_pdma_non_transfer_count_get(int32_t i32ChannID)
{
    return ((PDMA->DSCT[i32ChannID].CTL & PDMA_DSCT_CTL_TXCNT_Msk) >> PDMA_DSCT_CTL_TXCNT_Pos) + 1;
}

int nu_pdma_transferred_byte_get(int32_t i32ChannID, int32_t i32TriggerByteLen)
{
    int i32BitWidth = 0;
    int cur_txcnt = 0;

    if (!(nu_pdma_chn_mask & (1 << i32ChannID)))
        goto exit_nu_pdma_transferred_byte_get;

    i32BitWidth = PDMA->DSCT[i32ChannID].CTL & PDMA_DSCT_CTL_TXWIDTH_Msk;
    i32BitWidth = (i32BitWidth == PDMA_WIDTH_8) ? 1 : (i32BitWidth == PDMA_WIDTH_16) ? 2 : (i32BitWidth == PDMA_WIDTH_32) ? 4 : 0;

    cur_txcnt = nu_pdma_non_transfer_count_get(i32ChannID);

    return (i32TriggerByteLen - (cur_txcnt) * i32BitWidth);

exit_nu_pdma_transferred_byte_get:
    return -1;
}
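
/* Usage sketch (illustrative only, compiled out): from a timeout-event
 * callback, recover how many bytes arrived before the peripheral went idle.
 * RX_TRIGGER_LEN and the user-data layout are hypothetical; the trigger
 * length must match what was passed to nu_pdma_transfer(). */
#if 0
static void uart_rx_idle_cb(void *pvUserData, uint32_t u32Events)
{
    if (u32Events & NU_PDMA_EVENT_TIMEOUT)
    {
        int ch = *(int *)pvUserData;   /* Hypothetical: channel ID carried in user data. */
        int received = nu_pdma_transferred_byte_get(ch, RX_TRIGGER_LEN);
        (void)received;                /* Hand the byte count to the consumer here. */
    }
}
#endif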
nu_pdma_memctrl_t nu_pdma_channel_memctrl_get(int i32ChannID)
{
    nu_pdma_memctrl_t eMemCtrl = eMemCtl_Undefined;

    if (!(nu_pdma_chn_mask & (1 << i32ChannID)))
        goto exit_nu_pdma_channel_memctrl_get;

    eMemCtrl = nu_pdma_chn_arr[i32ChannID - NU_PDMA_CH_Pos].m_spPeripCtl.m_eMemCtl;

exit_nu_pdma_channel_memctrl_get:
    return eMemCtrl;
}

rt_err_t nu_pdma_channel_memctrl_set(int i32ChannID, nu_pdma_memctrl_t eMemCtrl)
{
    rt_err_t ret = RT_EINVAL;
    nu_pdma_chn_t *psPdmaChann = &nu_pdma_chn_arr[i32ChannID - NU_PDMA_CH_Pos];

    if (!(nu_pdma_chn_mask & (1 << i32ChannID)))
        goto exit_nu_pdma_channel_memctrl_set;
    else if ((eMemCtrl < eMemCtl_SrcFix_DstFix) || (eMemCtrl > eMemCtl_SrcInc_DstInc))
        goto exit_nu_pdma_channel_memctrl_set;

    /* PDMA_MEM/SAR_FIX/BURST mode is not supported. */
    if ((psPdmaChann->m_spPeripCtl.m_u32Peripheral == PDMA_MEM) &&
            ((eMemCtrl == eMemCtl_SrcFix_DstInc) || (eMemCtrl == eMemCtl_SrcFix_DstFix)))
        goto exit_nu_pdma_channel_memctrl_set;

    nu_pdma_chn_arr[i32ChannID - NU_PDMA_CH_Pos].m_spPeripCtl.m_eMemCtl = eMemCtrl;
    ret = RT_EOK;

exit_nu_pdma_channel_memctrl_set:
    return -(ret);
}
static void nu_pdma_channel_memctrl_fill(nu_pdma_memctrl_t eMemCtl, uint32_t *pu32SrcCtl, uint32_t *pu32DstCtl)
{
    switch ((int)eMemCtl)
    {
    case eMemCtl_SrcFix_DstFix:
        *pu32SrcCtl = PDMA_SAR_FIX;
        *pu32DstCtl = PDMA_DAR_FIX;
        break;
    case eMemCtl_SrcFix_DstInc:
        *pu32SrcCtl = PDMA_SAR_FIX;
        *pu32DstCtl = PDMA_DAR_INC;
        break;
    case eMemCtl_SrcInc_DstFix:
        *pu32SrcCtl = PDMA_SAR_INC;
        *pu32DstCtl = PDMA_DAR_FIX;
        break;
    case eMemCtl_SrcInc_DstInc:
        *pu32SrcCtl = PDMA_SAR_INC;
        *pu32DstCtl = PDMA_DAR_INC;
        break;
    default:
        break;
    }
}
/* This is for Scatter-gather DMA. */
rt_err_t nu_pdma_desc_setup(int i32ChannID, nu_pdma_desc_t dma_desc, uint32_t u32DataWidth, uint32_t u32AddrSrc,
                            uint32_t u32AddrDst, int32_t i32TransferCnt, nu_pdma_desc_t next)
{
    nu_pdma_periph_ctl_t *psPeriphCtl = NULL;

    uint32_t u32SrcCtl = 0;
    uint32_t u32DstCtl = 0;

    rt_err_t ret = RT_EINVAL;

    if (!dma_desc)
        goto exit_nu_pdma_desc_setup;
    else if (!(nu_pdma_chn_mask & (1 << i32ChannID)))
        goto exit_nu_pdma_desc_setup;
    else if (!(u32DataWidth == 8 || u32DataWidth == 16 || u32DataWidth == 32))
        goto exit_nu_pdma_desc_setup;
    else if ((u32AddrSrc % (u32DataWidth / 8)) || (u32AddrDst % (u32DataWidth / 8)))
        goto exit_nu_pdma_desc_setup;
    else if (i32TransferCnt > NU_PDMA_MAX_TXCNT)
        goto exit_nu_pdma_desc_setup;

    psPeriphCtl = &nu_pdma_chn_arr[i32ChannID - NU_PDMA_CH_Pos].m_spPeripCtl;

    nu_pdma_channel_memctrl_fill(psPeriphCtl->m_eMemCtl, &u32SrcCtl, &u32DstCtl);

    dma_desc->CTL = ((i32TransferCnt - 1) << PDMA_DSCT_CTL_TXCNT_Pos) |
                    ((u32DataWidth == 8) ? PDMA_WIDTH_8 : (u32DataWidth == 16) ? PDMA_WIDTH_16 : PDMA_WIDTH_32) |
                    u32SrcCtl |
                    u32DstCtl |
                    PDMA_OP_BASIC;

    dma_desc->SA = u32AddrSrc;
    dma_desc->DA = u32AddrDst;
    dma_desc->NEXT = 0;  /* Terminating node by default. */

    if (psPeriphCtl->m_u32Peripheral == PDMA_MEM)
    {
        /* For M2M transfer */
        dma_desc->CTL |= (PDMA_REQ_BURST | PDMA_BURST_32);
    }
    else
    {
        /* For P2M and M2P transfer */
        dma_desc->CTL |= (PDMA_REQ_SINGLE);
    }

    if (next)
    {
        /* Link to next descriptor and switch to scatter-gather DMA mode. */
        dma_desc->CTL = (dma_desc->CTL & ~PDMA_DSCT_CTL_OPMODE_Msk) | PDMA_OP_SCATTER;
        dma_desc->NEXT = (uint32_t)next - (PDMA->SCATBA);
    }

    ret = RT_EOK;

exit_nu_pdma_desc_setup:
    return -(ret);
}
static int nu_pdma_sgtbls_token_allocate(void)
{
    int idx, i;
    int pool_size = sizeof(nu_pdma_sgtbl_token) / sizeof(uint32_t);

    for (i = 0; i < pool_size; i++)
    {
        if ((idx = nu_ctz(nu_pdma_sgtbl_token[i])) != 32)
        {
            nu_pdma_sgtbl_token[i] &= ~(1 << idx);
            idx += i * 32;
            return idx;
        }
    }

    /* No token available. */
    return -1;
}

static void nu_pdma_sgtbls_token_free(nu_pdma_desc_t psSgtbls)
{
    int idx = (int)(psSgtbls - &nu_pdma_sgtbl_arr[0]);
    RT_ASSERT(idx >= 0);
    RT_ASSERT((idx + 1) <= NU_PDMA_SGTBL_POOL_SIZE);
    nu_pdma_sgtbl_token[idx / 32] |= (1 << (idx % 32));
}
rt_err_t nu_pdma_sgtbls_allocate(nu_pdma_desc_t *ppsSgtbls, int num)
{
    int i, j, idx;
    rt_err_t result;

    RT_ASSERT(ppsSgtbls != NULL);
    RT_ASSERT(num <= NU_PDMA_SG_TBL_MAXSIZE);

    result = rt_mutex_take(g_mutex_sg, RT_WAITING_FOREVER);
    RT_ASSERT(result == RT_EOK);

    for (i = 0; i < num; i++)
    {
        ppsSgtbls[i] = NULL;

        /* Get token. */
        if ((idx = nu_pdma_sgtbls_token_allocate()) < 0)
        {
            rt_kprintf("No available sgtbl.\n");
            goto fail_nu_pdma_sgtbls_allocate;
        }

        ppsSgtbls[i] = (nu_pdma_desc_t)&nu_pdma_sgtbl_arr[idx];
    }

    result = rt_mutex_release(g_mutex_sg);
    RT_ASSERT(result == RT_EOK);

    return RT_EOK;

fail_nu_pdma_sgtbls_allocate:
    /* Release allocated tables. */
    for (j = 0; j < i; j++)
    {
        if (ppsSgtbls[j] != NULL)
        {
            nu_pdma_sgtbls_token_free(ppsSgtbls[j]);
        }
        ppsSgtbls[j] = NULL;
    }

    result = rt_mutex_release(g_mutex_sg);
    RT_ASSERT(result == RT_EOK);

    return -RT_ERROR;
}
void nu_pdma_sgtbls_free(nu_pdma_desc_t *ppsSgtbls, int num)
{
    int i;
    rt_err_t result;

    RT_ASSERT(ppsSgtbls != NULL);
    RT_ASSERT(num <= NU_PDMA_SG_TBL_MAXSIZE);

    result = rt_mutex_take(g_mutex_sg, RT_WAITING_FOREVER);
    RT_ASSERT(result == RT_EOK);

    for (i = 0; i < num; i++)
    {
        if (ppsSgtbls[i] != NULL)
        {
            nu_pdma_sgtbls_token_free(ppsSgtbls[i]);
        }
        ppsSgtbls[i] = NULL;
    }

    result = rt_mutex_release(g_mutex_sg);
    RT_ASSERT(result == RT_EOK);
}
static rt_err_t nu_pdma_sgtbls_valid(nu_pdma_desc_t head)
{
    uint32_t node_addr;
    nu_pdma_desc_t node = head;

    do
    {
        node_addr = (uint32_t)node;
        if ((node_addr < PDMA->SCATBA) || (node_addr - PDMA->SCATBA) >= NU_PDMA_SG_LIMITED_DISTANCE)
        {
            rt_kprintf("The distance between 0x%08x and 0x%08x is over %d.\n", PDMA->SCATBA, node_addr, NU_PDMA_SG_LIMITED_DISTANCE);
            rt_kprintf("Please use nu_pdma_sgtbls_allocate to allocate a valid sg-table.\n");
            return RT_ERROR;
        }

        node = (nu_pdma_desc_t)(node->NEXT + PDMA->SCATBA);
    }
    while (((uint32_t)node != PDMA->SCATBA) && (node != head));

    return RT_EOK;
}
static void _nu_pdma_transfer(int i32ChannID, uint32_t u32Peripheral, nu_pdma_desc_t head, uint32_t u32IdleTimeout_us)
{
    PDMA_DisableTimeout(PDMA, 1 << i32ChannID);
    PDMA_EnableInt(PDMA, i32ChannID, PDMA_INT_TRANS_DONE);
    nu_pdma_timeout_set(i32ChannID, u32IdleTimeout_us);

    /* Set scatter-gather mode and head */
    PDMA_SetTransferMode(PDMA,
                         i32ChannID,
                         u32Peripheral,
                         (head->NEXT != 0) ? 1 : 0,
                         (uint32_t)head);

    /* If peripheral is M2M, trigger it. */
    if (u32Peripheral == PDMA_MEM)
        PDMA_Trigger(PDMA, i32ChannID);
}
rt_err_t nu_pdma_transfer(int i32ChannID, uint32_t u32DataWidth, uint32_t u32AddrSrc, uint32_t u32AddrDst, int32_t i32TransferCnt, uint32_t u32IdleTimeout_us)
{
    rt_err_t ret = RT_EINVAL;
    nu_pdma_periph_ctl_t *psPeriphCtl = NULL;

    if (!(nu_pdma_chn_mask & (1 << i32ChannID)))
        goto exit_nu_pdma_transfer;

    psPeriphCtl = &nu_pdma_chn_arr[i32ChannID - NU_PDMA_CH_Pos].m_spPeripCtl;

    ret = nu_pdma_desc_setup(i32ChannID,
                             &PDMA->DSCT[i32ChannID],
                             u32DataWidth,
                             u32AddrSrc,
                             u32AddrDst,
                             i32TransferCnt,
                             NULL);
    if (ret != RT_EOK)
        goto exit_nu_pdma_transfer;

    _nu_pdma_transfer(i32ChannID, psPeriphCtl->m_u32Peripheral, &PDMA->DSCT[i32ChannID], u32IdleTimeout_us);

    ret = RT_EOK;

exit_nu_pdma_transfer:
    return -(ret);
}
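
/* Usage sketch (illustrative only, compiled out): a one-shot M2P transfer
 * pushing a buffer to the UART0 transmit data register. UART0->DAT is an
 * assumption about the target SoC's UART register map; the channel is assumed
 * to have been allocated with PDMA_UART0_TX, so the destination stays fixed
 * while the source increments. Note the idle timeout is only honored on
 * channels 0 and 1 (see nu_pdma_timeout_set). */
#if 0
static void uart0_dma_send(int ch, const uint8_t *buf, int len)
{
    nu_pdma_transfer(ch, 8, (uint32_t)buf, (uint32_t)&UART0->DAT, len, 1000);
}
#endif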
rt_err_t nu_pdma_sg_transfer(int i32ChannID, nu_pdma_desc_t head, uint32_t u32IdleTimeout_us)
{
    rt_err_t ret = RT_EINVAL;
    nu_pdma_periph_ctl_t *psPeriphCtl = NULL;

    if (!head)
        goto exit_nu_pdma_sg_transfer;
    else if (!(nu_pdma_chn_mask & (1 << i32ChannID)))
        goto exit_nu_pdma_sg_transfer;
    else if ((ret = nu_pdma_sgtbls_valid(head)) != RT_EOK) /* Check SG-tbls. */
        goto exit_nu_pdma_sg_transfer;

    psPeriphCtl = &nu_pdma_chn_arr[i32ChannID - NU_PDMA_CH_Pos].m_spPeripCtl;

    _nu_pdma_transfer(i32ChannID, psPeriphCtl->m_u32Peripheral, head, u32IdleTimeout_us);

    ret = RT_EOK;

exit_nu_pdma_sg_transfer:
    return -(ret);
}
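
/* Usage sketch (illustrative only, compiled out): chain two descriptors from
 * the SG-table pool into one scatter-gather transaction. The function and
 * parameter names are hypothetical; descriptors must come from
 * nu_pdma_sgtbls_allocate() so they sit within NU_PDMA_SG_LIMITED_DISTANCE
 * of PDMA->SCATBA. */
#if 0
static void m2m_sg_copy(int ch, uint32_t src1, uint32_t dst1, uint32_t src2, uint32_t dst2, int cnt)
{
    nu_pdma_desc_t apsDesc[2];

    if (nu_pdma_sgtbls_allocate(apsDesc, 2) == RT_EOK)
    {
        /* The first node links to the second; the second terminates the chain. */
        nu_pdma_desc_setup(ch, apsDesc[0], 32, src1, dst1, cnt, apsDesc[1]);
        nu_pdma_desc_setup(ch, apsDesc[1], 32, src2, dst2, cnt, NULL);

        nu_pdma_sg_transfer(ch, apsDesc[0], 0);

        /* ... wait for the registered callback to report completion ... */

        nu_pdma_sgtbls_free(apsDesc, 2);
    }
}
#endif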
void PDMA_IRQHandler(void)
{
    int i;

    /* enter interrupt */
    rt_interrupt_enter();

    uint32_t intsts = PDMA_GET_INT_STATUS(PDMA);
    uint32_t abtsts = PDMA_GET_ABORT_STS(PDMA);
    uint32_t tdsts = PDMA_GET_TD_STS(PDMA);
    uint32_t reqto = intsts & (PDMA_INTSTS_REQTOF0_Msk | PDMA_INTSTS_REQTOF1_Msk);
    uint32_t reqto_ch = ((reqto & PDMA_INTSTS_REQTOF0_Msk) ? (1 << 0) : 0x0) | ((reqto & PDMA_INTSTS_REQTOF1_Msk) ? (1 << 1) : 0x0);

    int allch_sts = (reqto_ch | tdsts | abtsts);

    // Abort
    if (intsts & PDMA_INTSTS_ABTIF_Msk)
    {
        // Clear all abort flags
        PDMA_CLR_ABORT_FLAG(PDMA, abtsts);
    }

    // Transfer done
    if (intsts & PDMA_INTSTS_TDIF_Msk)
    {
        // Clear all transfer-done flags
        PDMA_CLR_TD_FLAG(PDMA, tdsts);
    }

    // Timeout
    if (reqto)
    {
        // Clear all timeout flags
        PDMA->INTSTS = reqto;
    }

    // Find the position of first '1' in allch_sts.
    while ((i = nu_ctz(allch_sts)) != 32)
    {
        int ch_mask = (1 << i);

        if (nu_pdma_chn_mask & ch_mask)
        {
            int ch_event = 0;
            nu_pdma_chn_t *dma_chn = nu_pdma_chn_arr + i - NU_PDMA_CH_Pos;

            if (dma_chn->m_pfnCBHandler)
            {
                if (abtsts & ch_mask)
                {
                    ch_event |= NU_PDMA_EVENT_ABORT;
                }
                if (tdsts & ch_mask) ch_event |= NU_PDMA_EVENT_TRANSFER_DONE;
                if (reqto_ch & ch_mask)
                {
                    PDMA_DisableTimeout(PDMA, ch_mask);
                    ch_event |= NU_PDMA_EVENT_TIMEOUT;
                }

                if (dma_chn->m_u32EventFilter & ch_event)
                    dma_chn->m_pfnCBHandler(dma_chn->m_pvUserData, ch_event);

                // Re-arm the timeout after servicing a timeout event.
                if (reqto_ch & ch_mask)
                    nu_pdma_timeout_set(i, nu_pdma_chn_arr[i - NU_PDMA_CH_Pos].m_u32IdleTimeout_us);
            } //if (dma_chn->m_pfnCBHandler)
        } //if (nu_pdma_chn_mask & ch_mask)

        // Clear the served bit.
        allch_sts &= ~ch_mask;
    } //while

    /* leave interrupt */
    rt_interrupt_leave();
}
static void nu_pdma_memfun_actor_init(void)
{
    int i = 0;
    nu_pdma_init();
    for (i = 0; i < NU_PDMA_MEMFUN_ACTOR_MAX; i++)
    {
        rt_memset(&nu_pdma_memfun_actor_arr[i], 0, sizeof(struct nu_pdma_memfun_actor));
        if (-(RT_ERROR) != (nu_pdma_memfun_actor_arr[i].m_i32ChannID = nu_pdma_channel_allocate(PDMA_MEM)))
        {
            nu_pdma_memfun_actor_arr[i].m_psSemMemFun = rt_sem_create("memactor_sem", 0, RT_IPC_FLAG_FIFO);
            RT_ASSERT(nu_pdma_memfun_actor_arr[i].m_psSemMemFun != RT_NULL);
        }
        else
            break;
    }
    if (i)
    {
        nu_pdma_memfun_actor_maxnum = i;
        nu_pdma_memfun_actor_mask = ~(((1 << i) - 1));

        nu_pdma_memfun_actor_pool_sem = rt_sem_create("mempool_sem", nu_pdma_memfun_actor_maxnum, RT_IPC_FLAG_FIFO);
        RT_ASSERT(nu_pdma_memfun_actor_pool_sem != RT_NULL);

        nu_pdma_memfun_actor_pool_lock = rt_mutex_create("mempool_lock", RT_IPC_FLAG_PRIO);
        RT_ASSERT(nu_pdma_memfun_actor_pool_lock != RT_NULL);
    }
}
static void nu_pdma_memfun_cb(void *pvUserData, uint32_t u32Events)
{
    rt_err_t result;
    nu_pdma_memfun_actor_t psMemFunActor = (nu_pdma_memfun_actor_t)pvUserData;
    psMemFunActor->m_u32Result = u32Events;
    result = rt_sem_release(psMemFunActor->m_psSemMemFun);
    RT_ASSERT(result == RT_EOK);
}
static int nu_pdma_memfun_employ(void)
{
    int idx = -1;

    /* Headhunter */
    if (nu_pdma_memfun_actor_pool_sem &&
            (rt_sem_take(nu_pdma_memfun_actor_pool_sem, RT_WAITING_FOREVER) == RT_EOK))
    {
        rt_err_t result;

        result = rt_mutex_take(nu_pdma_memfun_actor_pool_lock, RT_WAITING_FOREVER);
        RT_ASSERT(result == RT_EOK);

        /* Find the position of first '0' in nu_pdma_memfun_actor_mask. */
        idx = nu_cto(nu_pdma_memfun_actor_mask);
        if (idx != 32)
        {
            nu_pdma_memfun_actor_mask |= (1 << idx);
        }
        else
        {
            idx = -1;
        }

        result = rt_mutex_release(nu_pdma_memfun_actor_pool_lock);
        RT_ASSERT(result == RT_EOK);
    }
    return idx;
}
static rt_size_t nu_pdma_memfun(void *dest, void *src, uint32_t u32DataWidth, unsigned int u32TransferCnt, nu_pdma_memctrl_t eMemCtl)
{
    nu_pdma_memfun_actor_t psMemFunActor = NULL;
    int idx;
    rt_size_t ret = 0;
    rt_uint32_t u32Offset = 0;
    rt_uint32_t u32TxCnt = 0;

    while (1)
    {
        rt_err_t result;

        /* Employ an actor */
        if ((idx = nu_pdma_memfun_employ()) < 0)
            continue;

        psMemFunActor = &nu_pdma_memfun_actor_arr[idx];

        do
        {
            u32TxCnt = (u32TransferCnt > NU_PDMA_MAX_TXCNT) ? NU_PDMA_MAX_TXCNT : u32TransferCnt;

            /* Set PDMA memory control to eMemCtl. */
            nu_pdma_channel_memctrl_set(psMemFunActor->m_i32ChannID, eMemCtl);

            /* Register ISR callback function */
            nu_pdma_callback_register(psMemFunActor->m_i32ChannID, nu_pdma_memfun_cb, (void *)psMemFunActor, NU_PDMA_EVENT_ABORT | NU_PDMA_EVENT_TRANSFER_DONE);

            psMemFunActor->m_u32Result = 0;

            /* Trigger the transfer */
            nu_pdma_transfer(psMemFunActor->m_i32ChannID,
                             u32DataWidth,
                             (eMemCtl & 0x2ul) ? (uint32_t)src + u32Offset : (uint32_t)src,   /* Increment source address or not. */
                             (eMemCtl & 0x1ul) ? (uint32_t)dest + u32Offset : (uint32_t)dest, /* Increment destination address or not. */
                             u32TxCnt,
                             0);

            /* Wait for completion. */
            result = rt_sem_take(psMemFunActor->m_psSemMemFun, RT_WAITING_FOREVER);
            RT_ASSERT(result == RT_EOK);

            /* Account the full count on NU_PDMA_EVENT_TRANSFER_DONE;
               otherwise count only what actually moved. */
            if (psMemFunActor->m_u32Result & NU_PDMA_EVENT_TRANSFER_DONE)
            {
                ret += u32TxCnt;
            }
            else
            {
                ret += (u32TxCnt - nu_pdma_non_transfer_count_get(psMemFunActor->m_i32ChannID));
            }

            /* Terminate the channel on an ABORT event */
            if (psMemFunActor->m_u32Result & NU_PDMA_EVENT_ABORT)
            {
                nu_pdma_channel_terminate(psMemFunActor->m_i32ChannID);
                break;
            }

            u32TransferCnt -= u32TxCnt;
            u32Offset += u32TxCnt * (u32DataWidth / 8);
        }
        while (u32TransferCnt > 0);

        result = rt_mutex_take(nu_pdma_memfun_actor_pool_lock, RT_WAITING_FOREVER);
        RT_ASSERT(result == RT_EOK);

        nu_pdma_memfun_actor_mask &= ~(1 << idx);

        result = rt_mutex_release(nu_pdma_memfun_actor_pool_lock);
        RT_ASSERT(result == RT_EOK);

        /* Fire the actor */
        result = rt_sem_release(nu_pdma_memfun_actor_pool_sem);
        RT_ASSERT(result == RT_EOK);

        break;
    }
    return ret;
}
rt_size_t nu_pdma_mempush(void *dest, void *src, uint32_t data_width, unsigned int transfer_count)
{
    if (data_width == 8 || data_width == 16 || data_width == 32)
        return nu_pdma_memfun(dest, src, data_width, transfer_count, eMemCtl_SrcInc_DstFix);
    return 0;
}
void *nu_pdma_memcpy(void *dest, void *src, unsigned int count)
{
    int i = 0;
    uint32_t u32Offset = 0;
    uint32_t u32Remaining = count;

    for (i = 4; (i > 0) && (u32Remaining > 0); i >>= 1)
    {
        uint32_t u32src = (uint32_t)src + u32Offset;
        uint32_t u32dest = (uint32_t)dest + u32Offset;

        if (((u32src % i) == (u32dest % i)) &&
                ((u32src % i) == 0) &&
                (RT_ALIGN_DOWN(u32Remaining, i) >= i))
        {
            uint32_t u32TXCnt = u32Remaining / i;
            if (u32TXCnt != nu_pdma_memfun((void *)u32dest, (void *)u32src, i * 8, u32TXCnt, eMemCtl_SrcInc_DstInc))
                goto exit_nu_pdma_memcpy;

            u32Offset += (u32TXCnt * i);
            u32Remaining -= (u32TXCnt * i);
        }
    }

    if (count == u32Offset)
        return dest;

exit_nu_pdma_memcpy:
    return NULL;
}
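
/* Usage sketch (illustrative only, compiled out): nu_pdma_memcpy() returns
 * dest on full completion and NULL otherwise, so falling back to a CPU copy
 * is a reasonable pattern. The wrapper name is hypothetical. */
#if 0
static void copy_buffer(void *dst, void *src, unsigned int len)
{
    if (nu_pdma_memcpy(dst, src, len) == NULL)
    {
        rt_memcpy(dst, src, len);   /* Fall back to a CPU copy if the PDMA path fails. */
    }
}
#endif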
/**
 * PDMA memfun actor initialization
 */
int rt_hw_pdma_memfun_init(void)
{
    nu_pdma_memfun_actor_init();
    return 0;
}
INIT_DEVICE_EXPORT(rt_hw_pdma_memfun_init);

#endif // #if defined(BSP_USING_PDMA)