dmad.c 74 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447244824492450245124522453245424552456245724582459246024612462246324642465246624672468246924702471247224732474247524762477247824792480248124822483248424852486248724882489249024912492249324942495249624972498249925002501250225032504250525062507250825092510251125122513
  1. /*****************************************************************************
  2. *
  3. * Copyright Andes Technology Corporation 2007-2008
  4. * All Rights Reserved.
  5. *
  6. * Revision History:
  7. *
  8. * Aug.21.2007 Created.
  9. ****************************************************************************/
  10. /*****************************************************************************
  11. *
  12. * FILE NAME VERSION
  13. *
  14. * dmad.c
  15. *
  16. * DESCRIPTION
  17. *
  18. * DMA controller driver internal supplement library.
  19. *
  20. * DATA STRUCTURES
  21. *
  22. * None
  23. *
  24. * DEPENDENCIES
  25. *
  26. * dmad.h
  27. *
  28. ****************************************************************************/
  29. #include "dmad.h"
  30. #include "cache.h"
  31. #include "bsp_hal.h"
  32. // #define DMAD_POLLING
  33. #define DMAD_AHB_MAX_CHANNELS DMAC_MAX_CHANNELS
  34. #define DMAD_APB_MAX_CHANNELS APBBR_DMA_MAX_CHANNELS
  35. #define DMAD_DRB_POOL_SIZE 128 /* 64 // 128*/
  36. //#define DMAD_HISR_PRIORITY 0 // 0: highest, 2: lowest
  37. #define DMAD_HISR_STACK_SIZE 4096 // Please align to 32-bit
  38. #ifdef CONFIG_PLAT_AG101P_4GB
  39. #define NTC0_BONDER_START 0x00000000
  40. #define NTC0_BONDER_END 0x40000000
  41. #else
  42. #define NTC0_BONDER_START 0x00000000
  43. #define NTC0_BONDER_END 0x00400000
  44. #endif
/*
 * The DMA module is shared between drivers and has no dedicated
 * initialization entry point; for this reason its HISR stack is
 * declared statically (data/bss section) instead of being allocated
 * at init time.
 *
 * NOTE(review): DMAD_HISR_STACK_SIZE (4096) is commented "align to
 * 32-bit", which reads like a size in bytes, yet it is used here as an
 * element count of uint32_t (16 KB total). Over-allocation is harmless,
 * but confirm the intended unit against the HISR creation call.
 */
static uint32_t dmad_hisr_stack[DMAD_HISR_STACK_SIZE];
/* Driver data structure, one instance per system */
typedef struct DMAD_DATA_STRUCT{
	/* Driver data initialization flags */
	uint32_t init;                /* init flag for this object */
	uint32_t drq_pool_mutex_init; /* init flag for DMA queue pool access control object */
	uint32_t ahb_lisr_registered; /* init flag for AHB DMA LISR (low-level ISR) */
	uint32_t apb_lisr_registered; /* init flag for APB DMA LISR (low-level ISR) */
	uint32_t hisr_registered;     /* init flag for HISR (deferred/high-level ISR) */
	/* DMA queue pool access control object */
	hal_mutex_t drq_pool_mutex;   /* Mutex for access control of DRQ (DMA Request Queue) pool between tasks */
	/* DMA HISR resources */
	hal_bh_t hisr;                /* HISR kernel object, used to perform deferred tasks of DMA LISR */
	uint32_t hisr_as;             /* HISR activation state: bitmask recording which channel activated
	                               * the single shared HISR (see _dmad_ahb_lisr: bit = channel number) */
} DMAD_DATA;
/* Driver data structure instance, one instance per system */
static DMAD_DATA dmad;
/* DMA request queue, one instance per channel */
typedef struct DMAD_DRQ_STRUCT{
	uint32_t allocated;     /* Flag to indicate the channel allocation status */
	DMAD_DATA *dmad;        /* Pointer back to the driver object (DMAD_DATA) */
	uint32_t channel_base;  /* DMA channel register base address */
	hal_mutex_t drb_pool_mutex; /* Mutex for access control of DRB (DMA Request Block) pool between tasks */
	DMAD_DRB drb_pool[DMAD_DRB_POOL_SIZE]; /* DRB (DMA Request Block) pool for this channel;
	                                        * index 0 is reserved as the list terminator (see
	                                        * _dmad_attach/_detach helpers, which treat node 0 as "none") */
	hal_semaphore_t drb_sem;
	/*
	 * The four doubly-linked lists below are threaded through drb_pool
	 * via the prev/next indices of each DRB; 0 marks an empty list.
	 */
	uint32_t fre_head; /* Free (un-allocated, completed) DRB linked list head */
	uint32_t fre_tail; /* Free (un-allocated, completed) DRB linked list tail */
	uint32_t rdy_head; /* Ready (allocated, un-submitted) DRB linked list head */
	uint32_t rdy_tail; /* Ready (allocated, un-submitted) DRB linked list tail */
	uint32_t sbt_head; /* Submitted DRB linked list head */
	uint32_t sbt_tail; /* Submitted DRB linked list tail */
	uint32_t cpl_head; /* Completed (those need to notify client) DRB linked list head */
	uint32_t cpl_tail; /* Completed (those need to notify client) DRB linked list tail */
	/*
	 * Cache maintenance callbacks, chosen per transfer direction:
	 *
	 * source    destination   writeback              invalidate             function
	 * ---------------------------------------------------------------------------------------------------
	 * memory -> memory        v (for src data)       v (for dest readback)  NDS_DCache_Invalidate_Flush()
	 * device -> memory        v (for invalidate)     v (for dest readback)  NDS_DCache_Invalidate_Flush()
	 * memory -> device        v (for src data)       x                      NDS_DCache_Flush()
	 * device -> device        x                      x                      null
	 */
	void (*dc_writeback)(unsigned long start, unsigned long end);
	void (*dc_invalidate)(unsigned long start, unsigned long end);
} DMAD_DRQ;
/* Per-channel DMA request queues for the AHB DMAC */
static DMAD_DRQ ahb_drq_pool[DMAD_AHB_MAX_CHANNELS];
/* Per-channel DMA request queues for the APB bridge DMA */
static DMAD_DRQ apb_drq_pool[DMAD_APB_MAX_CHANNELS];
/* AHB DMAC channel re-route table structure */
typedef struct _DMAD_AHB_CH_ROUTE {
	// uint32_t dev_reqn; /* device req/gnt number */
	uint32_t route_cr; /* routing control register address (0 = no routing register for this reqn) */
} DMAD_AHB_CH_ROUTE;
/* AHB DMAC channel re-route table. Indexed by AHB DMAC req/ack number. */
static DMAD_AHB_CH_ROUTE dmad_ahb_ch_route_table[] = {
	{ 0 },
	{ PMU_CFC_REQACK_CFG },     /* CFC REQ/ACK connection configuration register */
	{ PMU_SSP1_REQACK_CFG },    /* SSP1 REQ/ACK connection configuration register */
	{ PMU_UART1TX_REQACK_CFG }, /* UART1 TX REQ/ACK connection configuration register */
	{ PMU_UART1RX_REQACK_CFG }, /* UART1 RX REQ/ACK connection configuration register */
	{ PMU_UART2TX_REQACK_CFG }, /* UART2 TX REQ/ACK connection configuration register */
	{ PMU_UART2RX_REQACK_CFG }, /* UART2 RX REQ/ACK connection configuration register */
	{ PMU_SDC_REQACK_CFG },     /* SDC REQ/ACK connection configuration register */
	{ PMU_I2SAC97_REQACK_CFG }, /* I2S/AC97 REQ/ACK connection configuration register */
	{ 0 },
	{ 0 },
	{ PMU_USB_REQACK_CFG },     /* USB 2.0 REQ/ACK connection configuration register */
	{ 0 },
	{ 0 },
	{ PMU_EXT0_REQACK_CFG },    /* External device0 REQ/ACK connection configuration register */
	{ PMU_EXT1_REQACK_CFG },    /* External device1 REQ/ACK connection configuration register */
};
/* APB Bridge DMA request number re-route table entry */
typedef struct _DMAD_APB_REQN_ROUTE{
	// uint32_t apb_reqn; /* APB device req/gnt number */
	uint32_t ahb_reqn; /* AHB DMAC req/ack number */
	uint32_t bus_sel;  /* Address selection: APBBR_ADDRSEL_APB(0) or APBBR_ADDRSEL_AHB(1) */
} DMAD_APB_REQN_ROUTE;
/* APB Bridge DMA request number re-route table. Indexed by APB DMA req/gnt number. */
static DMAD_APB_REQN_ROUTE dmad_apb_reqn_route_table[] = {
	{ 0x00, APBBR_ADDRSEL_AHB }, /* APBBR_REQN_NONE */
	{ 0x01, APBBR_ADDRSEL_APB }, /* APBBR_REQN_CFC */
	{ 0x02, APBBR_ADDRSEL_APB }, /* APBBR_REQN_SSP */
	{ 0x00, APBBR_ADDRSEL_AHB }, /* APB reserved */
	{ 0x05, APBBR_ADDRSEL_APB }, /* APBBR_REQN_BTUART (AHB TX reqn: 5, AHB RX reqn: 6) */
	{ 0x07, APBBR_ADDRSEL_APB }, /* APBBR_REQN_SDC */
	{ 0x08, APBBR_ADDRSEL_APB }, /* APBBR_REQN_I2SAC97 */
	{ 0x00, APBBR_ADDRSEL_AHB }, /* APB reserved */
	{ 0x03, APBBR_ADDRSEL_APB }, /* APBBR_REQN_STUART (AHB TX reqn: 3, AHB RX reqn: 4) */
	{ 0x00, APBBR_ADDRSEL_AHB }, /* APB reserved (comment out following fields to save code size) */
	{ 0x00, APBBR_ADDRSEL_AHB }, /* APB reserved */
	{ 0x00, APBBR_ADDRSEL_AHB }, /* APB reserved */
	{ 0x00, APBBR_ADDRSEL_AHB }, /* APB reserved */
	{ 0x00, APBBR_ADDRSEL_AHB }, /* APB reserved */
	{ 0x00, APBBR_ADDRSEL_AHB }, /* APB reserved */
	{ 0x00, APBBR_ADDRSEL_AHB }, /* APB reserved */
};
/* APB DMA request numbers per platform (XC5 vs. XC7). */
/* Used by _dmad_get_reqn() to map a generic device id to the platform's APB reqn. */
typedef struct _APB_DMA_REQN {
	uint32_t xc5_reqn;
	uint32_t xc7_reqn;
} APB_DMA_REQN;
static APB_DMA_REQN apb_dma_reqn_table[] = {
	{APBBR_REQN_NONE, APBBR_REQN_NONE},             //APB_REQN_NONE
	/* REQN in XC5 */
	{XC5_APBBR_REQN_CFC, APBBR_REQN_RESERVED},      //APB_REQN_CFC
	{XC5_APBBR_REQN_SSP, APBBR_REQN_RESERVED},      //APB_REQN_SSP
	{XC5_APBBR_REQN_BTUART, APBBR_REQN_RESERVED},   //APBBR_REQN_BTUART
	{XC5_APBBR_REQN_I2SAC97, XC7_APBBR_REQN_I2SAC97}, //APB_REQN_I2SAC97
	{XC5_APBBR_REQN_STUART, APBBR_REQN_RESERVED},   //APB_REQN_STUART
	{XC5_APBBR_REQN_I2S, APBBR_REQN_RESERVED},      //APB_REQN_I2S
	{XC5_APBBR_REQN_SSP2, APBBR_REQN_RESERVED},     //APB_REQN_SSP2
	{APBBR_REQN_RESERVED, XC7_APBBR_REQN_EXTREQ0},  //APB_REQN_EXT0
	{APBBR_REQN_RESERVED, XC7_APBBR_REQN_EXTREQ1},  //APB_REQN_EXT1
	/* REQN in XC7 */
	{APBBR_REQN_RESERVED, XC7_APBBR_REQN_SSP1TX},   //APB_REQN_SSP1TX
	{APBBR_REQN_RESERVED, XC7_APBBR_REQN_SSP1RX},   //APB_REQN_SSP1RX
	{APBBR_REQN_RESERVED, XC7_APBBR_REQN_UART2TX},  //APB_REQN_UART2TX
	{APBBR_REQN_RESERVED, XC7_APBBR_REQN_UART2RX},  //APB_REQN_UART2RX
	{APBBR_REQN_RESERVED, XC7_APBBR_REQN_UART4TX},  //APB_REQN_UART4TX
	{APBBR_REQN_RESERVED, XC7_APBBR_REQN_UART4RX},  //APB_REQN_UART4RX
	{XC5_APBBR_REQN_SDC, XC7_APBBR_REQN_SDC},       //APB_REQN_SDC
	{APBBR_REQN_RESERVED, XC7_APBBR_REQN_SSP2TX},   //APB_REQN_SSP2TX
	{APBBR_REQN_RESERVED, XC7_APBBR_REQN_SSP2RX},   //APB_REQN_SSP2RX
	{APBBR_REQN_RESERVED, XC7_APBBR_REQN_USB_2_0},  //APB_REQN_USB_2_0
	{APBBR_REQN_RESERVED, XC7_APBBR_REQN_USB_1_1_EP1}, //APB_REQN_USB_1_1_EP1
	{APBBR_REQN_RESERVED, XC7_APBBR_REQN_USB_1_1_EP2}, //APB_REQN_USB_1_1_EP2
	{APBBR_REQN_RESERVED, XC7_APBBR_REQN_USB_1_1_EP3}, //APB_REQN_USB_1_1_EP3
	{APBBR_REQN_RESERVED, XC7_APBBR_REQN_USB_1_1_EP4}, //AHB_REQN_USB_1_1_EP4
	{XC5_APBBR_REQN_MAX, XC7_APBBR_REQN_MAX},       //APB_REQN_MAX
};
/* AHB DMA request numbers per platform (XC5 vs. XC7). */
/* Used by _dmad_get_reqn() to map a generic device id to the platform's AHB reqn. */
typedef struct _AHB_DMA_REQN {
	uint32_t xc5_reqn;
	uint32_t xc7_reqn;
} AHB_DMA_REQN;
static AHB_DMA_REQN ahb_dma_reqn_table[] = {
	{AHB_REQN_NONE, AHB_REQN_NONE},               //AHB_REQN_NONE
	/* REQN in XC5 */
	{XC5_AHB_REQN_CFC, AHB_REQN_RESERVED},        //AHB_REQN_CFC
	{XC5_AHB_REQN_SSP, AHB_REQN_RESERVED},        //AHB_REQN_SSP
	{XC5_AHB_REQN_UART1TX, AHB_REQN_RESERVED},    //AHB_REQN_UART1TX
	{XC5_AHB_REQN_UART1RX, AHB_REQN_RESERVED},    //AHB_REQN_UART1RX
	{XC5_AHB_REQN_I2SAC97, XC7_AHB_REQN_I2SAC97}, //AHB_REQN_I2SAC97
	{XC5_AHB_REQN_USB, AHB_REQN_RESERVED},        //AHB_REQN_USB
	{XC5_AHB_REQN_EXT0, XC7_AHB_REQN_EXTREQ0},    //AHB_REQN_EXT0
	{XC5_AHB_REQN_EXT1, XC7_AHB_REQN_EXTREQ1},    //AHB_REQN_EXT1
	/* REQN in XC7 */
	{AHB_REQN_RESERVED, XC7_AHB_REQN_SSP1TX},     //AHB_REQN_SSP1TX
	{AHB_REQN_RESERVED, XC7_AHB_REQN_SSP1RX},     //AHB_REQN_SSP1RX
	{AHB_REQN_RESERVED, XC7_AHB_REQN_UART2TX},    //AHB_REQN_UART2TX
	{AHB_REQN_RESERVED, XC7_AHB_REQN_UART2RX},    //AHB_REQN_UART2RX
	{AHB_REQN_RESERVED, XC7_AHB_REQN_UART4TX},    //AHB_REQN_UART4TX
	{AHB_REQN_RESERVED, XC7_AHB_REQN_UART4RX},    //AHB_REQN_UART4RX
	{XC5_AHB_REQN_SDC, XC7_AHB_REQN_SDC},         //AHB_REQN_SDC
	{AHB_REQN_RESERVED, XC7_AHB_REQN_SSP2TX},     //AHB_REQN_SSP2TX
	{AHB_REQN_RESERVED, XC7_AHB_REQN_SSP2RX},     //AHB_REQN_SSP2RX
	{AHB_REQN_RESERVED, XC7_AHB_REQN_USB_2_0},    //AHB_REQN_USB_2_0
	{AHB_REQN_RESERVED, XC7_AHB_REQN_USB_1_1_EP1}, //AHB_REQN_USB_1_1_EP1
	{AHB_REQN_RESERVED, XC7_AHB_REQN_USB_1_1_EP2}, //AHB_REQN_USB_1_1_EP2
	{AHB_REQN_RESERVED, XC7_AHB_REQN_USB_1_1_EP3}, //AHB_REQN_USB_1_1_EP3
	{AHB_REQN_RESERVED, XC7_AHB_REQN_USB_1_1_EP4}, //AHB_REQN_USB_1_1_EP4
};
  217. /*****************************************************************************
  218. * FUNCTION
  219. *
  220. * _dmad_get_reqn
  221. *
  222. * DESCRIPTION
  223. *
  224. * Get DMA request number from various platform.
  225. *
  226. * INPUTS
  227. *
  228. * dma_controller : (in) AHB or APB
  229. * device : (in) Device and peripheral.
  230. *
  231. * OUTPUTS
  232. *
  233. * none
  234. *
  235. ****************************************************************************/
  236. uint32_t _dmad_get_reqn(uint32_t dma_controller, uint32_t device){
  237. uint32_t reqn;
  238. uint32_t platform_id = IN32(PMU_IDNMBR0);
  239. if (dma_controller == DMAD_DMAC_APB_CORE){ /* APB */
  240. if ((platform_id & PRODUCT_ID_MASK) == AG101P_EMERALD)
  241. reqn = apb_dma_reqn_table[device].xc7_reqn;
  242. else
  243. reqn = apb_dma_reqn_table[device].xc5_reqn;
  244. } else { /* AHB */
  245. if ((platform_id & PRODUCT_ID_MASK) == AG101P_EMERALD)
  246. reqn = ahb_dma_reqn_table[device].xc7_reqn;
  247. else
  248. reqn = ahb_dma_reqn_table[device].xc5_reqn;
  249. }
  250. return reqn;
  251. }
  252. /*****************************************************************************
  253. * FUNCTION
  254. *
  255. * _dmad_detach_node
  256. *
  257. * DESCRIPTION
  258. *
  259. * Detach a DRB node from a specified list. The list is acknowledged in the
  260. * form of a head node and a tail one.
  261. *
  262. * INPUTS
  263. *
  264. * drb_pool : (in) The DRB pool of a DMA queue for a DMA channel
  265. * head : (in/out) Pointer to the head node of the list
  266. * tail : (in/out) Pointer to the tail node of the list
  267. * node : (in) The node to detach from the list
  268. *
  269. * OUTPUTS
  270. *
  271. * none
  272. *
  273. ****************************************************************************/
  274. static void _dmad_detach_node(DMAD_DRB *drb_pool, uint32_t *head, uint32_t *tail, uint32_t node){
  275. if (drb_pool[node].prev != 0){
  276. /* prev->next = this->next (= 0, if this is a tail) */
  277. drb_pool[drb_pool[node].prev].next = drb_pool[node].next;
  278. }
  279. else {
  280. /* this node is head, move head to next node (= 0, if this is the only one node) */
  281. *head = drb_pool[node].next;
  282. }
  283. if (drb_pool[node].next != 0){
  284. /* next->prev = this->prev (= 0, if this is a head) */
  285. drb_pool[drb_pool[node].next].prev = drb_pool[node].prev;
  286. }
  287. else {
  288. /* this node is tail, move tail to previous node (= 0, if this is the only one node) */
  289. *tail = drb_pool[node].prev;
  290. }
  291. drb_pool[node].prev = drb_pool[node].next = 0;
  292. }
  293. /*****************************************************************************
  294. * FUNCTION
  295. *
  296. * _dmad_detach_head
  297. *
  298. * DESCRIPTION
  299. *
  300. * Detach a DRB node from the head of a specified list. The list is
  301. * acknowledged in the form of a head node and a tail one.
  302. *
  303. * INPUTS
  304. *
  305. * drb_pool : (in) The DRB pool of a DMA queue for a DMA channel
  306. * head : (in/out) Pointer to the head node of the list
  307. * tail : (in/out) Pointer to the tail node of the list
  308. * drb : (out) Reference to the detached node pointer
  309. *
  310. * OUTPUTS
  311. *
  312. * none
  313. *
  314. ****************************************************************************/
  315. static void _dmad_detach_head(DMAD_DRB *drb_pool, uint32_t *head, uint32_t *tail, DMAD_DRB **drb){
  316. if (*head == 0){
  317. *drb = HAL_NULL;
  318. return;
  319. }
  320. *drb = &drb_pool[*head];
  321. if ((*drb)->next != 0){
  322. /* next->prev = this->prev (= 0, if this is a head) */
  323. drb_pool[(*drb)->next].prev = 0;
  324. /* prev->next = this->next (do nothing, if this is a head) */
  325. /* head = this->next */
  326. *head = (*drb)->next;
  327. }
  328. else {
  329. /* head = tail = 0 */
  330. *head = 0;
  331. *tail = 0;
  332. }
  333. /* this->prev = this->next = 0 (do nothing, if save code size) */
  334. (*drb)->prev = (*drb)->next = 0;
  335. }
  336. /*****************************************************************************
  337. * FUNCTION
  338. *
  339. * _dmad_detach_tail
  340. *
  341. * DESCRIPTION
  342. *
  343. * Detach a DRB node from the tail of a specified list. The list is
  344. * acknowledged in the form of a head node and a tail one.
  345. *
  346. * INPUTS
  347. *
  348. * drb_pool : (in) The DRB pool of a DMA queue for a DMA channel
  349. * head : (in/out) Pointer to the head node of the list
  350. * tail : (in/out) Pointer to the tail node of the list
  351. * drb : (out) Reference to the detached node pointer
  352. *
  353. * OUTPUTS
  354. *
  355. * none
  356. *
  357. ****************************************************************************/
  358. static inline void _dmad_detach_tail(DMAD_DRB *drb_pool, uint32_t *head, uint32_t *tail, DMAD_DRB **drb){
  359. if (*tail == 0){
  360. *drb = HAL_NULL;
  361. return;
  362. }
  363. *drb = &drb_pool[*tail];
  364. if ((*drb)->prev != 0){
  365. /* prev->next = this->next (= 0, if this is a tail) */
  366. drb_pool[(*drb)->prev].next = 0;
  367. /* next->prev = this->prev (do nothing, if this is a tail) */
  368. /* tail = this->prev */
  369. *tail = (*drb)->prev;
  370. }
  371. else {
  372. /* head = tail = 0 */
  373. *head = 0;
  374. *tail = 0;
  375. }
  376. /* this->next = this->prev = 0 (do nothing, if save code size) */
  377. (*drb)->prev = (*drb)->next = 0;
  378. }
  379. /*****************************************************************************
  380. * FUNCTION
  381. *
  382. * _dmad_attach_head
  383. *
  384. * DESCRIPTION
  385. *
  386. * Attach a DRB node to the head of a specified list. The list is
  387. * acknowledged in the form of a head node and a tail one.
  388. *
  389. * INPUTS
  390. *
  391. * drb_pool : (in) The DRB pool of a DMA queue for a DMA channel
  392. * head : (in/out) Pointer to the head node of the list
  393. * tail : (in/out) Pointer to the tail node of the list
  394. * drb : (in) The node number of the node to attach
  395. *
  396. * OUTPUTS
  397. *
  398. * none
  399. *
  400. ****************************************************************************/
  401. static inline void _dmad_attach_head(DMAD_DRB *drb_pool, uint32_t *head, uint32_t *tail, uint32_t node){
  402. if (*head != 0){
  403. drb_pool[*head].prev = node; /* head->prev = this */
  404. drb_pool[node].next = *head; /* this->next = head */
  405. drb_pool[node].prev = 0; /* this->prev = 0 */
  406. *head = node; /* head = node */
  407. }
  408. else {
  409. *head = *tail = node; /* head = tail = node */
  410. drb_pool[node].prev = drb_pool[node].next = 0;
  411. }
  412. }
  413. /*****************************************************************************
  414. * FUNCTION
  415. *
  416. * _dmad_attach_tail
  417. *
  418. * DESCRIPTION
  419. *
  420. * Attach a DRB node to the tail of a specified list. The list is
  421. * acknowledged in the form of a head node and a tail one.
  422. *
  423. * INPUTS
  424. *
  425. * drb_pool : (in) The DRB pool of a DMA queue for a DMA channel
  426. * head : (in/out) Pointer to the head node of the list
  427. * tail : (in/out) Pointer to the tail node of the list
  428. * drb : (in) The node number of the node to attach
  429. *
  430. * OUTPUTS
  431. *
  432. * none
  433. *
  434. ****************************************************************************/
  435. static void _dmad_attach_tail(DMAD_DRB *drb_pool, uint32_t *head, uint32_t *tail, uint32_t node){
  436. if (*tail != 0){
  437. drb_pool[*tail].next = node; /* tail->next = this */
  438. drb_pool[node].prev = *tail; /* this->prev = tail */
  439. drb_pool[node].next = 0; /* this->next = 0 */
  440. *tail = node; /* tail = node */
  441. }
  442. else {
  443. *head = *tail = node; /* head = tail = node */
  444. drb_pool[node].prev = drb_pool[node].next = 0;
  445. }
  446. }
  447. /*****************************************************************************
  448. * FUNCTION
  449. *
  450. * _dmad_ahb_lisr
  451. *
  452. * DESCRIPTION
  453. *
  454. * This is the ISR that services all AHB DMA channels on the NDS32
  455. * Integrator.
  456. *
  457. * NOTE
  458. * Currently this ISR processes one channel at a time. This replies the
  459. * assumption that the ISR will be invoked again as long as it's status
  460. * bit remains not-cleared, if interrupts for multiple channels happens
  461. * simultaneously.
  462. *
  463. * [todo] Above assumption may not be the real world case. Check it and
  464. * implement processing of multiple channels at once in the ISR, if
  465. * necessary.
  466. *
  467. * INPUTS
  468. *
  469. * vector : Interrupt vector number
  470. *
  471. * OUTPUTS
  472. *
  473. * none
  474. *
  475. ****************************************************************************/
  476. static void _dmad_ahb_lisr(int vector){
  477. DMAD_DRQ *drq;
  478. DMAD_DRB *drb;
  479. uint32_t channel; /* interrupt channel number */
  480. uint8_t tc_int = 0; /* interrupt reason is terminal count */
  481. uint8_t err_int = 0; /* interrupt reason is DMA error */
  482. //uint8_t abt_int = 0; /* interrupt reason is abort DMA transfer of this channel */
  483. uint32_t prv_msk = 0;
  484. if (vector != INTC_DMA_BIT)
  485. hal_system_error(HAL_ERR_UNHANDLED_INTERRUPT);
  486. prv_msk = hal_intc_irq_mask(IRQ_DMA_VECTOR);
  487. /* Check DMA status register to get channel number */
  488. for (channel = 0; channel < DMAD_AHB_MAX_CHANNELS; ++channel){
  489. if (GETB32(DMAC_INT_TC, channel)){
  490. tc_int = 1; /* Mark as TC int */
  491. SETB32(DMAC_INT_TC_CLR, channel); /* DMAC INT TC status clear */
  492. hal_intc_irq_clean(IRQ_DMA_VECTOR);
  493. break;
  494. }
  495. }
  496. /* Perform DMA error checking if no valid channel was found who assert the TC signal. */
  497. if (channel == DMAD_AHB_MAX_CHANNELS){
  498. for (channel = 0; channel < DMAD_AHB_MAX_CHANNELS; ++channel){
  499. if (GETB32(DMAC_INT_ERRABT, channel + DMAC_INT_ERR_SHIFT)){
  500. err_int = 1; /* Mark as ERR int */
  501. SETB32(DMAC_INT_ERRABT_CLR, channel + DMAC_INT_ERR_CLR_SHIFT); /* DMAC INT ERR status clear */
  502. hal_intc_irq_clean(IRQ_DMA_VECTOR);
  503. break;
  504. }
  505. }
  506. if (channel == DMAD_AHB_MAX_CHANNELS){
  507. for (channel = 0; channel < DMAD_AHB_MAX_CHANNELS; ++channel){
  508. if (GETB32(DMAC_INT_ERRABT, channel + DMAC_INT_ABT_SHIFT)){
  509. //abt_int = 1; /* Mark as ABT int */
  510. SETB32(DMAC_INT_ERRABT_CLR, channel + DMAC_INT_ABT_CLR_SHIFT); /* DMAC INT ABT status clear */
  511. hal_intc_irq_clean(IRQ_DMA_VECTOR);
  512. break;
  513. }
  514. }
  515. if (channel == DMAD_AHB_MAX_CHANNELS){
  516. /* Unknown reason ... (check why) */
  517. hal_system_error(HAL_ERR_UNHANDLED_INTERRUPT); /*return; */
  518. }
  519. }
  520. }
  521. /* Lookup channel's DRQ (DMA Request Queue) */
  522. drq = (DMAD_DRQ *)&ahb_drq_pool[channel];
  523. /* DMAC */
  524. /* Stop DMA channel temporarily */
  525. CLRB32(drq->channel_base + DMAC_CSR_OFFSET, DMAC_CSR_CH_EN_BIT);
  526. /*
  527. * Lookup/detach latest submitted DRB (DMA Request Block) from
  528. * the DRQ (DMA Request Queue), so ISR could kick off next DRB
  529. */
  530. _dmad_detach_head(drq->drb_pool, &drq->sbt_head, &drq->sbt_tail, &drb);
  531. if (drb == HAL_NULL){
  532. /* Check why DMA is triggered while submitted list is empty. */
  533. hal_system_error(HAL_ERR_UNHANDLED_INTERRUPT); /*return; */
  534. }
  535. /* Enable nested interrupt */
  536. GIE_ENABLE();
  537. /* Notify that new node is going to be available in the free list */
  538. if (drb->completion_sem != HAL_NULL){
  539. dmad.hisr_as |= (1 << channel); /* [15:0] AHB channel indicator */
  540. hal_raise_bh(&dmad.hisr); /* Call HISR to complete deferred tasks */
  541. }
  542. /* Process DRBs according to interrupt reason */
  543. if (tc_int){
  544. /* Mark DRB state as completed */
  545. drb->state = DMAD_DRB_STATE_COMPLETED;
  546. /* destination is memory */
  547. if (drq->dc_invalidate != HAL_NULL && drb->dst_index == DMAC_REQN_NONE)
  548. drq->dc_invalidate((unsigned long)(drb->dst_addr),
  549. (unsigned long)(drb->dst_addr) + (unsigned long)(drb->transfer_size));
  550. _dmad_attach_tail(drq->drb_pool, &drq->cpl_head, &drq->cpl_tail, drb->node);
  551. /* Check whether there are pending requests in the DRQ */
  552. if (drq->sbt_head != 0){
  553. drb = &drq->drb_pool[drq->sbt_head]; /* Lookup next DRB (DMA Request Block) */
  554. /* pre-submission-programming */
  555. if (drb->psp)
  556. drb->psp(drb->data);
  557. /* Kick-off DMA for next DRB */
  558. /* - Source and destination address */
  559. OUT32(drq->channel_base + DMAC_SRC_ADDR_OFFSET, drb->src_addr);
  560. OUT32(drq->channel_base + DMAC_DST_ADDR_OFFSET, drb->dst_addr);
  561. /* - Transfer size (in units of source width) */
  562. OUT32(drq->channel_base + DMAC_SIZE_OFFSET, drb->req_size);
  563. /* - Re-enable DMA channel */
  564. SETB32(drq->channel_base + DMAC_CSR_OFFSET, DMAC_CSR_CH_EN_BIT);
  565. }
  566. }
  567. else if (err_int){
  568. /* Mark DRB state as error */
  569. drb->state = DMAD_DRB_STATE_ERROR;
  570. _dmad_attach_tail(drq->drb_pool, &drq->cpl_head, &drq->cpl_tail, drb->node);
  571. /* Check whether there are pending requests in the DRQ */
  572. if (drq->sbt_head != 0){
  573. /* Lookup next DRB (DMA Request Block) */
  574. drb = &drq->drb_pool[drq->sbt_head];
  575. /* pre-submission-programming */
  576. if (drb->psp)
  577. drb->psp(drb->data);
  578. /*
  579. * Kick-off DMA for next DRB
  580. */
  581. /* Source and destination address */
  582. OUT32(drq->channel_base + DMAC_SRC_ADDR_OFFSET, drb->src_addr);
  583. OUT32(drq->channel_base + DMAC_DST_ADDR_OFFSET, drb->dst_addr);
  584. /* Transfer size (in units of source width) */
  585. OUT32(drq->channel_base + DMAC_SIZE_OFFSET, drb->req_size);
  586. /* Re-enable DMA channel */
  587. SETB32(drq->channel_base + DMAC_CSR_OFFSET, DMAC_CSR_CH_EN_BIT);
  588. }
  589. }
  590. else { /* abt_int */
  591. /* Remove all pending requests in the queue */
  592. while (1){
  593. /* Mark DRB state as abort */
  594. drb->state = DMAD_DRB_STATE_ABORT;
  595. _dmad_attach_tail(drq->drb_pool, &drq->cpl_head, &drq->cpl_tail, drb->node);
  596. /* Detach next submitted DRB (DMA Request Block) from the DRQ (DMA Request Queue) */
  597. _dmad_detach_head(drq->drb_pool, &drq->sbt_head, &drq->sbt_tail, &drb);
  598. if (drb == HAL_NULL)
  599. break;
  600. }
  601. }
  602. #ifdef DMAD_POLLING
  603. if (dmad.hisr_as & 0x0000ffff){
  604. while (drq->cpl_head != 0){
  605. _dmad_detach_head(drq->drb_pool, &drq->cpl_head, &drq->cpl_tail, &drb);
  606. _dmad_attach_tail(drq->drb_pool, &drq->fre_head, &drq->fre_tail, drb->node);
  607. /* completion-of-submission-programming */
  608. if (drb->rcp)
  609. drb->rcp(drb->data);
  610. }
  611. }
  612. #endif
  613. GIE_DISABLE();
  614. hal_intc_irq_unmask(prv_msk);
  615. }
  616. /*****************************************************************************
  617. * FUNCTION
  618. *
  619. * _dmad_apb_lisr
  620. *
  621. * DESCRIPTION
  622. *
  623. * This is the ISR that services all APB DMA channels on the NDS32
  624. * Integrator.
  625. *
  626. * NOTE
* Currently, this ISR processes one channel at a time. This relies on the
* assumption that the ISR will be invoked again as long as its status
* bit remains not-cleared, if interrupts for multiple channels happen
* simultaneously.
  631. *
  632. * [todo] Above assumption may not be the real world case. Check it and
  633. * implement processing of multiple channels at once in the ISR, if
  634. * necessary.
  635. *
  636. * INPUTS
  637. *
  638. * vector : Interrupt vector number
  639. *
  640. * OUTPUTS
  641. *
  642. * none
  643. *
  644. ****************************************************************************/
  645. static void _dmad_apb_lisr(int vector){
  646. DMAD_DRQ *drq;
  647. DMAD_DRB *drb;
  648. uint32_t channel; /* interrupt channel number */
  649. uint8_t finish_int = 0; /* interrupt reason is transfer completed */
  650. uint8_t err_int = 0; /* interrupt reason is DMA error */
  651. uint32_t prv_msk = 0;
  652. if (vector != INTC_APB_BIT)
  653. hal_system_error(HAL_ERR_UNHANDLED_INTERRUPT);
  654. /* Mask priority <= apb_bridge's interrupt */
  655. prv_msk = hal_intc_irq_mask(IRQ_APBBRIDGE_VECTOR);
  656. /* Check DMA status register to get channel number & clean pending */
  657. for (channel = 0; channel < DMAD_APB_MAX_CHANNELS; ++channel){
  658. uint32_t channel_base = APBBR_DMA_BASE_CH(channel);
  659. if (GETB32(channel_base + APBBR_DMA_CMD_OFFSET, APBBR_DMA_FINTST_BIT)){
  660. /* Mark as finish int */
  661. finish_int = 1;
  662. /* APB DMA finish int status clear */
  663. CLRB32(channel_base + APBBR_DMA_CMD_OFFSET, APBBR_DMA_FINTST_BIT);
  664. hal_intc_irq_clean(IRQ_APBBRIDGE_VECTOR);
  665. break;
  666. }
  667. }
  668. /* Perform DMA error checking if no valid channel was found who assert the finish signal
  669. * & clean pending
  670. */
  671. if (channel == DMAD_APB_MAX_CHANNELS){
  672. for (channel = 0; channel < DMAD_APB_MAX_CHANNELS; ++channel){
  673. uint32_t channel_base = APBBR_DMA_BASE_CH(channel);
  674. if (GETB32(channel_base + APBBR_DMA_CMD_OFFSET, APBBR_DMA_ERRINTST_BIT)){
  675. /* Mark as error int */
  676. err_int = 1;
  677. /* APB DMA error int status clear */
  678. CLRB32(channel_base + APBBR_DMA_CMD_OFFSET, APBBR_DMA_ERRINTST_BIT);
  679. hal_intc_irq_clean(IRQ_APBBRIDGE_VECTOR);
  680. break;
  681. }
  682. }
  683. if (channel == DMAD_AHB_MAX_CHANNELS)
  684. hal_system_error(HAL_ERR_UNHANDLED_INTERRUPT);
  685. }
  686. /* Lookup channel's DRQ (DMA Request Queue) */
  687. drq = (DMAD_DRQ *)&apb_drq_pool[channel];
  688. /*
  689. * APB
  690. */
  691. /* Stop DMA channel temporarily */
  692. CLRB32(drq->channel_base + APBBR_DMA_CMD_OFFSET, APBBR_DMA_CHEN_BIT);
  693. /*
  694. * Lookup/detach latest submitted DRB (DMA Request Block) from
  695. * the DRQ (DMA Request Queue), so ISR could kick off next DRB
  696. */
  697. _dmad_detach_head(drq->drb_pool, &drq->sbt_head, &drq->sbt_tail, &drb);
  698. if (drb == HAL_NULL){
  699. /* Check why DMA is triggered while submitted list is empty. */
  700. hal_system_error(HAL_ERR_UNHANDLED_INTERRUPT);
  701. }
  702. GIE_ENABLE();
  703. /* Notify that new node is going to be available in the free list */
  704. dmad.hisr_as |= (0x00010000 << channel); /* [31:16] APB channel indicator */
  705. hal_raise_bh(&dmad.hisr); /* Call HISR to complete deferred tasks */
  706. /* Process DRBs according to the cause of this interrupt */
  707. if (finish_int){
  708. /* Mark DRB state as completed */
  709. drb->state = DMAD_DRB_STATE_COMPLETED;
  710. _dmad_attach_tail(drq->drb_pool, &drq->cpl_head, &drq->cpl_tail, drb->node);
  711. /* destination is memory */
  712. if (drq->dc_invalidate != HAL_NULL && drb->dst_index == DMAC_REQN_NONE)
  713. drq->dc_invalidate((unsigned long)(drb->dst_addr),
  714. (unsigned long)(drb->dst_addr) + (unsigned long)(drb->transfer_size));
  715. /* Check whether there are pending requests in the DRQ */
  716. if (drq->sbt_head != 0){
  717. /* Lookup next DRB (DMA Request Block) */
  718. drb = &drq->drb_pool[drq->sbt_head];
  719. /* pre-submission-programming */
  720. if (drb->psp)
  721. drb->psp(drb->data);
  722. /*
  723. * Kick-off DMA for next DRB
  724. */
  725. /* Source and destination address */
  726. OUT32(drq->channel_base + APBBR_DMA_SAD_OFFSET, drb->src_addr);
  727. OUT32(drq->channel_base + APBBR_DMA_DAD_OFFSET, drb->dst_addr);
  728. /* - Transfer size (in units of source width) */
  729. OUT32(drq->channel_base + APBBR_DMA_CYC_OFFSET, drb->req_size & APBBR_DMA_CYC_MASK);
  730. /* - Re-enable DMA channel */
  731. SETB32(drq->channel_base + APBBR_DMA_CMD_OFFSET, APBBR_DMA_CHEN_BIT);
  732. }
  733. }
  734. else if (err_int){
  735. /* Remove all pending requests in the queue */
  736. while (1){
  737. /* Mark DRB state as abort */
  738. drb->state = DMAD_DRB_STATE_ABORT;
  739. _dmad_attach_tail(drq->drb_pool, &drq->cpl_head, &drq->cpl_tail, drb->node);
  740. _dmad_detach_head(drq->drb_pool, &drq->sbt_head, &drq->sbt_tail, &drb);
  741. if (drb == HAL_NULL)
  742. break;
  743. }
  744. }
  745. #ifdef DMAD_POLLING
  746. if (dmad.hisr_as & 0xffff0000){
  747. while (drq->cpl_head != 0){
  748. _dmad_detach_head(drq->drb_pool, &drq->cpl_head, &drq->cpl_tail, &drb);
  749. _dmad_attach_tail(drq->drb_pool, &drq->fre_head, &drq->fre_tail, drb->node);
  750. /* completion-of-submission-programming */
  751. if (drb->rcp)
  752. drb->rcp(drb->data);
  753. }
  754. dmad.hisr_as &= ~(0x00010000 << channel);
  755. }
  756. #endif
  757. GIE_DISABLE();
  758. hal_intc_irq_unmask(prv_msk);
  759. }
  760. /*****************************************************************************
  761. * FUNCTION
  762. *
  763. * _dmad_hisr
  764. *
  765. * DESCRIPTION
  766. *
* This HISR performs the deferred tasks from the LISR.
  768. *
  769. * NOTE
  770. *
  771. * Current task list of this HISR
  772. *
  773. * - Signal DRQ available event for waiting DRQ allocations.
  774. *
  775. * INPUTS
  776. *
  777. * vector : Interrupt vector number
  778. *
  779. * OUTPUTS
  780. *
  781. * none
  782. *
  783. ****************************************************************************/
  784. static inline void _dmad_hisr(void *param){
  785. DMAD_DRQ *drq;
  786. DMAD_DRB *drb = NULL;
  787. //uint32_t core_intl;
  788. uint32_t channel;
  789. while(1){
  790. hal_bh_t *bh = (hal_bh_t *)param;
  791. hal_pend_semaphore(&bh->sem, HAL_SUSPEND);
  792. //core_intl = hal_global_int_ctl(HAL_DISABLE_INTERRUPTS);
  793. /* Signal free-list available event */
  794. if ((dmad.hisr_as & 0xffff0000) != 0){
  795. /* Disable apb_bridge interrupt to avoid race condition */
  796. HAL_INTC_IRQ_ATOMIC_DISABLE(IRQ_APBBRIDGE_VECTOR);
  797. /* APB LISR */
  798. for (channel = 0; channel < DMAD_APB_MAX_CHANNELS; ++channel){
  799. if (dmad.hisr_as & (0x00010000 << channel)){
  800. drq = (DMAD_DRQ *)&apb_drq_pool[channel];
  801. while (drq->cpl_head != 0){
  802. _dmad_detach_head(drq->drb_pool, &drq->cpl_head, &drq->cpl_tail, &drb);
  803. _dmad_attach_tail(drq->drb_pool, &drq->fre_head, &drq->fre_tail, drb->node);
  804. hal_post_semaphore(&drq->drb_sem);
  805. /* completion-of-submission-programming */
  806. if (drb->rcp)
  807. drb->rcp(drb->data);
  808. if(drb->completion_sem != HAL_NULL)
  809. {
  810. // puts("APB DMA HISR Complete!!!\r\n");
  811. hal_post_semaphore(drb->completion_sem);
  812. }
  813. }
  814. dmad.hisr_as &= ~(0x00010000 << channel);
  815. }
  816. }
  817. /* Re-enable apb_bridge interrupt */
  818. HAL_INTC_IRQ_ATOMIC_ENABLE(IRQ_APBBRIDGE_VECTOR);
  819. }
  820. else if ((dmad.hisr_as & 0x0000ffff) != 0){
  821. /* Disable AHB_DMA interrupt to avoid race condition */
  822. HAL_INTC_IRQ_ATOMIC_DISABLE(IRQ_DMA_VECTOR);
  823. /* AHB LISR */
  824. for (channel = 0; channel < DMAD_AHB_MAX_CHANNELS; ++channel){
  825. if (dmad.hisr_as & (1 << channel)){
  826. drq = (DMAD_DRQ *)&ahb_drq_pool[channel];
  827. while (drq->cpl_head != 0){
  828. _dmad_detach_head(drq->drb_pool, &drq->cpl_head, &drq->cpl_tail, &drb);
  829. _dmad_attach_tail(drq->drb_pool, &drq->fre_head, &drq->fre_tail, drb->node);
  830. hal_post_semaphore(&drq->drb_sem);
  831. /* completion-of-submission-programming */
  832. if (drb->rcp)
  833. drb->rcp(drb->data);
  834. }
  835. if (drb->completion_sem != HAL_NULL)
  836. hal_post_semaphore(drb->completion_sem);
  837. dmad.hisr_as &= ~(1 << channel);
  838. }
  839. }
  840. HAL_INTC_IRQ_ATOMIC_ENABLE(IRQ_DMA_VECTOR);
  841. }
  842. // hal_global_int_ctl(core_intl);
  843. }
  844. }
  845. /*****************************************************************************
  846. * FUNCTION
  847. *
  848. * _dmad_channel_alloc
  849. *
  850. * DESCRIPTION
  851. *
  852. * This function allocates a DMA channel for client's request. If the
  853. * channel is already used by other clients, then this function will
  854. * fail the allocation.
  855. *
  856. * INPUTS
  857. *
  858. * ch_req : Pointer to the DMA request descriptor structure
* init : Specify whether to initialize the DMA channel HW if the
* allocation is successful. Clients could also postpone
* the initialization task to the _dmad_channel_init()
* routine.
  863. *
  864. * OUTPUTS
  865. *
  866. * uint32_t : Returns HAL_SUCCESS if successful allocation,
  867. * else positive value is DMAD-specific error code,
  868. * else negative value is NU system error code.
  869. *
  870. ****************************************************************************/
  871. uint32_t _dmad_channel_alloc(DMAD_CHANNEL_REQUEST_DESC *ch_req, uint8_t init){
  872. uint32_t status;
  873. DMAD_DRQ *drq_iter;
  874. DMAD_DRB *drb_iter;
  875. uint32_t i = 0;
  876. if (ch_req == HAL_NULL)
  877. return HAL_ERR_INVALID_POINTER;
  878. if (ch_req->controller == DMAD_DMAC_AHB_CORE)
  879. drq_iter = ahb_drq_pool;
  880. else if (ch_req->controller == DMAD_DMAC_APB_CORE)
  881. drq_iter = apb_drq_pool;
  882. else
  883. return HAL_ERR_NOT_PRESENT;
  884. /* First-time initialization for DMA queue pool access control object */
  885. if (dmad.drq_pool_mutex_init == 0){
  886. status = hal_create_mutex(&dmad.drq_pool_mutex, "drqpool");
  887. if (status != HAL_SUCCESS){
  888. DMAD_TRACE(("[dmad] failed to create drq_pool mutex!\r\n"));
  889. return status;
  890. }
  891. dmad.drq_pool_mutex_init = 1;
  892. }
  893. /* Obtain exclusive access to the pool of channel queues */
  894. if (hal_current() != HAL_NULL){
  895. /* Suspending is only valid to the current task -- no need to lock if invoked from HISR. */
  896. status = hal_wait_for_mutex(&dmad.drq_pool_mutex, HAL_SUSPEND);
  897. if (status != HAL_SUCCESS){
  898. DMAD_TRACE(("[dmad] failed to lock drq_pool! status(0x%08lx)\r\n", status));
  899. return status;
  900. }
  901. }
  902. /* Locate an available DMA channel */
  903. if (ch_req->controller == DMAD_DMAC_AHB_CORE){
  904. #if 0
  905. /* UART - TX/RX channel is limitted */
  906. if ((ch_req->ahbch_req.src_index == DMAC_REQN_UART1TX) ||
  907. (ch_req->ahbch_req.dst_index == DMAC_REQN_UART1TX) ||
  908. (ch_req->ahbch_req.src_index == DMAC_REQN_UART2TX) ||
  909. (ch_req->ahbch_req.dst_index == DMAC_REQN_UART2TX))
  910. {
  911. /* TX channel is limitied to C/D */
  912. drq_iter = &ahb_drq_pool[2];
  913. for (i = 2; i < 4; ++i, ++drq_iter){
  914. if (drq_iter->allocated == 0)
  915. break;
  916. }
  917. }
  918. else if ((ch_req->ahbch_req.src_index == DMAC_REQN_UART1RX) ||
  919. (ch_req->ahbch_req.dst_index == DMAC_REQN_UART1RX) ||
  920. (ch_req->ahbch_req.src_index == DMAC_REQN_UART2RX) ||
  921. (ch_req->ahbch_req.dst_index == DMAC_REQN_UART2RX)){
  922. /* RX channel is limitied to A/B */
  923. for (i = 0; i < 2; ++i, ++drq_iter){
  924. if (drq_iter->allocated == 0)
  925. break;
  926. }
  927. }
  928. else
  929. #endif
  930. {
  931. if ((ch_req->ahbch_req.src_index != DMAC_REQN_NONE) ||
  932. (ch_req->ahbch_req.dst_index != DMAC_REQN_NONE)){
  933. /*
  934. * [2007-12-03] It looks current board have problem to do dma
  935. * traffic for APB devices on DMAC channel 0/1. Redirect all
  936. * APB devices to start from channel 2.
  937. * [todo] include USB controller ?
  938. */
  939. drq_iter = &ahb_drq_pool[2];
  940. for (i = 2; i < DMAD_AHB_MAX_CHANNELS; ++i, ++drq_iter){
  941. if (drq_iter->allocated == 0)
  942. break;
  943. }
  944. }
  945. else {
  946. /* channel for other devices is free to allocate */
  947. for (i = 0; i < DMAD_AHB_MAX_CHANNELS; ++i, ++drq_iter){
  948. if (drq_iter->allocated == 0)
  949. break;
  950. }
  951. }
  952. }
  953. if (i == DMAD_AHB_MAX_CHANNELS){
  954. DMAD_TRACE(("out of available channels (AHB DMAC)!\r\n"));
  955. return HAL_ERR_UNAVAILABLE;
  956. }
  957. DMAD_TRACE(("allocated channel: %d (AHB DMAC)\r\n"));
  958. }
  959. else if (ch_req->controller == DMAD_DMAC_APB_CORE){
  960. for (i = 0; i < DMAD_APB_MAX_CHANNELS; ++i, ++drq_iter){
  961. if (drq_iter->allocated == 0)
  962. break;
  963. }
  964. if (i == DMAD_APB_MAX_CHANNELS){
  965. DMAD_TRACE(("out of available channels (APB DMAC)!\r\n"));
  966. return HAL_ERR_UNAVAILABLE;
  967. }
  968. DMAD_TRACE(("allocated channel: %d (APB DMAC)\r\n", i));
  969. }
  970. /* Allocate the DMA channel */
  971. drq_iter->allocated = 1;
  972. if (hal_current() != HAL_NULL){
  973. /*
  974. * Suspending is only valid to the current task -- no need to lock if invoked from HISR.
  975. * Release exclusive access to the pool of channel queues
  976. */
  977. status = hal_release_mutex(&dmad.drq_pool_mutex);
  978. if (status != HAL_SUCCESS){
  979. DMAD_TRACE(("[dmad] failed to unlock drq_pool!\r\n"));
  980. return status;
  981. }
  982. }
  983. /* Create mutex object for DMA queue access control */
  984. status = hal_create_mutex(&drq_iter->drb_pool_mutex, "drq");
  985. if (status != HAL_SUCCESS){
  986. DEBUG(1, 1, "failed to create mutex for drb_pool!\n");
  987. return status;
  988. }
  989. /* Create semaphores for free-list allocation operation */
  990. status = hal_create_semaphore(&drq_iter->drb_sem, DMAD_DRB_POOL_SIZE - 1, (void*)0);
  991. if (status != HAL_SUCCESS){
  992. DEBUG(1, 1, "failed to create semaphores for drb_pool!\n");
  993. return status;
  994. }
  995. /* Record the channel number in client's struct */
  996. ch_req->channel = i;
  997. /* Record the channel's queue handle in client's struct */
  998. ch_req->drq = drq_iter;
  999. if (ch_req->controller == DMAD_DMAC_AHB_CORE){
  1000. //drq_iter->controller_base = DMAC_BASE;
  1001. drq_iter->channel_base = DMAC_BASE_CH(i);
  1002. }
  1003. else {
  1004. //drq_iter->controller_base = APBBR_BASE;
  1005. drq_iter->channel_base = APBBR_DMA_BASE_CH(i);
  1006. }
  1007. /* Initialize DMA channel's DRB pool as list of free DRBs */
  1008. drb_iter = &drq_iter->drb_pool[0];
  1009. drb_iter->prev = 0;
  1010. drb_iter->next = 0;
  1011. drb_iter->node = 0;
  1012. //drb_iter->src_addr = 0;
  1013. //drb_iter->dst_addr = 0;
  1014. //drb_iter->req_size = 0;
  1015. ++drb_iter;
  1016. for (i = 1; i < DMAD_DRB_POOL_SIZE; ++i, ++drb_iter){
  1017. drb_iter->prev = i - 1;
  1018. drb_iter->next = i + 1;
  1019. drb_iter->node = i;
  1020. //drb_iter->src_addr = 0;
  1021. //drb_iter->dst_addr = 0;
  1022. //drb_iter->req_size = 0;
  1023. }
  1024. drq_iter->drb_pool[DMAD_DRB_POOL_SIZE - 1].next = 0;
  1025. /* Initialize DMA channel's DRB free-list, ready-list, and submitted-list */
  1026. drq_iter->fre_head = 1;
  1027. drq_iter->fre_tail = DMAD_DRB_POOL_SIZE - 1;
  1028. drq_iter->rdy_head = drq_iter->rdy_tail = 0;
  1029. drq_iter->sbt_head = drq_iter->sbt_tail = 0;
  1030. drq_iter->cpl_head = drq_iter->cpl_tail = 0;
  1031. /* Initialize the channel */
  1032. if (init)
  1033. _dmad_channel_init(ch_req);
  1034. /* Initialize cache writeback function */
  1035. #ifndef CONFIG_CPU_DCACHE_ENABLE
  1036. drq_iter->dc_writeback = HAL_NULL;
  1037. drq_iter->dc_invalidate = HAL_NULL;
  1038. #else
  1039. drq_iter->dc_writeback = nds32_dma_flush_range;
  1040. drq_iter->dc_invalidate = nds32_dma_inv_range;
  1041. #endif
  1042. return HAL_SUCCESS;
  1043. }
  1044. /*****************************************************************************
  1045. * FUNCTION
  1046. *
  1047. * _dmad_channel_free
  1048. *
  1049. * DESCRIPTION
  1050. *
  1051. * This function frees a DMA channel for future clients' request.
  1052. *
  1053. * INPUTS
  1054. *
  1055. * ch_req : Pointer to the DMA request descriptor structure
  1056. *
  1057. * OUTPUTS
  1058. *
  1059. * uint32_t : Returns HAL_SUCCESS if successful channel free,
  1060. * else positive value is DMAD-specific error code,
  1061. * else negative value is NU system error code.
  1062. *
  1063. ****************************************************************************/
  1064. uint32_t _dmad_channel_free(const DMAD_CHANNEL_REQUEST_DESC *ch_req){
  1065. uint32_t status;
  1066. DMAD_DRQ *drq;
  1067. if (ch_req == HAL_NULL)
  1068. return HAL_ERR_INVALID_POINTER;
  1069. drq = (DMAD_DRQ *)ch_req->drq;
  1070. if (drq == HAL_NULL)
  1071. return HAL_ERR_INVALID_POINTER;
  1072. if (drq->allocated == 0)
  1073. return HAL_ERR_INVALID_POINTER;
  1074. if (hal_current() != HAL_NULL){
  1075. /*
  1076. * Suspending is only valid to the current task -- no need to lock if invoked from HISR.
  1077. * Obtain exclusive access to the pool of channel queues
  1078. */
  1079. status = hal_wait_for_mutex(&dmad.drq_pool_mutex, HAL_SUSPEND);
  1080. if (status != HAL_SUCCESS)
  1081. return status;
  1082. }
  1083. /* Todo: Stop/abort channel I/O if it's busy ? */
  1084. /* Delete mutex object of DMA queue access control */
  1085. status = hal_destroy_mutex(&drq->drb_pool_mutex);
  1086. if (status != HAL_SUCCESS)
  1087. return status;
  1088. /* Delete semaphores of free-list allocation operation */
  1089. status = hal_destroy_semaphore(&drq->drb_sem);
  1090. if (status != HAL_SUCCESS)
  1091. return status;
  1092. /* Reset HISR activation state */
  1093. if (ch_req->controller == DMAD_DMAC_AHB_CORE)
  1094. dmad.hisr_as &= ~(1 << ch_req->channel);
  1095. else
  1096. dmad.hisr_as &= ~(1 << (ch_req->channel + 16));
  1097. /* Set released flag. */
  1098. drq->allocated = 0;
  1099. if (hal_current() != HAL_NULL){
  1100. /*
  1101. * Suspending is only valid to the current task -- no need to lock if invoked from HISR.
  1102. * Release exclusive access to the pool of channel queues
  1103. */
  1104. status = hal_release_mutex(&dmad.drq_pool_mutex);
  1105. if (status != HAL_SUCCESS)
  1106. return status;
  1107. }
  1108. return HAL_SUCCESS;
  1109. }
  1110. /*****************************************************************************
  1111. * FUNCTION
  1112. *
  1113. * _dmad_ahb_init
  1114. *
  1115. * DESCRIPTION
  1116. *
  1117. * This function performs the AHB DMAC channel initialization.
  1118. *
  1119. * INPUTS
  1120. *
  1121. * ch_req : Pointer to the DMA request descriptor structure
  1122. *
  1123. * OUTPUTS
  1124. *
  1125. * uint32_t : Returns HAL_SUCCESS if successful initialization,
  1126. * else positive value is DMAD-specific error code,
  1127. * else negative value is NU system error code.
  1128. *
  1129. ****************************************************************************/
static uint32_t _dmad_ahb_init(const DMAD_CHANNEL_REQUEST_DESC *ch_req){
    /*
     * Per-channel initialization of the AHB DMAC: registers the shared LISR
     * (first call only), configures the DMAC line at the INTC, routes APB
     * device DMA requests through the PMU, then programs the channel's
     * CSR/CFG/LLP registers. Runs with global interrupts disabled between
     * hal_global_int_ctl() and the _safe_exit restore.
     */
    uint32_t status = HAL_SUCCESS;
    DMAD_DRQ *drq = (DMAD_DRQ *)ch_req->drq;
    DMAD_AHBCH_REQUEST *ahb_req = (DMAD_AHBCH_REQUEST *)(&ch_req->ahbch_req);
    uint32_t channel = (uint32_t)ch_req->channel;
    uint32_t channel_base = drq->channel_base;
    uint32_t core_intl;
    /* Register LISR (shared by all AHB channels; register once) */
    if (dmad.ahb_lisr_registered == 0){
        status = hal_register_isr(IRQ_DMA_VECTOR, _dmad_ahb_lisr, (void*)0);
        // status = hal_register_isr(INTC_DMA_BIT, _dmad_ahb_lisr, (void*)0);
        if (status != HAL_SUCCESS)
            return status;
        dmad.ahb_lisr_registered = 1;
    }
    core_intl = hal_global_int_ctl(HAL_DISABLE_INTERRUPTS);
    /* Following code require _safe_exit return path */
    /* INTC */
    /* Disable DMAC interrupt */
    hal_intc_irq_disable(IRQ_DMA_VECTOR);
    /* Clear DMAC interrupt status */
    hal_intc_irq_clean(IRQ_DMA_VECTOR);
    /* Setup DMAC interrupt trigger mode - level trigger */
    /* Setup DMAC interrupt trigger level - assert high */
    hal_intc_irq_config(IRQ_DMA_VECTOR, IRQ_LEVEL_TRIGGER, IRQ_ACTIVE_HIGH);
    /* Enable DMAC interrupt */
    hal_intc_irq_enable(IRQ_DMA_VECTOR);
#if 0
#if ( NO_EXTERNAL_INT_CTL == 1 )
    /*
     * IVIC without INTC
     */
    /* FIXME add trigger mode */
    /* Enable DMAC interupt */
    SR_SETB32(NDS32_SR_INT_MASK2,IRQ_DMA_VECTOR);
#else
    /*
     * INTC
     */
    /* Clear DMAC interrupt status */
    SETB32(INTC_HW1_CLR, INTC_DMA_BIT);
    /* Setup DMAC interrupt trigger mode - level trigger */
    CLRB32(INTC_HW1_TMR, INTC_DMA_BIT);
    /* Setup DMAC interrupt trigger level - assert high */
    CLRB32(INTC_HW1_TLR, INTC_DMA_BIT);
    /* Enable DMAC interrupt */
    SETB32(INTC_HW1_ER, INTC_DMA_BIT);
#endif
#endif
    /*
     * PMU
     */
    /*
     * Route APB device DMA to an AHB DMAC channel and specify the channel
     * number. (connection status could be read back from PMU_AHBDMA_REQACK
     * register)
     * Note: Only one device is routed per AHB DMA channel, the other target
     * should be either (1) the same device (same reqn), or (2) the AHB
     * device (reqn = 0).
     */
    if (ahb_req->dst_index != PMU_REQN_NONE){
        /* DMA transfer to device */
        if ((ahb_req->dst_index > PMU_REQN_EXT1) ||
            (dmad_ahb_ch_route_table[ahb_req->dst_index].route_cr == 0)){
            /* No PMU routing register exists for this request number */
            status = HAL_ERR_NOT_PRESENT;
            goto _safe_exit;
        }
        OUT32(dmad_ahb_ch_route_table[ahb_req->dst_index].route_cr,
            PMU_DMACUSED_MASK | ((ahb_req->dst_reqn << PMU_CHANNEL_SHIFT) & PMU_CHANNEL_MASK));
    }
    else if (ahb_req->src_index != PMU_REQN_NONE){
        /* DMA transfer from device */
        if ((ahb_req->src_index > PMU_REQN_EXT1) ||
            (dmad_ahb_ch_route_table[ahb_req->src_index].route_cr == 0)){
            /* No PMU routing register exists for this request number */
            status = HAL_ERR_NOT_PRESENT;
            goto _safe_exit;
        }
        OUT32(dmad_ahb_ch_route_table[ahb_req->src_index].route_cr,
            PMU_DMACUSED_MASK | ((ahb_req->src_reqn << PMU_CHANNEL_SHIFT) & PMU_CHANNEL_MASK));
    }
    /*
     * DMAC (Controller Setting)
     * Note: Controller global setting actually should not placed in this channel
     * specific setup routine. However, currently the only global setting
     * is a fixed value, so it is ok to set it here. In this way, we save
     * the effert to setup the global parameters elsewhere.
     */
    /* INT TC/ERR/ABT status clear */
    SETB32(DMAC_INT_TC_CLR, channel); /* TC clear */
    SETB32(DMAC_INT_ERRABT_CLR, channel + DMAC_INT_ERR_CLR_SHIFT); /* ERR clear */
    SETB32(DMAC_INT_ERRABT_CLR, channel + DMAC_INT_ABT_CLR_SHIFT); /* ABT clear */
    /* CSR (enable DMAC, set M0 & M1 endian default to little endian transfer) */
    OUT32(DMAC_CSR, DMAC_DMACEN_MASK |
        ((DMAC_ENDIAN_LITTLE << DMAC_M0ENDIAN_BIT) & DMAC_M0ENDIAN_MASK) |
        ((DMAC_ENDIAN_LITTLE << DMAC_M1ENDIAN_BIT) & DMAC_M1ENDIAN_MASK));
    /* DMAC (Channel-Specific Setting) */
    /* SYNC */
    if (ahb_req->sync)
        SETB32(DMAC_SYNC, channel);
    else
        CLRB32(DMAC_SYNC, channel);
    /*
     * Channel CSR
     * CH_EN : 0 (disable)
     * DST_SEL : 0 (Master 0)
     * SRC_SEL : 0 (Master 0)
     * DSTAD_CTL : ahb_req->dst_addr_ctrl
     * SRCAD_CTL : ahb_req->src_addr_ctrl
     * MODE : 0 (normal)
     * DST_WIDTH : ahb_req->dst_width
     * SRC_WIDTH : ahb_req->src_width
     * ABT : 0 (not abort)
     * SRC_SIZE : 0 (burst size = 1 byte)
     * PROT1 : 0 (user mode)
     * PROT2 : 0 (bot bufferable)
     * PROT3 : 0 (not cacheable)
     * CHPRI : ahb_req->priority
     * DMA_FF_TH : 0 (FIA320 only, threshold = 1)
     * TC_MSK : 0 (TC counter status enable)
     */
    OUT32(channel_base + DMAC_CSR_OFFSET,
        ((ahb_req->src_width << DMAC_CSR_SRC_WIDTH_SHIFT) & DMAC_CSR_SRC_WIDTH_MASK) |
        ((ahb_req->src_addr_ctrl << DMAC_CSR_SRCAD_CTL_SHIFT) & DMAC_CSR_SRCAD_CTL_MASK) |
        ((ahb_req->dst_width << DMAC_CSR_DST_WIDTH_SHIFT) & DMAC_CSR_DST_WIDTH_MASK) |
        ((ahb_req->dst_addr_ctrl << DMAC_CSR_DSTAD_CTL_SHIFT) & DMAC_CSR_DSTAD_CTL_MASK) |
        ((ahb_req->priority << DMAC_CSR_CHPRI_SHIFT) & DMAC_CSR_CHPRI_MASK));
    /* Channel CFG
     * INT_TC_MSK : 0 (enable TC int)
     * INT_ERR_MSK : 0 (enable ERR int)
     * INT_ABT_MSK : 0 (enable ABT int)
     * SRC_RS : 0
     * SRC_HE : 0
     * BUSY : r/o
     * DST_RS : 0
     * DST_HE : 0
     * LLP_CNT : r/o
     */
    OUT32(channel_base + DMAC_CFG_OFFSET, 0);
    /*(DMAC_CFG_INT_TC_MSK | DMAC_CFG_INT_ERR_MSK | DMAC_CFG_INT_ABT_MSK)); */
#if 1 /* (Not found in AG101 spec -- has removed this setting?) */
    /* - HW handshake mode: CSR & CFG */
    if (ahb_req->hw_handshake != 0){
        /* Channel CFG - Device REQN and HW-handshake mode.
         * Only one side (src or dst) gets a REQN and handshake-enable bit;
         * the other side is left at 0. */
        uint32_t cfg = IN32(channel_base + DMAC_CFG_OFFSET);
        if (ahb_req->src_index != DMAC_REQN_NONE){
            OUT32(channel_base + DMAC_CFG_OFFSET, cfg |
                ((ahb_req->src_reqn << DMAC_CFG_INT_SRC_RS_SHIFT) & DMAC_CFG_INT_SRC_RS_MASK) |
                ((1 << DMAC_CFG_INT_SRC_HE_BIT) & DMAC_CFG_INT_SRC_HE_MASK) |
                ((0 << DMAC_CFG_INT_DST_RS_SHIFT) & DMAC_CFG_INT_DST_RS_MASK) |
                ((0 << DMAC_CFG_INT_DST_HE_BIT) & DMAC_CFG_INT_DST_HE_MASK));
        }
        else {
            OUT32(channel_base + DMAC_CFG_OFFSET, cfg |
                ((0 << DMAC_CFG_INT_SRC_RS_SHIFT) & DMAC_CFG_INT_SRC_RS_MASK) |
                ((0 << DMAC_CFG_INT_SRC_HE_BIT) & DMAC_CFG_INT_SRC_HE_MASK) |
                ((ahb_req->dst_reqn << DMAC_CFG_INT_DST_RS_SHIFT) & DMAC_CFG_INT_DST_RS_MASK) |
                ((1 << DMAC_CFG_INT_DST_HE_BIT) & DMAC_CFG_INT_DST_HE_MASK));
        }
        /* Channel CSR - Enable HW-handshake mode */
        SETB32(channel_base + DMAC_CSR_OFFSET, DMAC_CSR_MODE_BIT);
    }
#endif
    /* SRC_ADDR and DST_ADDR - not now */
    /* LLP */
    OUT32(channel_base + DMAC_LLP_OFFSET, 0);
    /* TOT_SIZE - not now */
_safe_exit:
    /* Restore the interrupt level saved at entry */
    hal_global_int_ctl(core_intl);
    return status;
}
  1300. /*****************************************************************************
  1301. * FUNCTION
  1302. *
  1303. * _dmad_apb_init
  1304. *
  1305. * DESCRIPTION
  1306. *
  1307. * This function performs the APB bridge DMA channel initialization.
  1308. *
  1309. * INPUTS
  1310. *
  1311. * ch_req : Pointer to the DMA request descriptor structure
  1312. *
  1313. * OUTPUTS
  1314. *
  1315. * uint32_t : Returns HAL_SUCCESS if successful initialization,
  1316. * else positive value is DMAD-specific error code,
  1317. * else negative value is NU system error code.
  1318. *
  1319. ****************************************************************************/
static uint32_t _dmad_apb_init(const DMAD_CHANNEL_REQUEST_DESC *ch_req){
    uint32_t status = HAL_SUCCESS;
    DMAD_DRQ *drq = (DMAD_DRQ *)ch_req->drq;
    DMAD_APBCH_REQUEST *apb_req = (DMAD_APBCH_REQUEST *)(&ch_req->apbch_req);
    uint32_t channel = (uint32_t)ch_req->channel;
    uint32_t channel_base = drq->channel_base;
    uint32_t channel_cmd = 0;
    uint32_t core_intl;
    uint32_t dst_bus_sel;
    uint32_t src_bus_sel;

    /* Register LISR (done once for the whole APB bridge, not per channel) */
    if (dmad.apb_lisr_registered == 0){
        status = hal_register_isr(IRQ_APBBRIDGE_VECTOR , _dmad_apb_lisr, (void*)0);
        if (status != HAL_SUCCESS)
            return status;
        dmad.apb_lisr_registered = 1;
    }

    core_intl = hal_global_int_ctl(HAL_DISABLE_INTERRUPTS);
    /* Following code require _safe_exit return path */

    /* INTC */
    /* Disable APB Bridge interrupt */
    hal_intc_irq_disable(IRQ_APBBRIDGE_VECTOR);
    /* Clear APB Bridge interrupt status */
    hal_intc_irq_clean(IRQ_APBBRIDGE_VECTOR);
    /* Setup APB Bridge interrupt trigger mode - level trigger */
    /* Setup APB Bridge interrupt trigger level - assert high */
    hal_intc_irq_config(IRQ_APBBRIDGE_VECTOR, IRQ_LEVEL_TRIGGER, IRQ_ACTIVE_HIGH);
    /* Enable APB Bridge interrupt */
    hal_intc_irq_enable(IRQ_APBBRIDGE_VECTOR);

/* Legacy direct-register INTC programming, superseded by the hal_intc_* calls above */
#if 0
#if ( NO_EXTERNAL_INT_CTL == 1 )
    /*
     * IVIC without INTC
     */
    /* FIXME add trigger mode */
    /* Enable APB Bridge interrupt */
    SR_SETB32(NDS32_SR_INT_MASK2,IRQ_APBBRIDGE_VECTOR);
#else
    /*
     * INTC
     */
    /* Clear APB Bridge interrupt status */
    SETB32(INTC_HW1_CLR, INTC_APB_BIT);
    /* Setup APB Bridge interrupt trigger mode - level trigger */
    CLRB32(INTC_HW1_TMR, INTC_APB_BIT);
    /* Setup APB Bridge interrupt trigger level - assert high */
    CLRB32(INTC_HW1_TLR, INTC_APB_BIT);
    /* Enable APB Bridge interrupt */
    SETB32(INTC_HW1_ER, INTC_APB_BIT);
#endif
#endif

    /* PMU */
    /* Check platform version */
    uint32_t max_reqn = _dmad_get_reqn(DMAD_DMAC_APB_CORE, APB_MAX);

    /*
     * Undo APB device DMA to AHB DMAC channel routing. (connection status
     * is obtained from reading back the PMU_AHBDMA_REQACK register)
     */
    if ((apb_req->src_index > max_reqn) ||
        (apb_req->dst_index > max_reqn)){
        status = HAL_ERR_NOT_PRESENT;
        goto _safe_exit;
    }

    if (apb_req->src_index != APBBR_REQN_NONE){ /* quick filter out non-APB reqn */
        uint32_t ahb_ch;
        /* Search for source device whether it is re-routed to AHB DMA channel */
        for (ahb_ch = 0; ahb_ch < DMAD_AHB_MAX_CHANNELS; ++ahb_ch){
            /* each AHB channel owns a 4-bit reqn field in PMU_AHBDMA_REQACK */
            uint32_t ahb_reqn = (IN32(PMU_AHBDMA_REQACK) >> (ahb_ch << 2)) & 0x0000000f;
            if ((ahb_reqn != APBBR_REQN_NONE) &&
                (ahb_reqn == dmad_apb_reqn_route_table[apb_req->src_index].ahb_reqn)){
                DMAD_TRACE(("src: re-route DMAC ch %2d to APB.\r\n", ahb_ch));
                /* got it! un-route from AHB back to APB */
                OUT32(dmad_ahb_ch_route_table[ahb_reqn].route_cr,
                    ((channel << PMU_CHANNEL_SHIFT) & PMU_CHANNEL_MASK));
                break;
            }
        }
    }

    if (apb_req->dst_index != APBBR_REQN_NONE){ /* quick filter out non-APB reqn */
        uint32_t ahb_ch;
        /* Search for destination device whether it is re-routed to AHB DMA channel */
        for (ahb_ch = 0; ahb_ch < DMAD_AHB_MAX_CHANNELS; ++ahb_ch){
            uint32_t ahb_reqn = (IN32(PMU_AHBDMA_REQACK) >> (ahb_ch << 2)) & 0x0000000f;
            if ((ahb_reqn != APBBR_REQN_NONE) &&
                (ahb_reqn == dmad_apb_reqn_route_table[apb_req->dst_index].ahb_reqn)){
                DMAD_TRACE(("dst: re-route DMAC ch %2d to APB.\r\n", ahb_ch));
                /* got it! un-route from AHB back to APB */
                OUT32(dmad_ahb_ch_route_table[ahb_reqn].route_cr,
                    ((channel << PMU_CHANNEL_SHIFT) & PMU_CHANNEL_MASK));
                break;
            }
        }
    }

    /* APB Bridge DMA (Channel Setting) */
    /*
     * - CMD
     *   ENBDIS    : 0 (disable for now)
     *   FININTSTS : 0 (clear finishing interrupt status)
     *   FININTENB : 1 (enable finishing interrupt)
     *   BURMOD    : apb_req->burst_mode
     *   ERRINTSTS : 0 (clear error interrupt status)
     *   ERRINTENB : 1 (enable error interrupt)
     *   SRCADRSEL : AHB/APB, driver auto-conf according to apb_req->src_index
     *   DESADRSEL : AHB/APB, driver auto-conf according to apb_req->dst_index
     *   SRCADR    : apb_req->src_addr_ctrl
     *   DESADR    : apb_req->dst_addr_ctrl
     *   REQSEL    : apb_req->src_index (? a "req/gnt" device looks to be a src... check to use reqn of src or dst)
     *   DATAWIDTH : apb_req->data_width
     */
    /*
     * - CMD
     *   ENBDIS
     *   FININTSTS
     *   FININTENB
     *   BURMOD
     *   ERRINTSTS
     *   ERRINTENB
     *   SRCADR
     *   DESADR
     *   DATAWIDTH
     */
    channel_cmd = ((uint32_t)APBBR_DMA_FINTEN_MASK | APBBR_DMA_ERRINTEN_MASK |
        ((apb_req->burst_mode << APBBR_DMA_BURST_BIT) & APBBR_DMA_BURST_MASK) |
        ((apb_req->src_addr_ctrl << APBBR_DMA_SRCADDRINC_SHIFT) & APBBR_DMA_SRCADDRINC_MASK) |
        ((apb_req->dst_addr_ctrl << APBBR_DMA_DSTADDRINC_SHIFT) & APBBR_DMA_DSTADDRINC_MASK) |
        ((apb_req->data_width << APBBR_DMA_DATAWIDTH_SHIFT) & APBBR_DMA_DATAWIDTH_MASK));

    /*
     * - CMD
     *   DSTADRSEL
     *   DREQSEL (todo: this is FIA320 bit-mask, check AG101 bit-mask location)
     */
    /* Destination sits on the APB bus iff it has an APB request line */
    if (apb_req->dst_index != APBBR_REQN_NONE)
        dst_bus_sel = APBBR_ADDRSEL_APB;
    else
        dst_bus_sel = APBBR_ADDRSEL_AHB;
    channel_cmd |= ((uint32_t)(APBBR_DMA_DSTADDRSEL_MASK &
        (dst_bus_sel << APBBR_DMA_DSTADDRSEL_BIT)) |
        (((uint32_t)apb_req->dst_index << APBBR_DMA_DREQSEL_SHIFT) & APBBR_DMA_DREQSEL_MASK));

    /*
     * - CMD
     *   SRCADRSEL
     *   SREQSEL (todo: this is FIA320 bit-mask, check AG101 bit-mask location)
     */
    /* Source sits on the APB bus iff it has an APB request line */
    if (apb_req->src_index != APBBR_REQN_NONE)
        src_bus_sel = APBBR_ADDRSEL_APB;
    else
        src_bus_sel = APBBR_ADDRSEL_AHB;
    channel_cmd |= ((uint32_t)(APBBR_DMA_SRCADDRSEL_MASK &
        (src_bus_sel << APBBR_DMA_SRCADDRSEL_BIT)) |
        (((uint32_t)apb_req->src_index << APBBR_DMA_SREQSEL_SHIFT) & APBBR_DMA_SREQSEL_MASK));

    /* - CMD outport */
    OUT32(channel_base + APBBR_DMA_CMD_OFFSET, channel_cmd);

    /* SRCADR and DESADR - not now */
    /* CYC - not now */

_safe_exit:
    hal_global_int_ctl(core_intl);
    return status;
}
  1478. /*****************************************************************************
  1479. * FUNCTION
  1480. *
  1481. * _dmad_channel_init
  1482. *
  1483. * DESCRIPTION
  1484. *
  1485. * This function performs the DMA channel HW initialization abstraction.
  1486. * The real initialization task is dispatched according to the requested
  1487. * DMA controller type (AHB DMAC or APB bridge DMA controller).
  1488. *
  1489. * INPUTS
  1490. *
  1491. * ch_req : Pointer to the DMA request descriptor structure
  1492. *
  1493. * OUTPUTS
  1494. *
  1495. * uint32_t : Returns HAL_SUCCESS if successful initialization,
  1496. * else positive value is DMAD-specific error code,
  1497. * else negative value is NU system error code.
  1498. *
  1499. ****************************************************************************/
  1500. uint32_t _dmad_channel_init(const DMAD_CHANNEL_REQUEST_DESC *ch_req){
  1501. uint32_t status;
  1502. DMAD_TRACE(("_dmad_channel_init\r\n"));
  1503. if (ch_req == HAL_NULL)
  1504. return HAL_ERR_INVALID_POINTER;
  1505. if (ch_req->drq == HAL_NULL)
  1506. return HAL_ERR_INVALID_POINTER;
  1507. /* Initialize DMA controller */
  1508. if (ch_req->controller == DMAD_DMAC_AHB_CORE)
  1509. status = _dmad_ahb_init(ch_req);
  1510. else
  1511. status = _dmad_apb_init(ch_req);
  1512. /* Register HISR to perform deffered DMA ISR tasks */
  1513. if (dmad.hisr_registered == 0){
  1514. printf("_dmad_channel_init Register HISR\n");
  1515. dmad.hisr.th.fn = _dmad_hisr;
  1516. dmad.hisr.th.arg = &dmad.hisr;
  1517. dmad.hisr.th.prio = CONFIG_DMAD_HISR_PRIORITY;
  1518. dmad.hisr.th.ptos = &dmad_hisr_stack[DMAD_HISR_STACK_SIZE];
  1519. dmad.hisr.th.stack_size = sizeof(dmad_hisr_stack);
  1520. dmad.hisr.th.name = "DMA BH";
  1521. status = hal_create_bh(&dmad.hisr);
  1522. if (status != HAL_SUCCESS)
  1523. return status;
  1524. dmad.hisr_registered = 1;
  1525. }
  1526. return status;
  1527. }
  1528. /*****************************************************************************
  1529. * FUNCTION
  1530. *
  1531. * _dmad_channel_enable
  1532. *
  1533. * DESCRIPTION
  1534. *
  1535. * This function is a abstraction routine to enable or disable a DMA
  1536. * channel.
  1537. *
  1538. * INPUTS
  1539. *
  1540. * ch_req : Pointer to the DMA request descriptor structure
  1541. *
  1542. * OUTPUTS
  1543. *
  1544. * uint32_t : Returns HAL_SUCCESS if successful enable/disable,
  1545. * else positive value is DMAD-specific error code,
  1546. * else negative value is NU system error code.
  1547. *
  1548. ****************************************************************************/
  1549. uint32_t _dmad_channel_enable(const DMAD_CHANNEL_REQUEST_DESC *ch_req, uint8_t enable){
  1550. DMAD_DRQ *drq;
  1551. if (ch_req == HAL_NULL)
  1552. return HAL_ERR_INVALID_POINTER;
  1553. drq = (DMAD_DRQ *)ch_req->drq;
  1554. if (drq == HAL_NULL)
  1555. return HAL_ERR_INVALID_POINTER;
  1556. /* Enable/disable DMA channel */
  1557. if (ch_req->controller == DMAD_DMAC_AHB_CORE){
  1558. if (enable)
  1559. SETB32(drq->channel_base + DMAC_CSR_OFFSET, DMAC_CSR_CH_EN_BIT);
  1560. else
  1561. CLRB32(drq->channel_base + DMAC_CSR_OFFSET, DMAC_CSR_CH_EN_BIT);
  1562. }
  1563. else { /* APB */
  1564. if (enable)
  1565. SETB32(drq->channel_base + APBBR_DMA_CMD_OFFSET, APBBR_DMA_CHEN_BIT);
  1566. else
  1567. CLRB32(drq->channel_base + APBBR_DMA_CMD_OFFSET, APBBR_DMA_CHEN_BIT);
  1568. }
  1569. return HAL_SUCCESS;
  1570. }
  1571. /*****************************************************************************
  1572. * FUNCTION
  1573. *
  1574. * _dmad_alloc_drb
  1575. *
  1576. * DESCRIPTION
  1577. *
  1578. * This function is used to allocate a DRB (DMA request block) within a DMA
  1579. * channel. DRB is used to queue all DMA submission requests for the
  1580. * channel. Allocated DRB node is moved from the free-list to the ready-
  1581. * list.
  1582. *
  1583. * INPUTS
  1584. *
  1585. * ch_req : (in) Pointer to the DMA request descriptor structure
  1586. * drb : (out) Reference to the pointer of allocated DRB.
  1587. *
  1588. * OUTPUTS
  1589. *
  1590. * uint32_t : Returns HAL_SUCCESS if successful allocation,
  1591. * else positive value is DMAD-specific error code,
  1592. * else negative value is NU system error code.
  1593. *
  1594. ****************************************************************************/
  1595. volatile int taskId=0;
  1596. uint32_t _dmad_alloc_drb(DMAD_CHANNEL_REQUEST_DESC *ch_req, DMAD_DRB **drb){
  1597. uint32_t status = HAL_SUCCESS;
  1598. DMAD_DRQ *drq;
  1599. uint32_t core_intl;
  1600. if (ch_req == HAL_NULL)
  1601. return HAL_ERR_INVALID_POINTER;
  1602. drq = (DMAD_DRQ *)ch_req->drq;
  1603. if (drq == HAL_NULL)
  1604. return HAL_ERR_INVALID_POINTER;
  1605. /* Obtain exclusive access to the drq from other tasks */
  1606. if (hal_current() != HAL_NULL){
  1607. /*
  1608. * Suspending is only valid to the current task -- no need to lock if invoked from HISR.
  1609. * Lock DMA queue to prevent been updated by other tasks
  1610. */
  1611. status = hal_wait_for_mutex(&drq->drb_pool_mutex, HAL_SUSPEND);
  1612. if (status != HAL_SUCCESS)
  1613. return status;
  1614. }
  1615. /* Initialize drb ptr in case of fail allocation */
  1616. *drb = HAL_NULL;
  1617. #ifdef DMAD_POLLING
  1618. core_intl = hal_global_int_ctl(HAL_DISABLE_INTERRUPTS);
  1619. while (drq->fre_head == 0){
  1620. /* Wait for free urbs. Sleep for 50 ms before polling again. */
  1621. hal_global_int_ctl(core_intl);
  1622. hal_sleep(50);
  1623. core_intl = hal_global_int_ctl(HAL_DISABLE_INTERRUPTS);
  1624. }
  1625. #else
  1626. status = hal_pend_semaphore(&drq->drb_sem, HAL_SUSPEND);
  1627. if (status == HAL_ERR_TIMEOUT){
  1628. status = HAL_ERR_NO_MEMORY;
  1629. goto _safe_exit;
  1630. }
  1631. else if (status != HAL_SUCCESS){
  1632. goto _safe_exit;
  1633. }
  1634. core_intl = hal_global_int_ctl(HAL_DISABLE_INTERRUPTS);
  1635. #endif
  1636. _dmad_detach_head(drq->drb_pool, &drq->fre_head, &drq->fre_tail, drb);
  1637. hal_global_int_ctl(core_intl);
  1638. _dmad_attach_tail(drq->drb_pool, &drq->rdy_head, &drq->rdy_tail, (*drb)->node);
  1639. (*drb)->state = DMAD_DRB_STATE_READY;
  1640. (*drb)->completion_sem = HAL_NULL;
  1641. (*drb)->psp = HAL_NULL;
  1642. (*drb)->rcp = HAL_NULL;
  1643. if (ch_req->controller == DMAD_DMAC_AHB_CORE) {
  1644. (*drb)->src_index = ch_req->ahbch_req.src_index;
  1645. (*drb)->dst_index = ch_req->ahbch_req.dst_index;
  1646. } else if (ch_req->controller == DMAD_DMAC_APB_CORE) {
  1647. (*drb)->src_index = ch_req->apbch_req.src_index;
  1648. (*drb)->dst_index = ch_req->apbch_req.dst_index;
  1649. } else
  1650. status = HAL_ERR_NOT_PRESENT;
  1651. goto _safe_exit;
  1652. _safe_exit:
  1653. /* Release locking of this function from other tasks */
  1654. if (hal_current() != HAL_NULL){
  1655. /*
  1656. * Suspending is only valid to the current task -- no need to lock if invoked from HISR.
  1657. * Unlock DMA queue to allow its access from other tasks
  1658. */
  1659. hal_release_mutex(&drq->drb_pool_mutex);
  1660. }
  1661. return status;
  1662. }
  1663. /*****************************************************************************
  1664. * FUNCTION
  1665. *
  1666. * _dmad_free_drb
  1667. *
  1668. * DESCRIPTION
  1669. *
  1670. * This function is used to free a DRB (DMA request block) within a DMA
  1671. * channel. DRB is used to queue all DMA submission requests for the
  1672. * channel. Freed DRB node is moved from the ready-list to the free-
  1673. * list.
  1674. *
  1675. * INPUTS
  1676. *
  1677. * ch_req : (in) Pointer to the DMA request descriptor structure
  1678. * drb : (in) Pointer of a DRB struct to be freed.
  1679. *
  1680. * OUTPUTS
  1681. *
  1682. * uint32_t : Returns HAL_SUCCESS if successful freeing,
  1683. * else positive value is DMAD-specific error code,
  1684. * else negative value is NU system error code.
  1685. *
  1686. ****************************************************************************/
  1687. uint32_t _dmad_free_drb(DMAD_CHANNEL_REQUEST_DESC *ch_req, DMAD_DRB *drb){
  1688. uint32_t status = HAL_SUCCESS;
  1689. DMAD_DRQ *drq;
  1690. uint32_t core_intl;
  1691. if (ch_req == HAL_NULL)
  1692. return HAL_ERR_INVALID_POINTER;
  1693. drq = (DMAD_DRQ *)ch_req->drq;
  1694. if (drq == HAL_NULL)
  1695. return HAL_ERR_INVALID_POINTER;
  1696. /* Obtain exclusive access to the drq from other tasks */
  1697. if (hal_current() != HAL_NULL){
  1698. /* Suspending is only valid to the current task -- no need to lock if invoked from HISR. */
  1699. status = hal_wait_for_mutex(&drq->drb_pool_mutex, HAL_SUSPEND);
  1700. if (status != HAL_SUCCESS)
  1701. return status;
  1702. }
  1703. /* Following code require _safe_exit return path */
  1704. if ((drq->rdy_head == 0) || (drb->node == 0) ||
  1705. (drb->node >= DMAD_DRB_POOL_SIZE)){
  1706. DMAD_TRACE(("Ready-queue is empty or invalid node!\r\n"));
  1707. /* Unlock DMA queue to allow its access from other tasks */
  1708. status = HAL_ERR_INVALID_ENTRY;
  1709. goto _safe_exit;
  1710. }
  1711. core_intl = hal_global_int_ctl(HAL_DISABLE_INTERRUPTS);
  1712. _dmad_detach_node(drq->drb_pool, &drq->rdy_head, &drq->rdy_tail, drb->node);
  1713. _dmad_attach_tail(drq->drb_pool, &drq->fre_head, &drq->fre_tail, drb->node);
  1714. hal_global_int_ctl(core_intl);
  1715. drb->state = DMAD_DRB_STATE_FREE;
  1716. drb->completion_sem = HAL_NULL;
  1717. _safe_exit:
  1718. /* Release locking of this function from other tasks */
  1719. if (hal_current() != HAL_NULL){
  1720. /*
  1721. * Suspending is only valid to the current task -- no need to lock if invoked from HISR
  1722. * Unlock DMA queue to allow its access from other tasks
  1723. */
  1724. hal_release_mutex(&drq->drb_pool_mutex);
  1725. }
  1726. return status;
  1727. }
  1728. int dmad_apb_config_dir(const DMAD_CHANNEL_REQUEST_DESC *ch_req, uint8_t dir)
  1729. {
  1730. uint32_t status = HAL_SUCCESS;
  1731. DMAD_DRQ *drq = (DMAD_DRQ *)ch_req->drq;
  1732. DMAD_APBCH_REQUEST *apb_req = (DMAD_APBCH_REQUEST *)(&ch_req->apbch_req);
  1733. uint32_t channel_base = drq->channel_base;
  1734. uint32_t channel_cmd = 0;
  1735. uint32_t dst_bus_sel;
  1736. uint32_t src_bus_sel;
  1737. channel_cmd = IN32(channel_base + APBBR_DMA_CMD_OFFSET);
  1738. channel_cmd &= ~(uint32_t)
  1739. (APBBR_DMA_SRCADDRINC_MASK | APBBR_DMA_DSTADDRINC_MASK |
  1740. APBBR_DMA_DSTADDRSEL_MASK | APBBR_DMA_DREQSEL_MASK |
  1741. APBBR_DMA_SRCADDRSEL_MASK | APBBR_DMA_SREQSEL_MASK);
  1742. if( dir == 0){
  1743. channel_cmd = ((uint32_t)APBBR_DMA_FINTEN_MASK | APBBR_DMA_ERRINTEN_MASK |
  1744. ((apb_req->src_addr_ctrl << APBBR_DMA_SRCADDRINC_SHIFT) & APBBR_DMA_SRCADDRINC_MASK) |
  1745. ((apb_req->dst_addr_ctrl << APBBR_DMA_DSTADDRINC_SHIFT) & APBBR_DMA_DSTADDRINC_MASK));
  1746. /*
  1747. * - CMD
  1748. * DSTADRSEL
  1749. * DREQSEL (todo: this is FIA320 bit-mask, check AG101 bit-mask location)
  1750. */
  1751. if (apb_req->dst_index != APBBR_REQN_NONE)
  1752. dst_bus_sel = APBBR_ADDRSEL_APB;
  1753. else
  1754. dst_bus_sel = APBBR_ADDRSEL_AHB;
  1755. channel_cmd |= ((uint32_t)(APBBR_DMA_DSTADDRSEL_MASK &
  1756. (dst_bus_sel << APBBR_DMA_DSTADDRSEL_BIT)) |
  1757. (((uint32_t)apb_req->dst_index << APBBR_DMA_DREQSEL_SHIFT) & APBBR_DMA_DREQSEL_MASK));
  1758. /*
  1759. * - CMD
  1760. * SRCADRSEL
  1761. * SREQSEL (todo: this is FIA320 bit-mask, check AG101 bit-mask location)
  1762. */
  1763. if (apb_req->src_index != APBBR_REQN_NONE)
  1764. src_bus_sel = APBBR_ADDRSEL_APB;
  1765. else
  1766. src_bus_sel = APBBR_ADDRSEL_AHB;
  1767. channel_cmd |= ((uint32_t)(APBBR_DMA_SRCADDRSEL_MASK &
  1768. (src_bus_sel << APBBR_DMA_SRCADDRSEL_BIT)) |
  1769. (((uint32_t)apb_req->src_index << APBBR_DMA_SREQSEL_SHIFT) & APBBR_DMA_SREQSEL_MASK));
  1770. /* - CMD outport */
  1771. OUT32(channel_base + APBBR_DMA_CMD_OFFSET, channel_cmd);
  1772. } else {
  1773. channel_cmd = ((uint32_t)APBBR_DMA_FINTEN_MASK | APBBR_DMA_ERRINTEN_MASK |
  1774. ((apb_req->dst_addr_ctrl << APBBR_DMA_SRCADDRINC_SHIFT) & APBBR_DMA_SRCADDRINC_MASK) |
  1775. ((apb_req->src_addr_ctrl << APBBR_DMA_DSTADDRINC_SHIFT) & APBBR_DMA_DSTADDRINC_MASK));
  1776. /*
  1777. * - CMD
  1778. * DSTADRSEL
  1779. * DREQSEL (todo: this is FIA320 bit-mask, check AG101 bit-mask location)
  1780. */
  1781. if (apb_req->src_index != APBBR_REQN_NONE)
  1782. src_bus_sel = APBBR_ADDRSEL_APB;
  1783. else
  1784. src_bus_sel = APBBR_ADDRSEL_AHB;
  1785. channel_cmd |= ((uint32_t)(APBBR_DMA_DSTADDRSEL_MASK &
  1786. (src_bus_sel << APBBR_DMA_DSTADDRSEL_BIT)) |
  1787. (((uint32_t)apb_req->src_index << APBBR_DMA_DREQSEL_SHIFT) & APBBR_DMA_DREQSEL_MASK));
  1788. /*
  1789. * - CMD
  1790. * SRCADRSEL
  1791. * SREQSEL (todo: this is FIA320 bit-mask, check AG101 bit-mask location)
  1792. */
  1793. if (apb_req->dst_index != APBBR_REQN_NONE)
  1794. dst_bus_sel = APBBR_ADDRSEL_APB;
  1795. else
  1796. dst_bus_sel = APBBR_ADDRSEL_AHB;
  1797. channel_cmd |= ((uint32_t)(APBBR_DMA_SRCADDRSEL_MASK &
  1798. (dst_bus_sel << APBBR_DMA_SRCADDRSEL_BIT)) |
  1799. (((uint32_t)apb_req->dst_index << APBBR_DMA_SREQSEL_SHIFT) & APBBR_DMA_SREQSEL_MASK));
  1800. /* - CMD outport */
  1801. OUT32(channel_base + APBBR_DMA_CMD_OFFSET, channel_cmd);
  1802. }
  1803. return status;
  1804. }
  1805. void set_drq_transfer_size(DMAD_CHANNEL_REQUEST_DESC *ch_req, DMAD_DRB *drb)
  1806. {
  1807. int data_width = -1;
  1808. if (ch_req->controller == DMAD_DMAC_AHB_CORE) {
  1809. /* AHB DMA */
  1810. DMAD_AHBCH_REQUEST *ahb_req = (DMAD_AHBCH_REQUEST *)(&ch_req->ahbch_req);
  1811. if (drb->src_index == DMAC_REQN_NONE && drb->src_index == DMAC_REQN_NONE)
  1812. data_width = 0;
  1813. else {
  1814. if (drb->src_index != DMAC_REQN_NONE)
  1815. data_width = 2 - ahb_req->src_width;
  1816. else if (drb->dst_index != DMAC_REQN_NONE)
  1817. data_width = 2 - ahb_req->dst_width;
  1818. }
  1819. } else {
  1820. /* APB DMA */
  1821. DMAD_APBCH_REQUEST *apb_req = (DMAD_APBCH_REQUEST *)(&ch_req->apbch_req);
  1822. data_width = 2 - apb_req->data_width;
  1823. }
  1824. if (data_width < 0)
  1825. KASSERT(1);
  1826. drb->transfer_size = drb->req_size << data_width;
  1827. }
  1828. /*****************************************************************************
  1829. * FUNCTION
  1830. *
  1831. * _dmad_submit_request
  1832. *
  1833. * DESCRIPTION
  1834. *
  1835. * This function is used to submit a DRB (DMA request block) to a DMA
  1836. * channel. DRB is used to queue all DMA submission requests for the
  1837. * channel. Submitted DRB node is moved from the ready-list to the
  1838. * submitted-list. DMA kick-off is performed automatically if the DMA
  1839. * transaction has not started. When the DRB is completed, it will be
* removed from the submitted-list to the free-list in the DMA ISR.
  1841. *
  1842. * INPUTS
  1843. *
  1844. * ch_req : (in) Pointer to the DMA request descriptor structure
  1845. * drb : (in) Pointer of a DRB struct to be submitted.
  1846. *
  1847. * OUTPUTS
  1848. *
  1849. * uint32_t : Returns HAL_SUCCESS if successful submission,
  1850. * else positive value is DMAD-specific error code,
  1851. * else negative value is NU system error code.
  1852. *
  1853. ****************************************************************************/
uint32_t _dmad_submit_request(DMAD_CHANNEL_REQUEST_DESC *ch_req, DMAD_DRB *drb){
    uint32_t status = HAL_SUCCESS;
    DMAD_DRQ *drq;
    uint32_t core_intl;

    if (ch_req == HAL_NULL)
        return HAL_ERR_INVALID_POINTER;
    drq = (DMAD_DRQ *)ch_req->drq;
    if (drq == HAL_NULL)
        return HAL_ERR_INVALID_POINTER;

    /* Obtain exclusive access to the drq from other tasks */
    if (hal_current() != HAL_NULL){
        /*
         * Suspending is only valid to the current task -- no need to lock if
         * invoked from HISR.  Lock DMA queue to prevent been updated by other
         * tasks.
         */
        status = hal_wait_for_mutex(&drq->drb_pool_mutex, HAL_SUSPEND);
        if (status != HAL_SUCCESS)
            return status;
    }

    /* Following code require _safe_exit return path */
    /* Reject an empty ready-list or a node index outside the DRB pool */
    if ((drq->rdy_head == 0) || (drb->node == 0) || (drb->node >= DMAD_DRB_POOL_SIZE)){
        status = HAL_ERR_INVALID_ENTRY;
        goto _safe_exit;
    }

    /* Ready-list is only touched by tasks holding the mutex, so detaching
     * before masking interrupts is safe. */
    _dmad_detach_node(drq->drb_pool, &drq->rdy_head, &drq->rdy_tail, drb->node);
    core_intl = hal_global_int_ctl(HAL_DISABLE_INTERRUPTS);

    /*
     * writeback d-cache if necessary
     *
     * Note: Here we take the assumption that, after writeback, the memory
     *       contents is in physical ram and valid for for dma transfer.
     *       Hence, we only need to do writeback at the beginning of the drb
     *       submission, and ignore the writeback before kicking off every
     *       drb in isr.
     *
     *       Place writeback code before interrupt-disable to shorten the
     *       disable time. This might generate a penalty of cache-miss
     *       if the writeback routine also invalidated the cache contents.
     *
     * NOTE(review): despite the paragraph above, the writeback below
     *       actually executes AFTER interrupts were disabled -- confirm
     *       whether it should be hoisted above the disable.
     */
    set_drq_transfer_size(ch_req, drb);
#if ( defined(CONFIG_CPU_DCACHE_ENABLE) && !defined(CONFIG_CPU_DCACHE_WRITETHROUGH) )
    /* source is memory */
    //if (drq->dc_writeback != HAL_NULL && drb->src_index == DMAC_REQN_NONE)
    /* Only writeback when src_addr falls inside the cached RAM window */
    if ( (unsigned long)drb->src_addr >= NTC0_BONDER_START && (unsigned long)drb->src_addr < NTC0_BONDER_END )//JUNIOR@2013/05/16
        drq->dc_writeback((unsigned long)(drb->src_addr),(unsigned long)(drb->src_addr) + (unsigned long)(drb->transfer_size));
#endif

    /* Check if submission is performed to an empty queue */
    if (drq->sbt_head == 0){
        _dmad_attach_tail(drq->drb_pool, &drq->sbt_head, &drq->sbt_tail, drb->node);
        drb->state = DMAD_DRB_STATE_SUBMITTED;
        hal_global_int_ctl(core_intl);

        /* pre-submission-programming */
        if (drb->psp)
            drb->psp(drb->data);

        /* DMA is not running, so kick off transmission */
        if (ch_req->controller == DMAD_DMAC_AHB_CORE){ /* AHB */
            /* Source and destination address */
            OUT32(drq->channel_base + DMAC_SRC_ADDR_OFFSET, drb->src_addr);
            OUT32(drq->channel_base + DMAC_DST_ADDR_OFFSET, drb->dst_addr);
            /* Transfer size (in units of source width) */
            OUT32(drq->channel_base + DMAC_SIZE_OFFSET, drb->req_size);
            /* Enable DMA channel (Kick off transmission when client enable it's transfer state) */
            SETB32(drq->channel_base + DMAC_CSR_OFFSET, DMAC_CSR_CH_EN_BIT);
        }
        else { /* APB */
            /* Source and destination address */
            OUT32(drq->channel_base + APBBR_DMA_SAD_OFFSET, drb->src_addr);
            OUT32(drq->channel_base + APBBR_DMA_DAD_OFFSET, drb->dst_addr);
            /* Transfer size (in units of source width) */
            OUT32(drq->channel_base + APBBR_DMA_CYC_OFFSET, drb->req_size & APBBR_DMA_CYC_MASK);
            /* Enable DMA channel (Kick off transmission when client enable it's transfer state) */
            SETB32(drq->channel_base + APBBR_DMA_CMD_OFFSET, APBBR_DMA_CHEN_BIT);
        }
    }
    else {
        /* DMA is already running, so only queue DRB to the end of the list */
        _dmad_attach_tail(drq->drb_pool, &drq->sbt_head, &drq->sbt_tail, drb->node);
        drb->state = DMAD_DRB_STATE_SUBMITTED;
        hal_global_int_ctl(core_intl);
    }

_safe_exit:
    /* Release locking of this function from other tasks */
    if (hal_current() != HAL_NULL){
        /*
         * Suspending is only valid to the current task -- no need to lock if
         * invoked from HISR.  Unlock DMA queue to allow its access from other
         * tasks.
         */
        hal_release_mutex(&drq->drb_pool_mutex);
    }
    return status;
}
  1945. /*****************************************************************************
  1946. * FUNCTION
  1947. *
  1948. * _dmad_cancel_request
  1949. *
  1950. * DESCRIPTION
  1951. *
  1952. * This function is used to cancel a submitted DRB (DMA request block)
  1953. * of a DMA channel. DRB is used to queue all DMA submission requests for
  1954. * the channel. Submitted DRB node is moved from the ready-list to the
  1955. * submitted-list. Cancellation will fail if the DRB has already been
  1956. * kicked off and is waiting to be completed.
  1957. *
  1958. * INPUTS
  1959. *
  1960. * ch_req : (in) Pointer to the DMA request descriptor structure
  1961. * drb : (in) Pointer of a DRB struct to be cancelled.
  1962. *
  1963. * OUTPUTS
  1964. *
  1965. * uint32_t : Returns HAL_SUCCESS if successful cancellation,
  1966. * else positive value is DMAD-specific error code,
  1967. * else negative value is NU system error code.
  1968. *
  1969. ****************************************************************************/
  1970. uint32_t _dmad_cancel_request(DMAD_CHANNEL_REQUEST_DESC *ch_req, DMAD_DRB *drb){
  1971. DMAD_DRQ *drq;;
  1972. if (ch_req == HAL_NULL)
  1973. return HAL_ERR_INVALID_POINTER;
  1974. drq = (DMAD_DRQ *)ch_req->drq;
  1975. if (drq == HAL_NULL)
  1976. return HAL_ERR_INVALID_POINTER;
  1977. if (drq->sbt_head == 0)
  1978. return HAL_ERR_INVALID_ENTRY;
  1979. if ((drb->node == 0) || (drb->node >= DMAD_DRB_POOL_SIZE))
  1980. return HAL_ERR_INVALID_ENTRY;
  1981. if (drb->completion_sem != HAL_NULL)
  1982. hal_destroy_semaphore(drb->completion_sem);
  1983. // NDS_DCache_Enable();
  1984. return HAL_ERR_UNAVAILABLE;
  1985. }
/*
 * Wait for activity on the channel's DRB queue.
 *
 * DMAD_POLLING builds: poll until the submitted-list drains completely.
 * Semaphore builds: pend (up to 300 ticks) on the channel's DRB semaphore.
 *
 * Returns HAL_SUCCESS, HAL_ERR_NO_MEMORY on timeout, or a HAL error code.
 */
uint32_t _dmad_wait(DMAD_CHANNEL_REQUEST_DESC *ch_req){
    uint32_t status = HAL_SUCCESS;
    DMAD_DRQ *drq;
    uint32_t core_intl;

    if (ch_req == HAL_NULL)
        return HAL_ERR_INVALID_POINTER;
    drq = (DMAD_DRQ *)ch_req->drq;
    if (drq == HAL_NULL)
        return HAL_ERR_INVALID_POINTER;

    /* Obtain exclusive access to the drq from other tasks */
    if (hal_current() != HAL_NULL){
        /*
         * Suspending is only valid to the current task -- no need to lock if
         * invoked from HISR.  Lock DMA queue to prevent been updated by other
         * tasks.
         */
        status = hal_wait_for_mutex(&drq->drb_pool_mutex, HAL_SUSPEND);
        if (status != HAL_SUCCESS)
            return status;
    }

#ifdef DMAD_POLLING
    /* Poll the submitted-list; interrupts are re-enabled during each 50 ms
     * sleep so the ISR can drain the queue. */
    core_intl = hal_global_int_ctl(HAL_DISABLE_INTERRUPTS);
    while (drq->sbt_head != 0){
        hal_global_int_ctl(core_intl);
        hal_sleep(50);
        core_intl = hal_global_int_ctl(HAL_DISABLE_INTERRUPTS);
    }
#else
    /* NOTE(review): a timeout here is reported as HAL_ERR_NO_MEMORY,
     * mirroring _dmad_alloc_drb -- confirm this error code is intended. */
    status = hal_pend_semaphore(&drq->drb_sem, 300);
    if (status == HAL_ERR_TIMEOUT){
        status = HAL_ERR_NO_MEMORY;
        goto _safe_exit;
    }
    else if (status != HAL_SUCCESS){
        goto _safe_exit;
    }
    core_intl = hal_global_int_ctl(HAL_DISABLE_INTERRUPTS);
#endif
    /* Restores the interrupt state from the path above.  NOTE(review): in
     * the semaphore build this disables and immediately restores interrupts
     * -- looks like a remnant of an earlier critical section. */
    hal_global_int_ctl(core_intl);
    goto _safe_exit;

_safe_exit:
    /* Release locking of this function from other tasks */
    if (hal_current() != HAL_NULL){
        /*
         * Suspending is only valid to the current task -- no need to lock if
         * invoked from HISR.  Unlock DMA queue to allow its access from other
         * tasks.
         */
        hal_release_mutex(&drq->drb_pool_mutex);
    }
    return status;
}