/* hal_dma.h */
  1. /*
  2. * Copyright (c) 2019-2025 Allwinner Technology Co., Ltd. ALL rights reserved.
  3. *
  4. * Allwinner is a trademark of Allwinner Technology Co.,Ltd., registered in
* the People's Republic of China and other countries.
  6. * All Allwinner Technology Co.,Ltd. trademarks are used with permission.
  7. *
  8. * DISCLAIMER
  9. * THIRD PARTY LICENCES MAY BE REQUIRED TO IMPLEMENT THE SOLUTION/PRODUCT.
* IF YOU NEED TO INTEGRATE THIRD PARTY'S TECHNOLOGY (SONY, DTS, DOLBY, AVS OR MPEGLA, ETC.)
* IN ALLWINNER'S SDK OR PRODUCTS, YOU SHALL BE SOLELY RESPONSIBLE TO OBTAIN
  12. * ALL APPROPRIATELY REQUIRED THIRD PARTY LICENCES.
  13. * ALLWINNER SHALL HAVE NO WARRANTY, INDEMNITY OR OTHER OBLIGATIONS WITH RESPECT TO MATTERS
  14. * COVERED UNDER ANY REQUIRED THIRD PARTY LICENSE.
* YOU ARE SOLELY RESPONSIBLE FOR YOUR USAGE OF THIRD PARTY'S TECHNOLOGY.
  16. *
  17. *
* THIS SOFTWARE IS PROVIDED BY ALLWINNER "AS IS" AND TO THE MAXIMUM EXTENT
  19. * PERMITTED BY LAW, ALLWINNER EXPRESSLY DISCLAIMS ALL WARRANTIES OF ANY KIND,
  20. * WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING WITHOUT LIMITATION REGARDING
  21. * THE TITLE, NON-INFRINGEMENT, ACCURACY, CONDITION, COMPLETENESS, PERFORMANCE
  22. * OR MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
  23. * IN NO EVENT SHALL ALLWINNER BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  24. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  25. * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  26. * LOSS OF USE, DATA, OR PROFITS, OR BUSINESS INTERRUPTION)
  27. * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
  28. * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  29. * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
  30. * OF THE POSSIBILITY OF SUCH DAMAGE.
  31. */
  32. #ifndef __SUNXI_HAL_DMA_H__
  33. #define __SUNXI_HAL_DMA_H__
  34. #include <stdint.h>
  35. #include <stdbool.h>
  36. #include <stdio.h>
  37. /*
  38. * include the platform dma header file.
  39. */
  40. #include <dma/platform-dma.h>
  41. #ifdef __cplusplus
  42. extern "C" {
  43. #endif
  44. /* #define DMA_DEBUG */
  45. #define HEXADECIMAL (0x10)
  46. #define REG_INTERVAL (0x04)
  47. #define REG_CL (0x0c)
  48. //TODO:move reg list to sunxiwxx.h
  49. #define HIGH_CHAN 8
  50. #define DMA_IRQ_EN(x) (SUNXI_DMAC_PBASE + (0x00 + ((x) << 2))) /* Interrupt enable register */
  51. #define DMA_IRQ_STAT(x) (SUNXI_DMAC_PBASE + (0x10 + ((x) << 2))) /* Interrupt status register */
  52. #define DMA_SECURE (SUNXI_DMAC_PBASE + 0x20) /* DMA security register */
  53. #define DMA_GATE (SUNXI_DMAC_PBASE + 0x28) /* DMA gating register */
  54. #define DMA_MCLK_GATE 0x04
  55. #define DMA_COMMON_GATE 0x02
  56. #define DMA_CHAN_GATE 0x01
  57. #define DMA_STAT (SUNXI_DMAC_PBASE + 0x30) /* DMA Status Register RO */
  58. #define DMA_ENABLE(x) (SUNXI_DMAC_PBASE + (0x100 + ((x + START_CHAN_OFFSET) << 6))) /* Channels enable register */
  59. #define DMA_PAUSE(x) (SUNXI_DMAC_PBASE + (0x104 + ((x + START_CHAN_OFFSET) << 6))) /* DMA Channels pause register */
  60. #define DMA_LLI_ADDR(x) (SUNXI_DMAC_PBASE + (0x108 + ((x + START_CHAN_OFFSET) << 6))) /* Descriptor address register */
  61. #define DMA_CFG(x) (SUNXI_DMAC_PBASE + (0x10C + ((x + START_CHAN_OFFSET) << 6))) /* Configuration register RO */
  62. #define DMA_CUR_SRC(x) (SUNXI_DMAC_PBASE + (0x110 + ((x + START_CHAN_OFFSET) << 6))) /* Current source address RO */
  63. #define DMA_CUR_DST(x) (SUNXI_DMAC_PBASE + (0x114 + ((x + START_CHAN_OFFSET) << 6))) /* Current destination address RO */
  64. #define DMA_CNT(x) (SUNXI_DMAC_PBASE + (0x118 + ((x + START_CHAN_OFFSET) << 6))) /* Byte counter left register RO */
  65. #define DMA_PARA(x) (SUNXI_DMAC_PBASE + (0x11C + ((x + START_CHAN_OFFSET) << 6))) /* Parameter register RO */
  66. #define LINK_END 0xFFFFF800 /* lastest link must be 0xfffff800 */
  67. /* DMA mode register */
  68. #define DMA_OP_MODE(x) (SUNXI_DMAC_PBASE + (0x128 + ((x + START_CHAN_OFFSET) << 6))) /* DMA mode register */
  69. #define SRC_HS_MASK (0x1 << 2) /* bit 2: Source handshake mode */
  70. #define DST_HS_MASK (0x1 << 3) /* bit 3: Destination handshake mode */
  71. #define SET_OP_MODE(x, val) ({ \
  72. writel(val,DMA_OP_MODE(x)); \
  73. })
  74. #define SHIFT_IRQ_MASK(val, ch) ({ \
  75. (ch + START_CHAN_OFFSET) >= HIGH_CHAN \
  76. ? (val) << ((ch + START_CHAN_OFFSET - HIGH_CHAN) << 2) \
  77. : (val) << ((ch + START_CHAN_OFFSET) << 2); \
  78. })
  79. #define SHIFT_PENDING_MASK(val, ch) ({ \
  80. (ch + START_CHAN_OFFSET) >= HIGH_CHAN \
  81. ? (val) << ((ch + START_CHAN_OFFSET - HIGH_CHAN) << 2) \
  82. : (val) << ((ch + START_CHAN_OFFSET) << 2); \
  83. })
  84. #define IRQ_HALF 0x01 /* Half package transfer interrupt pending */
  85. #define IRQ_PKG 0x02 /* One package complete interrupt pending */
  86. #define IRQ_QUEUE 0x04 /* All list complete transfer interrupt pending */
/* DMA channel configuration register */
/* The detail information of DMA configuration */
#define SRC_WIDTH(x) ((x) << 9)    /* source data width field, encoded at bit 9 */
#define SRC_BURST(x) ((x) << 6)    /* source burst length field, encoded at bit 6 */
#define SRC_IO_MODE (0x01 << 8)    /* bit 8 = 1: source in IO mode */
#define SRC_LINEAR_MODE (0x00 << 8) /* bit 8 = 0: source in linear mode */
#define SRC_DRQ(x) ((x) << 0)      /* source DRQ (request port) number, bits [7:0] */
#define DST_WIDTH(x) ((x) << 25)   /* destination data width field, encoded at bit 25 */
#define DST_BURST(x) ((x) << 22)   /* destination burst length field, encoded at bit 22 */
#define DST_IO_MODE (0x01 << 24)   /* bit 24 = 1: destination in IO mode */
#define DST_LINEAR_MODE (0x00 << 24) /* bit 24 = 0: destination in linear mode */
#define DST_DRQ(x) ((x) << 16)     /* destination DRQ number, encoded at bit 16 */
#define CHAN_START 1               /* value for the channel enable register */
#define CHAN_STOP 0
#define CHAN_PAUSE 1               /* value for the channel pause register */
#define CHAN_RESUME 0
#define NORMAL_WAIT (8 << 0)       /* default wait-state field; presumably clock cycles -- verify against manual */
#define GET_SRC_DRQ(x) ((x) & 0x000000ff) /* extract the source DRQ field from a cfg word */
#define GET_DST_DRQ(x) ((x) & 0x00ff0000) /* extract the destination DRQ field (left in bits [23:16]) */
/*
 * One hardware DMA descriptor (linked-list item). Field order and widths
 * match the controller's in-memory descriptor layout -- do not reorder.
 */
struct sunxi_dma_lli {
    uint32_t cfg;   /* channel configuration word (SRC_*/DST_* fields) */
    uint32_t src;   /* source address */
    uint32_t dst;   /* destination address */
    uint32_t len;   /* transfer length in bytes */
    uint32_t para;  /* parameter word (e.g. NORMAL_WAIT) */
    uint32_t p_lln; /* physical address of next descriptor; LINK_END terminates the chain */
    struct sunxi_dma_lli *vlln; /* CPU-side pointer to the next descriptor (not read by hardware) */
};
/* Pack destination (d) and source (s) DRQ numbers into one slave id:
 * destination in bits [31:16], source in bits [15:0]. Decode with
 * GET_SRC_DRQ()/GET_DST_DRQ(). */
#define sunxi_slave_id(d, s) (((d)<<16) | (s))
/* Transfer-complete callback; param is the pointer registered via
 * hal_dma_callback_install(). */
typedef void (*dma_callback)(void *param);
  117. /**
  118. * enum dma_slave_buswidth - defines bus width of the DMA slave
  119. * device, source or target buses
  120. */
/* Bus width of one DMA beat; the enumerator value is the width in bytes. */
enum dma_slave_buswidth {
    DMA_SLAVE_BUSWIDTH_UNDEFINED = 0, /* width not configured */
    DMA_SLAVE_BUSWIDTH_1_BYTE = 1,
    DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
    DMA_SLAVE_BUSWIDTH_3_BYTES = 3,
    DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
    DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
    DMA_SLAVE_BUSWIDTH_16_BYTES = 16,
    DMA_SLAVE_BUSWIDTH_32_BYTES = 32,
    DMA_SLAVE_BUSWIDTH_64_BYTES = 64,
};
/* Burst length in beats; the enumerator value is the beat count. */
enum dma_slave_burst {
    DMA_SLAVE_BURST_1 = 1,
    DMA_SLAVE_BURST_4 = 4,
    DMA_SLAVE_BURST_8 = 8,
    DMA_SLAVE_BURST_16 = 16,
};
  138. /**
  139. * enum dma_transfer_direction - dma transfer mode and direction indicator
  140. * @DMA_MEM_TO_MEM: Async/Memcpy mode
  141. * @DMA_MEM_TO_DEV: Slave mode & From Memory to Device
  142. * @DMA_DEV_TO_MEM: Slave mode & From Device to Memory
  143. * @DMA_DEV_TO_DEV: Slave mode & From Device to Device
  144. */
/* DMA transfer mode and direction (see the block comment above). */
enum dma_transfer_direction {
    DMA_MEM_TO_MEM = 0, /* async/memcpy mode */
    DMA_MEM_TO_DEV = 1, /* slave mode, memory -> device */
    DMA_DEV_TO_MEM = 2, /* slave mode, device -> memory */
    DMA_DEV_TO_DEV = 3, /* slave mode, device -> device */
    DMA_TRANS_NONE,     /* sentinel: no/unknown direction */
};
  152. /**
  153. * enum dma_status - DMA transaction status
  154. * @DMA_COMPLETE: transaction completed
  155. * @DMA_IN_PROGRESS: transaction not yet processed
  156. * @DMA_PAUSED: transaction is paused
  157. * @DMA_ERROR: transaction failed
  158. */
/* DMA transaction status, as reported by hal_dma_tx_status(). */
enum dma_status {
    DMA_INVALID_PARAMETER = -2, /* bad argument passed to the query */
    DMA_ERROR = -1,             /* transaction failed */
    DMA_COMPLETE,               /* transaction completed */
    DMA_IN_PROGRESS,            /* transaction not yet finished */
    DMA_PAUSED,                 /* transaction is paused */
};
  166. /**
  167. * struct dma_slave_config - dma slave channel runtime config
  168. * @direction: whether the data shall go in or out on this slave
  169. * channel, right now. DMA_MEM_TO_DEV and DMA_DEV_TO_MEM are
  170. * legal values. DEPRECATED, drivers should use the direction argument
  171. * to the device_prep_slave_sg and device_prep_dma_cyclic functions or
  172. * the dir field in the dma_interleaved_template structure.
  173. * @src_addr: this is the physical address where DMA slave data
  174. * should be read (RX), if the source is memory this argument is
  175. * ignored.
  176. * @dst_addr: this is the physical address where DMA slave data
  177. * should be written (TX), if the source is memory this argument
  178. * is ignored.
  179. * @src_addr_width: this is the width in bytes of the source (RX)
  180. * register where DMA data shall be read. If the source
  181. * is memory this may be ignored depending on architecture.
  182. * Legal values: 1, 2, 4, 8.
  183. * @dst_addr_width: same as src_addr_width but for destination
  184. * target (TX) mutatis mutandis.
  185. * @src_maxburst: the maximum number of words (note: words, as in
  186. * units of the src_addr_width member, not bytes) that can be sent
  187. * in one burst to the device. Typically something like half the
  188. * FIFO depth on I/O peripherals so you don't overflow it. This
  189. * may or may not be applicable on memory sources.
  190. * @dst_maxburst: same as src_maxburst but for destination target
  191. * mutatis mutandis.
  192. * @slave_id: Slave requester id. Only valid for slave channels. The dma
  193. * slave peripheral will have unique id as dma requester which need to be
  194. * pass as slave config.
  195. *
  196. * This struct is passed in as configuration data to a DMA engine
  197. * in order to set up a certain channel for DMA transport at runtime.
  198. * The DMA device/engine has to provide support for an additional
  199. * callback in the dma_device structure, device_config and this struct
  200. * will then be passed in as an argument to the function.
  201. *
  202. * The rationale for adding configuration information to this struct is as
  203. * follows: if it is likely that more than one DMA slave controllers in
  204. * the world will support the configuration option, then make it generic.
  205. * If not: if it is fixed so that it be sent in static from the platform
  206. * data, then prefer to do that.
  207. */
/* Runtime configuration for a slave channel; see the block comment above
 * for full field semantics. Passed to hal_dma_slave_config(). */
struct dma_slave_config {
    enum dma_transfer_direction direction; /* transfer direction for this channel */
    unsigned long src_addr;                /* physical source (RX) address; ignored for memory sources */
    unsigned long dst_addr;                /* physical destination (TX) address; ignored for memory targets */
    enum dma_slave_buswidth src_addr_width; /* source register width in bytes */
    enum dma_slave_buswidth dst_addr_width; /* destination register width in bytes */
    uint32_t src_maxburst;                 /* max burst, in units of src_addr_width */
    uint32_t dst_maxburst;                 /* max burst, in units of dst_addr_width */
    uint32_t slave_id;                     /* DRQ pair; build with sunxi_slave_id() */
};
/* Software state of one DMA channel as tracked by this HAL. */
struct sunxi_dma_chan {
    uint8_t used:1;       /* non-zero while the channel is allocated to a client */
    uint8_t chan_count:4; /* channel number -- NOTE(review): appears to be the hw channel index; confirm */
    bool cyclic:1;        /* set when prepared via hal_dma_prep_cyclic() */
    struct dma_slave_config cfg; /* last configuration applied via hal_dma_slave_config() */
    uint32_t periods_pos; /* current period position for cyclic transfers -- TODO confirm units */
    uint32_t buf_len;     /* total length of the prepared buffer */
    struct sunxi_dma_lli *desc; /* head of the descriptor (LLI) chain */
    uint32_t irq_type;    /* enabled interrupt bits (IRQ_HALF / IRQ_PKG / IRQ_QUEUE) */
    dma_callback callback;      /* completion callback, may be NULL */
    void *callback_param;       /* opaque argument handed to callback */
    /* volatile kspinlock_t lock; */
    volatile int lock;    /* busy flag; NOTE(review): a plain volatile int is not a real lock */
};
  232. /** This enum defines the DMA CHANNEL status. */
/** This enum defines the DMA CHANNEL status. */
typedef enum {
    HAL_DMA_CHAN_STATUS_BUSY = 0, /* DMA channel status busy */
    HAL_DMA_CHAN_STATUS_FREE = 1  /* DMA channel status free */
} hal_dma_chan_status_t;
/** This enum defines the return type of the DMA API. */
typedef enum {
    HAL_DMA_STATUS_INVALID_PARAMETER = -22, /* Invalid input parameter (mirrors -EINVAL). */
    HAL_DMA_STATUS_NO_MEM = -12,            /* No memory (mirrors -ENOMEM). */
    HAL_DMA_STATUS_ERR_PERM = -1,           /* Operation not permitted (mirrors -EPERM). */
    HAL_DMA_STATUS_OK = 0                   /* The DMA status ok. */
} hal_dma_status_t;
/**
 * Request a DMA channel.
 * @param dma_chan  out: receives the allocated channel on success.
 * @return channel status; NOTE(review): presumably HAL_DMA_CHAN_STATUS_FREE
 *         means a channel was obtained -- confirm against the implementation.
 */
hal_dma_chan_status_t hal_dma_chan_request(struct sunxi_dma_chan **dma_chan);
/**
 * Prepare a memory-to-memory copy of @len bytes from @src to @dest on @chan.
 * Start the transfer afterwards with hal_dma_start().
 */
hal_dma_status_t hal_dma_prep_memcpy(struct sunxi_dma_chan *chan,
uint32_t dest, uint32_t src, uint32_t len);
/**
 * Prepare a single device transfer of @len bytes in direction @dir
 * (DMA_MEM_TO_DEV or DMA_DEV_TO_MEM); uses the config set via
 * hal_dma_slave_config().
 */
hal_dma_status_t hal_dma_prep_device(struct sunxi_dma_chan *chan,
uint32_t dest, uint32_t src,
uint32_t len, enum dma_transfer_direction dir);
/**
 * Prepare a cyclic (ring-buffer) transfer: @buf_len bytes at @buf_addr,
 * split into periods of @period_len bytes, direction @dir.
 */
hal_dma_status_t hal_dma_prep_cyclic(struct sunxi_dma_chan *chan,
uint32_t buf_addr, uint32_t buf_len,
uint32_t period_len, enum dma_transfer_direction dir);
/**
 * Install a completion callback on @chan; @callback_param is passed back
 * verbatim when the callback fires.
 */
hal_dma_status_t hal_dma_callback_install(struct sunxi_dma_chan *chan,
dma_callback callback,
void *callback_param);
/** Apply a slave configuration (direction, addresses, widths, bursts, DRQs). */
hal_dma_status_t hal_dma_slave_config(struct sunxi_dma_chan *chan, struct dma_slave_config *config);
/**
 * Query transfer status.
 * @param left_size  out: bytes remaining in the current transfer.
 */
enum dma_status hal_dma_tx_status(struct sunxi_dma_chan *chan, uint32_t *left_size);
/** Start the previously prepared transfer on @chan. */
hal_dma_status_t hal_dma_start(struct sunxi_dma_chan *chan);
/** Stop the running transfer on @chan. */
hal_dma_status_t hal_dma_stop(struct sunxi_dma_chan *chan);
/** Release a channel obtained with hal_dma_chan_request(). */
hal_dma_status_t hal_dma_chan_free(struct sunxi_dma_chan *chan);
/** Free the descriptor (LLI) chain owned by @chan. */
hal_dma_status_t hal_dma_chan_desc_free(struct sunxi_dma_chan *chan);
/** One-time controller initialization; call before any other hal_dma_* API. */
void hal_dma_init(void);
  263. #ifdef __cplusplus
  264. }
  265. #endif
  266. #endif