dma.c

/*
 * Copyright (c) 2015, Xilinx Inc. and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <metal/errno.h>
#include <string.h>
#include <metal/device.h>
#include <metal/log.h>
#include <metal/dma.h>
#include <metal/atomic.h>

int metal_dma_map(struct metal_device *dev,
		  uint32_t dir,
		  struct metal_sg *sg_in,
		  int nents_in,
		  struct metal_sg *sg_out)
{
	int nents_out;

	if (!dev || !sg_in || !sg_out)
		return -EINVAL;
	if (!dev->bus->ops.dev_dma_map)
		return -ENODEV;

	/* memory barrier */
	if (dir == METAL_DMA_DEV_R)
		/* If it is device read, apply memory write fence. */
		atomic_thread_fence(memory_order_release);
	else
		/*
		 * If it is device write or device r/w,
		 * apply memory r/w fence.
		 */
		atomic_thread_fence(memory_order_acq_rel);

	nents_out = dev->bus->ops.dev_dma_map(dev->bus, dev, dir,
					      sg_in, nents_in, sg_out);
	return nents_out;
}

void metal_dma_unmap(struct metal_device *dev,
		     uint32_t dir,
		     struct metal_sg *sg,
		     int nents)
{
	/* memory barrier */
	if (dir == METAL_DMA_DEV_R)
		/* If it is device read, apply memory write fence. */
		atomic_thread_fence(memory_order_release);
	else
		/*
		 * If it is device write or device r/w,
		 * apply memory r/w fence.
		 */
		atomic_thread_fence(memory_order_acq_rel);

	if (!dev || !dev->bus->ops.dev_dma_unmap || !sg)
		return;

	dev->bus->ops.dev_dma_unmap(dev->bus, dev, dir, sg, nents);
}
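
For context, a minimal usage sketch follows. It assumes the struct metal_sg layout from metal/dma.h (virt, io, len fields), the METAL_DMA_DEV_R direction constant, and a device already opened elsewhere; the dma_send_to_device function, its parameters, and the NULL io field are illustrative names, not part of this file.

#include <metal/device.h>
#include <metal/dma.h>

/* Hypothetical caller: map one CPU buffer that the device will read,
 * start the transfer, then unmap when it completes. */
static int dma_send_to_device(struct metal_device *dev, void *buf, int len)
{
	struct metal_sg sg_in = { .virt = buf, .len = len, .io = NULL };
	struct metal_sg sg_out;
	int nents;

	/* METAL_DMA_DEV_R means the device reads memory, so metal_dma_map
	 * issues a release fence first, making prior CPU writes to buf
	 * visible before the device sees the mapping. */
	nents = metal_dma_map(dev, METAL_DMA_DEV_R, &sg_in, 1, &sg_out);
	if (nents < 0)
		return nents; /* -EINVAL or -ENODEV from the checks above */

	/* ... program the DMA engine with sg_out and wait for completion ... */

	metal_dma_unmap(dev, METAL_DMA_DEV_R, &sg_out, nents);
	return 0;
}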