/*
 * linux/arch/unicore32/include/asm/dma-mapping.h
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __UNICORE_DMA_MAPPING_H__
#define __UNICORE_DMA_MAPPING_H__

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/swiotlb.h>

#include <asm-generic/dma-coherent.h>

#include <asm/memory.h>
#include <asm/cacheflush.h>
  21. extern struct dma_map_ops swiotlb_dma_map_ops;
  22. static inline struct dma_map_ops *get_dma_ops(struct device *dev)
  23. {
  24. return &swiotlb_dma_map_ops;
  25. }
  26. static inline int dma_supported(struct device *dev, u64 mask)
  27. {
  28. struct dma_map_ops *dma_ops = get_dma_ops(dev);
  29. if (unlikely(dma_ops == NULL))
  30. return 0;
  31. return dma_ops->dma_supported(dev, mask);
  32. }
  33. static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
  34. {
  35. struct dma_map_ops *dma_ops = get_dma_ops(dev);
  36. if (dma_ops->mapping_error)
  37. return dma_ops->mapping_error(dev, dma_addr);
  38. return 0;
  39. }
#include <asm-generic/dma-mapping-common.h>
  41. static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
  42. {
  43. if (dev && dev->dma_mask)
  44. return addr + size - 1 <= *dev->dma_mask;
  45. return 1;
  46. }
  47. static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
  48. {
  49. return paddr;
  50. }
  51. static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
  52. {
  53. return daddr;
  54. }
  55. static inline void dma_mark_clean(void *addr, size_t size) {}
  56. static inline int dma_set_mask(struct device *dev, u64 dma_mask)
  57. {
  58. if (!dev->dma_mask || !dma_supported(dev, dma_mask))
  59. return -EIO;
  60. *dev->dma_mask = dma_mask;
  61. return 0;
  62. }
  63. static inline void *dma_alloc_coherent(struct device *dev, size_t size,
  64. dma_addr_t *dma_handle, gfp_t flag)
  65. {
  66. struct dma_map_ops *dma_ops = get_dma_ops(dev);
  67. return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
  68. }
  69. static inline void dma_free_coherent(struct device *dev, size_t size,
  70. void *cpu_addr, dma_addr_t dma_handle)
  71. {
  72. struct dma_map_ops *dma_ops = get_dma_ops(dev);
  73. dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
  74. }
/* This platform has no separate noncoherent allocator; alias coherent. */
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
  77. static inline void dma_cache_sync(struct device *dev, void *vaddr,
  78. size_t size, enum dma_data_direction direction)
  79. {
  80. unsigned long start = (unsigned long)vaddr;
  81. unsigned long end = start + size;
  82. switch (direction) {
  83. case DMA_NONE:
  84. BUG();
  85. case DMA_FROM_DEVICE:
  86. case DMA_BIDIRECTIONAL: /* writeback and invalidate */
  87. __cpuc_dma_flush_range(start, end);
  88. break;
  89. case DMA_TO_DEVICE: /* writeback only */
  90. __cpuc_dma_clean_range(start, end);
  91. break;
  92. }
  93. }
#endif /* __KERNEL__ */
#endif /* __UNICORE_DMA_MAPPING_H__ */