/* arch/openrisc/include/asm/dma-mapping.h */
  1. /*
  2. * OpenRISC Linux
  3. *
  4. * Linux architectural port borrowing liberally from similar works of
  5. * others. All original copyrights apply as per the original source
  6. * declaration.
  7. *
  8. * OpenRISC implementation:
  9. * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
  10. *
  11. * This program is free software; you can redistribute it and/or modify
  12. * it under the terms of the GNU General Public License as published by
  13. * the Free Software Foundation; either version 2 of the License, or
  14. * (at your option) any later version.
  15. */
  16. #ifndef __ASM_OPENRISC_DMA_MAPPING_H
  17. #define __ASM_OPENRISC_DMA_MAPPING_H
  18. /*
  19. * See Documentation/PCI/PCI-DMA-mapping.txt and
  20. * Documentation/DMA-API.txt for documentation.
  21. *
  22. * This file is written with the intention of eventually moving over
  23. * to largely using asm-generic/dma-mapping-common.h in its place.
  24. */
  25. #include <linux/dma-debug.h>
  26. #include <asm-generic/dma-coherent.h>
  27. #include <linux/kmemcheck.h>
/* Sentinel returned by the mapping ops on failure: all-ones dma_addr_t. */
#define DMA_ERROR_CODE (~(dma_addr_t)0x0)

/* Returns non-zero when @dma_addr is the error sentinel (defined out of line). */
int dma_mapping_error(struct device *dev, dma_addr_t dma_addr);

/*
 * OpenRISC has no separate noncoherent allocator; alias the noncoherent
 * entry points straight onto the coherent ones.
 */
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

/*
 * Architecture back-end implementations (arch/openrisc/kernel/dma.c).
 * The inline wrappers below forward to these and add dma-debug tracking.
 */
void *or1k_dma_alloc_coherent(struct device *dev, size_t size,
			      dma_addr_t *dma_handle, gfp_t flag);
void or1k_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
			    dma_addr_t dma_handle);
dma_addr_t or1k_map_page(struct device *dev, struct page *page,
			 unsigned long offset, size_t size,
			 enum dma_data_direction dir,
			 struct dma_attrs *attrs);
void or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
		     size_t size, enum dma_data_direction dir,
		     struct dma_attrs *attrs);
void or1k_sync_single_for_cpu(struct device *dev,
			      dma_addr_t dma_handle, size_t size,
			      enum dma_data_direction dir);
void or1k_sync_single_for_device(struct device *dev,
				 dma_addr_t dma_handle, size_t size,
				 enum dma_data_direction dir);
  49. static inline void *dma_alloc_coherent(struct device *dev, size_t size,
  50. dma_addr_t *dma_handle, gfp_t flag)
  51. {
  52. void *memory;
  53. memory = or1k_dma_alloc_coherent(dev, size, dma_handle, flag);
  54. debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
  55. return memory;
  56. }
  57. static inline void dma_free_coherent(struct device *dev, size_t size,
  58. void *cpu_addr, dma_addr_t dma_handle)
  59. {
  60. debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
  61. or1k_dma_free_coherent(dev, size, cpu_addr, dma_handle);
  62. }
  63. static inline dma_addr_t dma_map_single(struct device *dev, void *ptr,
  64. size_t size,
  65. enum dma_data_direction dir)
  66. {
  67. dma_addr_t addr;
  68. kmemcheck_mark_initialized(ptr, size);
  69. BUG_ON(!valid_dma_direction(dir));
  70. addr = or1k_map_page(dev, virt_to_page(ptr),
  71. (unsigned long)ptr & ~PAGE_MASK, size,
  72. dir, NULL);
  73. debug_dma_map_page(dev, virt_to_page(ptr),
  74. (unsigned long)ptr & ~PAGE_MASK, size,
  75. dir, addr, true);
  76. return addr;
  77. }
  78. static inline void dma_unmap_single(struct device *dev, dma_addr_t addr,
  79. size_t size,
  80. enum dma_data_direction dir)
  81. {
  82. BUG_ON(!valid_dma_direction(dir));
  83. or1k_unmap_page(dev, addr, size, dir, NULL);
  84. debug_dma_unmap_page(dev, addr, size, dir, true);
  85. }
  86. static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
  87. size_t size,
  88. enum dma_data_direction dir)
  89. {
  90. BUG_ON(!valid_dma_direction(dir));
  91. or1k_sync_single_for_cpu(dev, addr, size, dir);
  92. debug_dma_sync_single_for_cpu(dev, addr, size, dir);
  93. }
  94. static inline void dma_sync_single_for_device(struct device *dev,
  95. dma_addr_t addr, size_t size,
  96. enum dma_data_direction dir)
  97. {
  98. BUG_ON(!valid_dma_direction(dir));
  99. or1k_sync_single_for_device(dev, addr, size, dir);
  100. debug_dma_sync_single_for_device(dev, addr, size, dir);
  101. }
  102. static inline int dma_supported(struct device *dev, u64 dma_mask)
  103. {
  104. /* Support 32 bit DMA mask exclusively */
  105. return dma_mask == 0xffffffffULL;
  106. }
  107. static inline int dma_set_mask(struct device *dev, u64 dma_mask)
  108. {
  109. if (!dev->dma_mask || !dma_supported(dev, dma_mask))
  110. return -EIO;
  111. *dev->dma_mask = dma_mask;
  112. return 0;
  113. }
  114. #endif /* __ASM_OPENRISC_DMA_MAPPING_H */