dma.c

/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/lmb.h>
#include <asm/bug.h>
#include <asm/abs_addr.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */
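
/*
 * Illustrative sketch (an assumption, not part of the original file):
 * platform code for a bus that sees system memory starting at a 1GB
 * bus offset could do, at device setup time,
 *
 *      dev->archdata.dma_data = (void *)0x40000000ul;
 *
 * after which every bus address produced below is the physical address
 * plus 0x40000000. The value and the cast are illustrative; the field
 * is read back as an unsigned long offset by get_dma_offset().
 */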

void *dma_direct_alloc_coherent(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t flag)
{
        void *ret;
#ifdef CONFIG_NOT_COHERENT_CACHE
        ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
        if (ret == NULL)
                return NULL;
        *dma_handle += get_dma_offset(dev);
        return ret;
#else
        struct page *page;
        int node = dev_to_node(dev);

        /* ignore region specifiers */
        flag &= ~(__GFP_HIGHMEM);

        page = alloc_pages_node(node, flag, get_order(size));
        if (page == NULL)
                return NULL;
        ret = page_address(page);
        memset(ret, 0, size);
        *dma_handle = virt_to_abs(ret) + get_dma_offset(dev);

        return ret;
#endif
}

void dma_direct_free_coherent(struct device *dev, size_t size,
                              void *vaddr, dma_addr_t dma_handle)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
        __dma_free_coherent(size, vaddr);
#else
        free_pages((unsigned long)vaddr, get_order(size));
#endif
}
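
/*
 * Usage sketch (illustrative, not part of the original file): drivers
 * reach the two callbacks above through the generic DMA API rather than
 * calling them directly, e.g.
 *
 *      dma_addr_t bus;
 *      void *buf = dma_alloc_coherent(dev, PAGE_SIZE, &bus, GFP_KERNEL);
 *
 *      if (buf) {
 *              ... program "bus" into the device ...
 *              dma_free_coherent(dev, PAGE_SIZE, buf, bus);
 *      }
 */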

static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
                             int nents, enum dma_data_direction direction,
                             struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                /* Direct mapping: bus address = physical + device offset */
                sg->dma_address = sg_phys(sg) + get_dma_offset(dev);
                sg->dma_length = sg->length;
                /* No-op on cache-coherent platforms */
                __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
        }

        return nents;
}

static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
                                int nents, enum dma_data_direction direction,
                                struct dma_attrs *attrs)
{
        /* Nothing to tear down for a direct mapping */
}

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
        /* Could be improved so platforms can set the limit in case
         * they have limited DMA windows
         */
        return mask >= (lmb_end_of_DRAM() - 1);
#else
        return 1;
#endif
}
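
/*
 * Worked example for the check above (illustrative): on a ppc64 box with
 * 4GB of RAM, lmb_end_of_DRAM() returns 0x100000000, so a device with a
 * 32-bit mask (0xffffffff) is accepted (0xffffffff >= 0xffffffff), while
 * a 30-bit mask (0x3fffffff) is rejected: such a device cannot reach all
 * of DRAM through a direct mapping.
 */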

static inline dma_addr_t dma_direct_map_page(struct device *dev,
                                             struct page *page,
                                             unsigned long offset,
                                             size_t size,
                                             enum dma_data_direction dir,
                                             struct dma_attrs *attrs)
{
        BUG_ON(dir == DMA_NONE);
        __dma_sync_page(page, offset, size, dir);
        return page_to_phys(page) + offset + get_dma_offset(dev);
}

static inline void dma_direct_unmap_page(struct device *dev,
                                         dma_addr_t dma_address,
                                         size_t size,
                                         enum dma_data_direction direction,
                                         struct dma_attrs *attrs)
{
        /* Nothing to tear down for a direct mapping */
}
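
/*
 * Streaming usage sketch (illustrative, not part of the original file):
 * map_page/unmap_page back the generic streaming API, e.g.
 *
 *      dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *      ... let the device read "len" bytes at "bus" ...
 *
 *      dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
 */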

#ifdef CONFIG_NOT_COHERENT_CACHE
static inline void dma_direct_sync_sg(struct device *dev,
                struct scatterlist *sgl, int nents,
                enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i)
                __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_direct_sync_single_range(struct device *dev,
                dma_addr_t dma_handle, unsigned long offset, size_t size,
                enum dma_data_direction direction)
{
        __dma_sync(bus_to_virt(dma_handle + offset), size, direction);
}
#endif
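
/*
 * Note (added for clarity): the sync callbacks above exist only when the
 * CPU caches are not DMA-coherent; on coherent platforms there is nothing
 * to flush or invalidate, so they are simply left out of the ops table
 * below.
 */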

struct dma_map_ops dma_direct_ops = {
        .alloc_coherent                 = dma_direct_alloc_coherent,
        .free_coherent                  = dma_direct_free_coherent,
        .map_sg                         = dma_direct_map_sg,
        .unmap_sg                       = dma_direct_unmap_sg,
        .dma_supported                  = dma_direct_dma_supported,
        .map_page                       = dma_direct_map_page,
        .unmap_page                     = dma_direct_unmap_page,
#ifdef CONFIG_NOT_COHERENT_CACHE
        .sync_single_range_for_cpu      = dma_direct_sync_single_range,
        .sync_single_range_for_device   = dma_direct_sync_single_range,
        .sync_sg_for_cpu                = dma_direct_sync_sg,
        .sync_sg_for_device             = dma_direct_sync_sg,
#endif
};
EXPORT_SYMBOL(dma_direct_ops);
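
/*
 * Attachment sketch (illustrative, not part of the original file):
 * platform code points a device at this ops table, typically via the
 * set_dma_ops() helper:
 *
 *      set_dma_ops(&pdev->dev, &dma_direct_ops);
 */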

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

        return 0;
}
fs_initcall(dma_init);