dma.c

/*
 * Copyright (C) 2009-2010 PetaLogix
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <asm/bug.h>
#include <asm/cacheflush.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */
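
/*
 * Illustrative example (not code from this file): platform setup code for a
 * hypothetical device "foo_dev" whose bus sees RAM at an offset of
 * 0x80000000 could record that offset before the device is used:
 *
 *        foo_dev->archdata.dma_data = (void *)0x80000000UL;
 *
 * get_dma_direct_offset() below then adds that value to every physical
 * address handed to the device.
 */

/*
 * Flush (DMA_TO_DEVICE) or invalidate (DMA_FROM_DEVICE) the data cache over
 * one physical range so the CPU and the device observe the same data.
 */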
static inline void __dma_sync_page(unsigned long paddr, unsigned long offset,
                                   size_t size, enum dma_data_direction direction)
{
        switch (direction) {
        case DMA_TO_DEVICE:
                flush_dcache_range(paddr + offset, paddr + offset + size);
                break;
        case DMA_FROM_DEVICE:
                invalidate_dcache_range(paddr + offset, paddr + offset + size);
                break;
        default:
                BUG();
        }
}
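
/*
 * Return the bus offset recorded for this device in archdata.dma_data,
 * falling back to PCI_DRAM_OFFSET when no device is given.
 */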
static unsigned long get_dma_direct_offset(struct device *dev)
{
        if (dev)
                return (unsigned long)dev->archdata.dma_data;

        return PCI_DRAM_OFFSET; /* FIXME: not sure if this is correct */
}
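
/*
 * Allocate a zeroed, page-aligned buffer on the device's NUMA node and
 * report its bus address through *dma_handle.
 */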
void *dma_direct_alloc_coherent(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t flag)
{
        void *ret;
        struct page *page;
        int node = dev_to_node(dev);

        /* ignore region specifiers */
        flag &= ~(__GFP_HIGHMEM);

        page = alloc_pages_node(node, flag, get_order(size));
        if (page == NULL)
                return NULL;
        ret = page_address(page);
        memset(ret, 0, size);
        *dma_handle = virt_to_phys(ret) + get_dma_direct_offset(dev);

        return ret;
}
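
/*
 * Release a buffer obtained from dma_direct_alloc_coherent(); the bus
 * handle is not needed to undo a direct mapping.
 */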
void dma_direct_free_coherent(struct device *dev, size_t size,
                              void *vaddr, dma_addr_t dma_handle)
{
        free_pages((unsigned long)vaddr, get_order(size));
}
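
/*
 * Map a scatter-gather list for DMA: each entry's bus address is its
 * physical address plus the per-device offset, and the corresponding cache
 * range is synced for the given direction.
 */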
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
                             int nents, enum dma_data_direction direction,
                             struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        /* FIXME: this part of the code is untested */
        for_each_sg(sgl, sg, nents, i) {
                sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
                sg->dma_length = sg->length;
                __dma_sync_page(page_to_phys(sg_page(sg)), sg->offset,
                                sg->length, direction);
        }

        return nents;
}
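
/*
 * Nothing to undo for a direct scatter-gather mapping.
 */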
static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
                                int nents, enum dma_data_direction direction,
                                struct dma_attrs *attrs)
{
}
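
/*
 * Report that any DMA mask is supported.
 */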
static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
        return 1;
}
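
/*
 * Map one page (or part of it) for DMA: sync the cache for the requested
 * direction and return the physical address plus the per-device offset.
 */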
static inline dma_addr_t dma_direct_map_page(struct device *dev,
                                             struct page *page,
                                             unsigned long offset,
                                             size_t size,
                                             enum dma_data_direction direction,
                                             struct dma_attrs *attrs)
{
        BUG_ON(direction == DMA_NONE);
        __dma_sync_page(page_to_phys(page), offset, size, direction);
        return page_to_phys(page) + offset + get_dma_direct_offset(dev);
}

static inline void dma_direct_unmap_page(struct device *dev,
                                         dma_addr_t dma_address,
                                         size_t size,
                                         enum dma_data_direction direction,
                                         struct dma_attrs *attrs)
{
        /*
         * No mapping teardown is necessary here; only the cache needs
         * syncing. dma_address is already a physical address, so it is
         * passed straight to __dma_sync_page(), which takes a physical
         * range.
         */
        __dma_sync_page(dma_address, 0, size, direction);
}
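
/*
 * Operations table wired into the generic DMA API; the core dispatches
 * dma_map_single(), dma_alloc_coherent() and friends here rather than
 * drivers calling the functions above directly.
 *
 * Illustrative driver-side usage (hypothetical "foo_dev" and "buf"):
 *
 *        dma_addr_t bus = dma_map_single(&foo_dev->dev, buf, len,
 *                                        DMA_TO_DEVICE);
 *
 * which ends up in dma_direct_map_page() above: the buffer's cache lines
 * are flushed and the returned bus address is phys + per-device offset.
 */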
struct dma_map_ops dma_direct_ops = {
        .alloc_coherent = dma_direct_alloc_coherent,
        .free_coherent = dma_direct_free_coherent,
        .map_sg = dma_direct_map_sg,
        .unmap_sg = dma_direct_unmap_sg,
        .dma_supported = dma_direct_dma_supported,
        .map_page = dma_direct_map_page,
        .unmap_page = dma_direct_unmap_page,
};
EXPORT_SYMBOL(dma_direct_ops);

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
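
/*
 * Set up DMA-API debugging with the preallocated entry pool early in boot;
 * fs_initcall runs before the device initcalls where most drivers register.
 */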
static int __init dma_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

        return 0;
}
fs_initcall(dma_init);