pci-dma_64.c

/*
 * Dynamic DMA mapping support.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/dmar.h>
#include <linux/bootmem.h>
#include <asm/proto.h>
#include <asm/io.h>
#include <asm/gart.h>
#include <asm/calgary.h>

dma_addr_t bad_dma_address __read_mostly;
EXPORT_SYMBOL(bad_dma_address);

/* Dummy device used for NULL arguments (normally ISA). A smaller DMA
   mask would probably be better, but this is bug-to-bug compatible
   with i386. */
struct device fallback_dev = {
        .bus_id = "fallback device",
        .coherent_dma_mask = DMA_32BIT_MASK,
        .dma_mask = &fallback_dev.coherent_dma_mask,
};
/* Allocate DMA memory on node near device */
noinline static void *
dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
{
        struct page *page;
        int node;

        node = dev_to_node(dev);

        page = alloc_pages_node(node, gfp, order);
        return page ? page_address(page) : NULL;
}
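/*
 * Usage note (editor's sketch, not part of the original source):
 * dma_alloc_coherent() below calls this helper with the zone-selection
 * flags it has already chosen, roughly:
 *
 *      memory = dma_alloc_pages(dev, gfp, get_order(size));
 *
 * so the NUMA placement done via dev_to_node()/alloc_pages_node() stays
 * hidden from the rest of the mapping code.
 */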
/*
 * Allocate memory for a coherent mapping.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
                   gfp_t gfp)
{
        void *memory;
        unsigned long dma_mask = 0;
        u64 bus;

        if (!dev)
                dev = &fallback_dev;
        dma_mask = dev->coherent_dma_mask;
        if (dma_mask == 0)
                dma_mask = DMA_32BIT_MASK;

        /* Device not DMA-capable */
        if (dev->dma_mask == NULL)
                return NULL;

        /* Don't invoke OOM killer */
        gfp |= __GFP_NORETRY;

        /* Kludge to make it bug-to-bug compatible with i386. i386
           uses the normal dma_mask for alloc_coherent. */
        dma_mask &= *dev->dma_mask;

        /* Why <=? Even when the mask is smaller than 4GB it is often
           larger than 16MB and in this case we have a chance of
           finding fitting memory in the next higher zone first. If
           not, retry with true GFP_DMA. -AK */
        if (dma_mask <= DMA_32BIT_MASK)
                gfp |= GFP_DMA32;
 again:
        memory = dma_alloc_pages(dev, gfp, get_order(size));
        if (memory == NULL)
                return NULL;

        {
                int high, mmu;
                bus = virt_to_bus(memory);
                high = (bus + size) >= dma_mask;
                mmu = high;
                if (force_iommu && !(gfp & GFP_DMA))
                        mmu = 1;
                else if (high) {
                        free_pages((unsigned long)memory,
                                   get_order(size));

                        /* Don't use the 16MB ZONE_DMA unless absolutely
                           needed. It's better to use remapping first. */
                        if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
                                gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
                                goto again;
                        }

                        /* Let low level make its own zone decisions */
                        gfp &= ~(GFP_DMA32|GFP_DMA);

                        if (dma_ops->alloc_coherent)
                                return dma_ops->alloc_coherent(dev, size,
                                                               dma_handle, gfp);
                        return NULL;
                }

                memset(memory, 0, size);
                if (!mmu) {
                        *dma_handle = virt_to_bus(memory);
                        return memory;
                }
        }

        if (dma_ops->alloc_coherent) {
                free_pages((unsigned long)memory, get_order(size));
                gfp &= ~(GFP_DMA|GFP_DMA32);
                return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
        }

        if (dma_ops->map_simple) {
                *dma_handle = dma_ops->map_simple(dev, virt_to_phys(memory),
                                                  size,
                                                  PCI_DMA_BIDIRECTIONAL);
                if (*dma_handle != bad_dma_address)
                        return memory;
        }

        if (panic_on_overflow)
                panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n", size);
        free_pages((unsigned long)memory, get_order(size));
        return NULL;
}
EXPORT_SYMBOL(dma_alloc_coherent);
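/*
 * Usage sketch (editor's addition, not part of the original source): a
 * hypothetical PCI driver allocating a small descriptor ring through the
 * interface above; "pdev", "ring" and "ring_dma" are illustrative names.
 *
 *      dma_addr_t ring_dma;
 *      void *ring;
 *
 *      ring = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &ring_dma,
 *                                GFP_KERNEL);
 *      if (!ring)
 *              return -ENOMEM;
 *
 * The returned virtual address is what the CPU uses; ring_dma is what the
 * device is programmed with (either the raw bus address or an IOMMU mapping,
 * depending on which path dma_alloc_coherent() took above).
 */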
/*
 * Unmap coherent memory.
 * The caller must ensure that the device has finished accessing the mapping.
 */
void dma_free_coherent(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t bus)
{
        WARN_ON(irqs_disabled());       /* for portability */
        if (dma_ops->unmap_single)
                dma_ops->unmap_single(dev, bus, size, 0);
        free_pages((unsigned long)vaddr, get_order(size));
}
EXPORT_SYMBOL(dma_free_coherent);
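/*
 * Teardown sketch (editor's addition, not part of the original source),
 * matching the hypothetical allocation shown after dma_alloc_coherent()
 * above. Per the comment on dma_free_coherent(), the device must already
 * have stopped using the mapping, and this must not be called with
 * interrupts disabled:
 *
 *      dma_free_coherent(&pdev->dev, PAGE_SIZE, ring, ring_dma);
 */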