pci-dma_64.c

/*
 * Dynamic DMA mapping support.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/dmar.h>
#include <linux/bootmem.h>
#include <asm/proto.h>
#include <asm/io.h>
#include <asm/gart.h>
#include <asm/calgary.h>

/* Dummy device used for NULL arguments (normally ISA). Better would
   be probably a smaller DMA mask, but this is bug-to-bug compatible
   to i386. */
struct device fallback_dev = {
        .bus_id = "fallback device",
        .coherent_dma_mask = DMA_32BIT_MASK,
        .dma_mask = &fallback_dev.coherent_dma_mask,
};

/* Allocate DMA memory on node near device */
noinline static void *
dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
{
        int node;

        node = dev_to_node(dev);

        return alloc_pages_node(node, gfp, order);
}

#define dma_alloc_from_coherent_mem(dev, size, handle, ret) (0)
#define dma_release_coherent(dev, order, vaddr) (0)
/*
 * Allocate memory for a coherent mapping.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
                   gfp_t gfp)
{
        void *memory;
        struct page *page;
        unsigned long dma_mask = 0;
        u64 bus;

        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

        if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &memory))
                return memory;

        if (!dev)
                dev = &fallback_dev;
        dma_mask = dev->coherent_dma_mask;
        if (dma_mask == 0)
                dma_mask = DMA_32BIT_MASK;

        /* Device not DMA able */
        if (dev->dma_mask == NULL)
                return NULL;

        /* Don't invoke OOM killer */
        gfp |= __GFP_NORETRY;

        /* Why <=? Even when the mask is smaller than 4GB it is often
           larger than 16MB and in this case we have a chance of
           finding fitting memory in the next higher zone first. If
           not retry with true GFP_DMA. -AK */
        if (dma_mask <= DMA_32BIT_MASK)
                gfp |= GFP_DMA32;
 again:
        page = dma_alloc_pages(dev, gfp, get_order(size));
        if (page == NULL)
                return NULL;

        {
                int high, mmu;
                bus = page_to_phys(page);
                memory = page_address(page);
                high = (bus + size) >= dma_mask;
                mmu = high;
                if (force_iommu && !(gfp & GFP_DMA))
                        mmu = 1;
                else if (high) {
                        free_pages((unsigned long)memory,
                                   get_order(size));

                        /* Don't use the 16MB ZONE_DMA unless absolutely
                           needed. It's better to use remapping first. */
                        if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
                                gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
                                goto again;
                        }

                        /* Let low level make its own zone decisions */
                        gfp &= ~(GFP_DMA32|GFP_DMA);

                        if (dma_ops->alloc_coherent)
                                return dma_ops->alloc_coherent(dev, size,
                                                               dma_handle, gfp);
                        return NULL;
                }

                memset(memory, 0, size);
                if (!mmu) {
                        *dma_handle = bus;
                        return memory;
                }
        }

        if (dma_ops->alloc_coherent) {
                free_pages((unsigned long)memory, get_order(size));
                gfp &= ~(GFP_DMA|GFP_DMA32);
                return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
        }

        if (dma_ops->map_simple) {
                *dma_handle = dma_ops->map_simple(dev, virt_to_phys(memory),
                                                  size,
                                                  PCI_DMA_BIDIRECTIONAL);
                if (*dma_handle != bad_dma_address)
                        return memory;
        }

        if (panic_on_overflow)
                panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n", size);
        free_pages((unsigned long)memory, get_order(size));
        return NULL;
}
EXPORT_SYMBOL(dma_alloc_coherent);
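/*
 * Usage sketch (illustrative, not part of the original file): a PCI
 * driver would typically call dma_alloc_coherent() from its probe path
 * to obtain a buffer that both the CPU and the device can access
 * without explicit cache syncing.  The names "pdev", RING_BYTES and
 * "ring" below are made up for the example.
 *
 *      dma_addr_t ring_bus;
 *      void *ring;
 *
 *      ring = dma_alloc_coherent(&pdev->dev, RING_BYTES, &ring_bus,
 *                                GFP_KERNEL);
 *      if (!ring)
 *              return -ENOMEM;
 *
 * The device is then programmed with "ring_bus" (the bus address
 * returned in *dma_handle), while the CPU accesses the buffer through
 * "ring" (the kernel virtual address).
 */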
/*
 * Unmap coherent memory.
 * The caller must ensure that the device has finished accessing the mapping.
 */
void dma_free_coherent(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t bus)
{
        int order = get_order(size);

        WARN_ON(irqs_disabled());       /* for portability */
        if (dma_release_coherent(dev, order, vaddr))
                return;
        if (dma_ops->unmap_single)
                dma_ops->unmap_single(dev, bus, size, 0);
        free_pages((unsigned long)vaddr, order);
}
EXPORT_SYMBOL(dma_free_coherent);
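/*
 * Matching teardown sketch (illustrative, not part of the original
 * file): once the device has been quiesced, the buffer from the
 * example above is released with the same size and bus address, from a
 * context with interrupts enabled, as the WARN_ON() above documents.
 *
 *      dma_free_coherent(&pdev->dev, RING_BYTES, ring, ring_bus);
 */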