pci-dma_64.c

/*
 * Dynamic DMA mapping support.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/dmar.h>
#include <linux/bootmem.h>
#include <asm/proto.h>
#include <asm/io.h>
#include <asm/gart.h>
#include <asm/calgary.h>
/* Dummy device used for NULL arguments (normally ISA). Better would
   be probably a smaller DMA mask, but this is bug-to-bug compatible
   to i386. */
struct device fallback_dev = {
	.bus_id = "fallback device",
	.coherent_dma_mask = DMA_32BIT_MASK,
	.dma_mask = &fallback_dev.coherent_dma_mask,
};

/* Allocate DMA memory on node near device */
noinline static void *
dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
{
	struct page *page;
	int node;

	node = dev_to_node(dev);

	page = alloc_pages_node(node, gfp, order);
	return page ? page_address(page) : NULL;
}

#define dma_alloc_from_coherent_mem(dev, size, handle, ret) (0)
#define dma_release_coherent(dev, order, vaddr) (0)

/*
 * Allocate memory for a coherent mapping.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t gfp)
{
	void *memory;
	unsigned long dma_mask = 0;
	u64 bus;

	if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &memory))
		return memory;

	if (!dev)
		dev = &fallback_dev;
	dma_mask = dev->coherent_dma_mask;
	if (dma_mask == 0)
		dma_mask = DMA_32BIT_MASK;

	/* Device not DMA able */
	if (dev->dma_mask == NULL)
		return NULL;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	/* Kludge to make it bug-to-bug compatible with i386. i386
	   uses the normal dma_mask for alloc_coherent. */
	dma_mask &= *dev->dma_mask;

	/* Why <=? Even when the mask is smaller than 4GB it is often
	   larger than 16MB and in this case we have a chance of
	   finding fitting memory in the next higher zone first. If
	   not retry with true GFP_DMA. -AK */
	if (dma_mask <= DMA_32BIT_MASK)
		gfp |= GFP_DMA32;
again:
	memory = dma_alloc_pages(dev, gfp, get_order(size));
	if (memory == NULL)
		return NULL;

	{
		int high, mmu;

		bus = virt_to_bus(memory);
		high = (bus + size) >= dma_mask;
		mmu = high;
		if (force_iommu && !(gfp & GFP_DMA))
			mmu = 1;
		else if (high) {
			free_pages((unsigned long)memory,
				   get_order(size));

			/* Don't use the 16MB ZONE_DMA unless absolutely
			   needed. It's better to use remapping first. */
			if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
				gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
				goto again;
			}

			/* Let low level make its own zone decisions */
			gfp &= ~(GFP_DMA32|GFP_DMA);

			if (dma_ops->alloc_coherent)
				return dma_ops->alloc_coherent(dev, size,
							       dma_handle, gfp);
			return NULL;
		}

		memset(memory, 0, size);
		if (!mmu) {
			*dma_handle = virt_to_bus(memory);
			return memory;
		}
	}

	if (dma_ops->alloc_coherent) {
		free_pages((unsigned long)memory, get_order(size));
		gfp &= ~(GFP_DMA|GFP_DMA32);
		return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
	}

	if (dma_ops->map_simple) {
		*dma_handle = dma_ops->map_simple(dev, virt_to_phys(memory),
						  size,
						  PCI_DMA_BIDIRECTIONAL);
		if (*dma_handle != bad_dma_address)
			return memory;
	}

	if (panic_on_overflow)
		panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n", size);
	free_pages((unsigned long)memory, get_order(size));
	return NULL;
}
EXPORT_SYMBOL(dma_alloc_coherent);

/*
 * Unmap coherent memory.
 * The caller must ensure that the device has finished accessing the mapping.
 */
void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t bus)
{
	int order = get_order(size);

	WARN_ON(irqs_disabled());	/* for portability */
	if (dma_release_coherent(dev, order, vaddr))
		return;
	if (dma_ops->unmap_single)
		dma_ops->unmap_single(dev, bus, size, 0);
	free_pages((unsigned long)vaddr, order);
}
EXPORT_SYMBOL(dma_free_coherent);
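
For context, a minimal sketch (not part of pci-dma_64.c) of how a PCI driver would typically consume the exported dma_alloc_coherent()/dma_free_coherent() pair above. The device name, buffer size, and helper function are illustrative assumptions, not taken from this file.

/* Hypothetical driver fragment: allocate and release one coherent DMA
 * buffer through the exports defined above. On x86_64 the call lands in
 * dma_alloc_coherent() shown earlier, which tries GFP_DMA32/GFP_DMA zones
 * first and only then falls back to the registered dma_ops (IOMMU). */
#include <linux/pci.h>
#include <linux/dma-mapping.h>

#define MYDEV_RING_BYTES	4096	/* made-up descriptor ring size */

static int mydev_setup_ring(struct pci_dev *pdev)
{
	void *ring;
	dma_addr_t ring_dma;

	ring = dma_alloc_coherent(&pdev->dev, MYDEV_RING_BYTES,
				  &ring_dma, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* ... program ring_dma into the device's ring base register ... */

	/* The device must be quiesced before the mapping is released. */
	dma_free_coherent(&pdev->dev, MYDEV_RING_BYTES, ring, ring_dma);
	return 0;
}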