pci-dma_64.c 3.9 KB

/*
 * Dynamic DMA mapping support.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/dmar.h>
#include <linux/bootmem.h>
#include <asm/proto.h>
#include <asm/io.h>
#include <asm/gart.h>
#include <asm/calgary.h>
/* Dummy device used for NULL arguments (normally ISA). Better would
   be probably a smaller DMA mask, but this is bug-to-bug compatible
   to i386. */
struct device fallback_dev = {
	.bus_id = "fallback device",
	.coherent_dma_mask = DMA_32BIT_MASK,
	.dma_mask = &fallback_dev.coherent_dma_mask,
};
/* Allocate DMA memory on node near device */
noinline static void *
dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
{
	int node;

	node = dev_to_node(dev);

	return alloc_pages_node(node, gfp, order);
}

#define dma_alloc_from_coherent_mem(dev, size, handle, ret) (0)
#define dma_release_coherent(dev, order, vaddr) (0)
/*
 * Allocate memory for a coherent mapping.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t gfp)
{
	void *memory;
	struct page *page;
	unsigned long dma_mask = 0;
	u64 bus;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &memory))
		return memory;

	if (!dev)
		dev = &fallback_dev;
	dma_mask = dev->coherent_dma_mask;
	if (dma_mask == 0)
		dma_mask = DMA_32BIT_MASK;

	/* Device not DMA able */
	if (dev->dma_mask == NULL)
		return NULL;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	/* Kludge to make it bug-to-bug compatible with i386. i386
	   uses the normal dma_mask for alloc_coherent. */
	dma_mask &= *dev->dma_mask;

	/* Why <=? Even when the mask is smaller than 4GB it is often
	   larger than 16MB and in this case we have a chance of
	   finding fitting memory in the next higher zone first. If
	   not retry with true GFP_DMA. -AK */
	if (dma_mask <= DMA_32BIT_MASK)
		gfp |= GFP_DMA32;

 again:
	page = dma_alloc_pages(dev, gfp, get_order(size));
	if (page == NULL)
		return NULL;

	{
		int high, mmu;
		bus = page_to_phys(page);
		memory = page_address(page);
		high = (bus + size) >= dma_mask;
		mmu = high;
		if (force_iommu && !(gfp & GFP_DMA))
			mmu = 1;
		else if (high) {
			free_pages((unsigned long)memory,
				   get_order(size));

			/* Don't use the 16MB ZONE_DMA unless absolutely
			   needed. It's better to use remapping first. */
			if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
				gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
				goto again;
			}

			/* Let low level make its own zone decisions */
			gfp &= ~(GFP_DMA32|GFP_DMA);

			if (dma_ops->alloc_coherent)
				return dma_ops->alloc_coherent(dev, size,
							       dma_handle, gfp);
			return NULL;
		}

		memset(memory, 0, size);
		if (!mmu) {
			*dma_handle = bus;
			return memory;
		}
	}

	if (dma_ops->alloc_coherent) {
		free_pages((unsigned long)memory, get_order(size));
		gfp &= ~(GFP_DMA|GFP_DMA32);
		return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
	}

	if (dma_ops->map_simple) {
		*dma_handle = dma_ops->map_simple(dev, virt_to_phys(memory),
						  size,
						  PCI_DMA_BIDIRECTIONAL);
		if (*dma_handle != bad_dma_address)
			return memory;
	}

	if (panic_on_overflow)
		panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n", size);
	free_pages((unsigned long)memory, get_order(size));
	return NULL;
}
EXPORT_SYMBOL(dma_alloc_coherent);
/*
 * Unmap coherent memory.
 * The caller must ensure that the device has finished accessing the mapping.
 */
void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t bus)
{
	int order = get_order(size);

	WARN_ON(irqs_disabled());	/* for portability */
	if (dma_release_coherent(dev, order, vaddr))
		return;
	if (dma_ops->unmap_single)
		dma_ops->unmap_single(dev, bus, size, 0);
	free_pages((unsigned long)vaddr, order);
}
EXPORT_SYMBOL(dma_free_coherent);
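
For orientation only, a minimal sketch of how a driver of this era would consume the two exports above: it passes its struct device to dma_alloc_coherent(), hands the returned bus address to the hardware, and releases the buffer with dma_free_coherent() once the device has stopped using it. The device name, ring size, and symbols (mydev_*, MYDEV_RING_BYTES) are illustrative assumptions and do not appear in this file.

/* Illustrative driver-side usage; hypothetical device and sizes. */
#include <linux/pci.h>
#include <linux/dma-mapping.h>

#define MYDEV_RING_BYTES	4096		/* assumed descriptor-ring size */

static void *mydev_ring;			/* CPU (kernel virtual) address */
static dma_addr_t mydev_ring_bus;		/* bus address programmed into HW */

static int mydev_setup_ring(struct pci_dev *pdev)
{
	/* Returns a kernel-virtual pointer and fills mydev_ring_bus with the
	   address the device should DMA to; dma_alloc_coherent() above falls
	   back to the IOMMU dma_ops when the pages land above the mask. */
	mydev_ring = dma_alloc_coherent(&pdev->dev, MYDEV_RING_BYTES,
					&mydev_ring_bus, GFP_KERNEL);
	if (!mydev_ring)
		return -ENOMEM;
	return 0;
}

static void mydev_teardown_ring(struct pci_dev *pdev)
{
	/* Not from IRQ context (see the WARN_ON above), and only after the
	   device has finished accessing the mapping. */
	dma_free_coherent(&pdev->dev, MYDEV_RING_BYTES,
			  mydev_ring, mydev_ring_bus);
}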