pci-dma_32.c

/*
 * Dynamic DMA mapping support.
 *
 * On i386 there is no hardware dynamic DMA address translation,
 * so consistent alloc/free are merely page allocation/freeing.
 * The rest of the dynamic DMA mapping interface is implemented
 * in asm/pci.h.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <asm/io.h>

/* For i386, we make it point to the NULL address */
dma_addr_t bad_dma_address __read_mostly = 0x0;
EXPORT_SYMBOL(bad_dma_address);
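
/*
 * Try to satisfy an allocation from the device's private coherent pool
 * (dev->dma_mem, set up with dma_declare_coherent_memory()).  Returns
 * nonzero when the device has such a pool, in which case *ret holds the
 * allocated virtual address (or NULL on failure) and the caller must not
 * fall back to the generic page allocator.
 */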
static int dma_alloc_from_coherent_mem(struct device *dev, ssize_t size,
                                       dma_addr_t *dma_handle, void **ret)
{
        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
        int order = get_order(size);

        if (mem) {
                int page = bitmap_find_free_region(mem->bitmap, mem->size,
                                                   order);
                if (page >= 0) {
                        *dma_handle = mem->device_base + (page << PAGE_SHIFT);
                        *ret = mem->virt_base + (page << PAGE_SHIFT);
                        memset(*ret, 0, size);
                } else if (mem->flags & DMA_MEMORY_EXCLUSIVE) {
                        /* exclusive pool exhausted: report failure, no fallback */
                        *ret = NULL;
                }
        }
        return (mem != NULL);
}
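
/*
 * If vaddr lies inside the device's coherent pool, return the region to
 * the pool's bitmap and report 1; otherwise report 0 so the caller frees
 * the pages through the normal page allocator.
 */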
static int dma_release_coherent(struct device *dev, int order, void *vaddr)
{
        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

        if (mem && vaddr >= mem->virt_base && vaddr <
                   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
                int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

                bitmap_release_region(mem->bitmap, page, order);
                return 1;
        }
        return 0;
}
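
/*
 * Allocate consistent (coherent) memory.  The per-device pool is tried
 * first; otherwise pages come from the normal allocator, with GFP_DMA
 * forced when the device's coherent_dma_mask cannot reach the whole
 * 32-bit address space.  On i386 the bus address is simply the physical
 * address of the buffer.
 */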
void *dma_alloc_coherent(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, gfp_t gfp)
{
        void *ret = NULL;
        int order = get_order(size);

        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

        if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &ret))
                return ret;

        if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
                gfp |= GFP_DMA;

        ret = (void *)__get_free_pages(gfp, order);
        if (ret != NULL) {
                memset(ret, 0, size);
                *dma_handle = virt_to_phys(ret);
        }
        return ret;
}
EXPORT_SYMBOL(dma_alloc_coherent);
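
/*
 * Free memory obtained from dma_alloc_coherent().  Must not be called
 * with interrupts disabled; the WARN_ON keeps drivers portable to
 * architectures where freeing coherent memory may sleep.
 */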
void dma_free_coherent(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t dma_handle)
{
        int order = get_order(size);

        WARN_ON(irqs_disabled());       /* for portability */

        if (dma_release_coherent(dev, order, vaddr))
                return;
        free_pages((unsigned long)vaddr, order);
}
EXPORT_SYMBOL(dma_free_coherent);
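
A driver typically pairs these calls to set up and tear down a consistent
buffer that both the CPU and the device can see. The sketch below is a
minimal illustration only; the device pointer, buffer size, and names
(my_ring_setup, my_ring_teardown, MY_RING_BYTES) are hypothetical and not
part of this file.

#include <linux/dma-mapping.h>
#include <linux/errno.h>

#define MY_RING_BYTES 4096              /* hypothetical buffer size */

static void *ring;                      /* CPU virtual address */
static dma_addr_t ring_dma;             /* bus address for the device */

static int my_ring_setup(struct device *dev)
{
        /* returns a zeroed buffer and fills ring_dma with its bus address */
        ring = dma_alloc_coherent(dev, MY_RING_BYTES, &ring_dma, GFP_KERNEL);
        if (!ring)
                return -ENOMEM;
        /* program ring_dma into the hardware; access the data via "ring" */
        return 0;
}

static void my_ring_teardown(struct device *dev)
{
        dma_free_coherent(dev, MY_RING_BYTES, ring, ring_dma);
}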