dma-coherent.c

/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/kernel.h>
#include <linux/dma-mapping.h>

struct dma_coherent_mem {
        void *virt_base;
        u32 device_base;
        int size;
        int flags;
        unsigned long *bitmap;
};

int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
                                dma_addr_t device_addr, size_t size, int flags)
{
        void __iomem *mem_base = NULL;
        int pages = size >> PAGE_SHIFT;
        int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

        if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
                goto out;
        if (!size)
                goto out;
        if (dev->dma_mem)
                goto out;

        /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

        mem_base = ioremap(bus_addr, size);
        if (!mem_base)
                goto out;

        dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
        if (!dev->dma_mem)
                goto out;
        dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
        if (!dev->dma_mem->bitmap)
                goto free1_out;

        dev->dma_mem->virt_base = mem_base;
        dev->dma_mem->device_base = device_addr;
        dev->dma_mem->size = pages;
        dev->dma_mem->flags = flags;

        if (flags & DMA_MEMORY_MAP)
                return DMA_MEMORY_MAP;

        return DMA_MEMORY_IO;

 free1_out:
        kfree(dev->dma_mem);
        dev->dma_mem = NULL;    /* don't leave a dangling pointer behind */
 out:
        if (mem_base)
                iounmap(mem_base);
        return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
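
/*
 * Usage sketch (editor's addition, not part of the original file): a
 * driver that owns a device-local memory window described by a struct
 * resource could hand it to the DMA layer roughly like this.  "pdev"
 * and "res" are hypothetical driver locals, and the flag combination is
 * just one plausible choice.
 *
 *      int rc = dma_declare_coherent_memory(&pdev->dev, res->start,
 *                                           res->start,
 *                                           res->end - res->start + 1,
 *                                           DMA_MEMORY_MAP |
 *                                           DMA_MEMORY_EXCLUSIVE);
 *      if (rc == 0)
 *              return -ENODEV; // declaration failed
 *
 * A non-zero return is either DMA_MEMORY_MAP (the area can be accessed
 * directly) or DMA_MEMORY_IO (it must go through I/O accessors).
 */
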
void dma_release_declared_memory(struct device *dev)
{
        struct dma_coherent_mem *mem = dev->dma_mem;

        if (!mem)
                return;
        dev->dma_mem = NULL;
        iounmap(mem->virt_base);
        kfree(mem->bitmap);
        kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);
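
/*
 * Usage sketch (editor's addition): the matching teardown for the
 * declaration example above is simply
 *
 *      dma_release_declared_memory(&pdev->dev);
 *
 * typically from the driver's remove path, once nothing can still be
 * using the pool.
 */
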
void *dma_mark_declared_memory_occupied(struct device *dev,
                                        dma_addr_t device_addr, size_t size)
{
        struct dma_coherent_mem *mem = dev->dma_mem;
        int pos, err;

        /* Account for the offset of device_addr inside its first page. */
        size += device_addr & ~PAGE_MASK;

        if (!mem)
                return ERR_PTR(-EINVAL);

        pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
        /* bitmap_allocate_region() wants an order, so convert from bytes. */
        err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
        if (err != 0)
                return ERR_PTR(err);
        return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);

/**
 * Try to allocate memory from the per-device coherent area.
 *
 * @dev:        device from which we allocate memory
 * @size:       size of requested memory area
 * @dma_handle: this will be filled with the correct DMA handle
 * @ret:        this pointer will be filled with the virtual address
 *              of the allocated area
 *
 * This function should only be called from the per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
                            dma_addr_t *dma_handle, void **ret)
{
        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
        int order = get_order(size);

        if (mem) {
                int page = bitmap_find_free_region(mem->bitmap, mem->size,
                                                   order);
                if (page >= 0) {
                        *dma_handle = mem->device_base + (page << PAGE_SHIFT);
                        *ret = mem->virt_base + (page << PAGE_SHIFT);
                        memset(*ret, 0, size);
                        return 1;
                }
                /*
                 * The pool is exhausted.  If it is exclusive to this
                 * device, fail the allocation outright; otherwise let
                 * the caller fall back to the generic allocators.
                 */
                if (mem->flags & DMA_MEMORY_EXCLUSIVE) {
                        *ret = NULL;
                        return 1;
                }
        }
        return 0;
}
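
/*
 * Usage sketch (editor's addition, not part of the original file): as the
 * comment above says, this helper is meant to be called from a per-arch
 * dma_alloc_coherent().  A caller would look roughly like:
 *
 *      void *dma_alloc_coherent(struct device *dev, size_t size,
 *                               dma_addr_t *handle, gfp_t gfp)
 *      {
 *              void *ret;
 *
 *              if (dma_alloc_from_coherent(dev, size, handle, &ret))
 *                      return ret;     // satisfied (or refused) by the pool
 *
 *              // ... otherwise allocate from the arch's generic path ...
 *      }
 *
 * The generic fallback is architecture-specific and omitted here.
 */
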
/**
 * Try to free memory allocated from the per-device coherent memory pool.
 * @dev:        device from which the memory was allocated
 * @order:      the order of the pages allocated
 * @vaddr:      virtual address of the allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and, if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if
 * dma_release_coherent() should proceed with releasing memory from
 * generic pools.
 */
int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
{
        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

        if (mem && vaddr >= mem->virt_base && vaddr <
                   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
                int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

                bitmap_release_region(mem->bitmap, page, order);
                return 1;
        }
        return 0;
}
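
/*
 * Usage sketch (editor's addition, not part of the original file): the
 * matching free path in a per-arch dma_free_coherent() would first give
 * the per-device pool a chance to reclaim the buffer:
 *
 *      void dma_free_coherent(struct device *dev, size_t size,
 *                             void *vaddr, dma_addr_t handle)
 *      {
 *              int order = get_order(size);
 *
 *              if (dma_release_from_coherent(dev, order, vaddr))
 *                      return;         // came from the per-device pool
 *
 *              // ... otherwise free through the generic allocator ...
 *      }
 */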