dma-mapping.c

/*
 * Dynamic DMA mapping support
 *
 * Copyright 2005-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>
#include <asm/bfin-global.h>
/* State for the uncached DMA page allocator: a byte-per-page map of the
 * reserved region, plus the region's base address and size. */
static spinlock_t dma_page_lock;
static unsigned int *dma_page;
static unsigned int dma_pages;
static unsigned long dma_base;
static unsigned long dma_size;
static unsigned int dma_initialized;
/* Set up the allocator over [start, end): grab one page for the page map,
 * record the region geometry, and zero the uncached region itself. */
void dma_alloc_init(unsigned long start, unsigned long end)
{
	spin_lock_init(&dma_page_lock);
	dma_initialized = 0;

	dma_page = (unsigned int *)__get_free_page(GFP_KERNEL);
	memset(dma_page, 0, PAGE_SIZE);
	dma_base = PAGE_ALIGN(start);
	dma_size = PAGE_ALIGN(end) - PAGE_ALIGN(start);
	dma_pages = dma_size >> PAGE_SHIFT;
	memset((void *)dma_base, 0, DMA_UNCACHED_REGION);
	dma_initialized = 1;

	printk(KERN_INFO "%s: dma_page @ 0x%p - %d pages at 0x%08lx\n",
	       __func__, dma_page, dma_pages, dma_base);
}
/* Round a byte count up to a whole number of pages. */
static inline unsigned int get_pages(size_t size)
{
	return ((size - 1) >> PAGE_SHIFT) + 1;
}
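
/*
 * Worked example (illustrative, not in the original source; assumes
 * PAGE_SIZE == 4096, i.e. PAGE_SHIFT == 12): get_pages(1) and
 * get_pages(4096) both return 1, while get_pages(4097) returns 2.
 * Note the expression underflows for size == 0, so callers are
 * expected to pass a non-zero size.
 */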
/* First-fit scan of the page map for a run of 'pages' consecutive free
 * entries; mark the run in use and return its base address, or 0 on
 * failure. Falls back to initializing the default uncached region on
 * first use. */
static unsigned long __alloc_dma_pages(unsigned int pages)
{
	unsigned long ret = 0, flags;
	int i, count = 0;

	if (dma_initialized == 0)
		dma_alloc_init(_ramend - DMA_UNCACHED_REGION, _ramend);

	spin_lock_irqsave(&dma_page_lock, flags);

	for (i = 0; i < dma_pages;) {
		if (dma_page[i++] == 0) {
			if (++count == pages) {
				while (count--)
					dma_page[--i] = 1;
				ret = dma_base + (i << PAGE_SHIFT);
				break;
			}
		} else
			count = 0;
	}
	spin_unlock_irqrestore(&dma_page_lock, flags);
	return ret;
}
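
/*
 * Illustrative trace (not in the original source): with a page map of
 * {1, 0, 0, 1, ...} and pages == 2, the scan resets count at index 0,
 * counts indexes 1 and 2, marks both in use, and returns
 * dma_base + (1 << PAGE_SHIFT).
 */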
/* Return a run of pages to the map; BUG on an address outside the region. */
static void __free_dma_pages(unsigned long addr, unsigned int pages)
{
	unsigned long page = (addr - dma_base) >> PAGE_SHIFT;
	unsigned long flags;
	int i;

	if ((page + pages) > dma_pages) {
		printk(KERN_ERR "%s: freeing outside range.\n", __func__);
		BUG();
	}

	spin_lock_irqsave(&dma_page_lock, flags);
	for (i = page; i < page + pages; i++)
		dma_page[i] = 0;
	spin_unlock_irqrestore(&dma_page_lock, flags);
}
/* Coherent allocations come from the uncached region, so no cache
 * maintenance is needed on the returned buffer. */
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	ret = (void *)__alloc_dma_pages(get_pages(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = virt_to_phys(ret);
	}

	return ret;
}
EXPORT_SYMBOL(dma_alloc_coherent);

void
dma_free_coherent(struct device *dev, size_t size, void *vaddr,
		  dma_addr_t dma_handle)
{
	__free_dma_pages((unsigned long)vaddr, get_pages(size));
}
EXPORT_SYMBOL(dma_free_coherent);
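
/*
 * Illustrative sketch, not part of the original file: how a driver might
 * allocate and release a small coherent buffer with the API above. The
 * function names and the 256-byte size are hypothetical.
 */
static int example_setup(struct device *dev, void **buf, dma_addr_t *handle)
{
	/* Zeroed, uncached memory that the CPU and device share directly. */
	*buf = dma_alloc_coherent(dev, 256, handle, GFP_KERNEL);
	return *buf ? 0 : -ENOMEM;
}

static void example_teardown(struct device *dev, void *buf, dma_addr_t handle)
{
	dma_free_coherent(dev, 256, buf, handle);
}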
/*
 * Dummy functions defined for some existing drivers
 */

dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	/* No MMU here, so the DMA address is the buffer address itself;
	 * the cached lines are invalidated so the CPU does not read stale
	 * data back after the device writes the buffer. */
	invalidate_dcache_range((unsigned long)ptr,
				(unsigned long)ptr + size);

	return (dma_addr_t) ptr;
}
EXPORT_SYMBOL(dma_map_single);
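
/*
 * Illustrative sketch, not part of the original file: a streaming map of
 * a receive buffer before handing it to a device. Names are hypothetical.
 */
static dma_addr_t example_map_rx(struct device *dev, void *buf, size_t len)
{
	/* The map call invalidates the cached copy of 'buf', so reads after
	 * the device's DMA completes see the new data. */
	return dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
}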
int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		sg->dma_address = (dma_addr_t) sg_virt(sg);

		invalidate_dcache_range(sg_dma_address(sg),
					sg_dma_address(sg) +
					sg_dma_len(sg));
	}

	return nents;
}
EXPORT_SYMBOL(dma_map_sg);
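
/*
 * Illustrative sketch, not part of the original file: mapping a two-entry
 * scatterlist. The buffers and lengths are hypothetical.
 */
static int example_map_sg(struct device *dev, void *a, void *b, size_t len)
{
	struct scatterlist sgl[2];

	sg_init_table(sgl, 2);
	sg_set_buf(&sgl[0], a, len);
	sg_set_buf(&sgl[1], b, len);

	/* Each entry's dma_address is filled in and its cache range
	 * invalidated by dma_map_sg() above. */
	return dma_map_sg(dev, sgl, 2, DMA_FROM_DEVICE);
}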
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		      enum dma_data_direction direction)
{
	/* Cache work was done at map time; nothing to undo here. */
	BUG_ON(direction == DMA_NONE);
}
EXPORT_SYMBOL(dma_unmap_single);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		  int nhwentries, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}
EXPORT_SYMBOL(dma_unmap_sg);