dma-coherent.c

/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/dma-mapping.h>

#include <asm/addrspace.h>
#include <asm/cacheflush.h>
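
/*
 * Synchronize the data cache with memory for a streaming DMA buffer.
 * DMA_FROM_DEVICE invalidates the region so the CPU sees data written
 * by the device, DMA_TO_DEVICE writes back dirty lines so the device
 * sees data written by the CPU, and DMA_BIDIRECTIONAL does both.
 * Addresses in the uncached P2 segment need no maintenance at all.
 * A driver that has filled @vaddr for the device would typically call
 * dma_cache_sync(dev, vaddr, size, DMA_TO_DEVICE) before starting the
 * transfer.
 */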
void dma_cache_sync(struct device *dev, void *vaddr, size_t size, int direction)
{
	/*
	 * No need to sync an uncached area
	 */
	if (PXSEG(vaddr) == P2SEG)
		return;

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		dma_cache_inv(vaddr, size);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		dma_cache_wback(vaddr, size);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		dma_cache_wback_inv(vaddr, size);
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL(dma_cache_sync);
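
/*
 * Allocate the backing pages for a DMA buffer.  The requested size is
 * rounded up to a power-of-two order for alloc_pages(); split_page()
 * then turns the compound allocation into individual order-0 pages so
 * that the unused tail (everything beyond @size) can be handed back to
 * the page allocator below.  The data cache is invalidated over the
 * region so that stale cache lines cannot later be evicted on top of
 * data the device has written to memory.
 */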
static struct page *__dma_alloc(struct device *dev, size_t size,
				dma_addr_t *handle, gfp_t gfp)
{
	struct page *page, *free, *end;
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;
	split_page(page, order);

	/*
	 * When accessing physical memory with valid cache data, we
	 * get a cache hit even if the virtual memory region is marked
	 * as uncached.
	 *
	 * Since the memory is newly allocated, there is no point in
	 * doing a writeback. If the previous owner cares, he should
	 * have flushed the cache before releasing the memory.
	 */
	invalidate_dcache_region(phys_to_virt(page_to_phys(page)), size);

	*handle = page_to_bus(page);
	free = page + (size >> PAGE_SHIFT);
	end = page + (1 << order);

	/*
	 * Free any unused pages
	 */
	while (free < end) {
		__free_page(free);
		free++;
	}

	return page;
}
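
/*
 * Release the pages handed out by __dma_alloc().  Since the original
 * allocation was split into order-0 pages, each page covering @size is
 * freed individually.
 */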
static void __dma_free(struct device *dev, size_t size,
		       struct page *page, dma_addr_t handle)
{
	struct page *end = page + (PAGE_ALIGN(size) >> PAGE_SHIFT);

	while (page < end)
		__free_page(page++);
}
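
/*
 * Coherent memory is simply the uncached (P2 segment) alias of the
 * pages returned by __dma_alloc(): CPU accesses through the returned
 * pointer bypass the data cache, so no further cache maintenance is
 * needed while the buffer is shared with a device.
 */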
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *handle, gfp_t gfp)
{
	struct page *page;
	void *ret = NULL;

	page = __dma_alloc(dev, size, handle, gfp);
	if (page)
		ret = phys_to_uncached(page_to_phys(page));

	return ret;
}
EXPORT_SYMBOL(dma_alloc_coherent);
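
/*
 * Convert the uncached address handed out by dma_alloc_coherent() back
 * to its cached linear-mapping alias so that virt_to_page() can recover
 * the struct page, then free the pages.
 */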
void dma_free_coherent(struct device *dev, size_t size,
		       void *cpu_addr, dma_addr_t handle)
{
	void *addr = phys_to_cached(uncached_to_phys(cpu_addr));
	struct page *page;

	pr_debug("dma_free_coherent addr %p (phys %08lx) size %u\n",
		 cpu_addr, (unsigned long)handle, (unsigned)size);

	BUG_ON(!virt_addr_valid(addr));

	page = virt_to_page(addr);
	__dma_free(dev, size, page, handle);
}
EXPORT_SYMBOL(dma_free_coherent);
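
/*
 * Like dma_alloc_coherent(), but instead of returning the uncached P2
 * alias the pages are remapped into P3 with __ioremap() and _PAGE_BUFFER
 * set, which enables write-combining.  The physical address of the pages
 * is returned through @handle as the bus address.
 */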
void *dma_alloc_writecombine(struct device *dev, size_t size,
			     dma_addr_t *handle, gfp_t gfp)
{
	struct page *page;
	dma_addr_t phys;

	page = __dma_alloc(dev, size, handle, gfp);
	if (!page)
		return NULL;

	phys = page_to_phys(page);
	*handle = phys;

	/* Now, map the page into P3 with write-combining turned on */
	return __ioremap(phys, size, _PAGE_BUFFER);
}
EXPORT_SYMBOL(dma_alloc_writecombine);
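
/*
 * Tear down the write-combining (P3) mapping with iounmap() and free the
 * underlying pages, which are located again via the bus handle.
 */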
void dma_free_writecombine(struct device *dev, size_t size,
			   void *cpu_addr, dma_addr_t handle)
{
	struct page *page;

	iounmap(cpu_addr);

	page = phys_to_page(handle);
	__dma_free(dev, size, page, handle);
}
EXPORT_SYMBOL(dma_free_writecombine);
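
/*
 * Illustrative sketch (not part of the original file): how a driver
 * might use the coherent API above for a device descriptor.  The names
 * my_dev, my_desc, MY_DESC_READY and DESC_BASE are hypothetical; only
 * dma_alloc_coherent() and dma_free_coherent() are real.
 *
 *	dma_addr_t dma;
 *	struct my_desc *desc;
 *
 *	desc = dma_alloc_coherent(&my_dev->dev, sizeof(*desc), &dma,
 *				  GFP_KERNEL);
 *	if (!desc)
 *		return -ENOMEM;
 *
 *	desc->ctrl = MY_DESC_READY;		// CPU store goes straight to
 *						// RAM through the uncached alias
 *	writel(dma, my_dev->regs + DESC_BASE);	// device fetches the descriptor
 *
 *	...
 *
 *	dma_free_coherent(&my_dev->dev, sizeof(*desc), desc, dma);
 */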