/*
 * arch/sh/mm/consistent.c
 *
 * Copyright (C) 2004 - 2007  Paul Mundt
 *
 * Declared coherent memory functions based on arch/x86/kernel/pci-dma_32.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
#include <asm/addrspace.h>
#include <asm/io.h>
/*
 * Per-device "declared" coherent memory region, hung off dev->dma_mem
 * (see dma_free_coherent below, which checks mem->flags).
 */
struct dma_coherent_mem {
	void		*virt_base;	/* presumably the kernel virtual base of the region — TODO confirm */
	u32		device_base;	/* presumably the device/bus address of the same region — TODO confirm */
	int		size;		/* NOTE(review): units (bytes vs pages) not visible here — verify */
	int		flags;		/* DMA_MEMORY_* bits; DMA_MEMORY_EXCLUSIVE is tested in dma_free_coherent */
	unsigned long	*bitmap;	/* presumably an allocation bitmap for the region — TODO confirm */
};
  24. void *dma_alloc_coherent(struct device *dev, size_t size,
  25. dma_addr_t *dma_handle, gfp_t gfp)
  26. {
  27. void *ret, *ret_nocache;
  28. int order = get_order(size);
  29. if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
  30. return ret;
  31. ret = (void *)__get_free_pages(gfp, order);
  32. if (!ret)
  33. return NULL;
  34. memset(ret, 0, size);
  35. /*
  36. * Pages from the page allocator may have data present in
  37. * cache. So flush the cache before using uncached memory.
  38. */
  39. dma_cache_sync(dev, ret, size, DMA_BIDIRECTIONAL);
  40. ret_nocache = ioremap_nocache(virt_to_phys(ret), size);
  41. if (!ret_nocache) {
  42. free_pages((unsigned long)ret, order);
  43. return NULL;
  44. }
  45. *dma_handle = virt_to_phys(ret);
  46. return ret_nocache;
  47. }
  48. EXPORT_SYMBOL(dma_alloc_coherent);
  49. void dma_free_coherent(struct device *dev, size_t size,
  50. void *vaddr, dma_addr_t dma_handle)
  51. {
  52. struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
  53. int order = get_order(size);
  54. if (!dma_release_from_coherent(dev, order, vaddr)) {
  55. WARN_ON(irqs_disabled()); /* for portability */
  56. BUG_ON(mem && mem->flags & DMA_MEMORY_EXCLUSIVE);
  57. free_pages((unsigned long)phys_to_virt(dma_handle), order);
  58. iounmap(vaddr);
  59. }
  60. }
  61. EXPORT_SYMBOL(dma_free_coherent);
  62. void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
  63. enum dma_data_direction direction)
  64. {
  65. #ifdef CONFIG_CPU_SH5
  66. void *p1addr = vaddr;
  67. #else
  68. void *p1addr = (void*) P1SEGADDR((unsigned long)vaddr);
  69. #endif
  70. switch (direction) {
  71. case DMA_FROM_DEVICE: /* invalidate only */
  72. __flush_invalidate_region(p1addr, size);
  73. break;
  74. case DMA_TO_DEVICE: /* writeback only */
  75. __flush_wback_region(p1addr, size);
  76. break;
  77. case DMA_BIDIRECTIONAL: /* writeback and invalidate */
  78. __flush_purge_region(p1addr, size);
  79. break;
  80. default:
  81. BUG();
  82. }
  83. }
  84. EXPORT_SYMBOL(dma_cache_sync);