dma-coherence.h

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2006  Ralf Baechle <ralf@linux-mips.org>
 *
 */
#ifndef __ASM_MACH_IP32_DMA_COHERENCE_H
#define __ASM_MACH_IP32_DMA_COHERENCE_H

#include <asm/ip32/crime.h>

struct device;

/*
 * A few notes:
 *
 * 1. CPU sees memory as two chunks: 0-256M @ 0x0, and the rest
 *    @ 0x40000000 + 256M
 * 2. PCI sees memory as one big chunk @ 0x0 (or we could use 0x40000000
 *    for native-endian)
 * 3. All other devices see memory as one big chunk at 0x40000000
 * 4. Non-PCI devices will pass NULL as struct device *
 *
 * Thus we translate differently, depending on device.
 */

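/*
 * Added explanatory comment (not in the original file): the mask below
 * keeps the low 30 bits of an address, which strips the 0x40000000 base
 * of the high-memory alias and collapses both CPU views of RAM into one
 * zero-based range before any per-device offset is applied.
 */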
#define RAM_OFFSET_MASK	0x3fffffffUL

static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
	size_t size)
{
	dma_addr_t pa = virt_to_phys(addr) & RAM_OFFSET_MASK;

	if (dev == NULL)
		pa += CRIME_HI_MEM_BASE;

	return pa;
}

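/*
 * Worked example (illustrative, not from the original file): a buffer at
 * CPU physical address 0x50001000 (256M + 0x1000 into the high chunk)
 * masks down to 0x10001000.  A PCI device (non-NULL dev) uses that value
 * directly, since PCI sees RAM at 0x0; any other device gets
 * 0x10001000 + CRIME_HI_MEM_BASE, matching its view of RAM at 0x40000000.
 */
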
static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
	struct page *page)
{
	dma_addr_t pa;

	pa = page_to_phys(page) & RAM_OFFSET_MASK;

	if (dev == NULL)
		pa += CRIME_HI_MEM_BASE;

	return pa;
}

/* This is almost certainly wrong but it's what dma-ip32.c used to use */
static inline unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
{
	unsigned long addr = dma_addr & RAM_OFFSET_MASK;

	if (dma_addr >= 256*1024*1024)
		addr += CRIME_HI_MEM_BASE;

	return addr;
}

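/*
 * Added note (an inference, not in the original file): the 256M test
 * appears to assume bus addresses below 256M refer to the low chunk at
 * physical 0x0 and everything above it to the alias behind
 * CRIME_HI_MEM_BASE, mirroring the CPU's split view of RAM.
 */
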
static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
{
	/* Nothing to undo: the mapping is pure address arithmetic. */
}

static inline int plat_dma_supported(struct device *dev, u64 mask)
{
	/*
	 * We fall back to GFP_DMA when the mask isn't all 1s, so we can't
	 * guarantee allocations that must be within a tighter range than
	 * GFP_DMA.
	 */
	if (mask < DMA_BIT_MASK(24))
		return 0;

	return 1;
}

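/*
 * For reference (added comment): DMA_BIT_MASK(24) is 0x00ffffff, so any
 * device mask narrower than 24 bits (tighter than the 16M range GFP_DMA
 * can serve) is rejected.
 */
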
static inline void plat_extra_sync_for_device(struct device *dev)
{
	/* No platform-specific sync work is needed beyond the generic path. */
}

static inline int plat_dma_mapping_error(struct device *dev,
	dma_addr_t dma_addr)
{
	/* Address translation here cannot fail, so never report an error. */
	return 0;
}

static inline int plat_device_is_coherent(struct device *dev)
{
	return 0;	/* IP32 is non-coherent */
}

#endif /* __ASM_MACH_IP32_DMA_COHERENCE_H */