dma-coherence.h
  1. /*
  2. * This file is subject to the terms and conditions of the GNU General Public
  3. * License. See the file "COPYING" in the main directory of this archive
  4. * for more details.
  5. *
  6. * Version from mach-generic modified to support PowerTV port
  7. * Portions Copyright (C) 2009 Cisco Systems, Inc.
  8. * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
  9. *
  10. */
  11. #ifndef __ASM_MACH_POWERTV_DMA_COHERENCE_H
  12. #define __ASM_MACH_POWERTV_DMA_COHERENCE_H
  13. #include <linux/sched.h>
  14. #include <linux/version.h>
  15. #include <linux/device.h>
  16. #include <asm/mach-powertv/asic.h>
  17. static inline bool is_kseg2(void *addr)
  18. {
  19. return (unsigned long)addr >= KSEG2;
  20. }
  21. static inline unsigned long virt_to_phys_from_pte(void *addr)
  22. {
  23. pgd_t *pgd;
  24. pud_t *pud;
  25. pmd_t *pmd;
  26. pte_t *ptep, pte;
  27. unsigned long virt_addr = (unsigned long)addr;
  28. unsigned long phys_addr = 0UL;
  29. /* get the page global directory. */
  30. pgd = pgd_offset_k(virt_addr);
  31. if (!pgd_none(*pgd)) {
  32. /* get the page upper directory */
  33. pud = pud_offset(pgd, virt_addr);
  34. if (!pud_none(*pud)) {
  35. /* get the page middle directory */
  36. pmd = pmd_offset(pud, virt_addr);
  37. if (!pmd_none(*pmd)) {
  38. /* get a pointer to the page table entry */
  39. ptep = pte_offset(pmd, virt_addr);
  40. pte = *ptep;
  41. /* check for a valid page */
  42. if (pte_present(pte)) {
  43. /* get the physical address the page is
  44. * refering to */
  45. phys_addr = (unsigned long)
  46. page_to_phys(pte_page(pte));
  47. /* add the offset within the page */
  48. phys_addr |= (virt_addr & ~PAGE_MASK);
  49. }
  50. }
  51. }
  52. }
  53. return phys_addr;
  54. }
  55. static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
  56. size_t size)
  57. {
  58. if (is_kseg2(addr))
  59. return phys_to_dma(virt_to_phys_from_pte(addr));
  60. else
  61. return phys_to_dma(virt_to_phys(addr));
  62. }
  63. static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
  64. struct page *page)
  65. {
  66. return phys_to_dma(page_to_phys(page));
  67. }
  68. static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
  69. dma_addr_t dma_addr)
  70. {
  71. return dma_to_phys(dma_addr);
  72. }
  73. static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
  74. size_t size, enum dma_data_direction direction)
  75. {
  76. }
  77. static inline int plat_dma_supported(struct device *dev, u64 mask)
  78. {
  79. /*
  80. * we fall back to GFP_DMA when the mask isn't all 1s,
  81. * so we can't guarantee allocations that must be
  82. * within a tighter range than GFP_DMA..
  83. */
  84. if (mask < DMA_BIT_MASK(24))
  85. return 0;
  86. return 1;
  87. }
  88. static inline void plat_extra_sync_for_device(struct device *dev)
  89. {
  90. return;
  91. }
  92. static inline int plat_dma_mapping_error(struct device *dev,
  93. dma_addr_t dma_addr)
  94. {
  95. return 0;
  96. }
  97. static inline int plat_device_is_coherent(struct device *dev)
  98. {
  99. return 0;
  100. }
  101. #endif /* __ASM_MACH_POWERTV_DMA_COHERENCE_H */