/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Portions Copyright (C) Cisco Systems, Inc.
 */
#ifndef __ASM_MACH_POWERTV_IOREMAP_H
#define __ASM_MACH_POWERTV_IOREMAP_H

#include <linux/types.h>
#include <linux/log2.h>
#include <linux/compiler.h>

#include <asm/pgtable-bits.h>
#include <asm/addrspace.h>
/* We're going to mess with bits, so get sizes */
#define IOR_BPC 8 /* Bits per char */
#define IOR_PHYS_BITS (IOR_BPC * sizeof(phys_addr_t))
#define IOR_DMA_BITS (IOR_BPC * sizeof(dma_addr_t))

/*
 * Define the granularity of physical/DMA mapping in terms of the number
 * of bits that defines the offset within a grain. These will be the
 * least significant bits of the address. The rest of a physical or DMA
 * address will be used to index into an appropriate table to find the
 * offset to add to the address to yield the corresponding DMA or physical
 * address, respectively.
 */
#define IOR_LSBITS 22 /* Bits in a grain */

/*
 * Compute the number of most significant address bits after removing those
 * used for the offset within a grain and then compute the number of table
 * entries for the conversion.
 */
#define IOR_PHYS_MSBITS (IOR_PHYS_BITS - IOR_LSBITS)
#define IOR_NUM_PHYS_TO_DMA ((phys_addr_t) 1 << IOR_PHYS_MSBITS)
#define IOR_DMA_MSBITS (IOR_DMA_BITS - IOR_LSBITS)
#define IOR_NUM_DMA_TO_PHYS ((dma_addr_t) 1 << IOR_DMA_MSBITS)
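
/*
 * Worked example (illustrative only, assuming 32-bit phys_addr_t and
 * dma_addr_t): IOR_PHYS_BITS = IOR_DMA_BITS = 32, a grain is
 * 1 << 22 = 4 MiB, IOR_PHYS_MSBITS = IOR_DMA_MSBITS = 32 - 22 = 10,
 * and each conversion table therefore has 1 << 10 = 1024 entries.
 */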
/*
 * Define data structures used as elements in the arrays for the conversion
 * between physical and DMA addresses. We do some slightly fancy math to
 * compute the width of the offset element of the conversion tables so
 * that we can have the smallest conversion tables. Next, round up the
 * sizes to the next higher power of two, i.e. the offset element will have
 * 8, 16, 32, 64, etc. bits. This eliminates the need to mask off any
 * bits. Finally, we compute a shift value that puts the most significant
 * bits of the offset into the most significant bits of the offset element.
 * This makes it more efficient on processors without barrel shifters and
 * easier to see the values if the conversion table is dumped in binary.
 */
#define _IOR_OFFSET_WIDTH(n) (1 << order_base_2(n))
#define IOR_OFFSET_WIDTH(n) \
        (_IOR_OFFSET_WIDTH(n) < 8 ? 8 : _IOR_OFFSET_WIDTH(n))

#define IOR_PHYS_OFFSET_BITS IOR_OFFSET_WIDTH(IOR_PHYS_MSBITS)
#define IOR_PHYS_SHIFT (IOR_PHYS_BITS - IOR_PHYS_OFFSET_BITS)

#define IOR_DMA_OFFSET_BITS IOR_OFFSET_WIDTH(IOR_DMA_MSBITS)
#define IOR_DMA_SHIFT (IOR_DMA_BITS - IOR_DMA_OFFSET_BITS)
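
/*
 * Continuing the illustrative 32-bit example: order_base_2(10) = 4, so
 * _IOR_OFFSET_WIDTH(10) = 1 << 4 = 16 and both offset elements are 16
 * bits wide; IOR_PHYS_SHIFT = IOR_DMA_SHIFT = 32 - 16 = 16, so a stored
 * element holds the upper 16 bits of the corresponding offset.
 */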
struct ior_phys_to_dma {
        dma_addr_t offset:IOR_DMA_OFFSET_BITS __packed
                __aligned((IOR_DMA_OFFSET_BITS / IOR_BPC));
};

struct ior_dma_to_phys {
        dma_addr_t offset:IOR_PHYS_OFFSET_BITS __packed
                __aligned((IOR_PHYS_OFFSET_BITS / IOR_BPC));
};

extern struct ior_phys_to_dma _ior_phys_to_dma[IOR_NUM_PHYS_TO_DMA];
extern struct ior_dma_to_phys _ior_dma_to_phys[IOR_NUM_DMA_TO_PHYS];
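
/*
 * With the illustrative 32-bit sizes above, each packed table entry is a
 * 16-bit (2-byte) element and each table has 1024 entries, so the two
 * conversion tables together occupy roughly 4 KiB.
 */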
/* Fetch the stored offset element for the grain containing the address */
static inline dma_addr_t _phys_to_dma_offset_raw(phys_addr_t phys)
{
        return (dma_addr_t)_ior_phys_to_dma[phys >> IOR_LSBITS].offset;
}

static inline dma_addr_t _dma_to_phys_offset_raw(dma_addr_t dma)
{
        return (dma_addr_t)_ior_dma_to_phys[dma >> IOR_LSBITS].offset;
}
/* These are not portable and should not be used in drivers. Drivers should
 * be using ioremap() and friends to map physical addresses to virtual
 * addresses and dma_map*() and friends to map virtual addresses into DMA
 * addresses and back.
 */
static inline dma_addr_t phys_to_dma(phys_addr_t phys)
{
        return phys + (_phys_to_dma_offset_raw(phys) << IOR_PHYS_SHIFT);
}

static inline phys_addr_t dma_to_phys(dma_addr_t dma)
{
        return dma + (_dma_to_phys_offset_raw(dma) << IOR_DMA_SHIFT);
}
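
/*
 * Illustration (hypothetical values): if the table entry for the grain
 * containing phys holds 0x0004 and IOR_PHYS_SHIFT is 16, then
 * phys_to_dma(phys) returns phys + (0x0004 << 16) = phys + 0x40000,
 * i.e. the stored element supplies the grain's offset shifted back into
 * place. dma_to_phys() applies the reverse table in the same way.
 */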
extern void ioremap_add_map(dma_addr_t phys, phys_addr_t alias,
        dma_addr_t size);
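
/*
 * A platform setup routine might, for example, register a 16 MiB region
 * at one address together with its alias at another (all addresses here
 * are purely illustrative):
 *
 *      ioremap_add_map(0x10000000, 0x50000000, 0x01000000);
 */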
/*
 * Allow physical addresses to be fixed up to help peripherals located
 * outside the low 32-bit range -- generic pass-through version.
 */
static inline phys_t fixup_bigphys_addr(phys_t phys_addr, phys_t size)
{
        return phys_addr;
}
/*
 * Handle the special case of addresses in the area aliased into the first
 * 512 MiB of the processor's physical address space. These turn into either
 * kseg0 or kseg1 addresses, depending on flags.
 */
static inline void __iomem *plat_ioremap(phys_t start, unsigned long size,
        unsigned long flags)
{
        phys_addr_t start_offset;
        void __iomem *result = NULL;

        /* Start by checking to see whether this is an aliased address */
        start_offset = _dma_to_phys_offset_raw(start);

        /*
         * If:
         * o the memory is aliased into the first 512 MiB, and
         * o the start and end are in the same RAM bank, and
         * o we don't have a zero size or wrap around, and
         * o we are supposed to create an uncached mapping,
         * handle this as a kseg0 or kseg1 address.
         */
        if (start_offset != 0) {
                phys_addr_t last;
                dma_addr_t dma_to_phys_offset;

                last = start + size - 1;
                dma_to_phys_offset =
                        _dma_to_phys_offset_raw(last) << IOR_DMA_SHIFT;

                if (dma_to_phys_offset == start_offset &&
                        size != 0 && start <= last) {
                        phys_t adjusted_start;

                        adjusted_start = start + start_offset;
                        if (flags == _CACHE_UNCACHED)
                                result = (void __iomem *) (unsigned long)
                                        CKSEG1ADDR(adjusted_start);
                        else
                                result = (void __iomem *) (unsigned long)
                                        CKSEG0ADDR(adjusted_start);
                }
        }

        return result;
}
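
/*
 * In practice this means an aliased, in-bank range resolves directly to
 * CKSEG1ADDR(start + offset) for uncached requests (or CKSEG0ADDR()
 * otherwise) with no page tables involved, while any other address
 * returns NULL here so the caller can fall back to the normal kernel
 * mapping path.
 */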
static inline int plat_iounmap(const volatile void __iomem *addr)
{
        return 0;
}

#endif /* __ASM_MACH_POWERTV_IOREMAP_H */