#ifndef __ASM_SH64_IO_H
#define __ASM_SH64_IO_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * include/asm-sh64/io.h
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003  Paul Mundt
 *
 */
/*
 * Convention:
 *    read{b,w,l}/write{b,w,l} are for PCI,
 *    while in{b,w,l}/out{b,w,l} are for ISA.
 * These may (will) be platform-specific functions.
 *
 * In addition, we have
 *    ctrl_in{b,w,l}/ctrl_out{b,w,l} for SuperH-specific I/O,
 * which are processor specific. Addresses should be the result of
 * onchip_remap().
 */
#include <linux/compiler.h>
#include <asm/cache.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm-generic/iomap.h>
/*
 * Nothing overly special here.. instead of doing the same thing
 * over and over again, we just define a set of sh64_in/out functions
 * with an implicit size. The traditional read{b,w,l}/write{b,w,l}
 * mess is wrapped to this, as are the SH-specific ctrl_in/out routines.
 */
static inline unsigned char sh64_in8(const volatile void __iomem *addr)
{
	return *(volatile unsigned char __force *)addr;
}

static inline unsigned short sh64_in16(const volatile void __iomem *addr)
{
	return *(volatile unsigned short __force *)addr;
}

static inline unsigned int sh64_in32(const volatile void __iomem *addr)
{
	return *(volatile unsigned int __force *)addr;
}

static inline unsigned long long sh64_in64(const volatile void __iomem *addr)
{
	return *(volatile unsigned long long __force *)addr;
}

static inline void sh64_out8(unsigned char b, volatile void __iomem *addr)
{
	*(volatile unsigned char __force *)addr = b;
	wmb();
}

static inline void sh64_out16(unsigned short b, volatile void __iomem *addr)
{
	*(volatile unsigned short __force *)addr = b;
	wmb();
}

static inline void sh64_out32(unsigned int b, volatile void __iomem *addr)
{
	*(volatile unsigned int __force *)addr = b;
	wmb();
}

static inline void sh64_out64(unsigned long long b, volatile void __iomem *addr)
{
	*(volatile unsigned long long __force *)addr = b;
	wmb();
}
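
/*
 * Note that every sh64_out* store is followed by a wmb(), so
 * back-to-back register writes are pushed out in program order
 * rather than sitting in the write buffer.
 */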
#define readb(addr)		sh64_in8(addr)
#define readw(addr)		sh64_in16(addr)
#define readl(addr)		sh64_in32(addr)
#define readb_relaxed(addr)	sh64_in8(addr)
#define readw_relaxed(addr)	sh64_in16(addr)
#define readl_relaxed(addr)	sh64_in32(addr)

#define writeb(b, addr)		sh64_out8(b, addr)
#define writew(b, addr)		sh64_out16(b, addr)
#define writel(b, addr)		sh64_out32(b, addr)

#define ctrl_inb(addr)		sh64_in8(ioport_map(addr, 1))
#define ctrl_inw(addr)		sh64_in16(ioport_map(addr, 2))
#define ctrl_inl(addr)		sh64_in32(ioport_map(addr, 4))

#define ctrl_outb(b, addr)	sh64_out8(b, ioport_map(addr, 1))
#define ctrl_outw(b, addr)	sh64_out16(b, ioport_map(addr, 2))
#define ctrl_outl(b, addr)	sh64_out32(b, ioport_map(addr, 4))

#define ioread8(addr)		sh64_in8(addr)
#define ioread16(addr)		sh64_in16(addr)
#define ioread32(addr)		sh64_in32(addr)
#define iowrite8(b, addr)	sh64_out8(b, addr)
#define iowrite16(b, addr)	sh64_out16(b, addr)
#define iowrite32(b, addr)	sh64_out32(b, addr)

#define inb(addr)		ctrl_inb(addr)
#define inw(addr)		ctrl_inw(addr)
#define inl(addr)		ctrl_inl(addr)
#define outb(b, addr)		ctrl_outb(b, addr)
#define outw(b, addr)		ctrl_outw(b, addr)
#define outl(b, addr)		ctrl_outl(b, addr)
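
/*
 * Usage sketch: the in/out family takes a port number (routed through
 * ioport_map() by the ctrl_* wrappers above), while read/write take a
 * mapped address. The port and 'regs' below are assumptions for
 * illustration only:
 *
 *	unsigned char c = inb(0x3f8);	// port I/O
 *	writel(0x1, regs + 0x10);	// MMIO, 'regs' from ioremap()
 */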
void outsw(unsigned long port, const void *addr, unsigned long count);
void insw(unsigned long port, void *addr, unsigned long count);
void outsl(unsigned long port, const void *addr, unsigned long count);
void insl(unsigned long port, void *addr, unsigned long count);
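
/*
 * The string forms move 'count' items between memory and a single
 * port, e.g. draining a 16-bit data FIFO ('data_port' and the buffer
 * are hypothetical):
 *
 *	unsigned short buf[256];
 *	insw(data_port, buf, 256);	// read 256 halfwords from one port
 */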
#define __raw_readb		readb
#define __raw_readw		readw
#define __raw_readl		readl
#define __raw_writeb		writeb
#define __raw_writew		writew
#define __raw_writel		writel

void memcpy_toio(void __iomem *to, const void *from, long count);
void memcpy_fromio(void *to, void __iomem *from, long count);

#define mmiowb()
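
/*
 * mmiowb() can be a no-op here, presumably because each sh64_out*
 * write already ends with a wmb().
 */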
#ifdef __KERNEL__

#ifdef CONFIG_SH_CAYMAN
extern unsigned long smsc_superio_virt;
#endif

#ifdef CONFIG_PCI
extern unsigned long pciio_virt;
#endif

#define IO_SPACE_LIMIT 0xffffffff
/*
 * Change virtual addresses to physical addresses and vice versa.
 * These are trivial on the 1:1 Linux/SuperH mapping.
 */
static inline unsigned long virt_to_phys(volatile void *address)
{
	return __pa(address);
}

static inline void *phys_to_virt(unsigned long address)
{
	return __va(address);
}
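
/*
 * With the 1:1 mapping these are plain offset conversions, so a round
 * trip is an identity, e.g. ('kbuf' is a hypothetical kernel buffer):
 *
 *	unsigned long phys = virt_to_phys(kbuf);
 *	void *virt = phys_to_virt(phys);	// virt == kbuf
 */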
extern void *__ioremap(unsigned long phys_addr, unsigned long size,
		       unsigned long flags);

static inline void *ioremap(unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, 1);
}

static inline void *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, 0);
}

extern void iounmap(void *addr);

unsigned long onchip_remap(unsigned long addr, unsigned long size, const char *name);
extern void onchip_unmap(unsigned long vaddr);
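
/*
 * Mapping sketch (the physical address and size below are assumptions
 * for illustration only):
 *
 *	void *regs = ioremap_nocache(0xfe600000, 0x100);
 *	if (regs) {
 *		unsigned int id = readl(regs);	// some device register
 *		iounmap(regs);
 *	}
 *
 * On-chip peripherals use onchip_remap()/onchip_unmap() instead; per
 * the convention comment at the top of this file, the resulting
 * address is what the ctrl_* accessors expect.
 */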
/*
 * The caches on some architectures aren't dma-coherent and need to
 * handle this in software. There are three types of operations that
 * can be applied to dma buffers.
 *
 * - dma_cache_wback_inv(start, size) makes caches and RAM coherent by
 *   writing the content of the caches back to memory, if necessary.
 *   The function also invalidates the affected part of the caches as
 *   necessary before DMA transfers from outside to memory.
 * - dma_cache_inv(start, size) invalidates the affected parts of the
 *   caches. Dirty lines of the caches may be written back or simply
 *   be discarded. This operation is necessary before dma operations
 *   to the memory.
 * - dma_cache_wback(start, size) writes back any dirty lines but does
 *   not invalidate the cache. This can be used before DMA reads from
 *   memory.
 */
static __inline__ void dma_cache_wback_inv(unsigned long start, unsigned long size)
{
	unsigned long s = start & L1_CACHE_ALIGN_MASK;
	unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;

	for (; s <= e; s += L1_CACHE_BYTES)
		asm volatile ("ocbp %0, 0" : : "r" (s));
}

static __inline__ void dma_cache_inv(unsigned long start, unsigned long size)
{
	/*
	 * Note that the caller has to be careful with overzealous
	 * invalidation should there be partial cache lines at the
	 * extremities of the specified range.
	 */
	unsigned long s = start & L1_CACHE_ALIGN_MASK;
	unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;

	for (; s <= e; s += L1_CACHE_BYTES)
		asm volatile ("ocbi %0, 0" : : "r" (s));
}

static __inline__ void dma_cache_wback(unsigned long start, unsigned long size)
{
	unsigned long s = start & L1_CACHE_ALIGN_MASK;
	unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;

	for (; s <= e; s += L1_CACHE_BYTES)
		asm volatile ("ocbwb %0, 0" : : "r" (s));
}
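
/*
 * Usage sketch matching the comment above ('buf' and 'len' are
 * hypothetical):
 *
 *	dma_cache_wback((unsigned long)buf, len);  // before a device reads buf
 *	... start outbound DMA ...
 *
 *	dma_cache_inv((unsigned long)buf, len);    // before a device writes buf
 *	... start inbound DMA, then the CPU may read buf ...
 */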
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access.
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer.
 */
#define xlate_dev_kmem_ptr(p)	p

#endif /* __KERNEL__ */

#endif /* __ASM_SH64_IO_H */