#ifndef __ASM_SH64_IO_H
#define __ASM_SH64_IO_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * include/asm-sh64/io.h
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003  Paul Mundt
 */

/*
 * Convention:
 *    read{b,w,l}/write{b,w,l} are for PCI,
 *    while in{b,w,l}/out{b,w,l} are for ISA.
 * These may (will) be platform-specific functions.
 *
 * In addition, we have
 *    ctrl_in{b,w,l}/ctrl_out{b,w,l} for SuperH-specific I/O,
 * which are processor-specific. The address should be the result of
 * onchip_remap().
 */
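
/*
 * To make the convention above concrete, a minimal usage sketch
 * (illustrative only; STATUS_REG, BASE_PORT and the physical address
 * are hypothetical names, not part of this header):
 *
 *	void __iomem *regs = ioremap(0xfe000000, 0x100);
 *	unsigned int status = readl(regs + STATUS_REG);	   memory-mapped
 *	unsigned char id = inb(BASE_PORT);		   ISA port I/O
 *	iounmap(regs);
 */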

#include <linux/compiler.h>
#include <asm/cache.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm-generic/iomap.h>

#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
#define page_to_bus page_to_phys

/*
 * Nothing overly special here: instead of doing the same thing
 * over and over again, we just define a set of sh64_in/out functions
 * with an implicit size. The traditional read{b,w,l}/write{b,w,l}
 * mess is wrapped around these, as are the SH-specific ctrl_in/out
 * routines.
 */
static inline unsigned char sh64_in8(const volatile void __iomem *addr)
{
	return *(volatile unsigned char __force *)addr;
}

static inline unsigned short sh64_in16(const volatile void __iomem *addr)
{
	return *(volatile unsigned short __force *)addr;
}

static inline unsigned int sh64_in32(const volatile void __iomem *addr)
{
	return *(volatile unsigned int __force *)addr;
}

static inline unsigned long long sh64_in64(const volatile void __iomem *addr)
{
	return *(volatile unsigned long long __force *)addr;
}
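
/*
 * Note: each store below is followed by wmb(), which orders the MMIO
 * write before any subsequent writes from this CPU.
 */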
static inline void sh64_out8(unsigned char b, volatile void __iomem *addr)
{
	*(volatile unsigned char __force *)addr = b;
	wmb();
}

static inline void sh64_out16(unsigned short b, volatile void __iomem *addr)
{
	*(volatile unsigned short __force *)addr = b;
	wmb();
}

static inline void sh64_out32(unsigned int b, volatile void __iomem *addr)
{
	*(volatile unsigned int __force *)addr = b;
	wmb();
}

static inline void sh64_out64(unsigned long long b, volatile void __iomem *addr)
{
	*(volatile unsigned long long __force *)addr = b;
	wmb();
}

#define readb(addr)		sh64_in8(addr)
#define readw(addr)		sh64_in16(addr)
#define readl(addr)		sh64_in32(addr)
#define readb_relaxed(addr)	sh64_in8(addr)
#define readw_relaxed(addr)	sh64_in16(addr)
#define readl_relaxed(addr)	sh64_in32(addr)

#define writeb(b, addr)		sh64_out8(b, addr)
#define writew(b, addr)		sh64_out16(b, addr)
#define writel(b, addr)		sh64_out32(b, addr)

#define ctrl_inb(addr)		sh64_in8(ioport_map(addr, 1))
#define ctrl_inw(addr)		sh64_in16(ioport_map(addr, 2))
#define ctrl_inl(addr)		sh64_in32(ioport_map(addr, 4))

#define ctrl_outb(b, addr)	sh64_out8(b, ioport_map(addr, 1))
#define ctrl_outw(b, addr)	sh64_out16(b, ioport_map(addr, 2))
#define ctrl_outl(b, addr)	sh64_out32(b, ioport_map(addr, 4))

#define ioread8(addr)		sh64_in8(addr)
#define ioread16(addr)		sh64_in16(addr)
#define ioread32(addr)		sh64_in32(addr)
#define iowrite8(b, addr)	sh64_out8(b, addr)
#define iowrite16(b, addr)	sh64_out16(b, addr)
#define iowrite32(b, addr)	sh64_out32(b, addr)

#define inb(addr)		ctrl_inb(addr)
#define inw(addr)		ctrl_inw(addr)
#define inl(addr)		ctrl_inl(addr)
#define outb(b, addr)		ctrl_outb(b, addr)
#define outw(b, addr)		ctrl_outw(b, addr)
#define outl(b, addr)		ctrl_outl(b, addr)

void outsw(unsigned long port, const void *addr, unsigned long count);
void insw(unsigned long port, void *addr, unsigned long count);
void outsl(unsigned long port, const void *addr, unsigned long count);
void insl(unsigned long port, void *addr, unsigned long count);
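
/*
 * Illustrative use of the string variants, e.g. draining a 16-bit data
 * FIFO into a buffer (DATA_PORT and the buffer size are hypothetical):
 *
 *	unsigned short buf[256];
 *	insw(DATA_PORT, buf, 256);
 */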

#define __raw_readb		readb
#define __raw_readw		readw
#define __raw_readl		readl
#define __raw_writeb		writeb
#define __raw_writew		writew
#define __raw_writel		writel

void memcpy_toio(void __iomem *to, const void *from, long count);
void memcpy_fromio(void *to, void __iomem *from, long count);

#define mmiowb()

#ifdef __KERNEL__

#ifdef CONFIG_SH_CAYMAN
extern unsigned long smsc_superio_virt;
#endif

#ifdef CONFIG_PCI
extern unsigned long pciio_virt;
#endif

#define IO_SPACE_LIMIT 0xffffffff

/*
 * Change virtual addresses to physical addresses and vice versa.
 * These are trivial on the 1:1 Linux/SuperH mapping.
 */
static inline unsigned long virt_to_phys(volatile void *address)
{
	return __pa(address);
}

static inline void *phys_to_virt(unsigned long address)
{
	return __va(address);
}
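
/*
 * For example (illustrative only; "buf" is a hypothetical kernel-virtual
 * buffer), handing a buffer to a bus-mastering device reduces to a
 * physical-address lookup on this 1:1 mapping:
 *
 *	unsigned long dma_addr = virt_to_bus(buf);   (== virt_to_phys(buf))
 */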

extern void *__ioremap(unsigned long phys_addr, unsigned long size,
		       unsigned long flags);

static inline void *ioremap(unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, 1);
}

static inline void *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, 0);
}

extern void iounmap(void *addr);

unsigned long onchip_remap(unsigned long addr, unsigned long size,
			   const char *name);
extern void onchip_unmap(unsigned long vaddr);
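
/*
 * A minimal sketch of the ctrl_* convention described at the top of
 * this file, assuming a hypothetical on-chip block at ONCHIP_BASE with
 * a 32-bit mode register at offset 0x04 (names and offsets are
 * illustrative only):
 *
 *	unsigned long base = onchip_remap(ONCHIP_BASE, 0x1000, "demo");
 *	unsigned int mode = ctrl_inl(base + 0x04);
 *	ctrl_outl(mode | 0x1, base + 0x04);
 *	onchip_unmap(base);
 */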

static __inline__ int check_signature(volatile void __iomem *io_addr,
				      const unsigned char *signature, int length)
{
	int retval = 0;

	do {
		if (readb(io_addr) != *signature)
			goto out;
		io_addr++;
		signature++;
		length--;
	} while (length);

	retval = 1;
out:
	return retval;
}
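
/*
 * Typical (illustrative) use: probing for a device by checking a known
 * byte pattern at a mapped address; "sig" and "rom_base" are
 * hypothetical:
 *
 *	static const unsigned char sig[] = { 0x55, 0xaa };
 *
 *	if (check_signature(rom_base, sig, sizeof(sig)))
 *		(device is present)
 */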

/*
 * The caches on some architectures aren't DMA-coherent, so coherency
 * has to be handled in software. There are three types of operations
 * that can be applied to DMA buffers (see the usage sketch following
 * these helpers):
 *
 * - dma_cache_wback_inv(start, size) makes caches and RAM coherent by
 *   writing the content of the caches back to memory, if necessary.
 *   The function also invalidates the affected part of the caches as
 *   necessary before DMA transfers from outside to memory.
 * - dma_cache_inv(start, size) invalidates the affected parts of the
 *   caches. Dirty lines of the caches may be written back or simply
 *   be discarded. This operation is necessary before DMA operations
 *   to the memory.
 * - dma_cache_wback(start, size) writes back any dirty lines but does
 *   not invalidate the cache. This can be used before DMA reads from
 *   memory.
 */
static __inline__ void dma_cache_wback_inv(unsigned long start, unsigned long size)
{
	unsigned long s = start & L1_CACHE_ALIGN_MASK;
	unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;

	for (; s <= e; s += L1_CACHE_BYTES)
		asm volatile ("ocbp %0, 0" : : "r" (s));
}

static __inline__ void dma_cache_inv(unsigned long start, unsigned long size)
{
	/*
	 * Note that the caller has to be careful with overzealous
	 * invalidation should there be partial cache lines at the
	 * extremities of the specified range.
	 */
	unsigned long s = start & L1_CACHE_ALIGN_MASK;
	unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;

	for (; s <= e; s += L1_CACHE_BYTES)
		asm volatile ("ocbi %0, 0" : : "r" (s));
}

static __inline__ void dma_cache_wback(unsigned long start, unsigned long size)
{
	unsigned long s = start & L1_CACHE_ALIGN_MASK;
	unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;

	for (; s <= e; s += L1_CACHE_BYTES)
		asm volatile ("ocbwb %0, 0" : : "r" (s));
}
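
/*
 * Usage sketch (illustrative; "buf" and BUF_SIZE are hypothetical).
 * Stale lines must be invalidated before a device writes into buf, and
 * dirty lines must reach RAM before a device reads from buf:
 *
 *	dma_cache_inv((unsigned long)buf, BUF_SIZE);     before DMA to memory
 *	dma_cache_wback((unsigned long)buf, BUF_SIZE);   before DMA from memory
 */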

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access.
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer.
 */
#define xlate_dev_kmem_ptr(p)	p

#endif /* __KERNEL__ */

#endif /* __ASM_SH64_IO_H */