#ifndef __ASM_SH64_IO_H
#define __ASM_SH64_IO_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * include/asm-sh64/io.h
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003  Paul Mundt
 */
/*
 * Convention:
 *     read{b,w,l}/write{b,w,l} are for PCI,
 *     while in{b,w,l}/out{b,w,l} are for ISA.
 * These may (will) be platform-specific functions.
 *
 * In addition, we have ctrl_in{b,w,l}/ctrl_out{b,w,l} for SuperH-specific
 * I/O, which are processor-specific.  The address should be the result of
 * onchip_remap().
 */
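/*
 * A minimal usage sketch (all addresses and names below are hypothetical,
 * not part of this header): PCI MMIO goes through read{b,w,l}/write{b,w,l},
 * ISA port I/O through in{b,w,l}/out{b,w,l}, and on-chip registers through
 * ctrl_in{b,w,l}/ctrl_out{b,w,l} on an address returned by onchip_remap():
 *
 *	void *mmio = ioremap_nocache(0xfd000000, 0x1000);
 *	unsigned int id = readl(mmio);			// PCI memory space
 *	unsigned char lsr = inb(0x3f8 + 5);		// ISA port space
 *	unsigned long reg = onchip_remap(0x01c00000, 0x100, "example");
 *	unsigned int stat = ctrl_inl(reg);		// on-chip register
 */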
#include <linux/compiler.h>
#include <asm/cache.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm-generic/iomap.h>

#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
#define page_to_bus page_to_phys
/*
 * Nothing overly special here.. instead of doing the same thing
 * over and over again, we just define a set of sh64_in/out functions
 * with an implicit size.  The traditional read{b,w,l}/write{b,w,l}
 * mess is wrapped to this, as are the SH-specific ctrl_in/out routines.
 */
static inline unsigned char sh64_in8(const volatile void __iomem *addr)
{
	return *(volatile unsigned char __force *)addr;
}

static inline unsigned short sh64_in16(const volatile void __iomem *addr)
{
	return *(volatile unsigned short __force *)addr;
}

static inline unsigned int sh64_in32(const volatile void __iomem *addr)
{
	return *(volatile unsigned int __force *)addr;
}

static inline unsigned long long sh64_in64(const volatile void __iomem *addr)
{
	return *(volatile unsigned long long __force *)addr;
}

static inline void sh64_out8(unsigned char b, volatile void __iomem *addr)
{
	*(volatile unsigned char __force *)addr = b;
	wmb();
}

static inline void sh64_out16(unsigned short b, volatile void __iomem *addr)
{
	*(volatile unsigned short __force *)addr = b;
	wmb();
}

static inline void sh64_out32(unsigned int b, volatile void __iomem *addr)
{
	*(volatile unsigned int __force *)addr = b;
	wmb();
}

static inline void sh64_out64(unsigned long long b, volatile void __iomem *addr)
{
	*(volatile unsigned long long __force *)addr = b;
	wmb();
}
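/*
 * Note that each sh64_out* store is followed by a wmb(), so back-to-back
 * register writes reach the device in program order.  A sketch with
 * hypothetical register offsets:
 *
 *	sh64_out32(src_phys, regs + DMA_SRC);	// set up source address
 *	sh64_out32(1, regs + DMA_CTRL);		// start bit lands afterwards
 */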
#define readb(addr)		sh64_in8(addr)
#define readw(addr)		sh64_in16(addr)
#define readl(addr)		sh64_in32(addr)
#define readb_relaxed(addr)	sh64_in8(addr)
#define readw_relaxed(addr)	sh64_in16(addr)
#define readl_relaxed(addr)	sh64_in32(addr)

#define writeb(b, addr)		sh64_out8(b, addr)
#define writew(b, addr)		sh64_out16(b, addr)
#define writel(b, addr)		sh64_out32(b, addr)

#define ctrl_inb(addr)		sh64_in8(ioport_map(addr, 1))
#define ctrl_inw(addr)		sh64_in16(ioport_map(addr, 2))
#define ctrl_inl(addr)		sh64_in32(ioport_map(addr, 4))

#define ctrl_outb(b, addr)	sh64_out8(b, ioport_map(addr, 1))
#define ctrl_outw(b, addr)	sh64_out16(b, ioport_map(addr, 2))
#define ctrl_outl(b, addr)	sh64_out32(b, ioport_map(addr, 4))

#define ioread8(addr)		sh64_in8(addr)
#define ioread16(addr)		sh64_in16(addr)
#define ioread32(addr)		sh64_in32(addr)
#define iowrite8(b, addr)	sh64_out8(b, addr)
#define iowrite16(b, addr)	sh64_out16(b, addr)
#define iowrite32(b, addr)	sh64_out32(b, addr)

#define inb(addr)		ctrl_inb(addr)
#define inw(addr)		ctrl_inw(addr)
#define inl(addr)		ctrl_inl(addr)
#define outb(b, addr)		ctrl_outb(b, addr)
#define outw(b, addr)		ctrl_outw(b, addr)
#define outl(b, addr)		ctrl_outl(b, addr)
void outsw(unsigned long port, const void *addr, unsigned long count);
void insw(unsigned long port, void *addr, unsigned long count);
void outsl(unsigned long port, const void *addr, unsigned long count);
void insl(unsigned long port, void *addr, unsigned long count);

void memcpy_toio(void __iomem *to, const void *from, long count);
void memcpy_fromio(void *to, void __iomem *from, long count);
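/*
 * The string forms transfer `count' units between an I/O port and a
 * memory buffer.  A sketch (the port number is hypothetical): reading
 * one 512-byte sector from an IDE-style data register:
 *
 *	unsigned short buf[256];
 *	insw(0x1f0, buf, 256);
 */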
#define mmiowb()

#ifdef __KERNEL__

#ifdef CONFIG_SH_CAYMAN
extern unsigned long smsc_superio_virt;
#endif

#ifdef CONFIG_PCI
extern unsigned long pciio_virt;
#endif

#define IO_SPACE_LIMIT 0xffffffff
/*
 * Change virtual addresses to physical addresses and vice versa.
 * These are trivial on the 1:1 Linux/SuperH mapping.
 */
static inline unsigned long virt_to_phys(volatile void *address)
{
	return __pa(address);
}

static inline void *phys_to_virt(unsigned long address)
{
	return __va(address);
}
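/*
 * Because the kernel mapping is 1:1, these are simple offset arithmetic.
 * A sketch (any direct-mapped kernel pointer works):
 *
 *	void *p = kmalloc(64, GFP_KERNEL);
 *	unsigned long pa = virt_to_phys(p);
 *	BUG_ON(phys_to_virt(pa) != p);
 */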
extern void *__ioremap(unsigned long phys_addr, unsigned long size,
		       unsigned long flags);

static inline void *ioremap(unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, 1);
}

static inline void *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, 0);
}

extern void iounmap(void *addr);

unsigned long onchip_remap(unsigned long addr, unsigned long size,
			   const char *name);
extern void onchip_unmap(unsigned long vaddr);
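/*
 * Typical mapping lifetime, as a sketch (the physical address is
 * hypothetical):
 *
 *	void *regs = ioremap_nocache(0xb8000000, PAGE_SIZE);
 *	if (regs) {
 *		... poke registers through readl()/writel() ...
 *		iounmap(regs);
 *	}
 */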
static __inline__ int check_signature(volatile void __iomem *io_addr,
				      const unsigned char *signature,
				      int length)
{
	int retval = 0;

	do {
		if (readb(io_addr) != *signature)
			goto out;
		io_addr++;
		signature++;
		length--;
	} while (length);

	retval = 1;
out:
	return retval;
}
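/*
 * Sketch of probing a mapped expansion ROM for a known signature before
 * trusting it (the mapping and the parse_rom() helper are hypothetical):
 *
 *	void *rom = ioremap_nocache(rom_phys, rom_len);
 *	if (rom && check_signature(rom, "PCIR", 4))
 *		parse_rom(rom);
 */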
/*
 * The caches on some architectures aren't dma-coherent and need to
 * handle this in software.  There are three types of operations that
 * can be applied to dma buffers.
 *
 *  - dma_cache_wback_inv(start, size) makes caches and RAM coherent by
 *    writing the content of the caches back to memory, if necessary.
 *    The function also invalidates the affected part of the caches as
 *    necessary before DMA transfers from outside to memory.
 *  - dma_cache_inv(start, size) invalidates the affected parts of the
 *    caches.  Dirty lines of the caches may be written back or simply
 *    be discarded.  This operation is necessary before dma operations
 *    to the memory.
 *  - dma_cache_wback(start, size) writes back any dirty lines but does
 *    not invalidate the cache.  This can be used before DMA reads from
 *    memory.
 */
static __inline__ void dma_cache_wback_inv(unsigned long start, unsigned long size)
{
	unsigned long s = start & L1_CACHE_ALIGN_MASK;
	unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;

	for (; s <= e; s += L1_CACHE_BYTES)
		asm volatile ("ocbp %0, 0" : : "r" (s));
}

static __inline__ void dma_cache_inv(unsigned long start, unsigned long size)
{
	/*
	 * Note that the caller has to be careful with overzealous
	 * invalidation should there be partial cache lines at the
	 * extremities of the specified range.
	 */
	unsigned long s = start & L1_CACHE_ALIGN_MASK;
	unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;

	for (; s <= e; s += L1_CACHE_BYTES)
		asm volatile ("ocbi %0, 0" : : "r" (s));
}

static __inline__ void dma_cache_wback(unsigned long start, unsigned long size)
{
	unsigned long s = start & L1_CACHE_ALIGN_MASK;
	unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;

	for (; s <= e; s += L1_CACHE_BYTES)
		asm volatile ("ocbwb %0, 0" : : "r" (s));
}
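/*
 * Sketch of pairing these with a transfer (the device, buffers and the
 * start_dma() helper are hypothetical): write back before the device
 * reads, invalidate before the CPU reads what the device wrote:
 *
 *	dma_cache_wback((unsigned long)txbuf, txlen);	// device reads txbuf
 *	start_dma(txbuf, txlen, rxbuf, rxlen);
 *	dma_cache_inv((unsigned long)rxbuf, rxlen);	// CPU reads rxbuf
 */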
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access.
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer.
 */
#define xlate_dev_kmem_ptr(p)	p

#endif /* __KERNEL__ */

#endif /* __ASM_SH64_IO_H */