/*
 * AVR32 <asm/io.h> — memory-mapped and port I/O accessor definitions.
 */
  1. #ifndef __ASM_AVR32_IO_H
  2. #define __ASM_AVR32_IO_H
  3. #include <linux/kernel.h>
  4. #include <linux/string.h>
  5. #include <linux/types.h>
  6. #include <asm/addrspace.h>
  7. #include <asm/byteorder.h>
  8. #include <mach/io.h>
  9. /* virt_to_phys will only work when address is in P1 or P2 */
  10. static __inline__ unsigned long virt_to_phys(volatile void *address)
  11. {
  12. return PHYSADDR(address);
  13. }
  14. static __inline__ void * phys_to_virt(unsigned long address)
  15. {
  16. return (void *)P1SEGADDR(address);
  17. }
/*
 * Translate between the cached (P1) / uncached (P2) kernel segments
 * and physical addresses.  PHYSADDR strips the segment bits, so the
 * cached and uncached variants yield the same physical address.
 */
#define cached_to_phys(addr)	((unsigned long)PHYSADDR(addr))
#define uncached_to_phys(addr)	((unsigned long)PHYSADDR(addr))
#define phys_to_cached(addr)	((void *)P1SEGADDR(addr))
#define phys_to_uncached(addr)	((void *)P2SEGADDR(addr))
/*
 * Generic IO read/write. These perform native-endian accesses. Note
 * that some architectures will want to re-define __raw_{read,write}w.
 */
/* Repeated ("string") MMIO transfers to/from a single address,
 * implemented out of line. */
extern void __raw_writesb(void __iomem *addr, const void *data, int bytelen);
extern void __raw_writesw(void __iomem *addr, const void *data, int wordlen);
extern void __raw_writesl(void __iomem *addr, const void *data, int longlen);

extern void __raw_readsb(const void __iomem *addr, void *data, int bytelen);
extern void __raw_readsw(const void __iomem *addr, void *data, int wordlen);
extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
  32. static inline void __raw_writeb(u8 v, volatile void __iomem *addr)
  33. {
  34. *(volatile u8 __force *)addr = v;
  35. }
  36. static inline void __raw_writew(u16 v, volatile void __iomem *addr)
  37. {
  38. *(volatile u16 __force *)addr = v;
  39. }
  40. static inline void __raw_writel(u32 v, volatile void __iomem *addr)
  41. {
  42. *(volatile u32 __force *)addr = v;
  43. }
  44. static inline u8 __raw_readb(const volatile void __iomem *addr)
  45. {
  46. return *(const volatile u8 __force *)addr;
  47. }
  48. static inline u16 __raw_readw(const volatile void __iomem *addr)
  49. {
  50. return *(const volatile u16 __force *)addr;
  51. }
  52. static inline u32 __raw_readl(const volatile void __iomem *addr)
  53. {
  54. return *(const volatile u32 __force *)addr;
  55. }
/* Convert I/O port address to virtual address.  Ports live in the
 * uncached (P2) segment; mach/io.h may override this. */
#ifndef __io
# define __io(p)	((void *)phys_to_uncached(p))
#endif

/*
 * Not really sure about the best way to slow down I/O on
 * AVR32. Defining it as a no-op until we have an actual test case.
 */
#define SLOW_DOWN_IO	do { } while (0)
/*
 * __BUILD_MEMORY_SINGLE(pfx, bwl, type): expand to a pair of MMIO
 * accessors, pfx##write##bwl() and pfx##read##bwl().  The address is
 * first run through the mach-provided __swizzle_addr_##bwl() hook and
 * the data through pfx##ioswab##bwl(), which handles any per-platform
 * byte swapping.  The BUILD_BUG_ON rejects instantiation with a type
 * wider than a machine word.
 */
#define __BUILD_MEMORY_SINGLE(pfx, bwl, type)				\
static inline void							\
pfx##write##bwl(type val, volatile void __iomem *addr)			\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = (void *)__swizzle_addr_##bwl((unsigned long)(addr));	\
	__val = pfx##ioswab##bwl(__addr, val);				\
									\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	*__addr = __val;						\
}									\
									\
static inline type pfx##read##bwl(const volatile void __iomem *addr)	\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = (void *)__swizzle_addr_##bwl((unsigned long)(addr));	\
									\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	__val = *__addr;						\
	return pfx##ioswab##bwl(__addr, __val);				\
}
/*
 * __BUILD_IOPORT_SINGLE(pfx, bwl, type, p, slow): expand to a pair of
 * port I/O accessors, pfx##out##bwl##p() and pfx##in##bwl##p().  The
 * port number is swizzled and turned into a virtual address with
 * __io(); "slow" is an optional trailing statement (SLOW_DOWN_IO for
 * the _p variants, empty otherwise) executed after the access.
 */
#define __BUILD_IOPORT_SINGLE(pfx, bwl, type, p, slow)			\
static inline void pfx##out##bwl##p(type val, unsigned long port)	\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = __io(__swizzle_addr_##bwl(port));			\
	__val = pfx##ioswab##bwl(__addr, val);				\
									\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	*__addr = __val;						\
	slow;								\
}									\
									\
static inline type pfx##in##bwl##p(unsigned long port)			\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = __io(__swizzle_addr_##bwl(port));			\
									\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	__val = *__addr;						\
	slow;								\
									\
	return pfx##ioswab##bwl(__addr, __val);				\
}
/*
 * Instantiate the single-access MMIO and port I/O functions for each
 * access size, both in the plain flavour (writeb(), inw(), ...) and
 * the __mem_-prefixed flavour used by the string helpers below.
 */
#define __BUILD_MEMORY_PFX(bus, bwl, type)				\
	__BUILD_MEMORY_SINGLE(bus, bwl, type)

#define BUILDIO_MEM(bwl, type)						\
	__BUILD_MEMORY_PFX(, bwl, type)					\
	__BUILD_MEMORY_PFX(__mem_, bwl, type)

#define __BUILD_IOPORT_PFX(bus, bwl, type)				\
	__BUILD_IOPORT_SINGLE(bus, bwl, type, ,)			\
	__BUILD_IOPORT_SINGLE(bus, bwl, type, _p, SLOW_DOWN_IO)

#define BUILDIO_IOPORT(bwl, type)					\
	__BUILD_IOPORT_PFX(, bwl, type)					\
	__BUILD_IOPORT_PFX(__mem_, bwl, type)

BUILDIO_MEM(b, u8)
BUILDIO_MEM(w, u16)
BUILDIO_MEM(l, u32)

BUILDIO_IOPORT(b, u8)
BUILDIO_IOPORT(w, u16)
BUILDIO_IOPORT(l, u32)

/* The relaxed accessors are simply aliases for the normal ones. */
#define readb_relaxed			readb
#define readw_relaxed			readw
#define readl_relaxed			readl
/*
 * __BUILD_MEMORY_STRING(bwl, type): expand to writes##bwl() and
 * reads##bwl(), which transfer "count" elements between a buffer and
 * a single MMIO address, one element at a time via the __mem_
 * accessors.
 */
#define __BUILD_MEMORY_STRING(bwl, type)				\
static inline void writes##bwl(volatile void __iomem *addr,		\
			       const void *data, unsigned int count)	\
{									\
	const type *__data = data;					\
									\
	while (count--)							\
		__mem_write##bwl(*__data++, addr);			\
}									\
									\
static inline void reads##bwl(const volatile void __iomem *addr,	\
			      void *data, unsigned int count)		\
{									\
	type *__data = data;						\
									\
	while (count--)							\
		*__data++ = __mem_read##bwl(addr);			\
}
/*
 * __BUILD_IOPORT_STRING(bwl, type): expand to outs##bwl() and
 * ins##bwl(), which transfer "count" elements between a buffer and a
 * single I/O port, one element at a time via the __mem_ accessors.
 */
#define __BUILD_IOPORT_STRING(bwl, type)				\
static inline void outs##bwl(unsigned long port, const void *data,	\
			     unsigned int count)			\
{									\
	const type *__data = data;					\
									\
	while (count--)							\
		__mem_out##bwl(*__data++, port);			\
}									\
									\
static inline void ins##bwl(unsigned long port, void *data,		\
			    unsigned int count)				\
{									\
	type *__data = data;						\
									\
	while (count--)							\
		*__data++ = __mem_in##bwl(port);			\
}
/* Instantiate the string accessors for all three access sizes. */
#define BUILDSTRING(bwl, type)						\
	__BUILD_MEMORY_STRING(bwl, type)				\
	__BUILD_IOPORT_STRING(bwl, type)

BUILDSTRING(b, u8)
BUILDSTRING(w, u16)
BUILDSTRING(l, u32)
/*
 * io{read,write}{8,16,32} macros in both le (for PCI style consumers)
 * and native be flavours.  The "be" variants go straight through the
 * __raw_ accessors, i.e. native CPU order with no byte swapping.
 */
#ifndef ioread8

#define ioread8(p)		((unsigned int)readb(p))

#define ioread16(p)		((unsigned int)readw(p))
#define ioread16be(p)		((unsigned int)__raw_readw(p))

#define ioread32(p)		((unsigned int)readl(p))
#define ioread32be(p)		((unsigned int)__raw_readl(p))

#define iowrite8(v,p)		writeb(v, p)

#define iowrite16(v,p)		writew(v, p)
#define iowrite16be(v,p)	__raw_writew(v, p)

#define iowrite32(v,p)		writel(v, p)
#define iowrite32be(v,p)	__raw_writel(v, p)

/* Repeated transfers map onto the string accessors generated above. */
#define ioread8_rep(p,d,c)	readsb(p,d,c)
#define ioread16_rep(p,d,c)	readsw(p,d,c)
#define ioread32_rep(p,d,c)	readsl(p,d,c)

#define iowrite8_rep(p,s,c)	writesb(p,s,c)
#define iowrite16_rep(p,s,c)	writesw(p,s,c)
#define iowrite32_rep(p,s,c)	writesl(p,s,c)

#endif
  204. static inline void memcpy_fromio(void * to, const volatile void __iomem *from,
  205. unsigned long count)
  206. {
  207. memcpy(to, (const void __force *)from, count);
  208. }
  209. static inline void memcpy_toio(volatile void __iomem *to, const void * from,
  210. unsigned long count)
  211. {
  212. memcpy((void __force *)to, from, count);
  213. }
  214. static inline void memset_io(volatile void __iomem *addr, unsigned char val,
  215. unsigned long count)
  216. {
  217. memset((void __force *)addr, val, count);
  218. }
/* MMIO write barrier: defined as a no-op on this architecture. */
#define mmiowb()

#define IO_SPACE_LIMIT	0xffffffff

/* Low-level mapping primitives, implemented elsewhere in the arch. */
extern void __iomem *__ioremap(unsigned long offset, size_t size,
			       unsigned long flags);
extern void __iounmap(void __iomem *addr);

/*
 * ioremap	-   map bus memory into CPU space
 * @offset	bus address of the memory
 * @size	size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to make
 * bus memory CPU accessible via the readb/.../writel functions and
 * the other mmio helpers. The returned address is not guaranteed to
 * be usable directly as a virtual address.
 */
#define ioremap(offset, size)			\
	__ioremap((offset), (size), 0)

/* Both variants pass flags == 0, so nocache is the same mapping. */
#define ioremap_nocache(offset, size)		\
	__ioremap((offset), (size), 0)

#define iounmap(addr)				\
	__iounmap(addr)

/* Re-segment an address into the cached (P1) / uncached (P2) view. */
#define cached(addr) P1SEGADDR(addr)
#define uncached(addr) P2SEGADDR(addr)

/* Bus addresses are simply physical addresses on this platform. */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
#define page_to_bus page_to_phys
#define bus_to_page phys_to_page
/*
 * Create a virtual mapping cookie for an IO port range. There exists
 * no such thing as port-based I/O on AVR32, so a regular ioremap()
 * should do what we need.
 */
#define ioport_map(port, nr)	ioremap(port, nr)
#define ioport_unmap(port)	iounmap(port)

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 * (identity here: no translation is performed).
 */
#define xlate_dev_kmem_ptr(p)	p

#endif /* __ASM_AVR32_IO_H */