io.h

/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_IO_H
#define _ASM_TILE_IO_H

#include <linux/kernel.h>
#include <linux/bug.h>
#include <asm/page.h>

#define IO_SPACE_LIMIT 0xfffffffful

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access.
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer.
 */
#define xlate_dev_kmem_ptr(p)	p

/*
 * Change "struct page" to physical address.
 */
#define page_to_phys(page)	((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
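
/*
 * page_to_phys() widens the PFN to dma_addr_t before shifting so that,
 * where dma_addr_t is wider than unsigned long, high physical-address
 * bits are not lost in the shift.
 */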

/*
 * Some places try to pass in an loff_t for PHYSADDR (?!), so we cast it to
 * long before casting it to a pointer to avoid compiler warnings.
 */
#if CHIP_HAS_MMIO()
extern void __iomem *ioremap(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
				  pgprot_t pgprot);
extern void iounmap(volatile void __iomem *addr);
#else
#define ioremap(physaddr, size)	((void __iomem *)(unsigned long)(physaddr))
#define iounmap(addr)		((void)0)
#endif

#define ioremap_nocache(physaddr, size)		ioremap(physaddr, size)
#define ioremap_wc(physaddr, size)		ioremap(physaddr, size)
#define ioremap_writethrough(physaddr, size)	ioremap(physaddr, size)
#define ioremap_fullcache(physaddr, size)	ioremap(physaddr, size)
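
/*
 * The cache-attribute variants all collapse to plain ioremap(): this
 * port provides a single MMIO mapping type and does not differentiate
 * the _nocache/_wc/_writethrough/_fullcache hints.
 */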

#define mmiowb()

/* Conversion between virtual and physical mappings. */
#define mm_ptov(addr)	((void *)phys_to_virt(addr))
#define mm_vtop(addr)	((unsigned long)virt_to_phys(addr))

#if CHIP_HAS_MMIO()

/*
 * We use inline assembly to guarantee that the compiler does not
 * split an access into multiple byte-sized accesses as it might
 * sometimes do if a register data structure is marked "packed".
 * Obviously on tile we can't tolerate such an access being
 * actually unaligned, but we want to avoid the case where the
 * compiler conservatively would generate multiple accesses even
 * for an aligned read or write.
 */

static inline u8 __raw_readb(const volatile void __iomem *addr)
{
	return *(const volatile u8 __force *)addr;
}

static inline u16 __raw_readw(const volatile void __iomem *addr)
{
	u16 ret;
	asm volatile("ld2u %0, %1" : "=r" (ret) : "r" (addr));
	barrier();
	return le16_to_cpu(ret);
}

static inline u32 __raw_readl(const volatile void __iomem *addr)
{
	u32 ret;
	/* Sign-extend to conform to u32 ABI sign-extension convention. */
	asm volatile("ld4s %0, %1" : "=r" (ret) : "r" (addr));
	barrier();
	return le32_to_cpu(ret);
}

static inline u64 __raw_readq(const volatile void __iomem *addr)
{
	u64 ret;
	asm volatile("ld %0, %1" : "=r" (ret) : "r" (addr));
	barrier();
	return le64_to_cpu(ret);
}

static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
{
	*(volatile u8 __force *)addr = val;
}

static inline void __raw_writew(u16 val, volatile void __iomem *addr)
{
	asm volatile("st2 %0, %1" :: "r" (addr), "r" (cpu_to_le16(val)));
}

static inline void __raw_writel(u32 val, volatile void __iomem *addr)
{
	asm volatile("st4 %0, %1" :: "r" (addr), "r" (cpu_to_le32(val)));
}

static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
{
	asm volatile("st %0, %1" :: "r" (addr), "r" (cpu_to_le64(val)));
}
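
/*
 * A hypothetical use of the raw accessors on an ioremap()ed region
 * ("regs", "phys_base" and REG_STATUS are illustrative names, not part
 * of this header):
 *
 *	void __iomem *regs = ioremap(phys_base, 0x1000);
 *	u32 status = __raw_readl(regs + REG_STATUS);
 *	__raw_writel(status | 0x1, regs + REG_STATUS);
 *	iounmap(regs);
 *
 * Most drivers use the readl()/writel() wrappers defined further down,
 * which map directly onto these raw accessors on tile.
 */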

/*
 * The on-chip I/O hardware on tilegx is configured with VA=PA for the
 * kernel's PA range.  The low-level APIs and field names use "va" and
 * "void *" nomenclature, to be consistent with the general notion
 * that the addresses in question are virtualizable, but in the kernel
 * context we are actually manipulating PA values.  (In other contexts,
 * e.g. access from user space, we do in fact use real virtual addresses
 * in the va fields.)  To allow readers of the code to understand what's
 * happening, we direct their attention to this comment by using the
 * following two functions that just duplicate __va() and __pa().
 */
typedef unsigned long tile_io_addr_t;

static inline tile_io_addr_t va_to_tile_io_addr(void *va)
{
	BUILD_BUG_ON(sizeof(phys_addr_t) != sizeof(tile_io_addr_t));
	return __pa(va);
}

static inline void *tile_io_addr_to_va(tile_io_addr_t tile_io_addr)
{
	return __va(tile_io_addr);
}
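
/*
 * The BUILD_BUG_ON() above documents the assumption these helpers rely
 * on: a phys_addr_t must fit in a tile_io_addr_t, otherwise converting
 * a kernel VA to an I/O address could silently truncate.
 */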

#else /* CHIP_HAS_MMIO() */

#ifdef CONFIG_PCI

extern u8 _tile_readb(unsigned long addr);
extern u16 _tile_readw(unsigned long addr);
extern u32 _tile_readl(unsigned long addr);
extern u64 _tile_readq(unsigned long addr);
extern void _tile_writeb(u8  val, unsigned long addr);
extern void _tile_writew(u16 val, unsigned long addr);
extern void _tile_writel(u32 val, unsigned long addr);
extern void _tile_writeq(u64 val, unsigned long addr);

#define __raw_readb(addr) _tile_readb((unsigned long)addr)
#define __raw_readw(addr) _tile_readw((unsigned long)addr)
#define __raw_readl(addr) _tile_readl((unsigned long)addr)
#define __raw_readq(addr) _tile_readq((unsigned long)addr)
#define __raw_writeb(val, addr) _tile_writeb(val, (unsigned long)addr)
#define __raw_writew(val, addr) _tile_writew(val, (unsigned long)addr)
#define __raw_writel(val, addr) _tile_writel(val, (unsigned long)addr)
#define __raw_writeq(val, addr) _tile_writeq(val, (unsigned long)addr)
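
/*
 * Without native MMIO (i.e. on tilepro), the __raw_* accessors are
 * routed through the out-of-line _tile_* helpers, which the PCI
 * support code provides to perform the access on the caller's behalf.
 */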

#else /* CONFIG_PCI */

/*
 * The tilepro architecture does not support IOMEM unless PCI is enabled.
 * Unfortunately we can't yet simply not declare these methods,
 * since some generic code that compiles into the kernel, but
 * we never run, uses them unconditionally.
 */

static inline int iomem_panic(void)
{
	panic("readb/writeb and friends do not exist on tile without PCI");
	return 0;
}

static inline u8 readb(unsigned long addr)
{
	return iomem_panic();
}

static inline u16 readw(unsigned long addr)
{
	return iomem_panic();
}

static inline u32 readl(unsigned long addr)
{
	return iomem_panic();
}

static inline u64 readq(unsigned long addr)
{
	return iomem_panic();
}

static inline void writeb(u8 val, unsigned long addr)
{
	iomem_panic();
}

static inline void writew(u16 val, unsigned long addr)
{
	iomem_panic();
}

static inline void writel(u32 val, unsigned long addr)
{
	iomem_panic();
}

static inline void writeq(u64 val, unsigned long addr)
{
	iomem_panic();
}

#endif /* CONFIG_PCI */

#endif /* CHIP_HAS_MMIO() */

#define readb __raw_readb
#define readw __raw_readw
#define readl __raw_readl
#define readq __raw_readq

#define writeb __raw_writeb
#define writew __raw_writew
#define writel __raw_writel
#define writeq __raw_writeq

#define readb_relaxed readb
#define readw_relaxed readw
#define readl_relaxed readl
#define readq_relaxed readq

#define ioread8 readb
#define ioread16 readw
#define ioread32 readl
#define ioread64 readq
#define iowrite8 writeb
#define iowrite16 writew
#define iowrite32 writel
#define iowrite64 writeq
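
/*
 * The _relaxed and ioread/iowrite names are straight aliases: this port
 * provides a single flavor of MMIO accessor, so there is no weaker
 * ordering to expose and no separate port-I/O address space to decode.
 */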

#if CHIP_HAS_MMIO() || defined(CONFIG_PCI)

static inline void memset_io(volatile void *dst, int val, size_t len)
{
	int x;
	BUG_ON((unsigned long)dst & 0x3);
	val = (val & 0xff) * 0x01010101;
	for (x = 0; x < len; x += 4)
		writel(val, dst + x);
}

static inline void memcpy_fromio(void *dst, const volatile void __iomem *src,
				 size_t len)
{
	int x;
	BUG_ON((unsigned long)src & 0x3);
	for (x = 0; x < len; x += 4)
		*(u32 *)(dst + x) = readl(src + x);
}

static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
			       size_t len)
{
	int x;
	BUG_ON((unsigned long)dst & 0x3);
	for (x = 0; x < len; x += 4)
		writel(*(u32 *)(src + x), dst + x);
}

#endif
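
/*
 * Note that memset_io()/memcpy_fromio()/memcpy_toio() above work in
 * 32-bit units and BUG() on buffers that are not 4-byte aligned; a
 * length that is not a multiple of 4 is rounded up, so the final
 * access may touch up to 3 bytes past len.  A hypothetical caller
 * ("regs" and FIFO_OFF are illustrative names only):
 *
 *	memcpy_toio(regs + FIFO_OFF, buf, 16);	(issues four writel() stores)
 */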

/*
 * The Tile architecture does not support IOPORT, even with PCI.
 * Unfortunately we can't yet simply not declare these methods,
 * since some generic code that compiles into the kernel, but
 * we never run, uses them unconditionally.
 */

static inline long ioport_panic(void)
{
	panic("inb/outb and friends do not exist on tile");
	return 0;
}

static inline void __iomem *ioport_map(unsigned long port, unsigned int len)
{
	pr_info("ioport_map: mapping IO resources is unsupported on tile.\n");
	return NULL;
}

static inline void ioport_unmap(void __iomem *addr)
{
	ioport_panic();
}

static inline u8 inb(unsigned long addr)
{
	return ioport_panic();
}

static inline u16 inw(unsigned long addr)
{
	return ioport_panic();
}

static inline u32 inl(unsigned long addr)
{
	return ioport_panic();
}

static inline void outb(u8 b, unsigned long addr)
{
	ioport_panic();
}

static inline void outw(u16 b, unsigned long addr)
{
	ioport_panic();
}

static inline void outl(u32 b, unsigned long addr)
{
	ioport_panic();
}

#define inb_p(addr)	inb(addr)
#define inw_p(addr)	inw(addr)
#define inl_p(addr)	inl(addr)
#define outb_p(x, addr)	outb((x), (addr))
#define outw_p(x, addr)	outw((x), (addr))
#define outl_p(x, addr)	outl((x), (addr))

static inline void insb(unsigned long addr, void *buffer, int count)
{
	ioport_panic();
}

static inline void insw(unsigned long addr, void *buffer, int count)
{
	ioport_panic();
}

static inline void insl(unsigned long addr, void *buffer, int count)
{
	ioport_panic();
}

static inline void outsb(unsigned long addr, const void *buffer, int count)
{
	ioport_panic();
}

static inline void outsw(unsigned long addr, const void *buffer, int count)
{
	ioport_panic();
}

static inline void outsl(unsigned long addr, const void *buffer, int count)
{
	ioport_panic();
}

#define ioread16be(addr)	be16_to_cpu(ioread16(addr))
#define ioread32be(addr)	be32_to_cpu(ioread32(addr))
#define iowrite16be(v, addr)	iowrite16(be16_to_cpu(v), (addr))
#define iowrite32be(v, addr)	iowrite32(be32_to_cpu(v), (addr))
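
/*
 * The big-endian accessors reuse be16_to_cpu()/be32_to_cpu() in both
 * directions; on this little-endian architecture the conversion is a
 * plain byte swap, so it is its own inverse and cpu_to_be*() would
 * generate identical code.
 */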

#define ioread8_rep(p, dst, count) \
	insb((unsigned long) (p), (dst), (count))
#define ioread16_rep(p, dst, count) \
	insw((unsigned long) (p), (dst), (count))
#define ioread32_rep(p, dst, count) \
	insl((unsigned long) (p), (dst), (count))

#define iowrite8_rep(p, src, count) \
	outsb((unsigned long) (p), (src), (count))
#define iowrite16_rep(p, src, count) \
	outsw((unsigned long) (p), (src), (count))
#define iowrite32_rep(p, src, count) \
	outsl((unsigned long) (p), (src), (count))
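
/*
 * The *_rep helpers exist only so that generic code compiles; they
 * forward to the port-I/O string stubs above, which panic at run time,
 * since tile has no I/O port space.
 */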

#define virt_to_bus	virt_to_phys
#define bus_to_virt	phys_to_virt

#endif /* _ASM_TILE_IO_H */