/*
 * linux/include/asm-m68k/raw_io.h
 *
 * 10/20/00 RZ: created from bits of io.h and ide.h to cleanup namespace
 *
 */

#ifndef _RAW_IO_H
#define _RAW_IO_H

#ifdef __KERNEL__

#include <asm/types.h>
/* Values for nocacheflag and cmode */
#define IOMAP_FULL_CACHING	0
#define IOMAP_NOCACHE_SER	1
#define IOMAP_NOCACHE_NONSER	2
#define IOMAP_WRITETHROUGH	3
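/*
 * Note: these values are passed as the cacheflag argument to __ioremap()
 * below to select the caching mode of the mapping.  IOMAP_NOCACHE_SER
 * (non-cached, serialized) is normally what is wanted for device registers;
 * the exact behaviour of each mode depends on the CPU's MMU cache modes.
 */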
extern void iounmap(void __iomem *addr);

extern void __iomem *__ioremap(unsigned long physaddr, unsigned long size,
			       int cacheflag);
extern void __iounmap(void *addr, unsigned long size);
/* ++roman: The assignments to temporary variables prevent gcc from sometimes
 * generating two accesses to memory, which may be undesirable for some
 * devices.
 */
#define in_8(addr) \
	({ u8 __v = (*(__force volatile u8 *) (addr)); __v; })
#define in_be16(addr) \
	({ u16 __v = (*(__force volatile u16 *) (addr)); __v; })
#define in_be32(addr) \
	({ u32 __v = (*(__force volatile u32 *) (addr)); __v; })
#define in_le16(addr) \
	({ u16 __v = le16_to_cpu(*(__force volatile __le16 *) (addr)); __v; })
#define in_le32(addr) \
	({ u32 __v = le32_to_cpu(*(__force volatile __le32 *) (addr)); __v; })

#define out_8(addr,b)	(void)((*(__force volatile u8 *) (addr)) = (b))
#define out_be16(addr,w)	(void)((*(__force volatile u16 *) (addr)) = (w))
#define out_be32(addr,l)	(void)((*(__force volatile u32 *) (addr)) = (l))
#define out_le16(addr,w)	(void)((*(__force volatile __le16 *) (addr)) = cpu_to_le16(w))
#define out_le32(addr,l)	(void)((*(__force volatile __le32 *) (addr)) = cpu_to_le32(l))
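/*
 * Each of the in_* and out_* macros above performs exactly one volatile
 * access of the named width.  The _be variants use the CPU's native
 * big-endian byte order; the _le variants byte-swap via le16_to_cpu()/
 * cpu_to_le16() (and the 32-bit equivalents) for little-endian devices.
 */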
#define raw_inb in_8
#define raw_inw in_be16
#define raw_inl in_be32
#define __raw_readb in_8
#define __raw_readw in_be16
#define __raw_readl in_be32

#define raw_outb(val,port) out_8((port),(val))
#define raw_outw(val,port) out_be16((port),(val))
#define raw_outl(val,port) out_be32((port),(val))
#define __raw_writeb(val,addr) out_8((addr),(val))
#define __raw_writew(val,addr) out_be16((addr),(val))
#define __raw_writel(val,addr) out_be32((addr),(val))
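/*
 * Illustrative use only (the physical base and register offsets here are
 * made up):
 *
 *	void __iomem *regs = __ioremap(phys_base, 0x100, IOMAP_NOCACHE_SER);
 *	u16 status = __raw_readw(regs + 2);
 *	__raw_writew(1, regs);
 *
 * Note that the __raw_* accessors defined above imply no memory barriers
 * and no byte swapping; they are plain volatile big-endian accesses.
 */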
static inline void raw_insb(volatile u8 __iomem *port, u8 *buf, unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++)
		*buf++ = in_8(port);
}

static inline void raw_outsb(volatile u8 __iomem *port, const u8 *buf,
			     unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++)
		out_8(port, *buf++);
}
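/*
 * The word and long string transfers below copy 'nr' values between a
 * memory buffer and a single, non-incrementing port address.  Each routine
 * first handles the remainder (nr & 15) with a simple dbra loop and then
 * moves the rest in chunks of 16 via an unrolled loop; dbra counts the
 * data register down to -1, hence the "- 1" when the count is loaded.
 */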
static inline void raw_insw(volatile u16 __iomem *port, u16 *buf, unsigned int nr)
{
	unsigned int tmp;

	if (nr & 15) {
		tmp = (nr & 15) - 1;
		asm volatile (
			"1: movew %2@,%0@+; dbra %1,1b"
			: "=a" (buf), "=d" (tmp)
			: "a" (port), "0" (buf),
			  "1" (tmp));
	}
	if (nr >> 4) {
		tmp = (nr >> 4) - 1;
		asm volatile (
			"1: "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"dbra %1,1b"
			: "=a" (buf), "=d" (tmp)
			: "a" (port), "0" (buf),
			  "1" (tmp));
	}
}

static inline void raw_outsw(volatile u16 __iomem *port, const u16 *buf,
			     unsigned int nr)
{
	unsigned int tmp;

	if (nr & 15) {
		tmp = (nr & 15) - 1;
		asm volatile (
			"1: movew %0@+,%2@; dbra %1,1b"
			: "=a" (buf), "=d" (tmp)
			: "a" (port), "0" (buf),
			  "1" (tmp));
	}
	if (nr >> 4) {
		tmp = (nr >> 4) - 1;
		asm volatile (
			"1: "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"dbra %1,1b"
			: "=a" (buf), "=d" (tmp)
			: "a" (port), "0" (buf),
			  "1" (tmp));
	}
}
static inline void raw_insl(volatile u32 __iomem *port, u32 *buf, unsigned int nr)
{
	unsigned int tmp;

	if (nr & 15) {
		tmp = (nr & 15) - 1;
		asm volatile (
			"1: movel %2@,%0@+; dbra %1,1b"
			: "=a" (buf), "=d" (tmp)
			: "a" (port), "0" (buf),
			  "1" (tmp));
	}
	if (nr >> 4) {
		tmp = (nr >> 4) - 1;
		asm volatile (
			"1: "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"dbra %1,1b"
			: "=a" (buf), "=d" (tmp)
			: "a" (port), "0" (buf),
			  "1" (tmp));
	}
}

static inline void raw_outsl(volatile u32 __iomem *port, const u32 *buf,
			     unsigned int nr)
{
	unsigned int tmp;

	if (nr & 15) {
		tmp = (nr & 15) - 1;
		asm volatile (
			"1: movel %0@+,%2@; dbra %1,1b"
			: "=a" (buf), "=d" (tmp)
			: "a" (port), "0" (buf),
			  "1" (tmp));
	}
	if (nr >> 4) {
		tmp = (nr >> 4) - 1;
		asm volatile (
			"1: "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"dbra %1,1b"
			: "=a" (buf), "=d" (tmp)
			: "a" (port), "0" (buf),
			  "1" (tmp));
	}
}
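/*
 * The *_swapw variants below transfer 16-bit words like raw_insw() and
 * raw_outsw(), but byte-swap every word with "rolw #8" as it is copied.
 * They are meant for hardware whose 16-bit data bus is wired byte-swapped
 * relative to the CPU (some m68k IDE interfaces, for example).
 */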
static inline void raw_insw_swapw(volatile u16 __iomem *port, u16 *buf,
				  unsigned int nr)
{
	if ((nr) % 8)
		__asm__ __volatile__
			("\tmovel %0,%/a0\n\t"
			 "movel %1,%/a1\n\t"
			 "movel %2,%/d6\n\t"
			 "subql #1,%/d6\n"
			 "1:\tmovew %/a0@,%/d0\n\t"
			 "rolw #8,%/d0\n\t"
			 "movew %/d0,%/a1@+\n\t"
			 "dbra %/d6,1b"
			 :
			 : "g" (port), "g" (buf), "g" (nr)
			 : "d0", "a0", "a1", "d6");
	else
		__asm__ __volatile__
			("movel %0,%/a0\n\t"
			 "movel %1,%/a1\n\t"
			 "movel %2,%/d6\n\t"
			 "lsrl #3,%/d6\n\t"
			 "subql #1,%/d6\n"
			 "1:\tmovew %/a0@,%/d0\n\t"
			 "rolw #8,%/d0\n\t"
			 "movew %/d0,%/a1@+\n\t"
			 "movew %/a0@,%/d0\n\t"
			 "rolw #8,%/d0\n\t"
			 "movew %/d0,%/a1@+\n\t"
			 "movew %/a0@,%/d0\n\t"
			 "rolw #8,%/d0\n\t"
			 "movew %/d0,%/a1@+\n\t"
			 "movew %/a0@,%/d0\n\t"
			 "rolw #8,%/d0\n\t"
			 "movew %/d0,%/a1@+\n\t"
			 "movew %/a0@,%/d0\n\t"
			 "rolw #8,%/d0\n\t"
			 "movew %/d0,%/a1@+\n\t"
			 "movew %/a0@,%/d0\n\t"
			 "rolw #8,%/d0\n\t"
			 "movew %/d0,%/a1@+\n\t"
			 "movew %/a0@,%/d0\n\t"
			 "rolw #8,%/d0\n\t"
			 "movew %/d0,%/a1@+\n\t"
			 "movew %/a0@,%/d0\n\t"
			 "rolw #8,%/d0\n\t"
			 "movew %/d0,%/a1@+\n\t"
			 "dbra %/d6,1b"
			 :
			 : "g" (port), "g" (buf), "g" (nr)
			 : "d0", "a0", "a1", "d6");
}
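/* As raw_insw_swapw(), but in the output direction: each word read from the
 * buffer is byte-swapped with "rolw #8" before being written to the port.
 */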
static inline void raw_outsw_swapw(volatile u16 __iomem *port, const u16 *buf,
				   unsigned int nr)
{
	if ((nr) % 8)
		__asm__ __volatile__
			("movel %0,%/a0\n\t"
			 "movel %1,%/a1\n\t"
			 "movel %2,%/d6\n\t"
			 "subql #1,%/d6\n"
			 "1:\tmovew %/a1@+,%/d0\n\t"
			 "rolw #8,%/d0\n\t"
			 "movew %/d0,%/a0@\n\t"
			 "dbra %/d6,1b"
			 :
			 : "g" (port), "g" (buf), "g" (nr)
			 : "d0", "a0", "a1", "d6");
	else
		__asm__ __volatile__
			("movel %0,%/a0\n\t"
			 "movel %1,%/a1\n\t"
			 "movel %2,%/d6\n\t"
			 "lsrl #3,%/d6\n\t"
			 "subql #1,%/d6\n"
			 "1:\tmovew %/a1@+,%/d0\n\t"
			 "rolw #8,%/d0\n\t"
			 "movew %/d0,%/a0@\n\t"
			 "movew %/a1@+,%/d0\n\t"
			 "rolw #8,%/d0\n\t"
			 "movew %/d0,%/a0@\n\t"
			 "movew %/a1@+,%/d0\n\t"
			 "rolw #8,%/d0\n\t"
			 "movew %/d0,%/a0@\n\t"
			 "movew %/a1@+,%/d0\n\t"
			 "rolw #8,%/d0\n\t"
			 "movew %/d0,%/a0@\n\t"
			 "movew %/a1@+,%/d0\n\t"
			 "rolw #8,%/d0\n\t"
			 "movew %/d0,%/a0@\n\t"
			 "movew %/a1@+,%/d0\n\t"
			 "rolw #8,%/d0\n\t"
			 "movew %/d0,%/a0@\n\t"
			 "movew %/a1@+,%/d0\n\t"
			 "rolw #8,%/d0\n\t"
			 "movew %/d0,%/a0@\n\t"
			 "movew %/a1@+,%/d0\n\t"
			 "rolw #8,%/d0\n\t"
			 "movew %/d0,%/a0@\n\t"
			 "dbra %/d6,1b"
			 :
			 : "g" (port), "g" (buf), "g" (nr)
			 : "d0", "a0", "a1", "d6");
}
#endif /* __KERNEL__ */
#endif /* _RAW_IO_H */