/* raw_io.h */
/*
 * linux/include/asm-m68k/raw_io.h
 *
 * 10/20/00 RZ: - created from bits of io.h and ide.h to cleanup namespace
 *
 */

#ifndef _RAW_IO_H
#define _RAW_IO_H

#ifdef __KERNEL__

#include <asm/types.h>

/* Values for nocacheflag and cmode (cache mode used when ioremapping) */
#define IOMAP_FULL_CACHING		0
#define IOMAP_NOCACHE_SER		1
#define IOMAP_NOCACHE_NONSER		2
#define IOMAP_WRITETHROUGH		3

extern void iounmap(void __iomem *addr);

extern void __iomem *__ioremap(unsigned long physaddr, unsigned long size,
			       int cacheflag);
extern void __iounmap(void *addr, unsigned long size);

/* ++roman: The assignments to temp. vars avoid that gcc sometimes generates
 * two accesses to memory, which may be undesirable for some devices.
 */

/*
 * Single-value MMIO accessors.  in_*/out_* take the register address;
 * the be/le suffix names the byte order of the value *on the bus*:
 * be forms access the register directly (m68k is big-endian), le forms
 * byte-swap via le16_to_cpu()/cpu_to_le16() and friends.
 */
#define in_8(addr) \
    ({ u8 __v = (*(__force volatile u8 *) (addr)); __v; })
#define in_be16(addr) \
    ({ u16 __v = (*(__force volatile u16 *) (addr)); __v; })
#define in_be32(addr) \
    ({ u32 __v = (*(__force volatile u32 *) (addr)); __v; })
#define in_le16(addr) \
    ({ u16 __v = le16_to_cpu(*(__force volatile u16 *) (addr)); __v; })
#define in_le32(addr) \
    ({ u32 __v = le32_to_cpu(*(__force volatile u32 *) (addr)); __v; })

#define out_8(addr,b) (void)((*(__force volatile u8 *) (addr)) = (b))
#define out_be16(addr,w) (void)((*(__force volatile u16 *) (addr)) = (w))
#define out_be32(addr,l) (void)((*(__force volatile u32 *) (addr)) = (l))
#define out_le16(addr,w) (void)((*(__force volatile u16 *) (addr)) = cpu_to_le16(w))
#define out_le32(addr,l) (void)((*(__force volatile u32 *) (addr)) = cpu_to_le32(l))

/*
 * raw port accessors map onto the big-endian forms; note the raw_out*
 * wrappers also swap the argument order from (val, port) to (port, val).
 */
#define raw_inb in_8
#define raw_inw in_be16
#define raw_inl in_be32

#define raw_outb(val,port) out_8((port),(val))
#define raw_outw(val,port) out_be16((port),(val))
#define raw_outl(val,port) out_be32((port),(val))
  44. static inline void raw_insb(volatile u8 __iomem *port, u8 *buf, unsigned int len)
  45. {
  46. unsigned int i;
  47. for (i = 0; i < len; i++)
  48. *buf++ = in_8(port);
  49. }
  50. static inline void raw_outsb(volatile u8 __iomem *port, const u8 *buf,
  51. unsigned int len)
  52. {
  53. unsigned int i;
  54. for (i = 0; i < len; i++)
  55. out_8(port, *buf++);
  56. }
/*
 * raw_insw() - read 'nr' 16-bit words from the fixed port 'port' into 'buf'.
 *
 * The transfer is split in two: the remainder (nr mod 16) is moved one word
 * at a time, then the bulk (nr / 16 chunks) with a 16x-unrolled loop.
 * dbra decrements and branches while the counter is >= 0, so each counter
 * is primed with (count - 1).  "0"/"1" matching constraints keep buf and
 * tmp in the same registers across inputs and outputs.
 */
static inline void raw_insw(volatile u16 __iomem *port, u16 *buf, unsigned int nr)
{
	unsigned int tmp;

	/* leftover words (nr mod 16), one at a time */
	if (nr & 15) {
		tmp = (nr & 15) - 1;
		asm volatile (
			"1: movew %2@,%0@+; dbra %1,1b"
			: "=a" (buf), "=d" (tmp)
			: "a" (port), "0" (buf),
			  "1" (tmp));
	}
	/* bulk: 16 words per dbra iteration */
	if (nr >> 4) {
		tmp = (nr >> 4) - 1;
		asm volatile (
			"1: "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"dbra %1,1b"
			: "=a" (buf), "=d" (tmp)
			: "a" (port), "0" (buf),
			  "1" (tmp));
	}
}
/*
 * raw_outsw() - write 'nr' 16-bit words from 'buf' to the fixed port 'port'.
 *
 * Mirror image of raw_insw(): the remainder (nr mod 16) goes out one word
 * at a time, then the bulk in 16x-unrolled chunks.  dbra loops while the
 * counter is >= 0, hence the (count - 1) priming.
 */
static inline void raw_outsw(volatile u16 __iomem *port, const u16 *buf,
			     unsigned int nr)
{
	unsigned int tmp;

	/* leftover words (nr mod 16), one at a time */
	if (nr & 15) {
		tmp = (nr & 15) - 1;
		asm volatile (
			"1: movew %0@+,%2@; dbra %1,1b"
			: "=a" (buf), "=d" (tmp)
			: "a" (port), "0" (buf),
			  "1" (tmp));
	}
	/* bulk: 16 words per dbra iteration */
	if (nr >> 4) {
		tmp = (nr >> 4) - 1;
		asm volatile (
			"1: "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"dbra %1,1b"
			: "=a" (buf), "=d" (tmp)
			: "a" (port), "0" (buf),
			  "1" (tmp));
	}
}
/*
 * raw_insl() - read 'nr' 32-bit longwords from the fixed port 'port' into
 * 'buf'.  Same structure as raw_insw() but with movel: remainder
 * (nr mod 16) first, then 16x-unrolled bulk; dbra counters primed with
 * (count - 1).
 */
static inline void raw_insl(volatile u32 __iomem *port, u32 *buf, unsigned int nr)
{
	unsigned int tmp;

	/* leftover longwords (nr mod 16), one at a time */
	if (nr & 15) {
		tmp = (nr & 15) - 1;
		asm volatile (
			"1: movel %2@,%0@+; dbra %1,1b"
			: "=a" (buf), "=d" (tmp)
			: "a" (port), "0" (buf),
			  "1" (tmp));
	}
	/* bulk: 16 longwords per dbra iteration */
	if (nr >> 4) {
		tmp = (nr >> 4) - 1;
		asm volatile (
			"1: "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"dbra %1,1b"
			: "=a" (buf), "=d" (tmp)
			: "a" (port), "0" (buf),
			  "1" (tmp));
	}
}
/*
 * raw_outsl() - write 'nr' 32-bit longwords from 'buf' to the fixed port
 * 'port'.  Same structure as raw_outsw() but with movel: remainder
 * (nr mod 16) first, then 16x-unrolled bulk; dbra counters primed with
 * (count - 1).
 */
static inline void raw_outsl(volatile u32 __iomem *port, const u32 *buf,
			     unsigned int nr)
{
	unsigned int tmp;

	/* leftover longwords (nr mod 16), one at a time */
	if (nr & 15) {
		tmp = (nr & 15) - 1;
		asm volatile (
			"1: movel %0@+,%2@; dbra %1,1b"
			: "=a" (buf), "=d" (tmp)
			: "a" (port), "0" (buf),
			  "1" (tmp));
	}
	/* bulk: 16 longwords per dbra iteration */
	if (nr >> 4) {
		tmp = (nr >> 4) - 1;
		asm volatile (
			"1: "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"dbra %1,1b"
			: "=a" (buf), "=d" (tmp)
			: "a" (port), "0" (buf),
			  "1" (tmp));
	}
}
/*
 * raw_insw_swapw() - read 'nr' 16-bit words from the fixed port 'port'
 * into 'buf', byte-swapping each word (rolw #8) before storing it.
 * NOTE(review): presumably for hardware whose data-bus byte lanes are
 * crossed relative to the CPU — confirm against the callers.
 *
 * If nr is not a multiple of 8, the WHOLE transfer is done one word at a
 * time; otherwise an 8x-unrolled loop moves 8 words per dbra iteration
 * (lsrl #3 divides the count by 8).  Operands are loaded into the fixed
 * registers a0 (port), a1 (buf) and d6 (count), with d0 as scratch —
 * all four are declared in the clobber list.
 */
static inline void raw_insw_swapw(volatile u16 __iomem *port, u16 *buf,
				  unsigned int nr)
{
	if ((nr) % 8)
		/* count not divisible by 8: simple one-word-at-a-time loop */
		__asm__ __volatile__
			("\tmovel %0,%/a0\n\t"
			 "movel %1,%/a1\n\t"
			 "movel %2,%/d6\n\t"
			 "subql #1,%/d6\n"		/* dbra loops count times */
			 "1:\tmovew %/a0@,%/d0\n\t"
			 "rolw #8,%/d0\n\t"		/* swap the two bytes */
			 "movew %/d0,%/a1@+\n\t"
			 "dbra %/d6,1b"
			 :
			 : "g" (port), "g" (buf), "g" (nr)
			 : "d0", "a0", "a1", "d6");
	else
		/* count divisible by 8: 8x-unrolled loop */
		__asm__ __volatile__
			("movel %0,%/a0\n\t"
			 "movel %1,%/a1\n\t"
			 "movel %2,%/d6\n\t"
			 "lsrl #3,%/d6\n\t"		/* iterations = nr / 8 */
			 "subql #1,%/d6\n"
			 "1:\tmovew %/a0@,%/d0\n\t"
			 "rolw #8,%/d0\n\t"
			 "movew %/d0,%/a1@+\n\t"
			 "movew %/a0@,%/d0\n\t"
			 "rolw #8,%/d0\n\t"
			 "movew %/d0,%/a1@+\n\t"
			 "movew %/a0@,%/d0\n\t"
			 "rolw #8,%/d0\n\t"
			 "movew %/d0,%/a1@+\n\t"
			 "movew %/a0@,%/d0\n\t"
			 "rolw #8,%/d0\n\t"
			 "movew %/d0,%/a1@+\n\t"
			 "movew %/a0@,%/d0\n\t"
			 "rolw #8,%/d0\n\t"
			 "movew %/d0,%/a1@+\n\t"
			 "movew %/a0@,%/d0\n\t"
			 "rolw #8,%/d0\n\t"
			 "movew %/d0,%/a1@+\n\t"
			 "movew %/a0@,%/d0\n\t"
			 "rolw #8,%/d0\n\t"
			 "movew %/d0,%/a1@+\n\t"
			 "movew %/a0@,%/d0\n\t"
			 "rolw #8,%/d0\n\t"
			 "movew %/d0,%/a1@+\n\t"
			 "dbra %/d6,1b"
			 :
			 : "g" (port), "g" (buf), "g" (nr)
			 : "d0", "a0", "a1", "d6");
}
/*
 * raw_outsw_swapw() - write 'nr' 16-bit words from 'buf' to the fixed
 * port 'port', byte-swapping each word (rolw #8) before writing it.
 * Mirror image of raw_insw_swapw(): a1 walks the source buffer, a0
 * holds the (non-incrementing) port address.
 *
 * If nr is not a multiple of 8, the WHOLE transfer is done one word at a
 * time; otherwise an 8x-unrolled loop writes 8 words per dbra iteration
 * (lsrl #3 divides the count by 8).  d0 is scratch; a0/a1/d6/d0 are all
 * declared in the clobber list.
 */
static inline void raw_outsw_swapw(volatile u16 __iomem *port, const u16 *buf,
				   unsigned int nr)
{
	if ((nr) % 8)
		/* count not divisible by 8: simple one-word-at-a-time loop */
		__asm__ __volatile__
			("movel %0,%/a0\n\t"
			 "movel %1,%/a1\n\t"
			 "movel %2,%/d6\n\t"
			 "subql #1,%/d6\n"		/* dbra loops count times */
			 "1:\tmovew %/a1@+,%/d0\n\t"
			 "rolw #8,%/d0\n\t"		/* swap the two bytes */
			 "movew %/d0,%/a0@\n\t"
			 "dbra %/d6,1b"
			 :
			 : "g" (port), "g" (buf), "g" (nr)
			 : "d0", "a0", "a1", "d6");
	else
		/* count divisible by 8: 8x-unrolled loop */
		__asm__ __volatile__
			("movel %0,%/a0\n\t"
			 "movel %1,%/a1\n\t"
			 "movel %2,%/d6\n\t"
			 "lsrl #3,%/d6\n\t"		/* iterations = nr / 8 */
			 "subql #1,%/d6\n"
			 "1:\tmovew %/a1@+,%/d0\n\t"
			 "rolw #8,%/d0\n\t"
			 "movew %/d0,%/a0@\n\t"
			 "movew %/a1@+,%/d0\n\t"
			 "rolw #8,%/d0\n\t"
			 "movew %/d0,%/a0@\n\t"
			 "movew %/a1@+,%/d0\n\t"
			 "rolw #8,%/d0\n\t"
			 "movew %/d0,%/a0@\n\t"
			 "movew %/a1@+,%/d0\n\t"
			 "rolw #8,%/d0\n\t"
			 "movew %/d0,%/a0@\n\t"
			 "movew %/a1@+,%/d0\n\t"
			 "rolw #8,%/d0\n\t"
			 "movew %/d0,%/a0@\n\t"
			 "movew %/a1@+,%/d0\n\t"
			 "rolw #8,%/d0\n\t"
			 "movew %/d0,%/a0@\n\t"
			 "movew %/a1@+,%/d0\n\t"
			 "rolw #8,%/d0\n\t"
			 "movew %/d0,%/a0@\n\t"
			 "movew %/a1@+,%/d0\n\t"
			 "rolw #8,%/d0\n\t"
			 "movew %/d0,%/a0@\n\t"
			 "dbra %/d6,1b"
			 :
			 : "g" (port), "g" (buf), "g" (nr)
			 : "d0", "a0", "a1", "d6");
}
  311. #endif /* __KERNEL__ */
  312. #endif /* _RAW_IO_H */