io.h 14 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569
  1. /*
  2. * arch/arm/mach-ixp4xx/include/mach/io.h
  3. *
  4. * Author: Deepak Saxena <dsaxena@plexity.net>
  5. *
  6. * Copyright (C) 2002-2005 MontaVista Software, Inc.
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License version 2 as
  10. * published by the Free Software Foundation.
  11. */
  12. #ifndef __ASM_ARM_ARCH_IO_H
  13. #define __ASM_ARM_ARCH_IO_H
  14. #include <linux/bitops.h>
  15. #include <mach/hardware.h>
  16. #define IO_SPACE_LIMIT 0xffff0000
  17. extern int (*ixp4xx_pci_read)(u32 addr, u32 cmd, u32* data);
  18. extern int ixp4xx_pci_write(u32 addr, u32 cmd, u32 data);
  19. /*
  20. * IXP4xx provides two methods of accessing PCI memory space:
  21. *
  22. * 1) A direct mapped window from 0x48000000 to 0x4bffffff (64MB).
  23. * To access PCI via this space, we simply ioremap() the BAR
  24. * into the kernel and we can use the standard read[bwl]/write[bwl]
  25. * macros. This is the preferred method due to speed but it
  26. * limits the system to just 64MB of PCI memory. This can be
  27. * problematic if using video cards and other memory-heavy
  28. * targets.
  29. *
  30. * 2) If > 64MB of memory space is required, the IXP4xx can be configured
  31. * to use indirect registers to access PCI (as we do below for I/O
  32. * transactions). This allows for up to 128MB (0x48000000 to 0x4fffffff)
  33. * of memory on the bus. The disadvantage of this is that every
  34. * PCI access requires three local register accesses plus a spinlock,
  35. * but in some cases the performance hit is acceptable. In addition,
  36. * you cannot mmap() PCI devices in this case.
  37. *
  38. */
  39. #ifndef CONFIG_IXP4XX_INDIRECT_PCI
  40. #define __mem_pci(a) (a)
  41. #else
  42. #include <linux/mm.h>
  43. /*
  44. * In the case of using indirect PCI, we simply return the actual PCI
  45. * address and our read/write implementation use that to drive the
  46. * access registers. If something outside of PCI is ioremap'd, we
  47. * fallback to the default.
  48. */
  49. static inline void __iomem *
  50. __ixp4xx_ioremap(unsigned long addr, size_t size, unsigned int mtype)
  51. {
  52. if((addr < PCIBIOS_MIN_MEM) || (addr > 0x4fffffff))
  53. return __arm_ioremap(addr, size, mtype);
  54. return (void __iomem *)addr;
  55. }
  56. static inline void
  57. __ixp4xx_iounmap(void __iomem *addr)
  58. {
  59. if ((__force u32)addr >= VMALLOC_START)
  60. __iounmap(addr);
  61. }
/*
 * Hook the arch ioremap/iounmap and route every MMIO accessor through
 * the __ixp4xx_* helpers so that indirect PCI addresses (returned
 * unmapped by __ixp4xx_ioremap above) are handled via the access
 * registers while static mappings still use raw loads/stores.
 */
#define __arch_ioremap(a, s, f) __ixp4xx_ioremap(a, s, f)
#define __arch_iounmap(a) __ixp4xx_iounmap(a)
#define writeb(v, p) __ixp4xx_writeb(v, p)
#define writew(v, p) __ixp4xx_writew(v, p)
#define writel(v, p) __ixp4xx_writel(v, p)
#define writesb(p, v, l) __ixp4xx_writesb(p, v, l)
#define writesw(p, v, l) __ixp4xx_writesw(p, v, l)
#define writesl(p, v, l) __ixp4xx_writesl(p, v, l)
#define readb(p) __ixp4xx_readb(p)
#define readw(p) __ixp4xx_readw(p)
#define readl(p) __ixp4xx_readl(p)
#define readsb(p, v, l) __ixp4xx_readsb(p, v, l)
#define readsw(p, v, l) __ixp4xx_readsw(p, v, l)
#define readsl(p, v, l) __ixp4xx_readsl(p, v, l)
  76. static inline void
  77. __ixp4xx_writeb(u8 value, volatile void __iomem *p)
  78. {
  79. u32 addr = (u32)p;
  80. u32 n, byte_enables, data;
  81. if (addr >= VMALLOC_START) {
  82. __raw_writeb(value, addr);
  83. return;
  84. }
  85. n = addr % 4;
  86. byte_enables = (0xf & ~BIT(n)) << IXP4XX_PCI_NP_CBE_BESL;
  87. data = value << (8*n);
  88. ixp4xx_pci_write(addr, byte_enables | NP_CMD_MEMWRITE, data);
  89. }
  90. static inline void
  91. __ixp4xx_writesb(volatile void __iomem *bus_addr, const u8 *vaddr, int count)
  92. {
  93. while (count--)
  94. writeb(*vaddr++, bus_addr);
  95. }
  96. static inline void
  97. __ixp4xx_writew(u16 value, volatile void __iomem *p)
  98. {
  99. u32 addr = (u32)p;
  100. u32 n, byte_enables, data;
  101. if (addr >= VMALLOC_START) {
  102. __raw_writew(value, addr);
  103. return;
  104. }
  105. n = addr % 4;
  106. byte_enables = (0xf & ~(BIT(n) | BIT(n+1))) << IXP4XX_PCI_NP_CBE_BESL;
  107. data = value << (8*n);
  108. ixp4xx_pci_write(addr, byte_enables | NP_CMD_MEMWRITE, data);
  109. }
  110. static inline void
  111. __ixp4xx_writesw(volatile void __iomem *bus_addr, const u16 *vaddr, int count)
  112. {
  113. while (count--)
  114. writew(*vaddr++, bus_addr);
  115. }
  116. static inline void
  117. __ixp4xx_writel(u32 value, volatile void __iomem *p)
  118. {
  119. u32 addr = (__force u32)p;
  120. if (addr >= VMALLOC_START) {
  121. __raw_writel(value, p);
  122. return;
  123. }
  124. ixp4xx_pci_write(addr, NP_CMD_MEMWRITE, value);
  125. }
  126. static inline void
  127. __ixp4xx_writesl(volatile void __iomem *bus_addr, const u32 *vaddr, int count)
  128. {
  129. while (count--)
  130. writel(*vaddr++, bus_addr);
  131. }
  132. static inline unsigned char
  133. __ixp4xx_readb(const volatile void __iomem *p)
  134. {
  135. u32 addr = (u32)p;
  136. u32 n, byte_enables, data;
  137. if (addr >= VMALLOC_START)
  138. return __raw_readb(addr);
  139. n = addr % 4;
  140. byte_enables = (0xf & ~BIT(n)) << IXP4XX_PCI_NP_CBE_BESL;
  141. if (ixp4xx_pci_read(addr, byte_enables | NP_CMD_MEMREAD, &data))
  142. return 0xff;
  143. return data >> (8*n);
  144. }
  145. static inline void
  146. __ixp4xx_readsb(const volatile void __iomem *bus_addr, u8 *vaddr, u32 count)
  147. {
  148. while (count--)
  149. *vaddr++ = readb(bus_addr);
  150. }
  151. static inline unsigned short
  152. __ixp4xx_readw(const volatile void __iomem *p)
  153. {
  154. u32 addr = (u32)p;
  155. u32 n, byte_enables, data;
  156. if (addr >= VMALLOC_START)
  157. return __raw_readw(addr);
  158. n = addr % 4;
  159. byte_enables = (0xf & ~(BIT(n) | BIT(n+1))) << IXP4XX_PCI_NP_CBE_BESL;
  160. if (ixp4xx_pci_read(addr, byte_enables | NP_CMD_MEMREAD, &data))
  161. return 0xffff;
  162. return data>>(8*n);
  163. }
  164. static inline void
  165. __ixp4xx_readsw(const volatile void __iomem *bus_addr, u16 *vaddr, u32 count)
  166. {
  167. while (count--)
  168. *vaddr++ = readw(bus_addr);
  169. }
  170. static inline unsigned long
  171. __ixp4xx_readl(const volatile void __iomem *p)
  172. {
  173. u32 addr = (__force u32)p;
  174. u32 data;
  175. if (addr >= VMALLOC_START)
  176. return __raw_readl(p);
  177. if (ixp4xx_pci_read(addr, NP_CMD_MEMREAD, &data))
  178. return 0xffffffff;
  179. return data;
  180. }
  181. static inline void
  182. __ixp4xx_readsl(const volatile void __iomem *bus_addr, u32 *vaddr, u32 count)
  183. {
  184. while (count--)
  185. *vaddr++ = readl(bus_addr);
  186. }
  187. /*
  188. * We can use the built-in functions b/c they end up calling writeb/readb
  189. */
/*
 * The generic _memset_io/_memcpy helpers are built on readb/writeb,
 * which are overridden above, so they remain correct with indirect PCI.
 */
#define memset_io(c,v,l) _memset_io((c),(v),(l))
#define memcpy_fromio(a,c,l) _memcpy_fromio((a),(c),(l))
#define memcpy_toio(c,a,l) _memcpy_toio((c),(a),(l))
  193. #endif
  194. #ifndef CONFIG_PCI
  195. #define __io(v) v
  196. #else
  197. /*
  198. * IXP4xx does not have a transparent cpu -> PCI I/O translation
  199. * window. Instead, it has a set of registers that must be tweaked
  200. * with the proper byte lanes, command types, and address for the
  201. * transaction. This means that we need to override the default
  202. * I/O functions.
  203. */
/*
 * Route all port I/O through the indirect access registers.  The macro
 * parameters are named (p, v) but simply forward arguments in call
 * order, so the usual Linux outX(value, port) convention still lands
 * on __ixp4xx_outX(value, addr).
 */
#define outb(p, v) __ixp4xx_outb(p, v)
#define outw(p, v) __ixp4xx_outw(p, v)
#define outl(p, v) __ixp4xx_outl(p, v)
#define outsb(p, v, l) __ixp4xx_outsb(p, v, l)
#define outsw(p, v, l) __ixp4xx_outsw(p, v, l)
#define outsl(p, v, l) __ixp4xx_outsl(p, v, l)
#define inb(p) __ixp4xx_inb(p)
#define inw(p) __ixp4xx_inw(p)
#define inl(p) __ixp4xx_inl(p)
#define insb(p, v, l) __ixp4xx_insb(p, v, l)
#define insw(p, v, l) __ixp4xx_insw(p, v, l)
#define insl(p, v, l) __ixp4xx_insl(p, v, l)
  216. static inline void
  217. __ixp4xx_outb(u8 value, u32 addr)
  218. {
  219. u32 n, byte_enables, data;
  220. n = addr % 4;
  221. byte_enables = (0xf & ~BIT(n)) << IXP4XX_PCI_NP_CBE_BESL;
  222. data = value << (8*n);
  223. ixp4xx_pci_write(addr, byte_enables | NP_CMD_IOWRITE, data);
  224. }
  225. static inline void
  226. __ixp4xx_outsb(u32 io_addr, const u8 *vaddr, u32 count)
  227. {
  228. while (count--)
  229. outb(*vaddr++, io_addr);
  230. }
  231. static inline void
  232. __ixp4xx_outw(u16 value, u32 addr)
  233. {
  234. u32 n, byte_enables, data;
  235. n = addr % 4;
  236. byte_enables = (0xf & ~(BIT(n) | BIT(n+1))) << IXP4XX_PCI_NP_CBE_BESL;
  237. data = value << (8*n);
  238. ixp4xx_pci_write(addr, byte_enables | NP_CMD_IOWRITE, data);
  239. }
  240. static inline void
  241. __ixp4xx_outsw(u32 io_addr, const u16 *vaddr, u32 count)
  242. {
  243. while (count--)
  244. outw(cpu_to_le16(*vaddr++), io_addr);
  245. }
  246. static inline void
  247. __ixp4xx_outl(u32 value, u32 addr)
  248. {
  249. ixp4xx_pci_write(addr, NP_CMD_IOWRITE, value);
  250. }
  251. static inline void
  252. __ixp4xx_outsl(u32 io_addr, const u32 *vaddr, u32 count)
  253. {
  254. while (count--)
  255. outl(*vaddr++, io_addr);
  256. }
  257. static inline u8
  258. __ixp4xx_inb(u32 addr)
  259. {
  260. u32 n, byte_enables, data;
  261. n = addr % 4;
  262. byte_enables = (0xf & ~BIT(n)) << IXP4XX_PCI_NP_CBE_BESL;
  263. if (ixp4xx_pci_read(addr, byte_enables | NP_CMD_IOREAD, &data))
  264. return 0xff;
  265. return data >> (8*n);
  266. }
  267. static inline void
  268. __ixp4xx_insb(u32 io_addr, u8 *vaddr, u32 count)
  269. {
  270. while (count--)
  271. *vaddr++ = inb(io_addr);
  272. }
  273. static inline u16
  274. __ixp4xx_inw(u32 addr)
  275. {
  276. u32 n, byte_enables, data;
  277. n = addr % 4;
  278. byte_enables = (0xf & ~(BIT(n) | BIT(n+1))) << IXP4XX_PCI_NP_CBE_BESL;
  279. if (ixp4xx_pci_read(addr, byte_enables | NP_CMD_IOREAD, &data))
  280. return 0xffff;
  281. return data>>(8*n);
  282. }
  283. static inline void
  284. __ixp4xx_insw(u32 io_addr, u16 *vaddr, u32 count)
  285. {
  286. while (count--)
  287. *vaddr++ = le16_to_cpu(inw(io_addr));
  288. }
  289. static inline u32
  290. __ixp4xx_inl(u32 addr)
  291. {
  292. u32 data;
  293. if (ixp4xx_pci_read(addr, NP_CMD_IOREAD, &data))
  294. return 0xffffffff;
  295. return data;
  296. }
  297. static inline void
  298. __ixp4xx_insl(u32 io_addr, u32 *vaddr, u32 count)
  299. {
  300. while (count--)
  301. *vaddr++ = inl(io_addr);
  302. }
  303. #define PIO_OFFSET 0x10000UL
  304. #define PIO_MASK 0x0ffffUL
  305. #define __is_io_address(p) (((unsigned long)p >= PIO_OFFSET) && \
  306. ((unsigned long)p <= (PIO_MASK + PIO_OFFSET)))
/*
 * ioread8() replacement.  Cookies from ioport_map() are routed to the
 * indirect I/O-space accessor (with the PIO_OFFSET stripped); all other
 * values are PCI memory space, handled either as direct MMIO or via the
 * indirect PCI accessor depending on configuration.
 */
static inline unsigned int
__ixp4xx_ioread8(const void __iomem *addr)
{
	unsigned long port = (unsigned long __force)addr;
	if (__is_io_address(port))
		return (unsigned int)__ixp4xx_inb(port & PIO_MASK);
	else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
		/* direct-mapped PCI window: plain MMIO load */
		return (unsigned int)__raw_readb(port);
#else
		/* indirect PCI: go through the access registers */
		return (unsigned int)__ixp4xx_readb(addr);
#endif
}
  320. static inline void
  321. __ixp4xx_ioread8_rep(const void __iomem *addr, void *vaddr, u32 count)
  322. {
  323. unsigned long port = (unsigned long __force)addr;
  324. if (__is_io_address(port))
  325. __ixp4xx_insb(port & PIO_MASK, vaddr, count);
  326. else
  327. #ifndef CONFIG_IXP4XX_INDIRECT_PCI
  328. __raw_readsb(addr, vaddr, count);
  329. #else
  330. __ixp4xx_readsb(addr, vaddr, count);
  331. #endif
  332. }
  333. static inline unsigned int
  334. __ixp4xx_ioread16(const void __iomem *addr)
  335. {
  336. unsigned long port = (unsigned long __force)addr;
  337. if (__is_io_address(port))
  338. return (unsigned int)__ixp4xx_inw(port & PIO_MASK);
  339. else
  340. #ifndef CONFIG_IXP4XX_INDIRECT_PCI
  341. return le16_to_cpu(__raw_readw((u32)port));
  342. #else
  343. return (unsigned int)__ixp4xx_readw(addr);
  344. #endif
  345. }
  346. static inline void
  347. __ixp4xx_ioread16_rep(const void __iomem *addr, void *vaddr, u32 count)
  348. {
  349. unsigned long port = (unsigned long __force)addr;
  350. if (__is_io_address(port))
  351. __ixp4xx_insw(port & PIO_MASK, vaddr, count);
  352. else
  353. #ifndef CONFIG_IXP4XX_INDIRECT_PCI
  354. __raw_readsw(addr, vaddr, count);
  355. #else
  356. __ixp4xx_readsw(addr, vaddr, count);
  357. #endif
  358. }
  359. static inline unsigned int
  360. __ixp4xx_ioread32(const void __iomem *addr)
  361. {
  362. unsigned long port = (unsigned long __force)addr;
  363. if (__is_io_address(port))
  364. return (unsigned int)__ixp4xx_inl(port & PIO_MASK);
  365. else {
  366. #ifndef CONFIG_IXP4XX_INDIRECT_PCI
  367. return le32_to_cpu((__force __le32)__raw_readl(addr));
  368. #else
  369. return (unsigned int)__ixp4xx_readl(addr);
  370. #endif
  371. }
  372. }
  373. static inline void
  374. __ixp4xx_ioread32_rep(const void __iomem *addr, void *vaddr, u32 count)
  375. {
  376. unsigned long port = (unsigned long __force)addr;
  377. if (__is_io_address(port))
  378. __ixp4xx_insl(port & PIO_MASK, vaddr, count);
  379. else
  380. #ifndef CONFIG_IXP4XX_INDIRECT_PCI
  381. __raw_readsl(addr, vaddr, count);
  382. #else
  383. __ixp4xx_readsl(addr, vaddr, count);
  384. #endif
  385. }
  386. static inline void
  387. __ixp4xx_iowrite8(u8 value, void __iomem *addr)
  388. {
  389. unsigned long port = (unsigned long __force)addr;
  390. if (__is_io_address(port))
  391. __ixp4xx_outb(value, port & PIO_MASK);
  392. else
  393. #ifndef CONFIG_IXP4XX_INDIRECT_PCI
  394. __raw_writeb(value, port);
  395. #else
  396. __ixp4xx_writeb(value, addr);
  397. #endif
  398. }
  399. static inline void
  400. __ixp4xx_iowrite8_rep(void __iomem *addr, const void *vaddr, u32 count)
  401. {
  402. unsigned long port = (unsigned long __force)addr;
  403. if (__is_io_address(port))
  404. __ixp4xx_outsb(port & PIO_MASK, vaddr, count);
  405. else
  406. #ifndef CONFIG_IXP4XX_INDIRECT_PCI
  407. __raw_writesb(addr, vaddr, count);
  408. #else
  409. __ixp4xx_writesb(addr, vaddr, count);
  410. #endif
  411. }
  412. static inline void
  413. __ixp4xx_iowrite16(u16 value, void __iomem *addr)
  414. {
  415. unsigned long port = (unsigned long __force)addr;
  416. if (__is_io_address(port))
  417. __ixp4xx_outw(value, port & PIO_MASK);
  418. else
  419. #ifndef CONFIG_IXP4XX_INDIRECT_PCI
  420. __raw_writew(cpu_to_le16(value), addr);
  421. #else
  422. __ixp4xx_writew(value, addr);
  423. #endif
  424. }
  425. static inline void
  426. __ixp4xx_iowrite16_rep(void __iomem *addr, const void *vaddr, u32 count)
  427. {
  428. unsigned long port = (unsigned long __force)addr;
  429. if (__is_io_address(port))
  430. __ixp4xx_outsw(port & PIO_MASK, vaddr, count);
  431. else
  432. #ifndef CONFIG_IXP4XX_INDIRECT_PCI
  433. __raw_writesw(addr, vaddr, count);
  434. #else
  435. __ixp4xx_writesw(addr, vaddr, count);
  436. #endif
  437. }
  438. static inline void
  439. __ixp4xx_iowrite32(u32 value, void __iomem *addr)
  440. {
  441. unsigned long port = (unsigned long __force)addr;
  442. if (__is_io_address(port))
  443. __ixp4xx_outl(value, port & PIO_MASK);
  444. else
  445. #ifndef CONFIG_IXP4XX_INDIRECT_PCI
  446. __raw_writel((u32 __force)cpu_to_le32(value), addr);
  447. #else
  448. __ixp4xx_writel(value, addr);
  449. #endif
  450. }
  451. static inline void
  452. __ixp4xx_iowrite32_rep(void __iomem *addr, const void *vaddr, u32 count)
  453. {
  454. unsigned long port = (unsigned long __force)addr;
  455. if (__is_io_address(port))
  456. __ixp4xx_outsl(port & PIO_MASK, vaddr, count);
  457. else
  458. #ifndef CONFIG_IXP4XX_INDIRECT_PCI
  459. __raw_writesl(addr, vaddr, count);
  460. #else
  461. __ixp4xx_writesl(addr, vaddr, count);
  462. #endif
  463. }
/*
 * Publish the __ixp4xx_* implementations as the generic iomap API.
 * ioport_map() hands back a cookie (port + PIO_OFFSET) that the
 * ioreadX/iowriteX helpers recognize via __is_io_address(); since no
 * real mapping is created, ioport_unmap() is a no-op.
 */
#define ioread8(p) __ixp4xx_ioread8(p)
#define ioread16(p) __ixp4xx_ioread16(p)
#define ioread32(p) __ixp4xx_ioread32(p)
#define ioread8_rep(p, v, c) __ixp4xx_ioread8_rep(p, v, c)
#define ioread16_rep(p, v, c) __ixp4xx_ioread16_rep(p, v, c)
#define ioread32_rep(p, v, c) __ixp4xx_ioread32_rep(p, v, c)
#define iowrite8(v,p) __ixp4xx_iowrite8(v,p)
#define iowrite16(v,p) __ixp4xx_iowrite16(v,p)
#define iowrite32(v,p) __ixp4xx_iowrite32(v,p)
#define iowrite8_rep(p, v, c) __ixp4xx_iowrite8_rep(p, v, c)
#define iowrite16_rep(p, v, c) __ixp4xx_iowrite16_rep(p, v, c)
#define iowrite32_rep(p, v, c) __ixp4xx_iowrite32_rep(p, v, c)
#define ioport_map(port, nr) ((void __iomem*)(port + PIO_OFFSET))
#define ioport_unmap(addr)
  478. #endif // !CONFIG_PCI
  479. #endif // __ASM_ARM_ARCH_IO_H