io.h 14 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567
  1. /*
  2. * arch/arm/mach-ixp4xx/include/mach/io.h
  3. *
  4. * Author: Deepak Saxena <dsaxena@plexity.net>
  5. *
  6. * Copyright (C) 2002-2005 MontaVista Software, Inc.
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License version 2 as
  10. * published by the Free Software Foundation.
  11. */
  12. #ifndef __ASM_ARM_ARCH_IO_H
  13. #define __ASM_ARM_ARCH_IO_H
  14. #include <linux/bitops.h>
  15. #include <mach/hardware.h>
  16. #define IO_SPACE_LIMIT 0xffff0000
  17. extern int (*ixp4xx_pci_read)(u32 addr, u32 cmd, u32* data);
  18. extern int ixp4xx_pci_write(u32 addr, u32 cmd, u32 data);
  19. /*
  20. * IXP4xx provides two methods of accessing PCI memory space:
  21. *
  22. * 1) A direct mapped window from 0x48000000 to 0x4bffffff (64MB).
  23. * To access PCI via this space, we simply ioremap() the BAR
  24. * into the kernel and we can use the standard read[bwl]/write[bwl]
* macros. This is the preferred method due to speed but it
* limits the system to just 64MB of PCI memory. This can be
* problematic if using video cards and other memory-heavy
* targets.
  29. *
  30. * 2) If > 64MB of memory space is required, the IXP4xx can be configured
  31. * to use indirect registers to access PCI (as we do below for I/O
  32. * transactions). This allows for up to 128MB (0x48000000 to 0x4fffffff)
  33. * of memory on the bus. The disadvantage of this is that every
  34. * PCI access requires three local register accesses plus a spinlock,
  35. * but in some cases the performance hit is acceptable. In addition,
  36. * you cannot mmap() PCI devices in this case.
  37. *
  38. */
#ifndef CONFIG_IXP4XX_INDIRECT_PCI

/*
 * Direct-mapped PCI window: an ioremap'd cookie is already a usable
 * CPU address, so the mem->PCI translation is the identity.
 */
#define __mem_pci(a)		(a)

#else

/*
 * In the case of using indirect PCI, we simply return the actual PCI
 * address and our read/write implementation use that to drive the
 * access registers. If something outside of PCI is ioremap'd, we
 * fallback to the default.
 */
  48. static inline void __iomem *
  49. __ixp4xx_ioremap(unsigned long addr, size_t size, unsigned int mtype)
  50. {
  51. if((addr < PCIBIOS_MIN_MEM) || (addr > 0x4fffffff))
  52. return __arm_ioremap(addr, size, mtype);
  53. return (void __iomem *)addr;
  54. }
  55. static inline void
  56. __ixp4xx_iounmap(void __iomem *addr)
  57. {
  58. if ((__force u32)addr >= VMALLOC_START)
  59. __iounmap(addr);
  60. }
#define __arch_ioremap(a, s, f)		__ixp4xx_ioremap(a, s, f)
#define __arch_iounmap(a)		__ixp4xx_iounmap(a)

/*
 * Route the standard MMIO accessors through the implementations
 * below, which bounce non-vmalloc (i.e. raw PCI) addresses through
 * the indirect PCI access registers.
 */
#define writeb(v, p)			__ixp4xx_writeb(v, p)
#define writew(v, p)			__ixp4xx_writew(v, p)
#define writel(v, p)			__ixp4xx_writel(v, p)

#define writesb(p, v, l)		__ixp4xx_writesb(p, v, l)
#define writesw(p, v, l)		__ixp4xx_writesw(p, v, l)
#define writesl(p, v, l)		__ixp4xx_writesl(p, v, l)

#define readb(p)			__ixp4xx_readb(p)
#define readw(p)			__ixp4xx_readw(p)
#define readl(p)			__ixp4xx_readl(p)

#define readsb(p, v, l)			__ixp4xx_readsb(p, v, l)
#define readsw(p, v, l)			__ixp4xx_readsw(p, v, l)
#define readsl(p, v, l)			__ixp4xx_readsl(p, v, l)
  75. static inline void
  76. __ixp4xx_writeb(u8 value, volatile void __iomem *p)
  77. {
  78. u32 addr = (u32)p;
  79. u32 n, byte_enables, data;
  80. if (addr >= VMALLOC_START) {
  81. __raw_writeb(value, addr);
  82. return;
  83. }
  84. n = addr % 4;
  85. byte_enables = (0xf & ~BIT(n)) << IXP4XX_PCI_NP_CBE_BESL;
  86. data = value << (8*n);
  87. ixp4xx_pci_write(addr, byte_enables | NP_CMD_MEMWRITE, data);
  88. }
  89. static inline void
  90. __ixp4xx_writesb(volatile void __iomem *bus_addr, const u8 *vaddr, int count)
  91. {
  92. while (count--)
  93. writeb(*vaddr++, bus_addr);
  94. }
  95. static inline void
  96. __ixp4xx_writew(u16 value, volatile void __iomem *p)
  97. {
  98. u32 addr = (u32)p;
  99. u32 n, byte_enables, data;
  100. if (addr >= VMALLOC_START) {
  101. __raw_writew(value, addr);
  102. return;
  103. }
  104. n = addr % 4;
  105. byte_enables = (0xf & ~(BIT(n) | BIT(n+1))) << IXP4XX_PCI_NP_CBE_BESL;
  106. data = value << (8*n);
  107. ixp4xx_pci_write(addr, byte_enables | NP_CMD_MEMWRITE, data);
  108. }
  109. static inline void
  110. __ixp4xx_writesw(volatile void __iomem *bus_addr, const u16 *vaddr, int count)
  111. {
  112. while (count--)
  113. writew(*vaddr++, bus_addr);
  114. }
  115. static inline void
  116. __ixp4xx_writel(u32 value, volatile void __iomem *p)
  117. {
  118. u32 addr = (__force u32)p;
  119. if (addr >= VMALLOC_START) {
  120. __raw_writel(value, p);
  121. return;
  122. }
  123. ixp4xx_pci_write(addr, NP_CMD_MEMWRITE, value);
  124. }
  125. static inline void
  126. __ixp4xx_writesl(volatile void __iomem *bus_addr, const u32 *vaddr, int count)
  127. {
  128. while (count--)
  129. writel(*vaddr++, bus_addr);
  130. }
  131. static inline unsigned char
  132. __ixp4xx_readb(const volatile void __iomem *p)
  133. {
  134. u32 addr = (u32)p;
  135. u32 n, byte_enables, data;
  136. if (addr >= VMALLOC_START)
  137. return __raw_readb(addr);
  138. n = addr % 4;
  139. byte_enables = (0xf & ~BIT(n)) << IXP4XX_PCI_NP_CBE_BESL;
  140. if (ixp4xx_pci_read(addr, byte_enables | NP_CMD_MEMREAD, &data))
  141. return 0xff;
  142. return data >> (8*n);
  143. }
  144. static inline void
  145. __ixp4xx_readsb(const volatile void __iomem *bus_addr, u8 *vaddr, u32 count)
  146. {
  147. while (count--)
  148. *vaddr++ = readb(bus_addr);
  149. }
  150. static inline unsigned short
  151. __ixp4xx_readw(const volatile void __iomem *p)
  152. {
  153. u32 addr = (u32)p;
  154. u32 n, byte_enables, data;
  155. if (addr >= VMALLOC_START)
  156. return __raw_readw(addr);
  157. n = addr % 4;
  158. byte_enables = (0xf & ~(BIT(n) | BIT(n+1))) << IXP4XX_PCI_NP_CBE_BESL;
  159. if (ixp4xx_pci_read(addr, byte_enables | NP_CMD_MEMREAD, &data))
  160. return 0xffff;
  161. return data>>(8*n);
  162. }
  163. static inline void
  164. __ixp4xx_readsw(const volatile void __iomem *bus_addr, u16 *vaddr, u32 count)
  165. {
  166. while (count--)
  167. *vaddr++ = readw(bus_addr);
  168. }
  169. static inline unsigned long
  170. __ixp4xx_readl(const volatile void __iomem *p)
  171. {
  172. u32 addr = (__force u32)p;
  173. u32 data;
  174. if (addr >= VMALLOC_START)
  175. return __raw_readl(p);
  176. if (ixp4xx_pci_read(addr, NP_CMD_MEMREAD, &data))
  177. return 0xffffffff;
  178. return data;
  179. }
  180. static inline void
  181. __ixp4xx_readsl(const volatile void __iomem *bus_addr, u32 *vaddr, u32 count)
  182. {
  183. while (count--)
  184. *vaddr++ = readl(bus_addr);
  185. }
/*
 * The generic _mem* helpers are fine here because they end up calling
 * the writeb/readb overrides above.
 */
#define memset_io(c,v,l)		_memset_io((c),(v),(l))
#define memcpy_fromio(a,c,l)		_memcpy_fromio((a),(c),(l))
#define memcpy_toio(c,a,l)		_memcpy_toio((c),(a),(l))

#endif	/* CONFIG_IXP4XX_INDIRECT_PCI */
#ifndef CONFIG_PCI

/* Without PCI there is no port I/O; fall back to the generic stub. */
#define	__io(v)		__typesafe_io(v)

#else

/*
 * IXP4xx does not have a transparent cpu -> PCI I/O translation
 * window. Instead, it has a set of registers that must be tweaked
 * with the proper byte lanes, command types, and address for the
 * transaction. This means that we need to override the default
 * I/O functions.
 */

#define outb(p, v)			__ixp4xx_outb(p, v)
#define outw(p, v)			__ixp4xx_outw(p, v)
#define outl(p, v)			__ixp4xx_outl(p, v)

#define outsb(p, v, l)			__ixp4xx_outsb(p, v, l)
#define outsw(p, v, l)			__ixp4xx_outsw(p, v, l)
#define outsl(p, v, l)			__ixp4xx_outsl(p, v, l)

#define inb(p)				__ixp4xx_inb(p)
#define inw(p)				__ixp4xx_inw(p)
#define inl(p)				__ixp4xx_inl(p)

#define insb(p, v, l)			__ixp4xx_insb(p, v, l)
#define insw(p, v, l)			__ixp4xx_insw(p, v, l)
#define insl(p, v, l)			__ixp4xx_insl(p, v, l)
  215. static inline void
  216. __ixp4xx_outb(u8 value, u32 addr)
  217. {
  218. u32 n, byte_enables, data;
  219. n = addr % 4;
  220. byte_enables = (0xf & ~BIT(n)) << IXP4XX_PCI_NP_CBE_BESL;
  221. data = value << (8*n);
  222. ixp4xx_pci_write(addr, byte_enables | NP_CMD_IOWRITE, data);
  223. }
  224. static inline void
  225. __ixp4xx_outsb(u32 io_addr, const u8 *vaddr, u32 count)
  226. {
  227. while (count--)
  228. outb(*vaddr++, io_addr);
  229. }
  230. static inline void
  231. __ixp4xx_outw(u16 value, u32 addr)
  232. {
  233. u32 n, byte_enables, data;
  234. n = addr % 4;
  235. byte_enables = (0xf & ~(BIT(n) | BIT(n+1))) << IXP4XX_PCI_NP_CBE_BESL;
  236. data = value << (8*n);
  237. ixp4xx_pci_write(addr, byte_enables | NP_CMD_IOWRITE, data);
  238. }
  239. static inline void
  240. __ixp4xx_outsw(u32 io_addr, const u16 *vaddr, u32 count)
  241. {
  242. while (count--)
  243. outw(cpu_to_le16(*vaddr++), io_addr);
  244. }
  245. static inline void
  246. __ixp4xx_outl(u32 value, u32 addr)
  247. {
  248. ixp4xx_pci_write(addr, NP_CMD_IOWRITE, value);
  249. }
  250. static inline void
  251. __ixp4xx_outsl(u32 io_addr, const u32 *vaddr, u32 count)
  252. {
  253. while (count--)
  254. outl(*vaddr++, io_addr);
  255. }
  256. static inline u8
  257. __ixp4xx_inb(u32 addr)
  258. {
  259. u32 n, byte_enables, data;
  260. n = addr % 4;
  261. byte_enables = (0xf & ~BIT(n)) << IXP4XX_PCI_NP_CBE_BESL;
  262. if (ixp4xx_pci_read(addr, byte_enables | NP_CMD_IOREAD, &data))
  263. return 0xff;
  264. return data >> (8*n);
  265. }
  266. static inline void
  267. __ixp4xx_insb(u32 io_addr, u8 *vaddr, u32 count)
  268. {
  269. while (count--)
  270. *vaddr++ = inb(io_addr);
  271. }
  272. static inline u16
  273. __ixp4xx_inw(u32 addr)
  274. {
  275. u32 n, byte_enables, data;
  276. n = addr % 4;
  277. byte_enables = (0xf & ~(BIT(n) | BIT(n+1))) << IXP4XX_PCI_NP_CBE_BESL;
  278. if (ixp4xx_pci_read(addr, byte_enables | NP_CMD_IOREAD, &data))
  279. return 0xffff;
  280. return data>>(8*n);
  281. }
  282. static inline void
  283. __ixp4xx_insw(u32 io_addr, u16 *vaddr, u32 count)
  284. {
  285. while (count--)
  286. *vaddr++ = le16_to_cpu(inw(io_addr));
  287. }
  288. static inline u32
  289. __ixp4xx_inl(u32 addr)
  290. {
  291. u32 data;
  292. if (ixp4xx_pci_read(addr, NP_CMD_IOREAD, &data))
  293. return 0xffffffff;
  294. return data;
  295. }
  296. static inline void
  297. __ixp4xx_insl(u32 io_addr, u32 *vaddr, u32 count)
  298. {
  299. while (count--)
  300. *vaddr++ = inl(io_addr);
  301. }
  302. #define PIO_OFFSET 0x10000UL
  303. #define PIO_MASK 0x0ffffUL
  304. #define __is_io_address(p) (((unsigned long)p >= PIO_OFFSET) && \
  305. ((unsigned long)p <= (PIO_MASK + PIO_OFFSET)))
/*
 * ioread8: cookies inside the PIO window come from ioport_map(), so
 * strip the offset and do an indirect I/O-space read; everything else
 * is MMIO and goes through the memory-space path.
 */
static inline unsigned int
__ixp4xx_ioread8(const void __iomem *addr)
{
	unsigned long port = (unsigned long __force)addr;
	if (__is_io_address(port))
		return	(unsigned int)__ixp4xx_inb(port & PIO_MASK);
	else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
		/* direct-mapped PCI memory: plain load */
		return (unsigned int)__raw_readb(port);
#else
		/* indirect PCI memory read (or vmalloc fallback) */
		return (unsigned int)__ixp4xx_readb(addr);
#endif
}

/* Repeated ioread8 from one location into a buffer. */
static inline void
__ixp4xx_ioread8_rep(const void __iomem *addr, void *vaddr, u32 count)
{
	unsigned long port = (unsigned long __force)addr;
	if (__is_io_address(port))
		__ixp4xx_insb(port & PIO_MASK, vaddr, count);
	else
#ifndef	CONFIG_IXP4XX_INDIRECT_PCI
		__raw_readsb(addr, vaddr, count);
#else
		__ixp4xx_readsb(addr, vaddr, count);
#endif
}
/*
 * ioread16: PIO cookies take the indirect I/O path; MMIO takes the
 * memory path.  The direct MMIO load is converted from bus (LE)
 * byte order.
 */
static inline unsigned int
__ixp4xx_ioread16(const void __iomem *addr)
{
	unsigned long port = (unsigned long __force)addr;
	if (__is_io_address(port))
		return	(unsigned int)__ixp4xx_inw(port & PIO_MASK);
	else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
		return le16_to_cpu(__raw_readw((u32)port));
#else
		return (unsigned int)__ixp4xx_readw(addr);
#endif
}

/* Repeated ioread16 from one location into a buffer. */
static inline void
__ixp4xx_ioread16_rep(const void __iomem *addr, void *vaddr, u32 count)
{
	unsigned long port = (unsigned long __force)addr;
	if (__is_io_address(port))
		__ixp4xx_insw(port & PIO_MASK, vaddr, count);
	else
#ifndef	CONFIG_IXP4XX_INDIRECT_PCI
		__raw_readsw(addr, vaddr, count);
#else
		__ixp4xx_readsw(addr, vaddr, count);
#endif
}
/*
 * ioread32: PIO cookies take the indirect I/O path; MMIO takes the
 * memory path, converting the direct load from bus (LE) byte order.
 */
static inline unsigned int
__ixp4xx_ioread32(const void __iomem *addr)
{
	unsigned long port = (unsigned long __force)addr;
	if (__is_io_address(port))
		return	(unsigned int)__ixp4xx_inl(port & PIO_MASK);
	else {
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
		return le32_to_cpu((__force __le32)__raw_readl(addr));
#else
		return (unsigned int)__ixp4xx_readl(addr);
#endif
	}
}

/* Repeated ioread32 from one location into a buffer. */
static inline void
__ixp4xx_ioread32_rep(const void __iomem *addr, void *vaddr, u32 count)
{
	unsigned long port = (unsigned long __force)addr;
	if (__is_io_address(port))
		__ixp4xx_insl(port & PIO_MASK, vaddr, count);
	else
#ifndef	CONFIG_IXP4XX_INDIRECT_PCI
		__raw_readsl(addr, vaddr, count);
#else
		__ixp4xx_readsl(addr, vaddr, count);
#endif
}
/*
 * iowrite8: PIO cookies take the indirect I/O-space path; MMIO cookies
 * take the memory-space path (direct store or indirect PCI write).
 */
static inline void
__ixp4xx_iowrite8(u8 value, void __iomem *addr)
{
	unsigned long port = (unsigned long __force)addr;
	if (__is_io_address(port))
		__ixp4xx_outb(value, port & PIO_MASK);
	else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
		__raw_writeb(value, port);
#else
		__ixp4xx_writeb(value, addr);
#endif
}

/* Repeated iowrite8 of a buffer to one location. */
static inline void
__ixp4xx_iowrite8_rep(void __iomem *addr, const void *vaddr, u32 count)
{
	unsigned long port = (unsigned long __force)addr;
	if (__is_io_address(port))
		__ixp4xx_outsb(port & PIO_MASK, vaddr, count);
	else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
		__raw_writesb(addr, vaddr, count);
#else
		__ixp4xx_writesb(addr, vaddr, count);
#endif
}
/*
 * iowrite16: PIO cookies take the indirect I/O path; MMIO cookies take
 * the memory path, with the direct store converted to bus (LE) order.
 */
static inline void
__ixp4xx_iowrite16(u16 value, void __iomem *addr)
{
	unsigned long port = (unsigned long __force)addr;
	if (__is_io_address(port))
		__ixp4xx_outw(value, port & PIO_MASK);
	else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
		__raw_writew(cpu_to_le16(value), addr);
#else
		__ixp4xx_writew(value, addr);
#endif
}

/* Repeated iowrite16 of a buffer to one location. */
static inline void
__ixp4xx_iowrite16_rep(void __iomem *addr, const void *vaddr, u32 count)
{
	unsigned long port = (unsigned long __force)addr;
	if (__is_io_address(port))
		__ixp4xx_outsw(port & PIO_MASK, vaddr, count);
	else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
		__raw_writesw(addr, vaddr, count);
#else
		__ixp4xx_writesw(addr, vaddr, count);
#endif
}
/*
 * iowrite32: PIO cookies take the indirect I/O path; MMIO cookies take
 * the memory path, with the direct store converted to bus (LE) order.
 */
static inline void
__ixp4xx_iowrite32(u32 value, void __iomem *addr)
{
	unsigned long port = (unsigned long __force)addr;
	if (__is_io_address(port))
		__ixp4xx_outl(value, port & PIO_MASK);
	else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
		__raw_writel((u32 __force)cpu_to_le32(value), addr);
#else
		__ixp4xx_writel(value, addr);
#endif
}

/* Repeated iowrite32 of a buffer to one location. */
static inline void
__ixp4xx_iowrite32_rep(void __iomem *addr, const void *vaddr, u32 count)
{
	unsigned long port = (unsigned long __force)addr;
	if (__is_io_address(port))
		__ixp4xx_outsl(port & PIO_MASK, vaddr, count);
	else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
		__raw_writesl(addr, vaddr, count);
#else
		__ixp4xx_writesl(addr, vaddr, count);
#endif
}
  463. #define ioread8(p) __ixp4xx_ioread8(p)
  464. #define ioread16(p) __ixp4xx_ioread16(p)
  465. #define ioread32(p) __ixp4xx_ioread32(p)
  466. #define ioread8_rep(p, v, c) __ixp4xx_ioread8_rep(p, v, c)
  467. #define ioread16_rep(p, v, c) __ixp4xx_ioread16_rep(p, v, c)
  468. #define ioread32_rep(p, v, c) __ixp4xx_ioread32_rep(p, v, c)
  469. #define iowrite8(v,p) __ixp4xx_iowrite8(v,p)
  470. #define iowrite16(v,p) __ixp4xx_iowrite16(v,p)
  471. #define iowrite32(v,p) __ixp4xx_iowrite32(v,p)
  472. #define iowrite8_rep(p, v, c) __ixp4xx_iowrite8_rep(p, v, c)
  473. #define iowrite16_rep(p, v, c) __ixp4xx_iowrite16_rep(p, v, c)
  474. #define iowrite32_rep(p, v, c) __ixp4xx_iowrite32_rep(p, v, c)
  475. #define ioport_map(port, nr) ((void __iomem*)(port + PIO_OFFSET))
  476. #define ioport_unmap(addr)
  477. #endif // !CONFIG_PCI
  478. #endif // __ASM_ARM_ARCH_IO_H