@@ -25,184 +25,54 @@
 #define XCHAL_KIO_SIZE 0x10000000
 
 #define IOADDR(x) (XCHAL_KIO_BYPASS_VADDR + (x))
+#define IO_SPACE_LIMIT ~0
 
+#ifdef CONFIG_MMU
 /*
- * swap functions to change byte order from little-endian to big-endian and
- * vice versa.
- */
-
-static inline unsigned short _swapw (unsigned short v)
-{
-        return (v << 8) | (v >> 8);
-}
-
-static inline unsigned int _swapl (unsigned int v)
-{
-        return (v << 24) | ((v & 0xff00) << 8) | ((v >> 8) & 0xff00) | (v >> 24);
-}
-
-/*
- * Change virtual addresses to physical addresses and vv.
- * These are trivial on the 1:1 Linux/Xtensa mapping
- */
-
-static inline unsigned long virt_to_phys(volatile void * address)
-{
-        return __pa(address);
-}
-
-static inline void * phys_to_virt(unsigned long address)
-{
-        return __va(address);
-}
-
-/*
- * virt_to_bus and bus_to_virt are deprecated.
- */
-
-#define virt_to_bus(x) virt_to_phys(x)
-#define bus_to_virt(x) phys_to_virt(x)
-
-/*
- * Return the virtual (cached) address for the specified bus memory.
+ * Return the virtual address for the specified bus memory.
  * Note that we currently don't support any address outside the KIO segment.
  */
-
-static inline void *ioremap(unsigned long offset, unsigned long size)
+static inline void __iomem *ioremap_nocache(unsigned long offset,
+                unsigned long size)
 {
-#ifdef CONFIG_MMU
         if (offset >= XCHAL_KIO_PADDR
-            && offset < XCHAL_KIO_PADDR + XCHAL_KIO_SIZE)
+            && offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
                 return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_BYPASS_VADDR);
         else
                 BUG();
-#else
-        return (void *)offset;
-#endif
 }
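
The new form of the range check is a correctness fix, not just style: with
XCHAL_KIO_PADDR at 0xf0000000 and XCHAL_KIO_SIZE at 0x10000000, the old upper
bound XCHAL_KIO_PADDR + XCHAL_KIO_SIZE wraps to 0 in 32-bit unsigned
arithmetic, so the old comparison could never be true. A worked example (the
0xfd000000 offset is made up for illustration):

        unsigned long offset = 0xfd000000;      /* hypothetical KIO-range address */

        /* old: offset < 0xf0000000 + 0x10000000 wraps to offset < 0, never true */
        /* new: offset - 0xf0000000 = 0x0d000000 < 0x10000000, accepted; the
         * preceding offset >= XCHAL_KIO_PADDR test keeps the subtraction from
         * going out of range for addresses below the KIO window. */
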
 
-static inline void *ioremap_nocache(unsigned long offset, unsigned long size)
+static inline void __iomem *ioremap_cache(unsigned long offset,
+                unsigned long size)
 {
-#ifdef CONFIG_MMU
         if (offset >= XCHAL_KIO_PADDR
-            && offset < XCHAL_KIO_PADDR + XCHAL_KIO_SIZE)
+            && offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
                 return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_CACHED_VADDR);
         else
                 BUG();
-#else
-        return (void *)offset;
-#endif
-}
-
-static inline void iounmap(void *addr)
-{
 }
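
The two mapping functions differ only in which KIO alias they hand back:
ioremap_nocache() returns the offset inside the bypass (uncached) window at
XCHAL_KIO_BYPASS_VADDR, while ioremap_cache() returns the same offset inside
the cached window at XCHAL_KIO_CACHED_VADDR. A sketch, with a made-up device
offset within the KIO segment:

        unsigned long pa = XCHAL_KIO_PADDR + 0x50000;       /* hypothetical device */

        void __iomem *regs = ioremap_nocache(pa, 0x1000);   /* bypass alias: registers */
        void __iomem *buf  = ioremap_cache(pa, 0x1000);     /* cached alias of the same block */
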
 
-/*
- * Generic I/O
- */
-
-#define readb(addr) \
-        ({ unsigned char __v = (*(volatile unsigned char *)(addr)); __v; })
-#define readw(addr) \
-        ({ unsigned short __v = (*(volatile unsigned short *)(addr)); __v; })
-#define readl(addr) \
-        ({ unsigned int __v = (*(volatile unsigned int *)(addr)); __v; })
-#define writeb(b, addr) (void)((*(volatile unsigned char *)(addr)) = (b))
-#define writew(b, addr) (void)((*(volatile unsigned short *)(addr)) = (b))
-#define writel(b, addr) (void)((*(volatile unsigned int *)(addr)) = (b))
+#define ioremap_wc ioremap_nocache
 
-static inline __u8 __raw_readb(const volatile void __iomem *addr)
-{
-        return *(__force volatile __u8 *)(addr);
-}
-static inline __u16 __raw_readw(const volatile void __iomem *addr)
-{
-        return *(__force volatile __u16 *)(addr);
-}
-static inline __u32 __raw_readl(const volatile void __iomem *addr)
+static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
 {
-        return *(__force volatile __u32 *)(addr);
+        return ioremap_nocache(offset, size);
 }
-static inline void __raw_writeb(__u8 b, volatile void __iomem *addr)
-{
-        *(__force volatile __u8 *)(addr) = b;
-}
-static inline void __raw_writew(__u16 b, volatile void __iomem *addr)
-{
-        *(__force volatile __u16 *)(addr) = b;
-}
-static inline void __raw_writel(__u32 b, volatile void __iomem *addr)
+
+static inline void iounmap(volatile void __iomem *addr)
 {
-        *(__force volatile __u32 *)(addr) = b;
 }
-
-/* These are the definitions for the x86 IO instructions
- * inb/inw/inl/outb/outw/outl, the "string" versions
- * insb/insw/insl/outsb/outsw/outsl, and the "pausing" versions
- * inb_p/inw_p/...
- * The macros don't do byte-swapping.
- */
-
-#define inb(port) readb((u8 *)((port)))
-#define outb(val, port) writeb((val),(u8 *)((unsigned long)(port)))
-#define inw(port) readw((u16 *)((port)))
-#define outw(val, port) writew((val),(u16 *)((unsigned long)(port)))
-#define inl(port) readl((u32 *)((port)))
-#define outl(val, port) writel((val),(u32 *)((unsigned long)(port)))
-
-#define inb_p(port) inb((port))
-#define outb_p(val, port) outb((val), (port))
-#define inw_p(port) inw((port))
-#define outw_p(val, port) outw((val), (port))
-#define inl_p(port) inl((port))
-#define outl_p(val, port) outl((val), (port))
-
-extern void insb (unsigned long port, void *dst, unsigned long count);
-extern void insw (unsigned long port, void *dst, unsigned long count);
-extern void insl (unsigned long port, void *dst, unsigned long count);
-extern void outsb (unsigned long port, const void *src, unsigned long count);
-extern void outsw (unsigned long port, const void *src, unsigned long count);
-extern void outsl (unsigned long port, const void *src, unsigned long count);
-
-#define IO_SPACE_LIMIT ~0
-
-#define memset_io(a,b,c) memset((void *)(a),(b),(c))
-#define memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c))
-#define memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c))
-
-/* At this point the Xtensa doesn't provide byte swap instructions */
-
-#ifdef __XTENSA_EB__
-# define in_8(addr) (*(u8*)(addr))
-# define in_le16(addr) _swapw(*(u16*)(addr))
-# define in_le32(addr) _swapl(*(u32*)(addr))
-# define out_8(b, addr) *(u8*)(addr) = (b)
-# define out_le16(b, addr) *(u16*)(addr) = _swapw(b)
-# define out_le32(b, addr) *(u32*)(addr) = _swapl(b)
-#elif defined(__XTENSA_EL__)
-# define in_8(addr) (*(u8*)(addr))
-# define in_le16(addr) (*(u16*)(addr))
-# define in_le32(addr) (*(u32*)(addr))
-# define out_8(b, addr) *(u8*)(addr) = (b)
-# define out_le16(b, addr) *(u16*)(addr) = (b)
-# define out_le32(b, addr) *(u32*)(addr) = (b)
-#else
-# error processor byte order undefined!
-#endif
-
-
-/*
- * Convert a physical pointer to a virtual kernel pointer for /dev/mem access
- */
-#define xlate_dev_mem_ptr(p) __va(p)
+#endif /* CONFIG_MMU */
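
Only the KIO mapping above stays architecture-specific; the accessors drivers
call on the result (readb()/writeb() and friends, the in*/out* port macros,
insb()/outsb(), memcpy_fromio()/memcpy_toio()) now come from asm-generic/io.h.
A minimal sketch of the surviving API; the 0xfd050000 base and the register
offsets are hypothetical:

        void __iomem *regs = ioremap_nocache(0xfd050000, 0x1000);
        u32 status;

        writel(1, regs + 0x10);         /* hypothetical control register */
        status = readl(regs + 0x14);    /* hypothetical status register */
        iounmap(regs);                  /* a no-op on this port, kept for portability */
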
 
 /*
- * Convert a virtual cached pointer to an uncached pointer
+ * Generic I/O
  */
-#define xlate_dev_kmem_ptr(p) p
-
+#define readb_relaxed readb
+#define readw_relaxed readw
+#define readl_relaxed readl
 
 #endif /* __KERNEL__ */
 
+#include <asm-generic/io.h>
+
 #endif /* _XTENSA_IO_H */
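
The deleted in_le*/out_le* block, and the _swapw()/_swapl() helpers feeding
it, are likewise covered by the generic header: asm-generic/io.h defines
readw()/readl() as little-endian accessors (__raw_readl() wrapped in
le32_to_cpu() and friends), so the byte-order #ifdef maze goes away. On a
big-endian core this means, roughly (addr being some __iomem pointer):

        u32 old_style = in_le32(addr);  /* was _swapl(*(u32 *)(addr)) under __XTENSA_EB__ */
        u32 new_style = readl(addr);    /* le32_to_cpu(__raw_readl(addr)) via asm-generic/io.h */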