io.h

#ifndef _BFIN_IO_H
#define _BFIN_IO_H

#ifdef __KERNEL__

#ifndef __ASSEMBLY__
#include <linux/types.h>
#endif
#include <linux/compiler.h>
/*
 * These are for ISA/PCI shared memory _only_ and should never be used
 * on any other type of memory, including Zorro memory. They are meant to
 * access the bus in the bus byte order, which is little-endian.
 *
 * readX/writeX() are used to access memory-mapped devices. On some
 * architectures the memory-mapped IO stuff needs to be accessed
 * differently. On the bfin architecture, we just read/write the
 * memory location directly.
 */
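/*
 * For example (a sketch only: "regs" stands for an already-mapped device
 * register block and 0x10 for a hypothetical register offset, neither of
 * which is defined by this header):
 *
 *	unsigned short ctl = readw(regs + 0x10);
 *	writew(ctl | 0x1, regs + 0x10);
 */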
#ifndef __ASSEMBLY__

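/*
 * The readX() helpers below wrap the load in a cli/sti pair: "cli" saves the
 * current interrupt mask (IMASK) into a scratch register and disables
 * interrupts, the SSYNC drains the pipeline and store buffers before the
 * access, and "sti" restores the saved mask afterwards.
 */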
static inline unsigned char readb(void __iomem *addr)
{
	unsigned int val;
	int tmp;

	__asm__ __volatile__ (
		"cli %1;\n\t"
		"NOP; NOP; SSYNC;\n\t"
		"%0 = b [%2] (z);\n\t"
		"sti %1;\n\t"
		: "=d"(val), "=d"(tmp)
		: "a"(addr)
	);
	return (unsigned char) val;
}

static inline unsigned short readw(void __iomem *addr)
{
	unsigned int val;
	int tmp;

	__asm__ __volatile__ (
		"cli %1;\n\t"
		"NOP; NOP; SSYNC;\n\t"
		"%0 = w [%2] (z);\n\t"
		"sti %1;\n\t"
		: "=d"(val), "=d"(tmp)
		: "a"(addr)
	);
	return (unsigned short) val;
}

static inline unsigned int readl(void __iomem *addr)
{
	unsigned int val;
	int tmp;

	__asm__ __volatile__ (
		"cli %1;\n\t"
		"NOP; NOP; SSYNC;\n\t"
		"%0 = [%2];\n\t"
		"sti %1;\n\t"
		: "=d"(val), "=d"(tmp)
		: "a"(addr)
	);
	return val;
}
#endif /* __ASSEMBLY__ */

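/*
 * Unlike the readX() helpers above, the writeX() macros are plain volatile
 * stores; they do not disable interrupts around the access.
 */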
#define writeb(b,addr) (void)((*(volatile unsigned char *) (addr)) = (b))
#define writew(b,addr) (void)((*(volatile unsigned short *) (addr)) = (b))
#define writel(b,addr) (void)((*(volatile unsigned int *) (addr)) = (b))

#define __raw_readb readb
#define __raw_readw readw
#define __raw_readl readl
#define __raw_writeb writeb
#define __raw_writew writew
#define __raw_writel writel

#define memset_io(a,b,c)	memset((void *)(a),(b),(c))
#define memcpy_fromio(a,b,c)	memcpy((a),(void *)(b),(c))
#define memcpy_toio(a,b,c)	memcpy((void *)(a),(b),(c))
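
/*
 * Port I/O is memory mapped here: inb()/outb() and friends simply forward to
 * the readX()/writeX() accessors above, and the _p ("pause") variants add no
 * extra delay.
 */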
#define inb(addr) readb(addr)
#define inw(addr) readw(addr)
#define inl(addr) readl(addr)
#define outb(x,addr) ((void) writeb(x,addr))
#define outw(x,addr) ((void) writew(x,addr))
#define outl(x,addr) ((void) writel(x,addr))

#define inb_p(addr) inb(addr)
#define inw_p(addr) inw(addr)
#define inl_p(addr) inl(addr)
#define outb_p(x,addr) outb(x,addr)
#define outw_p(x,addr) outw(x,addr)
#define outl_p(x,addr) outl(x,addr)

#define ioread8_rep(a,d,c)	insb(a,d,c)
#define ioread16_rep(a,d,c)	insw(a,d,c)
#define ioread32_rep(a,d,c)	insl(a,d,c)
#define iowrite8_rep(a,s,c)	outsb(a,s,c)
#define iowrite16_rep(a,s,c)	outsw(a,s,c)
#define iowrite32_rep(a,s,c)	outsl(a,s,c)

#define ioread8(X)		readb(X)
#define ioread16(X)		readw(X)
#define ioread32(X)		readl(X)
#define iowrite8(val,X)		writeb(val,X)
#define iowrite16(val,X)	writew(val,X)
#define iowrite32(val,X)	writel(val,X)

#define IO_SPACE_LIMIT 0xffffffff

/* Values for nocacheflag and cmode */
#define IOMAP_NOCACHE_SER 1

#ifndef __ASSEMBLY__

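/*
 * String I/O helpers: transfer "count" items between a buffer and a single
 * port location. They are only declared here and implemented out of line.
 */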
extern void outsb(void __iomem *port, const void *addr, unsigned long count);
extern void outsw(void __iomem *port, const void *addr, unsigned long count);
extern void outsl(void __iomem *port, const void *addr, unsigned long count);

extern void insb(const void __iomem *port, void *addr, unsigned long count);
extern void insw(const void __iomem *port, void *addr, unsigned long count);
extern void insl(const void __iomem *port, void *addr, unsigned long count);

/*
 * Map some physical address range into the kernel address space.
 */
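/*
 * There is no address translation on this architecture, so this is an
 * identity mapping: the physical address is returned unchanged and the
 * cacheflag argument is ignored.
 */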
static inline void __iomem *__ioremap(unsigned long physaddr, unsigned long size,
				      int cacheflag)
{
	return (void __iomem *)physaddr;
}

/*
 * Unmap an ioremap()ed region again.
 */
static inline void iounmap(void *addr)
{
}

/*
 * __iounmap unmaps nearly everything, so be careful.
 * It no longer frees pointer/page tables, but that wasn't used anyway
 * and might be added back later.
 */
static inline void __iounmap(void *addr, unsigned long size)
{
}

/*
 * Set a new cache mode for some kernel address space.
 * The caller must push data for that range itself, if such data may already
 * be in the cache.
 */
static inline void kernel_set_cachemode(void *addr, unsigned long size,
					int cmode)
{
}

static inline void __iomem *ioremap(unsigned long physaddr, unsigned long size)
{
	return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
}
static inline void __iomem *ioremap_nocache(unsigned long physaddr,
					    unsigned long size)
{
	return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
}

extern void blkfin_inv_cache_all(void);

#endif /* __ASSEMBLY__ */
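
/*
 * A minimal usage sketch. The base address and register offsets below are
 * hypothetical examples, not values defined by this header:
 *
 *	void __iomem *regs = ioremap(0x20300000, 0x100);
 *	unsigned int id = readl(regs);
 *	writel(0x1, regs + 0x4);
 *	iounmap(regs);
 */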
#define ioport_map(port, nr)	((void __iomem *)(port))
#define ioport_unmap(addr)

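/*
 * There are no fine-grained DMA cache range operations: invalidation falls
 * back to invalidating the entire cache via blkfin_inv_cache_all(), and
 * dma_cache_wback() is a no-op.
 */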
#define dma_cache_inv(_start, _size)		do { blkfin_inv_cache_all(); } while (0)
#define dma_cache_wback(_start, _size)		do { } while (0)
#define dma_cache_wback_inv(_start, _size)	do { blkfin_inv_cache_all(); } while (0)

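/*
 * The kernel runs on a flat 1:1 memory map, so the page/physical/bus/virtual
 * conversions below are simple identity (or mem_map offset) calculations.
 */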
/* Pages to physical address... */
#define page_to_phys(page)	((page - mem_map) << PAGE_SHIFT)
#define page_to_bus(page)	((page - mem_map) << PAGE_SHIFT)

#define mm_ptov(vaddr)		((void *) (vaddr))
#define mm_vtop(vaddr)		((unsigned long) (vaddr))
#define phys_to_virt(vaddr)	((void *) (vaddr))
#define virt_to_phys(vaddr)	((unsigned long) (vaddr))

#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access.
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer.
 */
#define xlate_dev_kmem_ptr(p)	p

#endif /* __KERNEL__ */

#endif /* _BFIN_IO_H */