pci_io.h

#ifndef _ASM_S390_PCI_IO_H
#define _ASM_S390_PCI_IO_H

#ifdef CONFIG_PCI

#include <linux/kernel.h>
#include <linux/slab.h>
#include <asm/pci_insn.h>

/* I/O Map */
#define ZPCI_IOMAP_MAX_ENTRIES          0x7fff
#define ZPCI_IOMAP_ADDR_BASE            0x8000000000000000ULL
#define ZPCI_IOMAP_ADDR_IDX_MASK        0x7fff000000000000ULL
#define ZPCI_IOMAP_ADDR_OFF_MASK        0x0000ffffffffffffULL
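
/*
 * A mapped PCI address encodes everything needed to reach the device:
 * bit 63 is set (ZPCI_IOMAP_ADDR_BASE), bits 48-62 index into
 * zpci_iomap_start and the low 48 bits are the offset into the BAR.
 */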
struct zpci_iomap_entry {
        u32 fh;
        u8 bar;
};

extern struct zpci_iomap_entry *zpci_iomap_start;

#define ZPCI_IDX(addr)                                                  \
        (((__force u64) addr & ZPCI_IOMAP_ADDR_IDX_MASK) >> 48)
#define ZPCI_OFFSET(addr)                                               \
        ((__force u64) addr & ZPCI_IOMAP_ADDR_OFF_MASK)

#define ZPCI_CREATE_REQ(handle, space, len)                             \
        ((u64) handle << 32 | space << 16 | len)
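
/*
 * The zpci_read()/zpci_write() macros below generate the basic MMIO
 * accessors: each looks up the iomap entry for the address, builds a
 * request word from the function handle, BAR and access length, and
 * issues a single PCI load or store instruction.  A failed load
 * returns all ones; the store variant does not check the return code.
 */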
#define zpci_read(LENGTH, RETTYPE)                                              \
static inline RETTYPE zpci_read_##RETTYPE(const volatile void __iomem *addr)    \
{                                                                               \
        struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)];     \
        u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH);               \
        u64 data;                                                               \
        int rc;                                                                 \
                                                                                \
        rc = pcilg_instr(&data, req, ZPCI_OFFSET(addr));                        \
        if (rc)                                                                 \
                data = -1ULL;                                                   \
        return (RETTYPE) data;                                                  \
}

#define zpci_write(LENGTH, VALTYPE)                                             \
static inline void zpci_write_##VALTYPE(VALTYPE val,                            \
                                        const volatile void __iomem *addr)      \
{                                                                               \
        struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)];     \
        u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH);               \
        u64 data = (VALTYPE) val;                                               \
                                                                                \
        pcistg_instr(data, req, ZPCI_OFFSET(addr));                             \
}
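
/* Instantiate zpci_read_u{8,16,32,64}() and zpci_write_u{8,16,32,64}() */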
zpci_read(8, u64)
zpci_read(4, u32)
zpci_read(2, u16)
zpci_read(1, u8)
zpci_write(8, u64)
zpci_write(4, u32)
zpci_write(2, u16)
zpci_write(1, u8)
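
/*
 * Store a 1, 2, 4 or 8 byte value taken from *data with a single PCI
 * store; any other length stores 0 so that the firmware reports the
 * error (see the default case below).
 */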
static inline int zpci_write_single(u64 req, const u64 *data, u64 offset, u8 len)
{
        u64 val;

        switch (len) {
        case 1:
                val = (u64) *((u8 *) data);
                break;
        case 2:
                val = (u64) *((u16 *) data);
                break;
        case 4:
                val = (u64) *((u32 *) data);
                break;
        case 8:
                val = (u64) *((u64 *) data);
                break;
        default:
                val = 0;                /* let FW report error */
                break;
        }
        return pcistg_instr(val, req, offset);
}
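
/*
 * Fetch up to 8 bytes with a single PCI load and copy the low len bytes
 * of the result to *dst; returns the condition code of the load.
 */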
static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len)
{
        u64 data;
        u8 cc;

        cc = pcilg_instr(&data, req, offset);
        switch (len) {
        case 1:
                *((u8 *) dst) = (u8) data;
                break;
        case 2:
                *((u16 *) dst) = (u16) data;
                break;
        case 4:
                *((u32 *) dst) = (u32) data;
                break;
        case 8:
                *((u64 *) dst) = (u64) data;
                break;
        }
        return cc;
}
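
/*
 * Write a larger chunk with a single PCI store block instruction;
 * the length is already encoded in req.
 */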
static inline int zpci_write_block(u64 req, const u64 *data, u64 offset)
{
        return pcistb_instr(data, req, offset);
}
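
/*
 * Return the largest power-of-two chunk size, capped at both max and
 * len, for which src and dst are equally well aligned.
 */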
static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max)
{
        int count = len > max ? max : len, size = 1;

        while (!(src & 0x1) && !(dst & 0x1) && ((size << 1) <= count)) {
                dst = dst >> 1;
                src = src >> 1;
                size = size << 1;
        }
        return size;
}
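
/*
 * Copy n bytes from a mapped PCI resource to normal memory, using the
 * largest suitably aligned chunks (at most 8 bytes per PCI load), and
 * stop on the first failed load.
 */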
static inline int zpci_memcpy_fromio(void *dst,
                                     const volatile void __iomem *src,
                                     unsigned long n)
{
        struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(src)];
        u64 req, offset = ZPCI_OFFSET(src);
        int size, rc = 0;

        while (n > 0) {
                size = zpci_get_max_write_size((u64) src, (u64) dst, n, 8);
                req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size);
                rc = zpci_read_single(req, dst, offset, size);
                if (rc)
                        break;
                offset += size;
                dst += size;
                n -= size;
        }
        return rc;
}
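
/*
 * Copy n bytes from normal memory to a mapped PCI resource.  Chunks
 * larger than 8 bytes (up to 128) are written with a PCI store block,
 * smaller chunks with a single PCI store.
 */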
static inline int zpci_memcpy_toio(volatile void __iomem *dst,
                                   const void *src, unsigned long n)
{
        struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(dst)];
        u64 req, offset = ZPCI_OFFSET(dst);
        int size, rc = 0;

        if (!src)
                return -EINVAL;

        while (n > 0) {
                size = zpci_get_max_write_size((u64) dst, (u64) src, n, 128);
                req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size);
                if (size > 8) /* main path */
                        rc = zpci_write_block(req, src, offset);
                else
                        rc = zpci_write_single(req, src, offset, size);
                if (rc)
                        break;
                offset += size;
                src += size;
                n -= size;
        }
        return rc;
}
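
/*
 * Fill count bytes of a mapped PCI resource with val by staging the
 * pattern in a temporarily allocated kernel buffer; can fail with
 * -ENOMEM.
 */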
static inline int zpci_memset_io(volatile void __iomem *dst,
                                 unsigned char val, size_t count)
{
        u8 *src = kmalloc(count, GFP_KERNEL);
        int rc;

        if (src == NULL)
                return -ENOMEM;
        memset(src, val, count);

        rc = zpci_memcpy_toio(dst, src, count);
        kfree(src);
        return rc;
}

#endif /* CONFIG_PCI */

#endif /* _ASM_S390_PCI_IO_H */