/*
 * s390 PCI I/O primitives: memory-mapped I/O accessors built on the
 * zpci load/store/store-block instructions.
 */
  1. #ifndef _ASM_S390_PCI_IO_H
  2. #define _ASM_S390_PCI_IO_H
  3. #ifdef CONFIG_PCI
  4. #include <linux/kernel.h>
  5. #include <linux/slab.h>
  6. #include <asm/pci_insn.h>
/*
 * I/O Map
 *
 * ioremap()ed PCI bar space is handed out as a cookie address with
 * bit 63 set (ZPCI_IOMAP_ADDR_BASE), the zpci_iomap_start[] slot index
 * in bits 62..48 and the offset into the bar in bits 47..0
 * (see ZPCI_ADDR/ZPCI_IDX/ZPCI_OFFSET below).
 */
#define ZPCI_IOMAP_SHIFT		48
#define ZPCI_IOMAP_ADDR_BASE		0x8000000000000000UL
#define ZPCI_IOMAP_ADDR_OFF_MASK	((1UL << ZPCI_IOMAP_SHIFT) - 1)
/* How many index slots fit between ADDR_BASE and the top of the address space */
#define ZPCI_IOMAP_MAX_ENTRIES	\
	((ULONG_MAX - ZPCI_IOMAP_ADDR_BASE + 1) / (1UL << ZPCI_IOMAP_SHIFT))
/*
 * Mask for the index bits 62..48; since bit 63 is set in ~OFF_MASK,
 * subtracting ADDR_BASE just clears that tag bit.
 */
#define ZPCI_IOMAP_ADDR_IDX_MASK	\
	(~ZPCI_IOMAP_ADDR_OFF_MASK - ZPCI_IOMAP_ADDR_BASE)
/*
 * One entry per iomap slot: identifies which PCI function and which bar
 * a cookie address refers to (fh and bar feed ZPCI_CREATE_REQ).
 */
struct zpci_iomap_entry {
	u32 fh;		/* PCI function handle, bits 63..32 of the request */
	u8 bar;		/* bar number / address space within the function */
	u16 count;	/* NOTE(review): presumably a mapping refcount -- confirm against the iomap code */
};

extern struct zpci_iomap_entry *zpci_iomap_start;
  21. #define ZPCI_ADDR(idx) (ZPCI_IOMAP_ADDR_BASE | ((u64) idx << ZPCI_IOMAP_SHIFT))
  22. #define ZPCI_IDX(addr) \
  23. (((__force u64) addr & ZPCI_IOMAP_ADDR_IDX_MASK) >> ZPCI_IOMAP_SHIFT)
  24. #define ZPCI_OFFSET(addr) \
  25. ((__force u64) addr & ZPCI_IOMAP_ADDR_OFF_MASK)
  26. #define ZPCI_CREATE_REQ(handle, space, len) \
  27. ((u64) handle << 32 | space << 16 | len)
/*
 * Generate zpci_read_<RETTYPE>(): read one LENGTH-byte value from an
 * iomap cookie address via zpci_load.  On a failing load the returned
 * data is forced to all ones (-1ULL), truncated to RETTYPE.
 */
#define zpci_read(LENGTH, RETTYPE)				\
static inline RETTYPE zpci_read_##RETTYPE(const volatile void __iomem *addr)	\
{									\
	struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)];	\
	u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH);	\
	u64 data;							\
	int rc;								\
									\
	rc = zpci_load(&data, req, ZPCI_OFFSET(addr));			\
	if (rc)								\
		data = -1ULL;						\
	return (RETTYPE) data;						\
}
/*
 * Generate zpci_write_##VALTYPE(): write one VALTYPE-sized value to an
 * iomap cookie address via zpci_store.  The store's condition code is
 * deliberately ignored -- MMIO write accessors return void.
 */
#define zpci_write(LENGTH, VALTYPE)				\
static inline void zpci_write_##VALTYPE(VALTYPE val,		\
					const volatile void __iomem *addr)	\
{									\
	struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)];	\
	u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH);	\
	u64 data = (VALTYPE) val;					\
									\
	zpci_store(data, req, ZPCI_OFFSET(addr));			\
}
/* Instantiate the basic accessors: zpci_read_u8..u64 and zpci_write_u8..u64. */
zpci_read(8, u64)
zpci_read(4, u32)
zpci_read(2, u16)
zpci_read(1, u8)
zpci_write(8, u64)
zpci_write(4, u32)
zpci_write(2, u16)
zpci_write(1, u8)
  59. static inline int zpci_write_single(u64 req, const u64 *data, u64 offset, u8 len)
  60. {
  61. u64 val;
  62. switch (len) {
  63. case 1:
  64. val = (u64) *((u8 *) data);
  65. break;
  66. case 2:
  67. val = (u64) *((u16 *) data);
  68. break;
  69. case 4:
  70. val = (u64) *((u32 *) data);
  71. break;
  72. case 8:
  73. val = (u64) *((u64 *) data);
  74. break;
  75. default:
  76. val = 0; /* let FW report error */
  77. break;
  78. }
  79. return zpci_store(val, req, offset);
  80. }
  81. static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len)
  82. {
  83. u64 data;
  84. int cc;
  85. cc = zpci_load(&data, req, offset);
  86. if (cc)
  87. goto out;
  88. switch (len) {
  89. case 1:
  90. *((u8 *) dst) = (u8) data;
  91. break;
  92. case 2:
  93. *((u16 *) dst) = (u16) data;
  94. break;
  95. case 4:
  96. *((u32 *) dst) = (u32) data;
  97. break;
  98. case 8:
  99. *((u64 *) dst) = (u64) data;
  100. break;
  101. }
  102. out:
  103. return cc;
  104. }
/*
 * Issue one block store of the buffer at *data (up to 128 bytes -- see
 * the max passed by zpci_memcpy_toio) to @offset of the space in @req.
 * Returns the condition code of zpci_store_block.
 */
static inline int zpci_write_block(u64 req, const u64 *data, u64 offset)
{
	return zpci_store_block(data, req, offset);
}
  109. static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max)
  110. {
  111. int count = len > max ? max : len, size = 1;
  112. while (!(src & 0x1) && !(dst & 0x1) && ((size << 1) <= count)) {
  113. dst = dst >> 1;
  114. src = src >> 1;
  115. size = size << 1;
  116. }
  117. return size;
  118. }
  119. static inline int zpci_memcpy_fromio(void *dst,
  120. const volatile void __iomem *src,
  121. unsigned long n)
  122. {
  123. struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(src)];
  124. u64 req, offset = ZPCI_OFFSET(src);
  125. int size, rc = 0;
  126. while (n > 0) {
  127. size = zpci_get_max_write_size((u64 __force) src,
  128. (u64) dst, n, 8);
  129. req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size);
  130. rc = zpci_read_single(req, dst, offset, size);
  131. if (rc)
  132. break;
  133. offset += size;
  134. dst += size;
  135. n -= size;
  136. }
  137. return rc;
  138. }
  139. static inline int zpci_memcpy_toio(volatile void __iomem *dst,
  140. const void *src, unsigned long n)
  141. {
  142. struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(dst)];
  143. u64 req, offset = ZPCI_OFFSET(dst);
  144. int size, rc = 0;
  145. if (!src)
  146. return -EINVAL;
  147. while (n > 0) {
  148. size = zpci_get_max_write_size((u64 __force) dst,
  149. (u64) src, n, 128);
  150. req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size);
  151. if (size > 8) /* main path */
  152. rc = zpci_write_block(req, src, offset);
  153. else
  154. rc = zpci_write_single(req, src, offset, size);
  155. if (rc)
  156. break;
  157. offset += size;
  158. src += size;
  159. n -= size;
  160. }
  161. return rc;
  162. }
  163. static inline int zpci_memset_io(volatile void __iomem *dst,
  164. unsigned char val, size_t count)
  165. {
  166. u8 *src = kmalloc(count, GFP_KERNEL);
  167. int rc;
  168. if (src == NULL)
  169. return -ENOMEM;
  170. memset(src, val, count);
  171. rc = zpci_memcpy_toio(dst, src, count);
  172. kfree(src);
  173. return rc;
  174. }
  175. #endif /* CONFIG_PCI */
  176. #endif /* _ASM_S390_PCI_IO_H */