/*
 *  linux/drivers/net/ehea/ehea_qmr.h
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#ifndef __EHEA_QMR_H__
#define __EHEA_QMR_H__

#include "ehea.h"
#include "ehea_hw.h"

/*
 * page size of ehea hardware queues
 */
#define EHEA_PAGESHIFT         12
#define EHEA_PAGESIZE          (1UL << EHEA_PAGESHIFT)
#define EHEA_SECTSIZE          (1UL << 24)
#define EHEA_PAGES_PER_SECTION (EHEA_SECTSIZE >> EHEA_PAGESHIFT)

#if (1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE
#error eHEA module cannot work if kernel sectionsize < ehea sectionsize
#endif
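
/*
 * For illustration: with EHEA_PAGESHIFT = 12 a queue page is 4 KiB,
 * EHEA_SECTSIZE is 1UL << 24 = 16 MiB, so EHEA_PAGES_PER_SECTION
 * evaluates to 16 MiB >> 12 = 4096 pages per section.
 */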

/* Some abbreviations used here:
 *
 * WQE  - Work Queue Entry
 * SWQE - Send Work Queue Entry
 * RWQE - Receive Work Queue Entry
 * CQE  - Completion Queue Entry
 * EQE  - Event Queue Entry
 * MR   - Memory Region
 */

/* Use of WR_ID field for EHEA */
#define EHEA_WR_ID_COUNT   EHEA_BMASK_IBM(0, 19)
#define EHEA_WR_ID_TYPE    EHEA_BMASK_IBM(20, 23)
#define EHEA_SWQE2_TYPE    0x1
#define EHEA_SWQE3_TYPE    0x2
#define EHEA_RWQE2_TYPE    0x3
#define EHEA_RWQE3_TYPE    0x4
#define EHEA_WR_ID_INDEX   EHEA_BMASK_IBM(24, 47)
#define EHEA_WR_ID_REFILL  EHEA_BMASK_IBM(48, 63)
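
/*
 * Sketch of how the WR_ID fields are typically composed and decoded,
 * assuming the EHEA_BMASK_SET/EHEA_BMASK_GET helpers from ehea.h:
 *
 *	u64 wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
 *		  | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, count)
 *		  | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
 *
 *	type  = EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id);
 *	index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
 */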

struct ehea_vsgentry {
	u64 vaddr;
	u32 l_key;
	u32 len;
};

/* maximum number of sg entries allowed in a WQE */
#define EHEA_MAX_WQE_SG_ENTRIES  252
#define SWQE2_MAX_IMM            (0xD0 - 0x30)
#define SWQE3_MAX_IMM            224

/* tx control flags for swqe */
#define EHEA_SWQE_CRC                   0x8000
#define EHEA_SWQE_IP_CHECKSUM           0x4000
#define EHEA_SWQE_TCP_CHECKSUM          0x2000
#define EHEA_SWQE_TSO                   0x1000
#define EHEA_SWQE_SIGNALLED_COMPLETION  0x0800
#define EHEA_SWQE_VLAN_INSERT           0x0400
#define EHEA_SWQE_IMM_DATA_PRESENT      0x0200
#define EHEA_SWQE_DESCRIPTORS_PRESENT   0x0100
#define EHEA_SWQE_WRAP_CTL_REC          0x0080
#define EHEA_SWQE_WRAP_CTL_FORCE        0x0040
#define EHEA_SWQE_BIND                  0x0020
#define EHEA_SWQE_PURGE                 0x0010

/* sizeof(struct ehea_swqe) less the union */
#define SWQE_HEADER_SIZE 32

struct ehea_swqe {
	u64 wr_id;
	u16 tx_control;
	u16 vlan_tag;
	u8 reserved1;
	u8 ip_start;
	u8 ip_end;
	u8 immediate_data_length;
	u8 tcp_offset;
	u8 reserved2;
	u16 tcp_end;
	u8 wrap_tag;
	u8 descriptors;		/* number of valid descriptors in WQE */
	u16 reserved3;
	u16 reserved4;
	u16 mss;
	u32 reserved5;
	union {
		/* Send WQE Format 1 */
		struct {
			struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
		} no_immediate_data;

		/* Send WQE Format 2 */
		struct {
			struct ehea_vsgentry sg_entry;
			/* 0x30 */
			u8 immediate_data[SWQE2_MAX_IMM];
			/* 0xd0 */
			struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES-1];
		} immdata_desc __attribute__ ((packed));

		/* Send WQE Format 3 */
		struct {
			u8 immediate_data[SWQE3_MAX_IMM];
		} immdata_nodesc;
	} u;
};

struct ehea_rwqe {
	u64 wr_id;		/* work request ID */
	u8 reserved1[5];
	u8 data_segments;
	u16 reserved2;
	u64 reserved3;
	u64 reserved4;
	struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
};

#define EHEA_CQE_VLAN_TAG_XTRACT    0x0400

#define EHEA_CQE_TYPE_RQ            0x60
#define EHEA_CQE_STAT_ERR_MASK      0x720F
#define EHEA_CQE_STAT_FAT_ERR_MASK  0x1F
#define EHEA_CQE_STAT_ERR_TCP       0x4000
#define EHEA_CQE_STAT_ERR_IP        0x2000
#define EHEA_CQE_STAT_ERR_CRC       0x1000

struct ehea_cqe {
	u64 wr_id;		/* work request ID from WQE */
	u8 type;
	u8 valid;
	u16 status;
	u16 reserved1;
	u16 num_bytes_transfered;
	u16 vlan_tag;
	u16 inet_checksum_value;
	u8 reserved2;
	u8 header_length;
	u16 reserved3;
	u16 page_offset;
	u16 wqe_count;
	u32 qp_token;
	u32 timestamp;
	u32 reserved4;
	u64 reserved5[3];
};
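
/*
 * Receive-path sketch: after polling a CQE, its status word can be
 * checked against the error masks above before the frame is passed up.
 * Hypothetical outline, not a function from this driver:
 *
 *	if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
 *		if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
 *			... // drop the frame, count a CRC error
 *	} else {
 *		... // hand up cqe->num_bytes_transfered bytes of data
 *	}
 */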

#define EHEA_EQE_VALID           EHEA_BMASK_IBM(0, 0)
#define EHEA_EQE_IS_CQE          EHEA_BMASK_IBM(1, 1)
#define EHEA_EQE_IDENTIFIER      EHEA_BMASK_IBM(2, 7)
#define EHEA_EQE_QP_CQ_NUMBER    EHEA_BMASK_IBM(8, 31)
#define EHEA_EQE_QP_TOKEN        EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_CQ_TOKEN        EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_KEY             EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_PORT_NUMBER     EHEA_BMASK_IBM(56, 63)
#define EHEA_EQE_EQ_NUMBER       EHEA_BMASK_IBM(48, 63)
#define EHEA_EQE_SM_ID           EHEA_BMASK_IBM(48, 63)
#define EHEA_EQE_SM_MECH_NUMBER  EHEA_BMASK_IBM(48, 55)
#define EHEA_EQE_SM_PORT_NUMBER  EHEA_BMASK_IBM(56, 63)

struct ehea_eqe {
	u64 entry;
};
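
/*
 * Event decode sketch: an EQE is a single u64, so its fields are
 * extracted with the EHEA_EQE_* masks above, assuming the
 * EHEA_BMASK_GET helper from ehea.h:
 *
 *	struct ehea_eqe *eqe = ehea_poll_eq(eq);
 *
 *	if (eqe && EHEA_BMASK_GET(EHEA_EQE_VALID, eqe->entry)) {
 *		u64 id    = EHEA_BMASK_GET(EHEA_EQE_IDENTIFIER, eqe->entry);
 *		u64 token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
 *		... // dispatch on id/token
 *	}
 */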

#define ERROR_DATA_LENGTH  EHEA_BMASK_IBM(52, 63)
#define ERROR_DATA_TYPE    EHEA_BMASK_IBM(0, 7)

static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset)
{
	struct ehea_page *current_page;

	if (q_offset >= queue->queue_length)
		q_offset -= queue->queue_length;
	current_page = (queue->queue_pages)[q_offset >> EHEA_PAGESHIFT];
	return &current_page->entries[q_offset & (EHEA_PAGESIZE - 1)];
}
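
/*
 * hw_qeit_calc() splits the (wrapped) byte offset into a page index and
 * an intra-page offset: with 4 KiB queue pages, q_offset 0x1040 selects
 * queue_pages[1] at byte 0x40 within that page.
 */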

static inline void *hw_qeit_get(struct hw_queue *queue)
{
	return hw_qeit_calc(queue, queue->current_q_offset);
}

static inline void hw_qeit_inc(struct hw_queue *queue)
{
	queue->current_q_offset += queue->qe_size;
	if (queue->current_q_offset >= queue->queue_length) {
		queue->current_q_offset = 0;
		/* toggle the valid flag */
		queue->toggle_state = (~queue->toggle_state) & 1;
	}
}

static inline void *hw_qeit_get_inc(struct hw_queue *queue)
{
	void *retvalue = hw_qeit_get(queue);
	hw_qeit_inc(queue);
	return retvalue;
}

static inline void *hw_qeit_get_inc_valid(struct hw_queue *queue)
{
	struct ehea_cqe *retvalue = hw_qeit_get(queue);
	u8 valid = retvalue->valid;
	void *pref;

	if ((valid >> 7) == (queue->toggle_state & 1)) {
		/* this is a good one */
		hw_qeit_inc(queue);
		pref = hw_qeit_calc(queue, queue->current_q_offset);
		prefetch(pref);
		prefetch(pref + 128);
	} else
		retvalue = NULL;
	return retvalue;
}
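
/*
 * The toggle_state/valid handshake: an entry is accepted only while the
 * top bit of its valid byte matches toggle_state, which hw_qeit_inc()
 * flips on every wrap of the ring. A consumer can therefore drain a
 * queue with a loop such as:
 *
 *	struct ehea_cqe *cqe;
 *
 *	while ((cqe = hw_qeit_get_inc_valid(queue)))
 *		... // process cqe
 */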

static inline void *hw_qeit_get_valid(struct hw_queue *queue)
{
	struct ehea_cqe *retvalue = hw_qeit_get(queue);
	void *pref;
	u8 valid;

	pref = hw_qeit_calc(queue, queue->current_q_offset);
	prefetch(pref);
	prefetch(pref + 128);
	prefetch(pref + 256);
	valid = retvalue->valid;

	if ((valid >> 7) != (queue->toggle_state & 1))
		retvalue = NULL;
	return retvalue;
}

static inline void *hw_qeit_reset(struct hw_queue *queue)
{
	queue->current_q_offset = 0;
	return hw_qeit_get(queue);
}

static inline void *hw_qeit_eq_get_inc(struct hw_queue *queue)
{
	u64 last_entry_in_q = queue->queue_length - queue->qe_size;
	void *retvalue;

	retvalue = hw_qeit_get(queue);
	queue->current_q_offset += queue->qe_size;
	if (queue->current_q_offset > last_entry_in_q) {
		queue->current_q_offset = 0;
		queue->toggle_state = (~queue->toggle_state) & 1;
	}
	return retvalue;
}

static inline void *hw_eqit_eq_get_inc_valid(struct hw_queue *queue)
{
	void *retvalue = hw_qeit_get(queue);
	u32 qe = *(u8 *)retvalue;

	if ((qe >> 7) == (queue->toggle_state & 1))
		hw_qeit_eq_get_inc(queue);
	else
		retvalue = NULL;
	return retvalue;
}

static inline struct ehea_rwqe *ehea_get_next_rwqe(struct ehea_qp *qp,
						   int rq_nr)
{
	struct hw_queue *queue;

	if (rq_nr == 1)
		queue = &qp->hw_rqueue1;
	else if (rq_nr == 2)
		queue = &qp->hw_rqueue2;
	else
		queue = &qp->hw_rqueue3;

	return hw_qeit_get_inc(queue);
}

static inline struct ehea_swqe *ehea_get_swqe(struct ehea_qp *my_qp,
					      int *wqe_index)
{
	struct hw_queue *queue = &my_qp->hw_squeue;
	struct ehea_swqe *wqe_p;

	*wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_SQ);
	wqe_p = hw_qeit_get_inc(&my_qp->hw_squeue);

	return wqe_p;
}

static inline void ehea_post_swqe(struct ehea_qp *my_qp, struct ehea_swqe *swqe)
{
	iosync();
	ehea_update_sqa(my_qp, 1);
}
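
/*
 * Send-path sketch: a SWQE is fetched, filled in, and only then made
 * visible to hardware; iosync() orders the descriptor writes before the
 * ehea_update_sqa() doorbell. Hypothetical outline:
 *
 *	int index;
 *	struct ehea_swqe *swqe = ehea_get_swqe(qp, &index);
 *
 *	swqe->wr_id = ...;
 *	swqe->tx_control = EHEA_SWQE_SIGNALLED_COMPLETION | ...;
 *	... // fill sg list or immediate data
 *	ehea_post_swqe(qp, swqe);
 */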

static inline struct ehea_cqe *ehea_poll_rq1(struct ehea_qp *qp, int *wqe_index)
{
	struct hw_queue *queue = &qp->hw_rqueue1;

	*wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_RQ1);
	return hw_qeit_get_valid(queue);
}

static inline void ehea_inc_cq(struct ehea_cq *cq)
{
	hw_qeit_inc(&cq->hw_queue);
}

static inline void ehea_inc_rq1(struct ehea_qp *qp)
{
	hw_qeit_inc(&qp->hw_rqueue1);
}

static inline struct ehea_cqe *ehea_poll_cq(struct ehea_cq *my_cq)
{
	return hw_qeit_get_valid(&my_cq->hw_queue);
}

#define EHEA_CQ_REGISTER_ORIG 0
#define EHEA_EQ_REGISTER_ORIG 0

enum ehea_eq_type {
	EHEA_EQ = 0,		/* event queue */
	EHEA_NEQ		/* notification event queue */
};

struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
			       enum ehea_eq_type type,
			       const u32 length, const u8 eqe_gen);

int ehea_destroy_eq(struct ehea_eq *eq);

struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq);

struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, int cqe,
			       u64 eq_handle, u32 cq_token);

int ehea_destroy_cq(struct ehea_cq *cq);

struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter, u32 pd,
			       struct ehea_qp_init_attr *init_attr);

int ehea_destroy_qp(struct ehea_qp *qp);
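
/*
 * Resource setup sketch: the prototypes above imply the creation order
 * EQ -> CQ -> QP, since ehea_create_cq() takes the EQ's handle and
 * ehea_create_qp() takes init attributes that reference the CQs.
 * Hypothetical outline; the eq->fw_handle field name is illustrative:
 *
 *	struct ehea_eq *eq = ehea_create_eq(adapter, EHEA_EQ, len, 1);
 *	struct ehea_cq *cq = ehea_create_cq(adapter, nr_cqe,
 *					    eq->fw_handle, cq_token);
 *	... // fill struct ehea_qp_init_attr with cq/eq numbers
 *	struct ehea_qp *qp = ehea_create_qp(adapter, pd, &init_attr);
 *
 * Teardown runs in reverse: ehea_destroy_qp(), ehea_destroy_cq(),
 * ehea_destroy_eq().
 */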

int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr);

int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
		 struct ehea_mr *shared_mr);

int ehea_rem_mr(struct ehea_mr *mr);

void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle);

int ehea_create_busmap(void);
void ehea_destroy_busmap(void);
u64 ehea_map_vaddr(void *caddr);

#endif	/* __EHEA_QMR_H__ */