ehca_reqs.c

/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  post_send/recv, poll_cq, req_notify
 *
 *  Authors: Waleri Fomin <fomin@de.ibm.com>
 *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *           Reinhard Ernst <rernst@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <asm-powerpc/system.h>
#include "ehca_classes.h"
#include "ehca_tools.h"
#include "ehca_qes.h"
#include "ehca_iverbs.h"
#include "hcp_if.h"
#include "hipz_fns.h"
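
/*
 * Fill one receive WQE in ipz_rqueue from the consumer's ib_recv_wr:
 * copies wr_id and the scatter/gather list after checking that num_sge
 * does not exceed what the queue was created with. Returns 0 or -EINVAL.
 */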
static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
				  struct ehca_wqe *wqe_p,
				  struct ib_recv_wr *recv_wr)
{
	u8 cnt_ds;
	if (unlikely((recv_wr->num_sge < 0) ||
		     (recv_wr->num_sge > ipz_rqueue->act_nr_of_sg))) {
		ehca_gen_err("Invalid number of WQE SGE. "
			     "num_sge=%x max_nr_of_sg=%x",
			     recv_wr->num_sge, ipz_rqueue->act_nr_of_sg);
		return -EINVAL; /* invalid SG list length */
	}

	/* clear wqe header until sglist */
	memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));

	wqe_p->work_request_id = recv_wr->wr_id;
	wqe_p->nr_of_data_seg = recv_wr->num_sge;

	for (cnt_ds = 0; cnt_ds < recv_wr->num_sge; cnt_ds++) {
		wqe_p->u.all_rcv.sg_list[cnt_ds].vaddr =
			recv_wr->sg_list[cnt_ds].addr;
		wqe_p->u.all_rcv.sg_list[cnt_ds].lkey =
			recv_wr->sg_list[cnt_ds].lkey;
		wqe_p->u.all_rcv.sg_list[cnt_ds].length =
			recv_wr->sg_list[cnt_ds].length;
	}

	if (ehca_debug_level) {
		ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p",
			     ipz_rqueue);
		ehca_dmp(wqe_p, 16 * (6 + wqe_p->nr_of_data_seg), "recv wqe");
	}

	return 0;
}

#if defined(DEBUG_GSI_SEND_WR)

/* need ib_mad struct */
#include <rdma/ib_mad.h>
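
/*
 * Debug helper: walks the chained ib_send_wr list of a GSI post and dumps
 * each work request, its MAD header (if present) and every SGE buffer.
 */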
static void trace_send_wr_ud(const struct ib_send_wr *send_wr)
{
	int idx = 0;
	int j;

	while (send_wr) {
		struct ib_mad_hdr *mad_hdr = send_wr->wr.ud.mad_hdr;
		struct ib_sge *sge = send_wr->sg_list;

		ehca_gen_dbg("send_wr#%x wr_id=%lx num_sge=%x "
			     "send_flags=%x opcode=%x", idx, send_wr->wr_id,
			     send_wr->num_sge, send_wr->send_flags,
			     send_wr->opcode);
		if (mad_hdr) {
			ehca_gen_dbg("send_wr#%x mad_hdr base_version=%x "
				     "mgmt_class=%x class_version=%x method=%x "
				     "status=%x class_specific=%x tid=%lx "
				     "attr_id=%x resv=%x attr_mod=%x",
				     idx, mad_hdr->base_version,
				     mad_hdr->mgmt_class,
				     mad_hdr->class_version, mad_hdr->method,
				     mad_hdr->status, mad_hdr->class_specific,
				     mad_hdr->tid, mad_hdr->attr_id,
				     mad_hdr->resv,
				     mad_hdr->attr_mod);
		}
		for (j = 0; j < send_wr->num_sge; j++) {
			u8 *data = (u8 *)abs_to_virt(sge->addr);
			ehca_gen_dbg("send_wr#%x sge#%x addr=%p length=%x "
				     "lkey=%x",
				     idx, j, data, sge->length, sge->lkey);
			/* assume length is n*16 */
			ehca_dmp(data, sge->length, "send_wr#%x sge#%x",
				 idx, j);
			sge++;
		} /* eof for j */
		idx++;
		send_wr = send_wr->next;
	} /* eof while send_wr */
}

#endif /* DEBUG_GSI_SEND_WR */
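
/*
 * Fill one send WQE from an ib_send_wr: maps the IB opcode to the eHCA
 * optype, copies flags, immediate data and the scatter/gather list, and
 * handles the UD/SMI/GSI address handle and qkey as well as the RC/UC
 * RDMA address and rkey. Returns 0 or -EINVAL on bad parameters.
 */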
static inline int ehca_write_swqe(struct ehca_qp *qp,
				  struct ehca_wqe *wqe_p,
				  const struct ib_send_wr *send_wr)
{
	u32 idx;
	u64 dma_length;
	struct ehca_av *my_av;
	u32 remote_qkey = send_wr->wr.ud.remote_qkey;

	if (unlikely((send_wr->num_sge < 0) ||
		     (send_wr->num_sge > qp->ipz_squeue.act_nr_of_sg))) {
		ehca_gen_err("Invalid number of WQE SGE. "
			     "num_sge=%x max_nr_of_sg=%x",
			     send_wr->num_sge, qp->ipz_squeue.act_nr_of_sg);
		return -EINVAL; /* invalid SG list length */
	}

	/* clear wqe header until sglist */
	memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));

	wqe_p->work_request_id = send_wr->wr_id;

	switch (send_wr->opcode) {
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
		wqe_p->optype = WQE_OPTYPE_SEND;
		break;
	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		wqe_p->optype = WQE_OPTYPE_RDMAWRITE;
		break;
	case IB_WR_RDMA_READ:
		wqe_p->optype = WQE_OPTYPE_RDMAREAD;
		break;
	default:
		ehca_gen_err("Invalid opcode=%x", send_wr->opcode);
		return -EINVAL; /* invalid opcode */
	}

	wqe_p->wqef = (send_wr->opcode) & WQEF_HIGH_NIBBLE;

	wqe_p->wr_flag = 0;

	if (send_wr->send_flags & IB_SEND_SIGNALED)
		wqe_p->wr_flag |= WQE_WRFLAG_REQ_SIGNAL_COM;

	if (send_wr->opcode == IB_WR_SEND_WITH_IMM ||
	    send_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
		/* this might not work as long as HW does not support it */
		wqe_p->immediate_data = be32_to_cpu(send_wr->imm_data);
		wqe_p->wr_flag |= WQE_WRFLAG_IMM_DATA_PRESENT;
	}

	wqe_p->nr_of_data_seg = send_wr->num_sge;

	switch (qp->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		/* no break is intentional here */
	case IB_QPT_UD:
		/* IB 1.2 spec C10-15 compliance */
		if (send_wr->wr.ud.remote_qkey & 0x80000000)
			remote_qkey = qp->qkey;

		wqe_p->destination_qp_number = send_wr->wr.ud.remote_qpn << 8;
		wqe_p->local_ee_context_qkey = remote_qkey;
		if (!send_wr->wr.ud.ah) {
			ehca_gen_err("wr.ud.ah is NULL. qp=%p", qp);
			return -EINVAL;
		}
		my_av = container_of(send_wr->wr.ud.ah, struct ehca_av, ib_ah);
		wqe_p->u.ud_av.ud_av = my_av->av;

		/*
		 * omitted check of IB_SEND_INLINE
		 * since HW does not support it
		 */
		for (idx = 0; idx < send_wr->num_sge; idx++) {
			wqe_p->u.ud_av.sg_list[idx].vaddr =
				send_wr->sg_list[idx].addr;
			wqe_p->u.ud_av.sg_list[idx].lkey =
				send_wr->sg_list[idx].lkey;
			wqe_p->u.ud_av.sg_list[idx].length =
				send_wr->sg_list[idx].length;
		} /* eof for idx */
		if (qp->qp_type == IB_QPT_SMI ||
		    qp->qp_type == IB_QPT_GSI)
			wqe_p->u.ud_av.ud_av.pmtu = 1;
		if (qp->qp_type == IB_QPT_GSI) {
			wqe_p->pkeyi = send_wr->wr.ud.pkey_index;
#ifdef DEBUG_GSI_SEND_WR
			trace_send_wr_ud(send_wr);
#endif /* DEBUG_GSI_SEND_WR */
		}
		break;

	case IB_QPT_UC:
		if (send_wr->send_flags & IB_SEND_FENCE)
			wqe_p->wr_flag |= WQE_WRFLAG_FENCE;
		/* no break is intentional here */
	case IB_QPT_RC:
		/* TODO: atomic not implemented */
		wqe_p->u.nud.remote_virtual_adress =
			send_wr->wr.rdma.remote_addr;
		wqe_p->u.nud.rkey = send_wr->wr.rdma.rkey;

		/*
		 * omitted checking of IB_SEND_INLINE
		 * since HW does not support it
		 */
		dma_length = 0;
		for (idx = 0; idx < send_wr->num_sge; idx++) {
			wqe_p->u.nud.sg_list[idx].vaddr =
				send_wr->sg_list[idx].addr;
			wqe_p->u.nud.sg_list[idx].lkey =
				send_wr->sg_list[idx].lkey;
			wqe_p->u.nud.sg_list[idx].length =
				send_wr->sg_list[idx].length;
			dma_length += send_wr->sg_list[idx].length;
		} /* eof idx */
		wqe_p->u.nud.atomic_1st_op_dma_len = dma_length;
		break;

	default:
		ehca_gen_err("Invalid qptype=%x", qp->qp_type);
		return -EINVAL;
	}

	if (ehca_debug_level) {
		ehca_gen_dbg("SEND WQE written into queue qp=%p ", qp);
		ehca_dmp(wqe_p, 16 * (6 + wqe_p->nr_of_data_seg), "send wqe");
	}

	return 0;
}

/* map_ib_wc_status converts raw cqe_status to ib_wc_status */
static inline void map_ib_wc_status(u32 cqe_status,
				    enum ib_wc_status *wc_status)
{
	if (unlikely(cqe_status & WC_STATUS_ERROR_BIT)) {
		switch (cqe_status & 0x3F) {
		case 0x01:
		case 0x21:
			*wc_status = IB_WC_LOC_LEN_ERR;
			break;
		case 0x02:
		case 0x22:
			*wc_status = IB_WC_LOC_QP_OP_ERR;
			break;
		case 0x03:
		case 0x23:
			*wc_status = IB_WC_LOC_EEC_OP_ERR;
			break;
		case 0x04:
		case 0x24:
			*wc_status = IB_WC_LOC_PROT_ERR;
			break;
		case 0x05:
		case 0x25:
			*wc_status = IB_WC_WR_FLUSH_ERR;
			break;
		case 0x06:
			*wc_status = IB_WC_MW_BIND_ERR;
			break;
		case 0x07: /* remote error - look into bits 20:24 */
			switch ((cqe_status
				 & WC_STATUS_REMOTE_ERROR_FLAGS) >> 11) {
			case 0x0:
				/*
				 * PSN Sequence Error!
				 * couldn't find a matching status!
				 */
				*wc_status = IB_WC_GENERAL_ERR;
				break;
			case 0x1:
				*wc_status = IB_WC_REM_INV_REQ_ERR;
				break;
			case 0x2:
				*wc_status = IB_WC_REM_ACCESS_ERR;
				break;
			case 0x3:
				*wc_status = IB_WC_REM_OP_ERR;
				break;
			case 0x4:
				*wc_status = IB_WC_REM_INV_RD_REQ_ERR;
				break;
			}
			break;
		case 0x08:
			*wc_status = IB_WC_RETRY_EXC_ERR;
			break;
		case 0x09:
			*wc_status = IB_WC_RNR_RETRY_EXC_ERR;
			break;
		case 0x0A:
		case 0x2D:
			*wc_status = IB_WC_REM_ABORT_ERR;
			break;
		case 0x0B:
		case 0x2E:
			*wc_status = IB_WC_INV_EECN_ERR;
			break;
		case 0x0C:
		case 0x2F:
			*wc_status = IB_WC_INV_EEC_STATE_ERR;
			break;
		case 0x0D:
			*wc_status = IB_WC_BAD_RESP_ERR;
			break;
		case 0x10:
			/* WQE purged */
			*wc_status = IB_WC_WR_FLUSH_ERR;
			break;
		default:
			*wc_status = IB_WC_FATAL_ERR;
		}
	} else
		*wc_status = IB_WC_SUCCESS;
}
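
/*
 * ib_post_send verb: posts a chain of send work requests to the QP's send
 * queue under the send spinlock, writes one WQE per request via
 * ehca_write_swqe() and finally updates the hardware send queue adder
 * (hipz_update_sqa) with the number of WQEs actually written.
 */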
int ehca_post_send(struct ib_qp *qp,
		   struct ib_send_wr *send_wr,
		   struct ib_send_wr **bad_send_wr)
{
	struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
	struct ib_send_wr *cur_send_wr;
	struct ehca_wqe *wqe_p;
	int wqe_cnt = 0;
	int ret = 0;
	unsigned long spl_flags;

	/* LOCK the QUEUE */
	spin_lock_irqsave(&my_qp->spinlock_s, spl_flags);

	/* loop processes list of send reqs */
	for (cur_send_wr = send_wr; cur_send_wr != NULL;
	     cur_send_wr = cur_send_wr->next) {
		u64 start_offset = my_qp->ipz_squeue.current_q_offset;
		/* get pointer next to free WQE */
		wqe_p = ipz_qeit_get_inc(&my_qp->ipz_squeue);
		if (unlikely(!wqe_p)) {
			/* too many posted work requests: queue overflow */
			if (bad_send_wr)
				*bad_send_wr = cur_send_wr;
			if (wqe_cnt == 0) {
				ret = -ENOMEM;
				ehca_err(qp->device, "Too many posted WQEs "
					 "qp_num=%x", qp->qp_num);
			}
			goto post_send_exit0;
		}
		/* write a SEND WQE into the QUEUE */
		ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr);
		/*
		 * if something failed,
		 * reset the free entry pointer to the start value
		 */
		if (unlikely(ret)) {
			my_qp->ipz_squeue.current_q_offset = start_offset;
			if (bad_send_wr)
				*bad_send_wr = cur_send_wr;
			if (wqe_cnt == 0) {
				ret = -EINVAL;
				ehca_err(qp->device, "Could not write WQE "
					 "qp_num=%x", qp->qp_num);
			}
			goto post_send_exit0;
		}
		wqe_cnt++;
		ehca_dbg(qp->device, "ehca_qp=%p qp_num=%x wqe_cnt=%d",
			 my_qp, qp->qp_num, wqe_cnt);
	} /* eof for cur_send_wr */

post_send_exit0:
	/* UNLOCK the QUEUE */
	spin_unlock_irqrestore(&my_qp->spinlock_s, spl_flags);
	iosync(); /* serialize GAL register access */
	hipz_update_sqa(my_qp, wqe_cnt);
	return ret;
}
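
/*
 * ib_post_recv verb: mirror image of ehca_post_send() for the receive
 * queue, using the receive spinlock, ehca_write_rwqe() and the
 * hipz_update_rqa receive queue adder.
 */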
int ehca_post_recv(struct ib_qp *qp,
		   struct ib_recv_wr *recv_wr,
		   struct ib_recv_wr **bad_recv_wr)
{
	struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
	struct ib_recv_wr *cur_recv_wr;
	struct ehca_wqe *wqe_p;
	int wqe_cnt = 0;
	int ret = 0;
	unsigned long spl_flags;

	/* LOCK the QUEUE */
	spin_lock_irqsave(&my_qp->spinlock_r, spl_flags);

	/* loop processes list of recv reqs */
	for (cur_recv_wr = recv_wr; cur_recv_wr != NULL;
	     cur_recv_wr = cur_recv_wr->next) {
		u64 start_offset = my_qp->ipz_rqueue.current_q_offset;
		/* get pointer next to free WQE */
		wqe_p = ipz_qeit_get_inc(&my_qp->ipz_rqueue);
		if (unlikely(!wqe_p)) {
			/* too many posted work requests: queue overflow */
			if (bad_recv_wr)
				*bad_recv_wr = cur_recv_wr;
			if (wqe_cnt == 0) {
				ret = -ENOMEM;
				ehca_err(qp->device, "Too many posted WQEs "
					 "qp_num=%x", qp->qp_num);
			}
			goto post_recv_exit0;
		}
		/* write a RECV WQE into the QUEUE */
		ret = ehca_write_rwqe(&my_qp->ipz_rqueue, wqe_p, cur_recv_wr);
		/*
		 * if something failed,
		 * reset the free entry pointer to the start value
		 */
		if (unlikely(ret)) {
			my_qp->ipz_rqueue.current_q_offset = start_offset;
			if (bad_recv_wr)
				*bad_recv_wr = cur_recv_wr;
			if (wqe_cnt == 0) {
				ret = -EINVAL;
				ehca_err(qp->device, "Could not write WQE "
					 "qp_num=%x", qp->qp_num);
			}
			goto post_recv_exit0;
		}
		wqe_cnt++;
		ehca_gen_dbg("ehca_qp=%p qp_num=%x wqe_cnt=%d",
			     my_qp, qp->qp_num, wqe_cnt);
	} /* eof for cur_recv_wr */

post_recv_exit0:
	spin_unlock_irqrestore(&my_qp->spinlock_r, spl_flags);
	iosync(); /* serialize GAL register access */
	hipz_update_rqa(my_qp, wqe_cnt);
	return ret;
}

/*
 * ib_wc_opcode table converts ehca wc opcode to ib
 * Since we use zero to indicate invalid opcode, the actual ib opcode must
 * be decremented!!!
 */
static const u8 ib_wc_opcode[255] = {
	[0x01] = IB_WC_RECV+1,
	[0x02] = IB_WC_RECV_RDMA_WITH_IMM+1,
	[0x04] = IB_WC_BIND_MW+1,
	[0x08] = IB_WC_FETCH_ADD+1,
	[0x10] = IB_WC_COMP_SWAP+1,
	[0x20] = IB_WC_RDMA_WRITE+1,
	[0x40] = IB_WC_RDMA_READ+1,
	[0x80] = IB_WC_SEND+1
};

/* internal function to poll one entry of cq */
static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
{
	int ret = 0;
	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
	struct ehca_cqe *cqe;
	int cqe_count = 0;

poll_cq_one_read_cqe:
	cqe = (struct ehca_cqe *)
		ipz_qeit_get_inc_valid(&my_cq->ipz_queue);
	if (!cqe) {
		ret = -EAGAIN;
		ehca_dbg(cq->device, "Completion queue is empty ehca_cq=%p "
			 "cq_num=%x ret=%x", my_cq, my_cq->cq_number, ret);
		goto poll_cq_one_exit0;
	}

	/* prevents loads being reordered across this point */
	rmb();

	cqe_count++;
	if (unlikely(cqe->status & WC_STATUS_PURGE_BIT)) {
		struct ehca_qp *qp = ehca_cq_get_qp(my_cq, cqe->local_qp_number);
		int purgeflag;
		unsigned long spl_flags;
		if (!qp) {
			ehca_err(cq->device, "cq_num=%x qp_num=%x "
				 "could not find qp -> ignore cqe",
				 my_cq->cq_number, cqe->local_qp_number);
			ehca_dmp(cqe, 64, "cq_num=%x qp_num=%x",
				 my_cq->cq_number, cqe->local_qp_number);
			/* ignore this purged cqe */
			goto poll_cq_one_read_cqe;
		}
		spin_lock_irqsave(&qp->spinlock_s, spl_flags);
		purgeflag = qp->sqerr_purgeflag;
		spin_unlock_irqrestore(&qp->spinlock_s, spl_flags);

		if (purgeflag) {
			ehca_dbg(cq->device, "Got CQE with purged bit qp_num=%x "
				 "src_qp=%x",
				 cqe->local_qp_number, cqe->remote_qp_number);
			if (ehca_debug_level)
				ehca_dmp(cqe, 64, "qp_num=%x src_qp=%x",
					 cqe->local_qp_number,
					 cqe->remote_qp_number);
			/*
			 * ignore this cqe to avoid a duplicate completion
			 * for the bad wqe that caused the sq error,
			 * and turn off the purge flag
			 */
			qp->sqerr_purgeflag = 0;
			goto poll_cq_one_read_cqe;
		}
	}

	/* tracing cqe */
	if (ehca_debug_level) {
		ehca_dbg(cq->device,
			 "Received COMPLETION ehca_cq=%p cq_num=%x -----",
			 my_cq, my_cq->cq_number);
		ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
			 my_cq, my_cq->cq_number);
		ehca_dbg(cq->device,
			 "ehca_cq=%p cq_num=%x -------------------------",
			 my_cq, my_cq->cq_number);
	}

	/* we got a completion! */
	wc->wr_id = cqe->work_request_id;

	/* eval ib_wc_opcode */
	wc->opcode = ib_wc_opcode[cqe->optype] - 1;
	if (unlikely(wc->opcode == -1)) {
		ehca_err(cq->device, "Invalid cqe->OPType=%x cqe->status=%x "
			 "ehca_cq=%p cq_num=%x",
			 cqe->optype, cqe->status, my_cq, my_cq->cq_number);
		/* dump cqe for other infos */
		ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
			 my_cq, my_cq->cq_number);
		/* update also queue adder to throw away this entry!!! */
		goto poll_cq_one_exit0;
	}

	/* eval ib_wc_status */
	if (unlikely(cqe->status & WC_STATUS_ERROR_BIT)) {
		/* complete with errors */
		map_ib_wc_status(cqe->status, &wc->status);
		wc->vendor_err = wc->status;
	} else
		wc->status = IB_WC_SUCCESS;

	wc->qp = NULL;
	wc->byte_len = cqe->nr_bytes_transferred;
	wc->pkey_index = cqe->pkey_index;
	wc->slid = cqe->rlid;
	wc->dlid_path_bits = cqe->dlid;
	wc->src_qp = cqe->remote_qp_number;
	wc->wc_flags = cqe->w_completion_flags;
	wc->imm_data = cpu_to_be32(cqe->immediate_data);
	wc->sl = cqe->service_level;

	if (wc->status != IB_WC_SUCCESS)
		ehca_dbg(cq->device,
			 "ehca_cq=%p cq_num=%x WARNING unsuccessful cqe "
			 "OPType=%x status=%x qp_num=%x src_qp=%x wr_id=%lx "
			 "cqe=%p", my_cq, my_cq->cq_number, cqe->optype,
			 cqe->status, cqe->local_qp_number,
			 cqe->remote_qp_number, cqe->work_request_id, cqe);

poll_cq_one_exit0:
	if (cqe_count > 0)
		hipz_update_feca(my_cq, cqe_count);

	return ret;
}
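
/*
 * ib_poll_cq verb: drains up to num_entries completions from the CQ under
 * the CQ spinlock by calling ehca_poll_cq_one() repeatedly; returns the
 * number of entries written to the wc array, or a negative errno.
 */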
int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
{
	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
	int nr;
	struct ib_wc *current_wc = wc;
	int ret = 0;
	unsigned long spl_flags;

	if (num_entries < 1) {
		ehca_err(cq->device, "Invalid num_entries=%d ehca_cq=%p "
			 "cq_num=%x", num_entries, my_cq, my_cq->cq_number);
		ret = -EINVAL;
		goto poll_cq_exit0;
	}

	spin_lock_irqsave(&my_cq->spinlock, spl_flags);
	for (nr = 0; nr < num_entries; nr++) {
		ret = ehca_poll_cq_one(cq, current_wc);
		if (ret)
			break;
		current_wc++;
	} /* eof for nr */
	spin_unlock_irqrestore(&my_cq->spinlock, spl_flags);
	if (ret == -EAGAIN || !ret)
		ret = nr;

poll_cq_exit0:
	return ret;
}
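
/*
 * ib_req_notify_cq verb: arms the CQ for solicited or next-completion
 * events via hipz_set_cqx_n0()/hipz_set_cqx_n1(); with
 * IB_CQ_REPORT_MISSED_EVENTS it also reports whether a valid CQE is
 * already waiting in the queue.
 */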
int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags notify_flags)
{
	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
	unsigned long spl_flags;
	int ret = 0;

	switch (notify_flags & IB_CQ_SOLICITED_MASK) {
	case IB_CQ_SOLICITED:
		hipz_set_cqx_n0(my_cq, 1);
		break;
	case IB_CQ_NEXT_COMP:
		hipz_set_cqx_n1(my_cq, 1);
		break;
	default:
		return -EINVAL;
	}

	if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
		spin_lock_irqsave(&my_cq->spinlock, spl_flags);
		ret = ipz_qeit_is_valid(&my_cq->ipz_queue);
		spin_unlock_irqrestore(&my_cq->spinlock, spl_flags);
	}

	return ret;
}