ipath_ruc.c

/*
 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/spinlock.h>

#include "ipath_verbs.h"
#include "ipath_kernel.h"

/*
 * Convert the AETH RNR timeout code into the number of milliseconds.
 */
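/*
 * The index is the 5-bit RNR NAK timer code carried in the AETH; the
 * values are the corresponding IBTA timeout periods rounded up to whole
 * milliseconds (e.g. code 0x1F is 491.52 ms, stored here as 492).
 */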
const u32 ib_ipath_rnr_table[32] = {
        656,                    /* 0 */
        1,                      /* 1 */
        1,                      /* 2 */
        1,                      /* 3 */
        1,                      /* 4 */
        1,                      /* 5 */
        1,                      /* 6 */
        1,                      /* 7 */
        1,                      /* 8 */
        1,                      /* 9 */
        1,                      /* A */
        1,                      /* B */
        1,                      /* C */
        1,                      /* D */
        2,                      /* E */
        2,                      /* F */
        3,                      /* 10 */
        4,                      /* 11 */
        6,                      /* 12 */
        8,                      /* 13 */
        11,                     /* 14 */
        16,                     /* 15 */
        21,                     /* 16 */
        31,                     /* 17 */
        41,                     /* 18 */
        62,                     /* 19 */
        82,                     /* 1A */
        123,                    /* 1B */
        164,                    /* 1C */
        246,                    /* 1D */
        328,                    /* 1E */
        492                     /* 1F */
};

/**
 * ipath_insert_rnr_queue - put QP on the RNR timeout list for the device
 * @qp: the QP
 *
 * XXX Use a simple list for now.  We might need a priority
 * queue if we have lots of QPs waiting for RNR timeouts
 * but that should be rare.
 */
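/*
 * The rnrwait list is kept sorted by expiration time, with each entry's
 * s_rnr_timeout stored relative to the entries ahead of it, so the
 * periodic verbs timer only needs to work on the head of the list.
 */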
void ipath_insert_rnr_queue(struct ipath_qp *qp)
{
        struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
        unsigned long flags;

        spin_lock_irqsave(&dev->pending_lock, flags);
        if (list_empty(&dev->rnrwait))
                list_add(&qp->timerwait, &dev->rnrwait);
        else {
                struct list_head *l = &dev->rnrwait;
                struct ipath_qp *nqp = list_entry(l->next, struct ipath_qp,
                                                  timerwait);

                while (qp->s_rnr_timeout >= nqp->s_rnr_timeout) {
                        qp->s_rnr_timeout -= nqp->s_rnr_timeout;
                        l = l->next;
                        if (l->next == &dev->rnrwait)
                                break;
                        nqp = list_entry(l->next, struct ipath_qp,
                                         timerwait);
                }
                list_add(&qp->timerwait, l);
        }
        spin_unlock_irqrestore(&dev->pending_lock, flags);
}

/**
 * ipath_init_sge - validate an RWQE and fill in the SGE state
 * @qp: the QP
 * @wqe: the receive work request to check
 * @lengthp: set to the total length of the SGEs
 * @ss: the SGE state to fill in
 *
 * Return 1 if OK.
 */
int ipath_init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe,
                   u32 *lengthp, struct ipath_sge_state *ss)
{
        int i, j, ret;
        struct ib_wc wc;

        *lengthp = 0;
        for (i = j = 0; i < wqe->num_sge; i++) {
                if (wqe->sg_list[i].length == 0)
                        continue;
                /* Check LKEY */
                if (!ipath_lkey_ok(qp, j ? &ss->sg_list[j - 1] : &ss->sge,
                                   &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
                        goto bad_lkey;
                *lengthp += wqe->sg_list[i].length;
                j++;
        }
        ss->num_sge = j;
        ret = 1;
        goto bail;

bad_lkey:
        wc.wr_id = wqe->wr_id;
        wc.status = IB_WC_LOC_PROT_ERR;
        wc.opcode = IB_WC_RECV;
        wc.vendor_err = 0;
        wc.byte_len = 0;
        wc.imm_data = 0;
        wc.qp = &qp->ibqp;
        wc.src_qp = 0;
        wc.wc_flags = 0;
        wc.pkey_index = 0;
        wc.slid = 0;
        wc.sl = 0;
        wc.dlid_path_bits = 0;
        wc.port_num = 0;
        /* Signal solicited completion event. */
        ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
        ret = 0;
bail:
        return ret;
}

/**
 * ipath_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update wr_id only, not SGEs
 *
 * Return 0 if no RWQE is available, otherwise return 1.
 *
 * Can be called from interrupt level.
 */
int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
{
        unsigned long flags;
        struct ipath_rq *rq;
        struct ipath_rwq *wq;
        struct ipath_srq *srq;
        struct ipath_rwqe *wqe;
        void (*handler)(struct ib_event *, void *);
        u32 tail;
        int ret;

        qp->r_sge.sg_list = qp->r_sg_list;

        if (qp->ibqp.srq) {
                srq = to_isrq(qp->ibqp.srq);
                handler = srq->ibsrq.event_handler;
                rq = &srq->rq;
        } else {
                srq = NULL;
                handler = NULL;
                rq = &qp->r_rq;
        }

        spin_lock_irqsave(&rq->lock, flags);
        wq = rq->wq;
        tail = wq->tail;
        /* Validate tail before using it since it is user writable. */
        if (tail >= rq->size)
                tail = 0;
        do {
                if (unlikely(tail == wq->head)) {
                        spin_unlock_irqrestore(&rq->lock, flags);
                        ret = 0;
                        goto bail;
                }
                /* Make sure entry is read after head index is read. */
                smp_rmb();
                wqe = get_rwqe_ptr(rq, tail);
                if (++tail >= rq->size)
                        tail = 0;
        } while (!wr_id_only && !ipath_init_sge(qp, wqe, &qp->r_len,
                                                &qp->r_sge));
        qp->r_wr_id = wqe->wr_id;
        wq->tail = tail;
        ret = 1;
        qp->r_wrid_valid = 1;
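
        /*
         * If this is an SRQ with a limit armed, check whether consuming
         * this WQE dropped the number of available entries below the
         * limit; if so, disarm the limit and deliver the asynchronous
         * IB_EVENT_SRQ_LIMIT_REACHED event with rq->lock released.
         */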
        if (handler) {
                u32 n;

                /*
                 * validate head pointer value and compute
                 * the number of remaining WQEs.
                 */
                n = wq->head;
                if (n >= rq->size)
                        n = 0;
                if (n < tail)
                        n += rq->size - tail;
                else
                        n -= tail;
                if (n < srq->limit) {
                        struct ib_event ev;

                        srq->limit = 0;
                        spin_unlock_irqrestore(&rq->lock, flags);
                        ev.device = qp->ibqp.device;
                        ev.element.srq = qp->ibqp.srq;
                        ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
                        handler(&ev, srq->ibsrq.srq_context);
                        goto bail;
                }
        }
        spin_unlock_irqrestore(&rq->lock, flags);

bail:
        return ret;
}

/**
 * ipath_ruc_loopback - handle UC and RC loopback requests
 * @sqp: the sending QP
 *
 * This is called from ipath_do_send() to
 * forward a WQE addressed to the same HCA.
 * Note that although we are single threaded due to the tasklet, we still
 * have to protect against post_send().  We don't have to worry about
 * receive interrupts since this is a connected protocol and all packets
 * will pass through here.
 */
static void ipath_ruc_loopback(struct ipath_qp *sqp)
{
        struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
        struct ipath_qp *qp;
        struct ipath_swqe *wqe;
        struct ipath_sge *sge;
        unsigned long flags;
        struct ib_wc wc;
        u64 sdata;
        atomic64_t *maddr;

        qp = ipath_lookup_qpn(&dev->qp_table, sqp->remote_qpn);
        if (!qp) {
                dev->n_pkt_drops++;
                return;
        }

again:
        spin_lock_irqsave(&sqp->s_lock, flags);

        if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_SEND_OK) ||
            sqp->s_rnr_timeout) {
                spin_unlock_irqrestore(&sqp->s_lock, flags);
                goto done;
        }

        /* Get the next send request. */
        if (sqp->s_last == sqp->s_head) {
                /* Send work queue is empty. */
                spin_unlock_irqrestore(&sqp->s_lock, flags);
                goto done;
        }

        /*
         * We can rely on the entry not changing without the s_lock
         * being held until we update s_last.
         */
        wqe = get_swqe_ptr(sqp, sqp->s_last);
        spin_unlock_irqrestore(&sqp->s_lock, flags);

        wc.wc_flags = 0;
        wc.imm_data = 0;

        sqp->s_sge.sge = wqe->sg_list[0];
        sqp->s_sge.sg_list = wqe->sg_list + 1;
        sqp->s_sge.num_sge = wqe->wr.num_sge;
        sqp->s_len = wqe->length;
        switch (wqe->wr.opcode) {
        case IB_WR_SEND_WITH_IMM:
                wc.wc_flags = IB_WC_WITH_IMM;
                wc.imm_data = wqe->wr.imm_data;
                /* FALLTHROUGH */
        case IB_WR_SEND:
                if (!ipath_get_rwqe(qp, 0)) {
                rnr_nak:
                        /* Handle RNR NAK */
                        if (qp->ibqp.qp_type == IB_QPT_UC)
                                goto send_comp;
                        if (sqp->s_rnr_retry == 0) {
                                wc.status = IB_WC_RNR_RETRY_EXC_ERR;
                                goto err;
                        }
                        if (sqp->s_rnr_retry_cnt < 7)
                                sqp->s_rnr_retry--;
                        dev->n_rnr_naks++;
                        sqp->s_rnr_timeout =
                                ib_ipath_rnr_table[qp->r_min_rnr_timer];
                        ipath_insert_rnr_queue(sqp);
                        goto done;
                }
                break;

        case IB_WR_RDMA_WRITE_WITH_IMM:
                if (unlikely(!(qp->qp_access_flags &
                               IB_ACCESS_REMOTE_WRITE))) {
                        wc.status = IB_WC_REM_INV_REQ_ERR;
                        goto err;
                }
                wc.wc_flags = IB_WC_WITH_IMM;
                wc.imm_data = wqe->wr.imm_data;
                if (!ipath_get_rwqe(qp, 1))
                        goto rnr_nak;
                /* FALLTHROUGH */
        case IB_WR_RDMA_WRITE:
                if (unlikely(!(qp->qp_access_flags &
                               IB_ACCESS_REMOTE_WRITE))) {
                        wc.status = IB_WC_REM_INV_REQ_ERR;
                        goto err;
                }
                if (wqe->length == 0)
                        break;
                if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, wqe->length,
                                            wqe->wr.wr.rdma.remote_addr,
                                            wqe->wr.wr.rdma.rkey,
                                            IB_ACCESS_REMOTE_WRITE))) {
                acc_err:
                        wc.status = IB_WC_REM_ACCESS_ERR;
                err:
                        wc.wr_id = wqe->wr.wr_id;
                        wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
                        wc.vendor_err = 0;
                        wc.byte_len = 0;
                        wc.qp = &sqp->ibqp;
                        wc.src_qp = sqp->remote_qpn;
                        wc.pkey_index = 0;
                        wc.slid = sqp->remote_ah_attr.dlid;
                        wc.sl = sqp->remote_ah_attr.sl;
                        wc.dlid_path_bits = 0;
                        wc.port_num = 0;
                        spin_lock_irqsave(&sqp->s_lock, flags);
                        ipath_sqerror_qp(sqp, &wc);
                        spin_unlock_irqrestore(&sqp->s_lock, flags);
                        goto done;
                }
                break;

        case IB_WR_RDMA_READ:
                if (unlikely(!(qp->qp_access_flags &
                               IB_ACCESS_REMOTE_READ))) {
                        wc.status = IB_WC_REM_INV_REQ_ERR;
                        goto err;
                }
                if (unlikely(!ipath_rkey_ok(qp, &sqp->s_sge, wqe->length,
                                            wqe->wr.wr.rdma.remote_addr,
                                            wqe->wr.wr.rdma.rkey,
                                            IB_ACCESS_REMOTE_READ)))
                        goto acc_err;
                qp->r_sge.sge = wqe->sg_list[0];
                qp->r_sge.sg_list = wqe->sg_list + 1;
                qp->r_sge.num_sge = wqe->wr.num_sge;
                break;

        case IB_WR_ATOMIC_CMP_AND_SWP:
        case IB_WR_ATOMIC_FETCH_AND_ADD:
                if (unlikely(!(qp->qp_access_flags &
                               IB_ACCESS_REMOTE_ATOMIC))) {
                        wc.status = IB_WC_REM_INV_REQ_ERR;
                        goto err;
                }
                if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, sizeof(u64),
                                            wqe->wr.wr.atomic.remote_addr,
                                            wqe->wr.wr.atomic.rkey,
                                            IB_ACCESS_REMOTE_ATOMIC)))
                        goto acc_err;
                /* Perform atomic OP and save result. */
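                /*
                 * atomic64_add_return() yields the updated value, so the
                 * added amount is subtracted back out to recover the prior
                 * contents; cmpxchg() already returns the prior contents.
                 * The prior value is written into the requester's SGE.
                 */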
                maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
                sdata = wqe->wr.wr.atomic.compare_add;
                *(u64 *) sqp->s_sge.sge.vaddr =
                        (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
                        (u64) atomic64_add_return(sdata, maddr) - sdata :
                        (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
                                      sdata, wqe->wr.wr.atomic.swap);
                goto send_comp;

        default:
                goto done;
        }
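
        /*
         * Copy the payload from the sender's SGE list straight into the
         * destination QP's SGE state, advancing both sides as each
         * segment is consumed.
         */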
        sge = &sqp->s_sge.sge;
        while (sqp->s_len) {
                u32 len = sqp->s_len;

                if (len > sge->length)
                        len = sge->length;
                if (len > sge->sge_length)
                        len = sge->sge_length;
                BUG_ON(len == 0);
                ipath_copy_sge(&qp->r_sge, sge->vaddr, len);
                sge->vaddr += len;
                sge->length -= len;
                sge->sge_length -= len;
                if (sge->sge_length == 0) {
                        if (--sqp->s_sge.num_sge)
                                *sge = *sqp->s_sge.sg_list++;
                } else if (sge->length == 0 && sge->mr != NULL) {
                        if (++sge->n >= IPATH_SEGSZ) {
                                if (++sge->m >= sge->mr->mapsz)
                                        break;
                                sge->n = 0;
                        }
                        sge->vaddr =
                                sge->mr->map[sge->m]->segs[sge->n].vaddr;
                        sge->length =
                                sge->mr->map[sge->m]->segs[sge->n].length;
                }
                sqp->s_len -= len;
        }

        if (wqe->wr.opcode == IB_WR_RDMA_WRITE ||
            wqe->wr.opcode == IB_WR_RDMA_READ)
                goto send_comp;

        if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
                wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
        else
                wc.opcode = IB_WC_RECV;
        wc.wr_id = qp->r_wr_id;
        wc.status = IB_WC_SUCCESS;
        wc.vendor_err = 0;
        wc.byte_len = wqe->length;
        wc.qp = &qp->ibqp;
        wc.src_qp = qp->remote_qpn;
        wc.pkey_index = 0;
        wc.slid = qp->remote_ah_attr.dlid;
        wc.sl = qp->remote_ah_attr.sl;
        wc.dlid_path_bits = 0;
        wc.port_num = 1;
        /* Signal completion event if the solicited bit is set. */
        ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
                       wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
        sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
        ipath_send_complete(sqp, wqe, IB_WC_SUCCESS);
        goto again;

done:
        if (atomic_dec_and_test(&qp->refcount))
                wake_up(&qp->wait);
}
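
/*
 * Ask the chip to interrupt us when a PIO send buffer becomes available
 * again, so that ipath_ib_piobufavail() can restart the QPs waiting on
 * the piowait list.
 */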
static void want_buffer(struct ipath_devdata *dd)
{
        set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
                         dd->ipath_sendctrl);
}

/**
 * ipath_no_bufs_available - tell the layer driver we need buffers
 * @qp: the QP that caused the problem
 * @dev: the device we ran out of buffers on
 *
 * Called when we run out of PIO buffers.
 */
static void ipath_no_bufs_available(struct ipath_qp *qp,
                                    struct ipath_ibdev *dev)
{
        unsigned long flags;

        /*
         * Note that as soon as want_buffer() is called and
         * possibly before it returns, ipath_ib_piobufavail()
         * could be called.  If we are still in the tasklet function,
         * tasklet_hi_schedule() will not call us until the next time
         * tasklet_hi_schedule() is called.
         * We leave the busy flag set so that another post send doesn't
         * try to put the same QP on the piowait list again.
         */
        spin_lock_irqsave(&dev->pending_lock, flags);
        list_add_tail(&qp->piowait, &dev->piowait);
        spin_unlock_irqrestore(&dev->pending_lock, flags);
        want_buffer(dev->dd);
        dev->n_piowait++;
}

/**
 * ipath_make_grh - construct a GRH header
 * @dev: a pointer to the ipath device
 * @hdr: a pointer to the GRH header being constructed
 * @grh: the global route address to send to
 * @hwords: the number of 32 bit words of header being sent
 * @nwords: the number of 32 bit words of data being sent
 *
 * Return the size of the header in 32 bit words.
 */
u32 ipath_make_grh(struct ipath_ibdev *dev, struct ib_grh *hdr,
                   struct ib_global_route *grh, u32 hwords, u32 nwords)
{
        hdr->version_tclass_flow =
                cpu_to_be32((6 << 28) |
                            (grh->traffic_class << 20) |
                            grh->flow_label);
        hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
        /* next_hdr is defined by C8-7 in ch. 8.4.1 */
        hdr->next_hdr = 0x1B;
        hdr->hop_limit = grh->hop_limit;
        /* The SGID is 32-bit aligned. */
        hdr->sgid.global.subnet_prefix = dev->gid_prefix;
        hdr->sgid.global.interface_id = dev->dd->ipath_guid;
        hdr->dgid = grh->dgid;

        /* GRH header size in 32-bit words. */
        return sizeof(struct ib_grh) / sizeof(u32);
}

void ipath_make_ruc_header(struct ipath_ibdev *dev, struct ipath_qp *qp,
                           struct ipath_other_headers *ohdr,
                           u32 bth0, u32 bth2)
{
        u16 lrh0;
        u32 nwords;
        u32 extra_bytes;

        /* Construct the header. */
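        /*
         * extra_bytes is the padding needed to round the payload up to a
         * multiple of 4 bytes; it is also encoded in the BTH pad count
         * field below.
         */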
        extra_bytes = -qp->s_cur_size & 3;
        nwords = (qp->s_cur_size + extra_bytes) >> 2;
        lrh0 = IPATH_LRH_BTH;
        if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
                qp->s_hdrwords += ipath_make_grh(dev, &qp->s_hdr.u.l.grh,
                                                 &qp->remote_ah_attr.grh,
                                                 qp->s_hdrwords, nwords);
                lrh0 = IPATH_LRH_GRH;
        }
        lrh0 |= qp->remote_ah_attr.sl << 4;
        qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
        qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
        qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
        qp->s_hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid);
        bth0 |= ipath_get_pkey(dev->dd, qp->s_pkey_index);
        bth0 |= extra_bytes << 20;
        ohdr->bth[0] = cpu_to_be32(bth0 | (1 << 22));
        ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
        ohdr->bth[2] = cpu_to_be32(bth2);
}

/**
 * ipath_do_send - perform a send on a QP
 * @data: contains a pointer to the QP
 *
 * Process entries in the send work queue until credit or queue is
 * exhausted.  Only allow one CPU to send a packet per QP (tasklet).
 * Otherwise, two threads could send packets out of order.
 */
void ipath_do_send(unsigned long data)
{
        struct ipath_qp *qp = (struct ipath_qp *)data;
        struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
        int (*make_req)(struct ipath_qp *qp);

        if (test_and_set_bit(IPATH_S_BUSY, &qp->s_busy))
                goto bail;

        if ((qp->ibqp.qp_type == IB_QPT_RC ||
             qp->ibqp.qp_type == IB_QPT_UC) &&
            qp->remote_ah_attr.dlid == dev->dd->ipath_lid) {
                ipath_ruc_loopback(qp);
                goto clear;
        }

        if (qp->ibqp.qp_type == IB_QPT_RC)
                make_req = ipath_make_rc_req;
        else if (qp->ibqp.qp_type == IB_QPT_UC)
                make_req = ipath_make_uc_req;
        else
                make_req = ipath_make_ud_req;

again:
        /* Check for a constructed packet to be sent. */
        if (qp->s_hdrwords != 0) {
                /*
                 * If no PIO bufs are available, return.  An interrupt will
                 * call ipath_ib_piobufavail() when one is available.
                 */
                if (ipath_verbs_send(qp, &qp->s_hdr, qp->s_hdrwords,
                                     qp->s_cur_sge, qp->s_cur_size)) {
                        ipath_no_bufs_available(qp, dev);
                        goto bail;
                }
                dev->n_unicast_xmit++;
                /* Record that we sent the packet and s_hdr is empty. */
                qp->s_hdrwords = 0;
        }

        if (make_req(qp))
                goto again;

clear:
        clear_bit(IPATH_S_BUSY, &qp->s_busy);
bail:;
}

void ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe,
                         enum ib_wc_status status)
{
        u32 last = qp->s_last;

        if (++last == qp->s_size)
                last = 0;
        qp->s_last = last;

        /* See ch. 11.2.4.1 and 10.7.3.1 */
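        /*
         * Generate a completion entry unless the QP was created with
         * selective signaling, the work request was not flagged
         * IB_SEND_SIGNALED, and it completed successfully.
         */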
        if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) ||
            (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
            status != IB_WC_SUCCESS) {
                struct ib_wc wc;

                wc.wr_id = wqe->wr.wr_id;
                wc.status = status;
                wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
                wc.vendor_err = 0;
                wc.byte_len = wqe->length;
                wc.imm_data = 0;
                wc.qp = &qp->ibqp;
                wc.src_qp = 0;
                wc.wc_flags = 0;
                wc.pkey_index = 0;
                wc.slid = 0;
                wc.sl = 0;
                wc.dlid_path_bits = 0;
                wc.port_num = 0;

                ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
        }
}