qib_ruc.c

/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/spinlock.h>

#include "qib.h"
#include "qib_mad.h"

/*
 * Convert the AETH RNR timeout code into the number of microseconds.
 */
const u32 ib_qib_rnr_table[32] = {
        655360, /* 00: 655.36 */
        10,     /* 01: .01 */
        20,     /* 02: .02 */
        30,     /* 03: .03 */
        40,     /* 04: .04 */
        60,     /* 05: .06 */
        80,     /* 06: .08 */
        120,    /* 07: .12 */
        160,    /* 08: .16 */
        240,    /* 09: .24 */
        320,    /* 0A: .32 */
        480,    /* 0B: .48 */
        640,    /* 0C: .64 */
        960,    /* 0D: .96 */
        1280,   /* 0E: 1.28 */
        1920,   /* 0F: 1.92 */
        2560,   /* 10: 2.56 */
        3840,   /* 11: 3.84 */
        5120,   /* 12: 5.12 */
        7680,   /* 13: 7.68 */
        10240,  /* 14: 10.24 */
        15360,  /* 15: 15.36 */
        20480,  /* 16: 20.48 */
        30720,  /* 17: 30.72 */
        40960,  /* 18: 40.96 */
        61440,  /* 19: 61.44 */
        81920,  /* 1A: 81.92 */
        122880, /* 1B: 122.88 */
        163840, /* 1C: 163.84 */
        245760, /* 1D: 245.76 */
        327680, /* 1E: 327.68 */
        491520  /* 1F: 491.52 */
};
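
/*
 * Note: the index is the 5-bit RNR timeout code carried in the AETH;
 * qib_ruc_loopback() below uses this table to convert the responder's
 * qp->r_min_rnr_timer into the delay before retrying a loopback send.
 */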

/*
 * Validate a RWQE and fill in the SGE state.
 * Return 1 if OK.
 */
static int qib_init_sge(struct qib_qp *qp, struct qib_rwqe *wqe)
{
        int i, j, ret;
        struct ib_wc wc;
        struct qib_lkey_table *rkt;
        struct qib_pd *pd;
        struct qib_sge_state *ss;

        rkt = &to_idev(qp->ibqp.device)->lk_table;
        pd = to_ipd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
        ss = &qp->r_sge;
        ss->sg_list = qp->r_sg_list;
        qp->r_len = 0;
        for (i = j = 0; i < wqe->num_sge; i++) {
                if (wqe->sg_list[i].length == 0)
                        continue;
                /* Check LKEY */
                if (!qib_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
                                 &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
                        goto bad_lkey;
                qp->r_len += wqe->sg_list[i].length;
                j++;
        }
        ss->num_sge = j;
        ss->total_len = qp->r_len;
        ret = 1;
        goto bail;

bad_lkey:
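        /*
         * Unwind: each SGE accepted above holds a reference on its MR,
         * so drop those references before reporting the local
         * protection error.
         */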
        while (j) {
                struct qib_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;

                atomic_dec(&sge->mr->refcount);
        }
        ss->num_sge = 0;
        memset(&wc, 0, sizeof(wc));
        wc.wr_id = wqe->wr_id;
        wc.status = IB_WC_LOC_PROT_ERR;
        wc.opcode = IB_WC_RECV;
        wc.qp = &qp->ibqp;
        /* Signal solicited completion event. */
        qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
        ret = 0;
bail:
        return ret;
}

/**
 * qib_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
 *
 * Return -1 if there is a local error, 0 if no RWQE is available,
 * otherwise return 1.
 *
 * Can be called from interrupt level.
 */
int qib_get_rwqe(struct qib_qp *qp, int wr_id_only)
{
        unsigned long flags;
        struct qib_rq *rq;
        struct qib_rwq *wq;
        struct qib_srq *srq;
        struct qib_rwqe *wqe;
        void (*handler)(struct ib_event *, void *);
        u32 tail;
        int ret;

        if (qp->ibqp.srq) {
                srq = to_isrq(qp->ibqp.srq);
                handler = srq->ibsrq.event_handler;
                rq = &srq->rq;
        } else {
                srq = NULL;
                handler = NULL;
                rq = &qp->r_rq;
        }

        spin_lock_irqsave(&rq->lock, flags);
        if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
                ret = 0;
                goto unlock;
        }

        wq = rq->wq;
        tail = wq->tail;
        /* Validate tail before using it since it is user writable. */
        if (tail >= rq->size)
                tail = 0;
        if (unlikely(tail == wq->head)) {
                ret = 0;
                goto unlock;
        }
        /* Make sure entry is read after head index is read. */
        smp_rmb();
        wqe = get_rwqe_ptr(rq, tail);
        /*
         * Even though we update the tail index in memory, the verbs
         * consumer is not supposed to post more entries until a
         * completion is generated.
         */
        if (++tail >= rq->size)
                tail = 0;
        wq->tail = tail;
        if (!wr_id_only && !qib_init_sge(qp, wqe)) {
                ret = -1;
                goto unlock;
        }
        qp->r_wr_id = wqe->wr_id;

        ret = 1;
        set_bit(QIB_R_WRID_VALID, &qp->r_aflags);
        if (handler) {
                u32 n;

                /*
                 * Validate head pointer value and compute
                 * the number of remaining WQEs.
                 */
                n = wq->head;
                if (n >= rq->size)
                        n = 0;
                if (n < tail)
                        n += rq->size - tail;
                else
                        n -= tail;
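                /*
                 * For example, with rq->size = 8, head = 2 and the
                 * already-incremented tail = 6, n = 2 + 8 - 6 = 4
                 * receive WQEs are still posted.
                 */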
                if (n < srq->limit) {
                        struct ib_event ev;

                        srq->limit = 0;
                        spin_unlock_irqrestore(&rq->lock, flags);
                        ev.device = qp->ibqp.device;
                        ev.element.srq = qp->ibqp.srq;
                        ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
                        handler(&ev, srq->ibsrq.srq_context);
                        goto bail;
                }
        }
unlock:
        spin_unlock_irqrestore(&rq->lock, flags);
bail:
        return ret;
}

/*
 * Switch to alternate path.
 * The QP s_lock should be held and interrupts disabled.
 */
void qib_migrate_qp(struct qib_qp *qp)
{
        struct ib_event ev;

        qp->s_mig_state = IB_MIG_MIGRATED;
        qp->remote_ah_attr = qp->alt_ah_attr;
        qp->port_num = qp->alt_ah_attr.port_num;
        qp->s_pkey_index = qp->s_alt_pkey_index;

        ev.device = qp->ibqp.device;
        ev.element.qp = &qp->ibqp;
        ev.event = IB_EVENT_PATH_MIG;
        qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}

static __be64 get_sguid(struct qib_ibport *ibp, unsigned index)
{
        if (!index) {
                struct qib_pportdata *ppd = ppd_from_ibp(ibp);

                return ppd->guid;
        } else
                return ibp->guids[index - 1];
}

static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
{
        return (gid->global.interface_id == id &&
                (gid->global.subnet_prefix == gid_prefix ||
                 gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
}

/*
 * This should be called with the QP s_lock held.
 */
int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
                      int has_grh, struct qib_qp *qp, u32 bth0)
{
        __be64 guid;
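
        /*
         * If the packet carries the BTH MigReq bit and this QP is armed
         * for path migration, validate the header against the alternate
         * path and migrate; otherwise validate against the primary path.
         */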
        if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) {
                if (!has_grh) {
                        if (qp->alt_ah_attr.ah_flags & IB_AH_GRH)
                                goto err;
                } else {
                        if (!(qp->alt_ah_attr.ah_flags & IB_AH_GRH))
                                goto err;
                        guid = get_sguid(ibp, qp->alt_ah_attr.grh.sgid_index);
                        if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
                                goto err;
                        if (!gid_ok(&hdr->u.l.grh.sgid,
                                    qp->alt_ah_attr.grh.dgid.global.subnet_prefix,
                                    qp->alt_ah_attr.grh.dgid.global.interface_id))
                                goto err;
                }
                if (!qib_pkey_ok((u16)bth0,
                                 qib_get_pkey(ibp, qp->s_alt_pkey_index))) {
                        qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
                                      (u16)bth0,
                                      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
                                      0, qp->ibqp.qp_num,
                                      hdr->lrh[3], hdr->lrh[1]);
                        goto err;
                }
                /* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
                if (be16_to_cpu(hdr->lrh[3]) != qp->alt_ah_attr.dlid ||
                    ppd_from_ibp(ibp)->port != qp->alt_ah_attr.port_num)
                        goto err;
                qib_migrate_qp(qp);
        } else {
                if (!has_grh) {
                        if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
                                goto err;
                } else {
                        if (!(qp->remote_ah_attr.ah_flags & IB_AH_GRH))
                                goto err;
                        guid = get_sguid(ibp,
                                         qp->remote_ah_attr.grh.sgid_index);
                        if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
                                goto err;
                        if (!gid_ok(&hdr->u.l.grh.sgid,
                                    qp->remote_ah_attr.grh.dgid.global.subnet_prefix,
                                    qp->remote_ah_attr.grh.dgid.global.interface_id))
                                goto err;
                }
                if (!qib_pkey_ok((u16)bth0,
                                 qib_get_pkey(ibp, qp->s_pkey_index))) {
                        qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
                                      (u16)bth0,
                                      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
                                      0, qp->ibqp.qp_num,
                                      hdr->lrh[3], hdr->lrh[1]);
                        goto err;
                }
                /* Validate the SLID. See Ch. 9.6.1.5 */
                if (be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid ||
                    ppd_from_ibp(ibp)->port != qp->port_num)
                        goto err;
                if (qp->s_mig_state == IB_MIG_REARM &&
                    !(bth0 & IB_BTH_MIG_REQ))
                        qp->s_mig_state = IB_MIG_ARMED;
        }

        return 0;

err:
        return 1;
}

/**
 * qib_ruc_loopback - handle UC and RC loopback requests
 * @sqp: the sending QP
 *
 * This is called from qib_do_send() to
 * forward a WQE addressed to the same HCA.
 * Note that although we are single threaded due to the tasklet, we still
 * have to protect against post_send().  We don't have to worry about
 * receive interrupts since this is a connected protocol and all packets
 * will pass through here.
 */
static void qib_ruc_loopback(struct qib_qp *sqp)
{
        struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
        struct qib_qp *qp;
        struct qib_swqe *wqe;
        struct qib_sge *sge;
        unsigned long flags;
        struct ib_wc wc;
        u64 sdata;
        atomic64_t *maddr;
        enum ib_wc_status send_status;
        int release;
        int ret;

        /*
         * Note that we check the responder QP state after
         * checking the requester's state.
         */
        qp = qib_lookup_qpn(ibp, sqp->remote_qpn);

        spin_lock_irqsave(&sqp->s_lock, flags);

        /* Return if we are already busy processing a work request. */
        if ((sqp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT)) ||
            !(ib_qib_state_ops[sqp->state] & QIB_PROCESS_OR_FLUSH_SEND))
                goto unlock;

        sqp->s_flags |= QIB_S_BUSY;

again:
        if (sqp->s_last == sqp->s_head)
                goto clr_busy;
        wqe = get_swqe_ptr(sqp, sqp->s_last);

        /* Return if it is not OK to start a new work request. */
        if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_NEXT_SEND_OK)) {
                if (!(ib_qib_state_ops[sqp->state] & QIB_FLUSH_SEND))
                        goto clr_busy;
                /* We are in the error state, flush the work request. */
                send_status = IB_WC_WR_FLUSH_ERR;
                goto flush_send;
        }

        /*
         * We can rely on the entry not changing without the s_lock
         * being held until we update s_last.
         * We increment s_cur to indicate s_last is in progress.
         */
        if (sqp->s_last == sqp->s_cur) {
                if (++sqp->s_cur >= sqp->s_size)
                        sqp->s_cur = 0;
        }
        spin_unlock_irqrestore(&sqp->s_lock, flags);

        if (!qp || !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) ||
            qp->ibqp.qp_type != sqp->ibqp.qp_type) {
                ibp->n_pkt_drops++;
                /*
                 * For RC, the requester would timeout and retry so
                 * shortcut the timeouts and just signal too many retries.
                 */
                if (sqp->ibqp.qp_type == IB_QPT_RC)
                        send_status = IB_WC_RETRY_EXC_ERR;
                else
                        send_status = IB_WC_SUCCESS;
                goto serr;
        }

        memset(&wc, 0, sizeof wc);
        send_status = IB_WC_SUCCESS;

        release = 1;
        sqp->s_sge.sge = wqe->sg_list[0];
        sqp->s_sge.sg_list = wqe->sg_list + 1;
        sqp->s_sge.num_sge = wqe->wr.num_sge;
        sqp->s_len = wqe->length;
        switch (wqe->wr.opcode) {
        case IB_WR_SEND_WITH_IMM:
                wc.wc_flags = IB_WC_WITH_IMM;
                wc.ex.imm_data = wqe->wr.ex.imm_data;
                /* FALLTHROUGH */
        case IB_WR_SEND:
                ret = qib_get_rwqe(qp, 0);
                if (ret < 0)
                        goto op_err;
                if (!ret)
                        goto rnr_nak;
                break;

        case IB_WR_RDMA_WRITE_WITH_IMM:
                if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
                        goto inv_err;
                wc.wc_flags = IB_WC_WITH_IMM;
                wc.ex.imm_data = wqe->wr.ex.imm_data;
                ret = qib_get_rwqe(qp, 1);
                if (ret < 0)
                        goto op_err;
                if (!ret)
                        goto rnr_nak;
                /* FALLTHROUGH */
        case IB_WR_RDMA_WRITE:
                if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
                        goto inv_err;
                if (wqe->length == 0)
                        break;
                if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
                                          wqe->wr.wr.rdma.remote_addr,
                                          wqe->wr.wr.rdma.rkey,
                                          IB_ACCESS_REMOTE_WRITE)))
                        goto acc_err;
                qp->r_sge.sg_list = NULL;
                qp->r_sge.num_sge = 1;
                qp->r_sge.total_len = wqe->length;
                break;

        case IB_WR_RDMA_READ:
                if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
                        goto inv_err;
                if (unlikely(!qib_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
                                          wqe->wr.wr.rdma.remote_addr,
                                          wqe->wr.wr.rdma.rkey,
                                          IB_ACCESS_REMOTE_READ)))
                        goto acc_err;
                release = 0;
                sqp->s_sge.sg_list = NULL;
                sqp->s_sge.num_sge = 1;
                qp->r_sge.sge = wqe->sg_list[0];
                qp->r_sge.sg_list = wqe->sg_list + 1;
                qp->r_sge.num_sge = wqe->wr.num_sge;
                qp->r_sge.total_len = wqe->length;
                break;

        case IB_WR_ATOMIC_CMP_AND_SWP:
        case IB_WR_ATOMIC_FETCH_AND_ADD:
                if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
                        goto inv_err;
                if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
                                          wqe->wr.wr.atomic.remote_addr,
                                          wqe->wr.wr.atomic.rkey,
                                          IB_ACCESS_REMOTE_ATOMIC)))
                        goto acc_err;
                /* Perform atomic OP and save result. */
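                /*
                 * For FETCH_AND_ADD, atomic64_add_return() yields the
                 * post-add value, so subtract sdata to store the original
                 * contents; cmpxchg() already returns the prior value for
                 * CMP_AND_SWP.
                 */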
                maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
                sdata = wqe->wr.wr.atomic.compare_add;
                *(u64 *) sqp->s_sge.sge.vaddr =
                        (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
                        (u64) atomic64_add_return(sdata, maddr) - sdata :
                        (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
                                      sdata, wqe->wr.wr.atomic.swap);
                atomic_dec(&qp->r_sge.sge.mr->refcount);
                qp->r_sge.num_sge = 0;
                goto send_comp;

        default:
                send_status = IB_WC_LOC_QP_OP_ERR;
                goto serr;
        }
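
        /*
         * Copy the payload from the sender's SGE list into the receiver's
         * r_sge, advancing through each SGE and its memory region segment
         * map as data is consumed.
         */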
        sge = &sqp->s_sge.sge;
        while (sqp->s_len) {
                u32 len = sqp->s_len;

                if (len > sge->length)
                        len = sge->length;
                if (len > sge->sge_length)
                        len = sge->sge_length;
                BUG_ON(len == 0);
                qib_copy_sge(&qp->r_sge, sge->vaddr, len, release);
                sge->vaddr += len;
                sge->length -= len;
                sge->sge_length -= len;
                if (sge->sge_length == 0) {
                        if (!release)
                                atomic_dec(&sge->mr->refcount);
                        if (--sqp->s_sge.num_sge)
                                *sge = *sqp->s_sge.sg_list++;
                } else if (sge->length == 0 && sge->mr->lkey) {
                        if (++sge->n >= QIB_SEGSZ) {
                                if (++sge->m >= sge->mr->mapsz)
                                        break;
                                sge->n = 0;
                        }
                        sge->vaddr =
                                sge->mr->map[sge->m]->segs[sge->n].vaddr;
                        sge->length =
                                sge->mr->map[sge->m]->segs[sge->n].length;
                }
                sqp->s_len -= len;
        }
        if (release)
                while (qp->r_sge.num_sge) {
                        atomic_dec(&qp->r_sge.sge.mr->refcount);
                        if (--qp->r_sge.num_sge)
                                qp->r_sge.sge = *qp->r_sge.sg_list++;
                }

        if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
                goto send_comp;

        if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
                wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
        else
                wc.opcode = IB_WC_RECV;
        wc.wr_id = qp->r_wr_id;
        wc.status = IB_WC_SUCCESS;
        wc.byte_len = wqe->length;
        wc.qp = &qp->ibqp;
        wc.src_qp = qp->remote_qpn;
        wc.slid = qp->remote_ah_attr.dlid;
        wc.sl = qp->remote_ah_attr.sl;
        wc.port_num = 1;
        /* Signal completion event if the solicited bit is set. */
        qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
                     wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
        spin_lock_irqsave(&sqp->s_lock, flags);
        ibp->n_loop_pkts++;
flush_send:
        sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
        qib_send_complete(sqp, wqe, send_status);
        goto again;

rnr_nak:
        /* Handle RNR NAK */
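        /*
         * No packet goes on the wire here.  For UC there is no retry, so
         * just complete the WQE; for RC, emulate the RNR NAK locally by
         * waiting for the responder's minimum RNR timer
         * (qp->r_min_rnr_timer) before retrying the send.
         */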
        if (qp->ibqp.qp_type == IB_QPT_UC)
                goto send_comp;
        ibp->n_rnr_naks++;
        /*
         * Note: we don't need the s_lock held since the BUSY flag
         * makes this single threaded.
         */
        if (sqp->s_rnr_retry == 0) {
                send_status = IB_WC_RNR_RETRY_EXC_ERR;
                goto serr;
        }
        if (sqp->s_rnr_retry_cnt < 7)
                sqp->s_rnr_retry--;
        spin_lock_irqsave(&sqp->s_lock, flags);
        if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_RECV_OK))
                goto clr_busy;
        sqp->s_flags |= QIB_S_WAIT_RNR;
        sqp->s_timer.function = qib_rc_rnr_retry;
        sqp->s_timer.expires = jiffies +
                usecs_to_jiffies(ib_qib_rnr_table[qp->r_min_rnr_timer]);
        add_timer(&sqp->s_timer);
        goto clr_busy;

op_err:
        send_status = IB_WC_REM_OP_ERR;
        wc.status = IB_WC_LOC_QP_OP_ERR;
        goto err;

inv_err:
        send_status = IB_WC_REM_INV_REQ_ERR;
        wc.status = IB_WC_LOC_QP_OP_ERR;
        goto err;

acc_err:
        send_status = IB_WC_REM_ACCESS_ERR;
        wc.status = IB_WC_LOC_PROT_ERR;
err:
        /* responder goes to error state */
        qib_rc_error(qp, wc.status);

serr:
        spin_lock_irqsave(&sqp->s_lock, flags);
        qib_send_complete(sqp, wqe, send_status);
        if (sqp->ibqp.qp_type == IB_QPT_RC) {
                int lastwqe = qib_error_qp(sqp, IB_WC_WR_FLUSH_ERR);

                sqp->s_flags &= ~QIB_S_BUSY;
                spin_unlock_irqrestore(&sqp->s_lock, flags);
                if (lastwqe) {
                        struct ib_event ev;

                        ev.device = sqp->ibqp.device;
                        ev.element.qp = &sqp->ibqp;
                        ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
                        sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
                }
                goto done;
        }
clr_busy:
        sqp->s_flags &= ~QIB_S_BUSY;
unlock:
        spin_unlock_irqrestore(&sqp->s_lock, flags);
done:
        if (qp && atomic_dec_and_test(&qp->refcount))
                wake_up(&qp->wait);
}

/**
 * qib_make_grh - construct a GRH header
 * @ibp: a pointer to the IB port
 * @hdr: a pointer to the GRH header being constructed
 * @grh: the global route address to send to
 * @hwords: the number of 32 bit words of header being sent
 * @nwords: the number of 32 bit words of data being sent
 *
 * Return the size of the header in 32 bit words.
 */
u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
                 struct ib_global_route *grh, u32 hwords, u32 nwords)
{
        hdr->version_tclass_flow =
                cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
                            (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
                            (grh->flow_label << IB_GRH_FLOW_SHIFT));
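        /*
         * PayLen counts the bytes following the GRH: the remaining header
         * words (hwords minus the two LRH words), the padded payload
         * (nwords) and the CRC word, converted to bytes by the << 2.
         */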
        hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
        /* next_hdr is defined by C8-7 in ch. 8.4.1 */
        hdr->next_hdr = IB_GRH_NEXT_HDR;
        hdr->hop_limit = grh->hop_limit;
        /* The SGID is 32-bit aligned. */
        hdr->sgid.global.subnet_prefix = ibp->gid_prefix;
        hdr->sgid.global.interface_id = grh->sgid_index ?
                ibp->guids[grh->sgid_index - 1] : ppd_from_ibp(ibp)->guid;
        hdr->dgid = grh->dgid;

        /* GRH header size in 32-bit words. */
        return sizeof(struct ib_grh) / sizeof(u32);
}

void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
                         u32 bth0, u32 bth2)
{
        struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
        u16 lrh0;
        u32 nwords;
        u32 extra_bytes;

        /* Construct the header. */
        extra_bytes = -qp->s_cur_size & 3;
        nwords = (qp->s_cur_size + extra_bytes) >> 2;
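        /*
         * extra_bytes pads the payload to a 4-byte boundary, e.g. a
         * 5-byte s_cur_size gives extra_bytes = 3 and nwords = 2.
         */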
        lrh0 = QIB_LRH_BTH;
        if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
                qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr.u.l.grh,
                                               &qp->remote_ah_attr.grh,
                                               qp->s_hdrwords, nwords);
                lrh0 = QIB_LRH_GRH;
        }
        lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
                qp->remote_ah_attr.sl << 4;
        qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
        qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
        qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
        qp->s_hdr.lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid |
                                       qp->remote_ah_attr.src_path_bits);
        bth0 |= qib_get_pkey(ibp, qp->s_pkey_index);
        bth0 |= extra_bytes << 20;
        if (qp->s_mig_state == IB_MIG_MIGRATED)
                bth0 |= IB_BTH_MIG_REQ;
        ohdr->bth[0] = cpu_to_be32(bth0);
        ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
        ohdr->bth[2] = cpu_to_be32(bth2);
}

/**
 * qib_do_send - perform a send on a QP
 * @work: contains a pointer to the QP
 *
 * Process entries in the send work queue until credit or queue is
 * exhausted.  Only allow one CPU to send a packet per QP (tasklet).
 * Otherwise, two threads could send packets out of order.
 */
void qib_do_send(struct work_struct *work)
{
        struct qib_qp *qp = container_of(work, struct qib_qp, s_work);
        struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
        struct qib_pportdata *ppd = ppd_from_ibp(ibp);
        int (*make_req)(struct qib_qp *qp);
        unsigned long flags;
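
        /*
         * If the destination LID (with the low LMC path bits masked off)
         * is this port's own LID, the WQE never touches the wire; handle
         * it entirely in software via qib_ruc_loopback().
         */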
        if ((qp->ibqp.qp_type == IB_QPT_RC ||
             qp->ibqp.qp_type == IB_QPT_UC) &&
            (qp->remote_ah_attr.dlid & ~((1 << ppd->lmc) - 1)) == ppd->lid) {
                qib_ruc_loopback(qp);
                return;
        }

        if (qp->ibqp.qp_type == IB_QPT_RC)
                make_req = qib_make_rc_req;
        else if (qp->ibqp.qp_type == IB_QPT_UC)
                make_req = qib_make_uc_req;
        else
                make_req = qib_make_ud_req;

        spin_lock_irqsave(&qp->s_lock, flags);

        /* Return if we are already busy processing a work request. */
        if (!qib_send_ok(qp)) {
                spin_unlock_irqrestore(&qp->s_lock, flags);
                return;
        }

        qp->s_flags |= QIB_S_BUSY;

        spin_unlock_irqrestore(&qp->s_lock, flags);

        do {
                /* Check for a constructed packet to be sent. */
                if (qp->s_hdrwords != 0) {
                        /*
                         * If the packet cannot be sent now, return and
                         * the send tasklet will be woken up later.
                         */
                        if (qib_verbs_send(qp, &qp->s_hdr, qp->s_hdrwords,
                                           qp->s_cur_sge, qp->s_cur_size))
                                break;
                        /* Record that s_hdr is empty. */
                        qp->s_hdrwords = 0;
                }
        } while (make_req(qp));
}

/*
 * This should be called with s_lock held.
 */
void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
                       enum ib_wc_status status)
{
        u32 old_last, last;
        unsigned i;

        if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND))
                return;

        for (i = 0; i < wqe->wr.num_sge; i++) {
                struct qib_sge *sge = &wqe->sg_list[i];

                atomic_dec(&sge->mr->refcount);
        }
        if (qp->ibqp.qp_type == IB_QPT_UD ||
            qp->ibqp.qp_type == IB_QPT_SMI ||
            qp->ibqp.qp_type == IB_QPT_GSI)
                atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount);

        /* See ch. 11.2.4.1 and 10.7.3.1 */
        if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
            (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
            status != IB_WC_SUCCESS) {
                struct ib_wc wc;

                memset(&wc, 0, sizeof wc);
                wc.wr_id = wqe->wr.wr_id;
                wc.status = status;
                wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
                wc.qp = &qp->ibqp;
                if (status == IB_WC_SUCCESS)
                        wc.byte_len = wqe->length;
                qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc,
                             status != IB_WC_SUCCESS);
        }

        last = qp->s_last;
        old_last = last;
        if (++last >= qp->s_size)
                last = 0;
        qp->s_last = last;
        if (qp->s_acked == old_last)
                qp->s_acked = last;
        if (qp->s_cur == old_last)
                qp->s_cur = last;
        if (qp->s_tail == old_last)
                qp->s_tail = last;
        if (qp->state == IB_QPS_SQD && last == qp->s_cur)
                qp->s_draining = 0;
}