/*
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ipath_verbs.h"
#include "ips_common.h"

/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_UC_##x
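/* e.g. OP(SEND_FIRST) expands to IB_OPCODE_UC_SEND_FIRST */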

static void complete_last_send(struct ipath_qp *qp, struct ipath_swqe *wqe,
                               struct ib_wc *wc)
{
        if (++qp->s_last == qp->s_size)
                qp->s_last = 0;
        if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &qp->s_flags) ||
            (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
                wc->wr_id = wqe->wr.wr_id;
                wc->status = IB_WC_SUCCESS;
                wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
                wc->vendor_err = 0;
                wc->byte_len = wqe->length;
                wc->qp_num = qp->ibqp.qp_num;
                wc->src_qp = qp->remote_qpn;
                wc->pkey_index = 0;
                wc->slid = qp->remote_ah_attr.dlid;
                wc->sl = qp->remote_ah_attr.sl;
                wc->dlid_path_bits = 0;
                wc->port_num = 0;
                ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 0);
        }
        /*
         * Note: wqe is passed by value, so reassigning it here would be
         * a dead store; the caller refetches the WQE for the new s_last.
         */
}

/**
 * ipath_do_uc_send - do a send on a UC queue
 * @data: contains a pointer to the QP to send on
 *
 * Process entries in the send work queue until the queue is exhausted.
 * Only allow one CPU to send a packet per QP (tasklet).
 * Otherwise, after we drop the QP lock, two threads could send
 * packets out of order.
 * This is similar to ipath_do_rc_send() except we don't have
 * timeouts or resends.
 */
void ipath_do_uc_send(unsigned long data)
{
        struct ipath_qp *qp = (struct ipath_qp *)data;
        struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
        struct ipath_swqe *wqe;
        unsigned long flags;
        u16 lrh0;
        u32 hwords;
        u32 nwords;
        u32 extra_bytes;
        u32 bth0;
        u32 bth2;
        u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
        u32 len;
        struct ipath_other_headers *ohdr;
        struct ib_wc wc;

        if (test_and_set_bit(IPATH_S_BUSY, &qp->s_flags))
                goto bail;

        if (unlikely(qp->remote_ah_attr.dlid ==
                     ipath_layer_get_lid(dev->dd))) {
                /* Pass in an uninitialized ib_wc to save stack space. */
                ipath_ruc_loopback(qp, &wc);
                clear_bit(IPATH_S_BUSY, &qp->s_flags);
                goto bail;
        }
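
        /* The BTH follows the 40-byte GRH when one is present. */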
        ohdr = &qp->s_hdr.u.oth;
        if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
                ohdr = &qp->s_hdr.u.l.oth;

again:
        /* Check for a constructed packet to be sent. */
        if (qp->s_hdrwords != 0) {
                /*
                 * If no PIO bufs are available, return.
                 * An interrupt will call ipath_ib_piobufavail()
                 * when one is available.
                 */
                if (ipath_verbs_send(dev->dd, qp->s_hdrwords,
                                     (u32 *) &qp->s_hdr,
                                     qp->s_cur_size, qp->s_cur_sge)) {
                        ipath_no_bufs_available(qp, dev);
                        goto bail;
                }
                dev->n_unicast_xmit++;
                /* Record that we sent the packet and s_hdr is empty. */
                qp->s_hdrwords = 0;
        }

        lrh0 = IPS_LRH_BTH;
        /* header size in 32-bit words LRH+BTH = (8+12)/4. */
        hwords = 5;

        /*
         * The lock is needed to synchronize between
         * setting qp->s_ack_state and post_send().
         */
        spin_lock_irqsave(&qp->s_lock, flags);

        if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK))
                goto done;

        bth0 = ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);

        /* Send a request. */
        wqe = get_swqe_ptr(qp, qp->s_last);
        switch (qp->s_state) {
        default:
                /*
                 * Signal the completion of the last send (if there is
                 * one), then refetch the WQE now that s_last has
                 * advanced.
                 */
                if (qp->s_last != qp->s_tail) {
                        complete_last_send(qp, wqe, &wc);
                        wqe = get_swqe_ptr(qp, qp->s_last);
                }

                /* Check if send work queue is empty. */
                if (qp->s_tail == qp->s_head)
                        goto done;
                /*
                 * Start a new request.
                 */
                qp->s_psn = wqe->psn = qp->s_next_psn;
                qp->s_sge.sge = wqe->sg_list[0];
                qp->s_sge.sg_list = wqe->sg_list + 1;
                qp->s_sge.num_sge = wqe->wr.num_sge;
                qp->s_len = len = wqe->length;
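
                /*
                 * Messages longer than one PMTU are segmented into
                 * FIRST/MIDDLE.../LAST packets; anything that fits in
                 * a single packet goes out as an ONLY variant.
                 */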
                switch (wqe->wr.opcode) {
                case IB_WR_SEND:
                case IB_WR_SEND_WITH_IMM:
                        if (len > pmtu) {
                                qp->s_state = OP(SEND_FIRST);
                                len = pmtu;
                                break;
                        }
                        if (wqe->wr.opcode == IB_WR_SEND)
                                qp->s_state = OP(SEND_ONLY);
                        else {
                                qp->s_state =
                                        OP(SEND_ONLY_WITH_IMMEDIATE);
                                /* Immediate data comes after the BTH */
                                ohdr->u.imm_data = wqe->wr.imm_data;
                                hwords += 1;
                        }
                        if (wqe->wr.send_flags & IB_SEND_SOLICITED)
                                bth0 |= 1 << 23;
                        break;

                case IB_WR_RDMA_WRITE:
                case IB_WR_RDMA_WRITE_WITH_IMM:
                        ohdr->u.rc.reth.vaddr =
                                cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
                        ohdr->u.rc.reth.rkey =
                                cpu_to_be32(wqe->wr.wr.rdma.rkey);
                        ohdr->u.rc.reth.length = cpu_to_be32(len);
                        hwords += sizeof(struct ib_reth) / 4;
                        if (len > pmtu) {
                                qp->s_state = OP(RDMA_WRITE_FIRST);
                                len = pmtu;
                                break;
                        }
                        if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
                                qp->s_state = OP(RDMA_WRITE_ONLY);
                        else {
                                qp->s_state =
                                        OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
                                /* Immediate data comes after the RETH */
                                ohdr->u.rc.imm_data = wqe->wr.imm_data;
                                hwords += 1;
                                if (wqe->wr.send_flags & IB_SEND_SOLICITED)
                                        bth0 |= 1 << 23;
                        }
                        break;

                default:
                        goto done;
                }
                if (++qp->s_tail >= qp->s_size)
                        qp->s_tail = 0;
                break;

        case OP(SEND_FIRST):
                qp->s_state = OP(SEND_MIDDLE);
                /* FALLTHROUGH */
        case OP(SEND_MIDDLE):
                len = qp->s_len;
                if (len > pmtu) {
                        len = pmtu;
                        break;
                }
                if (wqe->wr.opcode == IB_WR_SEND)
                        qp->s_state = OP(SEND_LAST);
                else {
                        qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
                        /* Immediate data comes after the BTH */
                        ohdr->u.imm_data = wqe->wr.imm_data;
                        hwords += 1;
                }
                if (wqe->wr.send_flags & IB_SEND_SOLICITED)
                        bth0 |= 1 << 23;
                break;

        case OP(RDMA_WRITE_FIRST):
                qp->s_state = OP(RDMA_WRITE_MIDDLE);
                /* FALLTHROUGH */
        case OP(RDMA_WRITE_MIDDLE):
                len = qp->s_len;
                if (len > pmtu) {
                        len = pmtu;
                        break;
                }
                if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
                        qp->s_state = OP(RDMA_WRITE_LAST);
                else {
                        qp->s_state =
                                OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
                        /* Immediate data comes after the BTH */
                        ohdr->u.imm_data = wqe->wr.imm_data;
                        hwords += 1;
                        if (wqe->wr.send_flags & IB_SEND_SOLICITED)
                                bth0 |= 1 << 23;
                }
                break;
        }
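
        /* InfiniBand PSNs are 24 bits wide, hence the mask. */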
        bth2 = qp->s_next_psn++ & IPS_PSN_MASK;
        qp->s_len -= len;
        bth0 |= qp->s_state << 24;

        spin_unlock_irqrestore(&qp->s_lock, flags);

        /* Construct the header. */
        extra_bytes = (4 - len) & 3;
        nwords = (len + extra_bytes) >> 2;
        if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
                /* Header size in 32-bit words. */
                hwords += 10;
                lrh0 = IPS_LRH_GRH;
                qp->s_hdr.u.l.grh.version_tclass_flow =
                        cpu_to_be32((6 << 28) |
                                    (qp->remote_ah_attr.grh.traffic_class
                                     << 20) |
                                    qp->remote_ah_attr.grh.flow_label);
                qp->s_hdr.u.l.grh.paylen =
                        cpu_to_be16(((hwords - 12) + nwords +
                                     SIZE_OF_CRC) << 2);
                /* next_hdr is defined by C8-7 in ch. 8.4.1 */
                qp->s_hdr.u.l.grh.next_hdr = 0x1B;
                qp->s_hdr.u.l.grh.hop_limit =
                        qp->remote_ah_attr.grh.hop_limit;
                /* The SGID is 32-bit aligned. */
                qp->s_hdr.u.l.grh.sgid.global.subnet_prefix =
                        dev->gid_prefix;
                qp->s_hdr.u.l.grh.sgid.global.interface_id =
                        ipath_layer_get_guid(dev->dd);
                qp->s_hdr.u.l.grh.dgid = qp->remote_ah_attr.grh.dgid;
        }
        qp->s_hdrwords = hwords;
        qp->s_cur_sge = &qp->s_sge;
        qp->s_cur_size = len;
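
        /*
         * Fill in the LRH: service level, destination LID, packet
         * length in 32-bit words (including the ICRC), and source LID.
         */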
        lrh0 |= qp->remote_ah_attr.sl << 4;
        qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
        /* DEST LID */
        qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
        qp->s_hdr.lrh[2] = cpu_to_be16(hwords + nwords + SIZE_OF_CRC);
        qp->s_hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd));
        bth0 |= extra_bytes << 20;
        ohdr->bth[0] = cpu_to_be32(bth0);
        ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
        ohdr->bth[2] = cpu_to_be32(bth2);

        /* Check for more work to do. */
        goto again;

done:
        spin_unlock_irqrestore(&qp->s_lock, flags);
        clear_bit(IPATH_S_BUSY, &qp->s_flags);

bail:
        return;
}

/**
 * ipath_uc_rcv - handle an incoming UC packet
 * @dev: the device the packet came in on
 * @hdr: the header of the packet
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the length of the packet
 * @qp: the QP for this packet
 *
 * This is called from ipath_qp_rcv() to process an incoming UC packet
 * for the given QP.
 * Called at interrupt level.
 */
void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
                  int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
{
        struct ipath_other_headers *ohdr;
        int opcode;
        u32 hdrsize;
        u32 psn;
        u32 pad;
        unsigned long flags;
        struct ib_wc wc;
        u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
        struct ib_reth *reth;
        int header_in_data;

        /* Check for GRH */
        if (!has_grh) {
                ohdr = &hdr->u.oth;
                hdrsize = 8 + 12;       /* LRH + BTH */
                psn = be32_to_cpu(ohdr->bth[2]);
                header_in_data = 0;
        } else {
                ohdr = &hdr->u.l.oth;
                hdrsize = 8 + 40 + 12;  /* LRH + GRH + BTH */
                /*
                 * The header with GRH is 60 bytes and the
                 * core driver sets the eager header buffer
                 * size to 56 bytes so the last 4 bytes of
                 * the BTH header (PSN) is in the data buffer.
                 */
                header_in_data =
                        ipath_layer_get_rcvhdrentsize(dev->dd) == 16;
                if (header_in_data) {
                        psn = be32_to_cpu(((__be32 *) data)[0]);
                        data += sizeof(__be32);
                } else
                        psn = be32_to_cpu(ohdr->bth[2]);
        }

        /*
         * The opcode is in the low byte when it's in network order
         * (top byte when in host order).
         */
        opcode = be32_to_cpu(ohdr->bth[0]) >> 24;

        wc.imm_data = 0;
        wc.wc_flags = 0;

        spin_lock_irqsave(&qp->r_rq.lock, flags);

        /* Compare the PSN versus the expected PSN. */
        if (unlikely(ipath_cmp24(psn, qp->r_psn) != 0)) {
                /*
                 * Handle a sequence error.
                 * Silently drop any current message.
                 */
                qp->r_psn = psn;
        inv:
                qp->r_state = OP(SEND_LAST);
                switch (opcode) {
                case OP(SEND_FIRST):
                case OP(SEND_ONLY):
                case OP(SEND_ONLY_WITH_IMMEDIATE):
                        goto send_first;

                case OP(RDMA_WRITE_FIRST):
                case OP(RDMA_WRITE_ONLY):
                case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
                        goto rdma_first;

                default:
                        dev->n_pkt_drops++;
                        goto done;
                }
        }
        /* Check for opcode sequence errors. */
        switch (qp->r_state) {
        case OP(SEND_FIRST):
        case OP(SEND_MIDDLE):
                if (opcode == OP(SEND_MIDDLE) ||
                    opcode == OP(SEND_LAST) ||
                    opcode == OP(SEND_LAST_WITH_IMMEDIATE))
                        break;
                goto inv;

        case OP(RDMA_WRITE_FIRST):
        case OP(RDMA_WRITE_MIDDLE):
                if (opcode == OP(RDMA_WRITE_MIDDLE) ||
                    opcode == OP(RDMA_WRITE_LAST) ||
                    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
                        break;
                goto inv;

        default:
                if (opcode == OP(SEND_FIRST) ||
                    opcode == OP(SEND_ONLY) ||
                    opcode == OP(SEND_ONLY_WITH_IMMEDIATE) ||
                    opcode == OP(RDMA_WRITE_FIRST) ||
                    opcode == OP(RDMA_WRITE_ONLY) ||
                    opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
                        break;
                goto inv;
        }

        /* OK, process the packet. */
        switch (opcode) {
        case OP(SEND_FIRST):
        case OP(SEND_ONLY):
        case OP(SEND_ONLY_WITH_IMMEDIATE):
        send_first:
                if (qp->r_reuse_sge) {
                        qp->r_reuse_sge = 0;
                        qp->r_sge = qp->s_rdma_sge;
                } else if (!ipath_get_rwqe(qp, 0)) {
                        dev->n_pkt_drops++;
                        goto done;
                }
                /* Save the WQE so we can reuse it in case of an error. */
                qp->s_rdma_sge = qp->r_sge;
                qp->r_rcv_len = 0;
                if (opcode == OP(SEND_ONLY))
                        goto send_last;
                else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
                        goto send_last_imm;
                /* FALLTHROUGH */
        case OP(SEND_MIDDLE):
                /* Payload must be one PMTU and fit the posted rwqe. */
                if (unlikely(tlen != (hdrsize + pmtu + 4))) {
                        qp->r_reuse_sge = 1;
                        dev->n_pkt_drops++;
                        goto done;
                }
                qp->r_rcv_len += pmtu;
                if (unlikely(qp->r_rcv_len > qp->r_len)) {
                        qp->r_reuse_sge = 1;
                        dev->n_pkt_drops++;
                        goto done;
                }
                ipath_copy_sge(&qp->r_sge, data, pmtu);
                break;

        case OP(SEND_LAST_WITH_IMMEDIATE):
        send_last_imm:
                if (header_in_data) {
                        wc.imm_data = *(__be32 *) data;
                        data += sizeof(__be32);
                } else {
                        /* Immediate data comes after BTH */
                        wc.imm_data = ohdr->u.imm_data;
                }
                hdrsize += 4;
                wc.wc_flags = IB_WC_WITH_IMM;
                /* FALLTHROUGH */
        case OP(SEND_LAST):
        send_last:
                /* Get the number of bytes the message was padded by. */
                pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
                /* Check for invalid length. */
                /* XXX LAST len should be >= 1 */
                if (unlikely(tlen < (hdrsize + pad + 4))) {
                        qp->r_reuse_sge = 1;
                        dev->n_pkt_drops++;
                        goto done;
                }
                /* Don't count the CRC. */
                tlen -= (hdrsize + pad + 4);
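                /* tlen is now just the payload: headers, pad, and ICRC removed. */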
                wc.byte_len = tlen + qp->r_rcv_len;
                if (unlikely(wc.byte_len > qp->r_len)) {
                        qp->r_reuse_sge = 1;
                        dev->n_pkt_drops++;
                        goto done;
                }
                /* XXX Need to free SGEs */
        last_imm:
                ipath_copy_sge(&qp->r_sge, data, tlen);
                wc.wr_id = qp->r_wr_id;
                wc.status = IB_WC_SUCCESS;
                wc.opcode = IB_WC_RECV;
                wc.vendor_err = 0;
                wc.qp_num = qp->ibqp.qp_num;
                wc.src_qp = qp->remote_qpn;
                wc.pkey_index = 0;
                wc.slid = qp->remote_ah_attr.dlid;
                wc.sl = qp->remote_ah_attr.sl;
                wc.dlid_path_bits = 0;
                wc.port_num = 0;
                /* Signal completion event if the solicited bit is set. */
                ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
                               (ohdr->bth[0] &
                                __constant_cpu_to_be32(1 << 23)) != 0);
                break;

        case OP(RDMA_WRITE_FIRST):
        case OP(RDMA_WRITE_ONLY):
        case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):        /* consume RWQE */
        rdma_first:
                /* RETH comes after BTH */
                if (!header_in_data)
                        reth = &ohdr->u.rc.reth;
                else {
                        reth = (struct ib_reth *)data;
                        data += sizeof(*reth);
                }
                hdrsize += sizeof(*reth);
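                /* The RETH supplies the remote vaddr, rkey, and total DMA length. */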
                qp->r_len = be32_to_cpu(reth->length);
                qp->r_rcv_len = 0;
                if (qp->r_len != 0) {
                        u32 rkey = be32_to_cpu(reth->rkey);
                        u64 vaddr = be64_to_cpu(reth->vaddr);

                        /* Check rkey */
                        if (unlikely(!ipath_rkey_ok(
                                             dev, &qp->r_sge, qp->r_len,
                                             vaddr, rkey,
                                             IB_ACCESS_REMOTE_WRITE))) {
                                dev->n_pkt_drops++;
                                goto done;
                        }
                } else {
                        qp->r_sge.sg_list = NULL;
                        qp->r_sge.sge.mr = NULL;
                        qp->r_sge.sge.vaddr = NULL;
                        qp->r_sge.sge.length = 0;
                        qp->r_sge.sge.sge_length = 0;
                }
                if (unlikely(!(qp->qp_access_flags &
                               IB_ACCESS_REMOTE_WRITE))) {
                        dev->n_pkt_drops++;
                        goto done;
                }
                if (opcode == OP(RDMA_WRITE_ONLY))
                        goto rdma_last;
                else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
                        goto rdma_last_imm;
                /* FALLTHROUGH */
        case OP(RDMA_WRITE_MIDDLE):
                /* Payload must be one PMTU and fit the posted rwqe. */
                if (unlikely(tlen != (hdrsize + pmtu + 4))) {
                        dev->n_pkt_drops++;
                        goto done;
                }
                qp->r_rcv_len += pmtu;
                if (unlikely(qp->r_rcv_len > qp->r_len)) {
                        dev->n_pkt_drops++;
                        goto done;
                }
                ipath_copy_sge(&qp->r_sge, data, pmtu);
                break;

        case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
        rdma_last_imm:
                /* Get the number of bytes the message was padded by. */
                pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
                /* Check for invalid length. */
                /* XXX LAST len should be >= 1 */
                if (unlikely(tlen < (hdrsize + pad + 4))) {
                        dev->n_pkt_drops++;
                        goto done;
                }
                /* Don't count the CRC. */
                tlen -= (hdrsize + pad + 4);
                if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) {
                        dev->n_pkt_drops++;
                        goto done;
                }
                if (qp->r_reuse_sge)
                        qp->r_reuse_sge = 0;
                else if (!ipath_get_rwqe(qp, 1)) {
                        dev->n_pkt_drops++;
                        goto done;
                }
                if (header_in_data) {
                        wc.imm_data = *(__be32 *) data;
                        data += sizeof(__be32);
                } else {
                        /* Immediate data comes after BTH */
                        wc.imm_data = ohdr->u.imm_data;
                }
                hdrsize += 4;
                wc.wc_flags = IB_WC_WITH_IMM;
                wc.byte_len = 0;
                goto last_imm;

        case OP(RDMA_WRITE_LAST):
        rdma_last:
                /* Get the number of bytes the message was padded by. */
                pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
                /* Check for invalid length. */
                /* XXX LAST len should be >= 1 */
                if (unlikely(tlen < (hdrsize + pad + 4))) {
                        dev->n_pkt_drops++;
                        goto done;
                }
                /* Don't count the CRC. */
                tlen -= (hdrsize + pad + 4);
                if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) {
                        dev->n_pkt_drops++;
                        goto done;
                }
                ipath_copy_sge(&qp->r_sge, data, tlen);
                break;

        default:
                /* Drop packet for unknown opcodes. */
                spin_unlock_irqrestore(&qp->r_rq.lock, flags);
                dev->n_pkt_drops++;
                goto bail;
        }
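
        /*
         * The packet was consumed: advance the expected PSN and
         * remember the opcode for the next sequence check.
         */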
        qp->r_psn++;
        qp->r_state = opcode;
done:
        spin_unlock_irqrestore(&qp->r_rq.lock, flags);

bail:
        return;
}