ipath_rc.c
/*
 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ipath_verbs.h"
#include "ipath_kernel.h"

/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_RC_##x
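/* e.g. OP(SEND_FIRST) expands to IB_OPCODE_RC_SEND_FIRST */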

static u32 restart_sge(struct ipath_sge_state *ss, struct ipath_swqe *wqe,
		       u32 psn, u32 pmtu)
{
	u32 len;

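	/*
	 * Each PSN past the WQE's first PSN represents one pmtu-sized
	 * chunk already sent; skip that many bytes so the send resumes
	 * at the right offset.
	 */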
	len = ((psn - wqe->psn) & IPATH_PSN_MASK) * pmtu;
	ss->sge = wqe->sg_list[0];
	ss->sg_list = wqe->sg_list + 1;
	ss->num_sge = wqe->wr.num_sge;
	ipath_skip_sge(ss, len);
	return wqe->length - len;
}

/**
 * ipath_init_restart - initialize the qp->s_sge after a restart
 * @qp: the QP whose SGE we're restarting
 * @wqe: the work queue entry to initialize the QP's SGE from
 *
 * The QP s_lock should be held and interrupts disabled.
 */
static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe)
{
	struct ipath_ibdev *dev;

	qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn,
				ib_mtu_enum_to_int(qp->path_mtu));
	dev = to_idev(qp->ibqp.device);
	spin_lock(&dev->pending_lock);
	if (list_empty(&qp->timerwait))
		list_add_tail(&qp->timerwait,
			      &dev->pending[dev->pending_index]);
	spin_unlock(&dev->pending_lock);
}

/**
 * ipath_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
 * @dev: the device for this QP
 * @qp: a pointer to the QP
 * @ohdr: a pointer to the IB header being constructed
 * @pmtu: the path MTU
 *
 * Return 1 if constructed; otherwise, return 0.
 * Note that we are in the responder's side of the QP context.
 * Note the QP s_lock must be held.
 */
static int ipath_make_rc_ack(struct ipath_ibdev *dev, struct ipath_qp *qp,
			     struct ipath_other_headers *ohdr, u32 pmtu)
{
	struct ipath_ack_entry *e;
	u32 hwords;
	u32 len;
	u32 bth0;
	u32 bth2;

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;

	switch (qp->s_ack_state) {
	case OP(RDMA_READ_RESPONSE_LAST):
	case OP(RDMA_READ_RESPONSE_ONLY):
	case OP(ATOMIC_ACKNOWLEDGE):
		/*
		 * We can increment the tail pointer now that the last
		 * response has been sent instead of only being
		 * constructed.
		 */
		if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC)
			qp->s_tail_ack_queue = 0;
		/* FALLTHROUGH */
	case OP(SEND_ONLY):
	case OP(ACKNOWLEDGE):
		/* Check for no next entry in the queue. */
		if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
			if (qp->s_flags & IPATH_S_ACK_PENDING)
				goto normal;
			qp->s_ack_state = OP(ACKNOWLEDGE);
			goto bail;
		}

		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST)) {
			/* Copy SGE state in case we need to resend */
			qp->s_ack_rdma_sge = e->rdma_sge;
			qp->s_cur_sge = &qp->s_ack_rdma_sge;
			len = e->rdma_sge.sge.sge_length;
			if (len > pmtu) {
				len = pmtu;
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
			} else {
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
				e->sent = 1;
			}
			ohdr->u.aeth = ipath_compute_aeth(qp);
			hwords++;
			qp->s_ack_rdma_psn = e->psn;
			bth2 = qp->s_ack_rdma_psn++ & IPATH_PSN_MASK;
		} else {
			/* COMPARE_SWAP or FETCH_ADD */
			qp->s_cur_sge = NULL;
			len = 0;
			qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
			ohdr->u.at.aeth = ipath_compute_aeth(qp);
			ohdr->u.at.atomic_ack_eth[0] =
				cpu_to_be32(e->atomic_data >> 32);
			ohdr->u.at.atomic_ack_eth[1] =
				cpu_to_be32(e->atomic_data);
			hwords += sizeof(ohdr->u.at) / sizeof(u32);
			bth2 = e->psn;
			e->sent = 1;
		}
		bth0 = qp->s_ack_state << 24;
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_READ_RESPONSE_MIDDLE):
		len = qp->s_ack_rdma_sge.sge.sge_length;
		if (len > pmtu)
			len = pmtu;
		else {
			ohdr->u.aeth = ipath_compute_aeth(qp);
			hwords++;
			qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
			qp->s_ack_queue[qp->s_tail_ack_queue].sent = 1;
		}
		bth0 = qp->s_ack_state << 24;
		bth2 = qp->s_ack_rdma_psn++ & IPATH_PSN_MASK;
		break;

	default:
	normal:
		/*
		 * Send a regular ACK.
		 * Set the s_ack_state so we wait until after sending
		 * the ACK before setting s_ack_state to ACKNOWLEDGE
		 * (see above).
		 */
		qp->s_ack_state = OP(SEND_ONLY);
		qp->s_flags &= ~IPATH_S_ACK_PENDING;
		qp->s_cur_sge = NULL;
		if (qp->s_nak_state)
			ohdr->u.aeth =
				cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
					    (qp->s_nak_state <<
					     IPATH_AETH_CREDIT_SHIFT));
		else
			ohdr->u.aeth = ipath_compute_aeth(qp);
		hwords++;
		len = 0;
		bth0 = OP(ACKNOWLEDGE) << 24;
		bth2 = qp->s_ack_psn & IPATH_PSN_MASK;
	}
	qp->s_hdrwords = hwords;
	qp->s_cur_size = len;
	ipath_make_ruc_header(dev, qp, ohdr, bth0, bth2);
	return 1;

bail:
	return 0;
}

/**
 * ipath_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
 * @qp: a pointer to the QP
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int ipath_make_rc_req(struct ipath_qp *qp)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ipath_other_headers *ohdr;
	struct ipath_sge_state *ss;
	struct ipath_swqe *wqe;
	u32 hwords;
	u32 len;
	u32 bth0;
	u32 bth2;
	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
	char newreq;
	unsigned long flags;
	int ret = 0;

	ohdr = &qp->s_hdr.u.oth;
	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
		ohdr = &qp->s_hdr.u.l.oth;

	/*
	 * The lock is needed to synchronize between the sending tasklet,
	 * the receive interrupt handler, and timeout resends.
	 */
	spin_lock_irqsave(&qp->s_lock, flags);
	/* Sending responses has higher priority than sending requests. */
	if ((qp->r_head_ack_queue != qp->s_tail_ack_queue ||
	     (qp->s_flags & IPATH_S_ACK_PENDING) ||
	     qp->s_ack_state != OP(ACKNOWLEDGE)) &&
	    ipath_make_rc_ack(dev, qp, ohdr, pmtu))
		goto done;

	if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) ||
	    qp->s_rnr_timeout || qp->s_wait_credit)
		goto bail;

	/* Limit the number of packets sent without an ACK. */
	if (ipath_cmp24(qp->s_psn, qp->s_last_psn + IPATH_PSN_CREDIT) > 0) {
		qp->s_wait_credit = 1;
		dev->n_rc_stalls++;
		goto bail;
	}

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;
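	/*
	 * BTH dword 0 layout: opcode in bits 31:24, the SE (solicited
	 * event) bit at 23, the M (MigReq) bit at 22, the pad count in
	 * bits 21:20, and the P_Key in the low 16 bits; hence the
	 * "1 << 23" for IB_SEND_SOLICITED below.
	 */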
	bth0 = 1 << 22; /* Set M bit */

	/* Send a request. */
	wqe = get_swqe_ptr(qp, qp->s_cur);
	switch (qp->s_state) {
	default:
		/*
		 * Resend an old request or start a new one.
		 *
		 * We keep track of the current SWQE so that
		 * we don't reset the "furthest progress" state
		 * if we need to back up.
		 */
		newreq = 0;
		if (qp->s_cur == qp->s_tail) {
			/* Check if send work queue is empty. */
			if (qp->s_tail == qp->s_head)
				goto bail;
			/*
			 * If a fence is requested, wait for previous
			 * RDMA read and atomic operations to finish.
			 */
			if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
			    qp->s_num_rd_atomic) {
				qp->s_flags |= IPATH_S_FENCE_PENDING;
				goto bail;
			}
			wqe->psn = qp->s_next_psn;
			newreq = 1;
		}
		/*
		 * Note that we have to be careful not to modify the
		 * original work request since we may need to resend
		 * it.
		 */
		len = wqe->length;
		ss = &qp->s_sge;
		bth2 = 0;
		switch (wqe->wr.opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
			/* If no credit, return. */
			if (qp->s_lsn != (u32) -1 &&
			    ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0)
				goto bail;
			wqe->lpsn = wqe->psn;
			if (len > pmtu) {
				wqe->lpsn += (len - 1) / pmtu;
				qp->s_state = OP(SEND_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_SEND)
				qp->s_state = OP(SEND_ONLY);
			else {
				qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the BTH */
				ohdr->u.imm_data = wqe->wr.imm_data;
				hwords += 1;
			}
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= 1 << 23;
			bth2 = 1 << 31;	/* Request ACK. */
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_WRITE:
			if (newreq && qp->s_lsn != (u32) -1)
				qp->s_lsn++;
			/* FALLTHROUGH */
		case IB_WR_RDMA_WRITE_WITH_IMM:
			/* If no credit, return. */
			if (qp->s_lsn != (u32) -1 &&
			    ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0)
				goto bail;
			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->wr.wr.rdma.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / sizeof(u32);
			wqe->lpsn = wqe->psn;
			if (len > pmtu) {
				wqe->lpsn += (len - 1) / pmtu;
				qp->s_state = OP(RDMA_WRITE_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
				qp->s_state = OP(RDMA_WRITE_ONLY);
			else {
				qp->s_state =
					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after RETH */
				ohdr->u.rc.imm_data = wqe->wr.imm_data;
				hwords += 1;
				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
					bth0 |= 1 << 23;
			}
			bth2 = 1 << 31;	/* Request ACK. */
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_READ:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= IPATH_S_RDMAR_PENDING;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (qp->s_lsn != (u32) -1)
					qp->s_lsn++;
				/*
				 * Adjust s_next_psn to count the
				 * expected number of responses.
				 */
				if (len > pmtu)
					qp->s_next_psn += (len - 1) / pmtu;
				wqe->lpsn = qp->s_next_psn++;
			}
			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->wr.wr.rdma.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			qp->s_state = OP(RDMA_READ_REQUEST);
			hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
			ss = NULL;
			len = 0;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= IPATH_S_RDMAR_PENDING;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (qp->s_lsn != (u32) -1)
					qp->s_lsn++;
				wqe->lpsn = wqe->psn;
			}
			if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
				qp->s_state = OP(COMPARE_SWAP);
				ohdr->u.atomic_eth.swap_data = cpu_to_be64(
					wqe->wr.wr.atomic.swap);
				ohdr->u.atomic_eth.compare_data = cpu_to_be64(
					wqe->wr.wr.atomic.compare_add);
			} else {
				qp->s_state = OP(FETCH_ADD);
				ohdr->u.atomic_eth.swap_data = cpu_to_be64(
					wqe->wr.wr.atomic.compare_add);
				ohdr->u.atomic_eth.compare_data = 0;
			}
			ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32(
				wqe->wr.wr.atomic.remote_addr >> 32);
			ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32(
				wqe->wr.wr.atomic.remote_addr);
			ohdr->u.atomic_eth.rkey = cpu_to_be32(
				wqe->wr.wr.atomic.rkey);
			hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
			ss = NULL;
			len = 0;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		default:
			goto bail;
		}
		qp->s_sge.sge = wqe->sg_list[0];
		qp->s_sge.sg_list = wqe->sg_list + 1;
		qp->s_sge.num_sge = wqe->wr.num_sge;
		qp->s_len = wqe->length;
		if (newreq) {
			qp->s_tail++;
			if (qp->s_tail >= qp->s_size)
				qp->s_tail = 0;
		}
		bth2 |= qp->s_psn & IPATH_PSN_MASK;
		if (wqe->wr.opcode == IB_WR_RDMA_READ)
			qp->s_psn = wqe->lpsn + 1;
		else {
			qp->s_psn++;
			if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0)
				qp->s_next_psn = qp->s_psn;
		}
		/*
		 * Put the QP on the pending list so lost ACKs will cause
		 * a retry.  More than one request can be pending so the
		 * QP may already be on the dev->pending list.
		 */
		spin_lock(&dev->pending_lock);
		if (list_empty(&qp->timerwait))
			list_add_tail(&qp->timerwait,
				      &dev->pending[dev->pending_index]);
		spin_unlock(&dev->pending_lock);
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		/*
		 * This case can only happen if a send is restarted.
		 * See ipath_restart_rc().
		 */
		ipath_init_restart(qp, wqe);
		/* FALLTHROUGH */
	case OP(SEND_FIRST):
		qp->s_state = OP(SEND_MIDDLE);
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		bth2 = qp->s_psn++ & IPATH_PSN_MASK;
		if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0)
			qp->s_next_psn = qp->s_psn;
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_SEND)
			qp->s_state = OP(SEND_LAST);
		else {
			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.imm_data;
			hwords += 1;
		}
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= 1 << 23;
		bth2 |= 1 << 31;	/* Request ACK. */
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_LAST):
		/*
		 * This case can only happen if an RDMA write is restarted.
		 * See ipath_restart_rc().
		 */
		ipath_init_restart(qp, wqe);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_FIRST):
		qp->s_state = OP(RDMA_WRITE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		bth2 = qp->s_psn++ & IPATH_PSN_MASK;
		if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0)
			qp->s_next_psn = qp->s_psn;
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
			qp->s_state = OP(RDMA_WRITE_LAST);
		else {
			qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.imm_data;
			hwords += 1;
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= 1 << 23;
		}
		bth2 |= 1 << 31;	/* Request ACK. */
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/*
		 * This case can only happen if an RDMA read is restarted.
		 * See ipath_restart_rc().
		 */
		ipath_init_restart(qp, wqe);
		len = ((qp->s_psn - wqe->psn) & IPATH_PSN_MASK) * pmtu;
		ohdr->u.rc.reth.vaddr =
			cpu_to_be64(wqe->wr.wr.rdma.remote_addr + len);
		ohdr->u.rc.reth.rkey =
			cpu_to_be32(wqe->wr.wr.rdma.rkey);
		ohdr->u.rc.reth.length = cpu_to_be32(qp->s_len);
		qp->s_state = OP(RDMA_READ_REQUEST);
		hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
		bth2 = qp->s_psn++ & IPATH_PSN_MASK;
		if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0)
			qp->s_next_psn = qp->s_psn;
		ss = NULL;
		len = 0;
		qp->s_cur++;
		if (qp->s_cur == qp->s_size)
			qp->s_cur = 0;
		break;
	}
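	/*
	 * Ask for an ACK while we still have credit; this pairs with the
	 * s_wait_credit stall above so the pipe is refreshed before it
	 * drains completely.
	 */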
	if (ipath_cmp24(qp->s_psn, qp->s_last_psn + IPATH_PSN_CREDIT - 1) >= 0)
		bth2 |= 1 << 31;	/* Request ACK. */
	qp->s_len -= len;
	qp->s_hdrwords = hwords;
	qp->s_cur_sge = ss;
	qp->s_cur_size = len;
	ipath_make_ruc_header(dev, qp, ohdr, bth0 | (qp->s_state << 24), bth2);
done:
	ret = 1;
bail:
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return ret;
}

/**
 * send_rc_ack - Construct an ACK packet and send it
 * @qp: a pointer to the QP
 *
 * This is called from ipath_rc_rcv() and only uses the receive
 * side QP state.
 * Note that RDMA reads and atomics are handled in the
 * send side QP state and tasklet.
 */
static void send_rc_ack(struct ipath_qp *qp)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	u16 lrh0;
	u32 bth0;
	u32 hwords;
	struct ipath_ib_header hdr;
	struct ipath_other_headers *ohdr;
	unsigned long flags;

	/* Don't send ACK or NAK if an RDMA read or atomic is pending. */
	if (qp->r_head_ack_queue != qp->s_tail_ack_queue ||
	    (qp->s_flags & IPATH_S_ACK_PENDING) ||
	    qp->s_ack_state != OP(ACKNOWLEDGE))
		goto queue_ack;

	/* Construct the header. */
	ohdr = &hdr.u.oth;
	lrh0 = IPATH_LRH_BTH;
	/* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
	hwords = 6;
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
		hwords += ipath_make_grh(dev, &hdr.u.l.grh,
					 &qp->remote_ah_attr.grh,
					 hwords, 0);
		ohdr = &hdr.u.l.oth;
		lrh0 = IPATH_LRH_GRH;
	}
	/* read pkey_index w/o lock (it's atomic) */
	bth0 = ipath_get_pkey(dev->dd, qp->s_pkey_index) |
		(OP(ACKNOWLEDGE) << 24) | (1 << 22);
	if (qp->r_nak_state)
		ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
					   (qp->r_nak_state <<
					    IPATH_AETH_CREDIT_SHIFT));
	else
		ohdr->u.aeth = ipath_compute_aeth(qp);
	lrh0 |= qp->remote_ah_attr.sl << 4;
	hdr.lrh[0] = cpu_to_be16(lrh0);
	hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
	hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid);
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & IPATH_PSN_MASK);

	/*
	 * If we can send the ACK, clear the ACK state.
	 */
	if (ipath_verbs_send(qp, &hdr, hwords, NULL, 0) == 0) {
		dev->n_unicast_xmit++;
		goto done;
	}

	/*
	 * We are out of PIO buffers at the moment.
	 * Pass responsibility for sending the ACK to the
	 * send tasklet so that when a PIO buffer becomes
	 * available, the ACK is sent ahead of other outgoing
	 * packets.
	 */
	dev->n_rc_qacks++;

queue_ack:
	spin_lock_irqsave(&qp->s_lock, flags);
	qp->s_flags |= IPATH_S_ACK_PENDING;
	qp->s_nak_state = qp->r_nak_state;
	qp->s_ack_psn = qp->r_ack_psn;
	spin_unlock_irqrestore(&qp->s_lock, flags);

	/* Call ipath_do_rc_send() in another thread. */
	tasklet_hi_schedule(&qp->s_task);

done:
	return;
}

/**
 * reset_psn - reset the QP state to send starting from PSN
 * @qp: the QP
 * @psn: the packet sequence number to restart at
 *
 * This is called from ipath_rc_rcv() to process an incoming RC ACK
 * for the given QP.
 * Called at interrupt level with the QP s_lock held.
 */
static void reset_psn(struct ipath_qp *qp, u32 psn)
{
	u32 n = qp->s_last;
	struct ipath_swqe *wqe = get_swqe_ptr(qp, n);
	u32 opcode;

	qp->s_cur = n;

	/*
	 * If we are starting the request from the beginning,
	 * let the normal send code handle initialization.
	 */
	if (ipath_cmp24(psn, wqe->psn) <= 0) {
		qp->s_state = OP(SEND_LAST);
		goto done;
	}

	/* Find the work request opcode corresponding to the given PSN. */
	opcode = wqe->wr.opcode;
	for (;;) {
		int diff;

		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
		wqe = get_swqe_ptr(qp, n);
		diff = ipath_cmp24(psn, wqe->psn);
		if (diff < 0)
			break;
		qp->s_cur = n;
		/*
		 * If we are starting the request from the beginning,
		 * let the normal send code handle initialization.
		 */
		if (diff == 0) {
			qp->s_state = OP(SEND_LAST);
			goto done;
		}
		opcode = wqe->wr.opcode;
	}

	/*
	 * Set the state to restart in the middle of a request.
	 * Don't change the s_sge, s_cur_sge, or s_cur_size.
	 * See ipath_do_rc_send().
	 */
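	/*
	 * The RDMA_READ_RESPONSE_* opcodes are reused here as internal
	 * "restart in the middle" markers; ipath_make_rc_req() has a
	 * matching case for each that re-initializes the send state.
	 */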
	switch (opcode) {
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
		break;

	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
		break;

	case IB_WR_RDMA_READ:
		qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		break;

	default:
		/*
		 * This case shouldn't happen since there is only
		 * one PSN per req.
		 */
		qp->s_state = OP(SEND_LAST);
	}
done:
	qp->s_psn = psn;
}

/**
 * ipath_restart_rc - back up requester to resend the last un-ACKed request
 * @qp: the QP to restart
 * @psn: packet sequence number for the request
 * @wc: the work completion request
 *
 * The QP s_lock should be held and interrupts disabled.
 */
void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
{
	struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
	struct ipath_ibdev *dev;

	if (qp->s_retry == 0) {
		wc->wr_id = wqe->wr.wr_id;
		wc->status = IB_WC_RETRY_EXC_ERR;
		wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
		wc->vendor_err = 0;
		wc->byte_len = 0;
		wc->qp = &qp->ibqp;
		wc->imm_data = 0;
		wc->src_qp = qp->remote_qpn;
		wc->wc_flags = 0;
		wc->pkey_index = 0;
		wc->slid = qp->remote_ah_attr.dlid;
		wc->sl = qp->remote_ah_attr.sl;
		wc->dlid_path_bits = 0;
		wc->port_num = 0;
		ipath_sqerror_qp(qp, wc);
		goto bail;
	}
	qp->s_retry--;

	/*
	 * Remove the QP from the timeout queue.
	 * Note: it may already have been removed by ipath_ib_timer().
	 */
	dev = to_idev(qp->ibqp.device);
	spin_lock(&dev->pending_lock);
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	spin_unlock(&dev->pending_lock);

	if (wqe->wr.opcode == IB_WR_RDMA_READ)
		dev->n_rc_resends++;
	else
		dev->n_rc_resends += (qp->s_psn - psn) & IPATH_PSN_MASK;

	reset_psn(qp, psn);
	tasklet_hi_schedule(&qp->s_task);

bail:
	return;
}
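
/*
 * Record the last ACKed PSN; if the sender was stalled waiting for
 * ACK credit, kick the send tasklet now that progress was made.
 */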
static inline void update_last_psn(struct ipath_qp *qp, u32 psn)
{
	if (qp->s_last_psn != psn) {
		qp->s_last_psn = psn;
		if (qp->s_wait_credit) {
			qp->s_wait_credit = 0;
			tasklet_hi_schedule(&qp->s_task);
		}
	}
}

/**
 * do_rc_ack - process an incoming RC ACK
 * @qp: the QP the ACK came in on
 * @aeth: the AETH from the packet
 * @psn: the packet sequence number of the ACK
 * @opcode: the opcode of the request that resulted in the ACK
 * @val: the 64-bit result of an atomic operation, if any
 *
 * This is called from ipath_rc_rcv_resp() to process an incoming RC ACK
 * for the given QP.
 * Called at interrupt level with the QP s_lock held and interrupts disabled.
 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
 */
static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode,
		     u64 val)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_wc wc;
	struct ipath_swqe *wqe;
	int ret = 0;
	u32 ack_psn;
	int diff;

	/*
	 * Remove the QP from the timeout queue (or RNR timeout queue).
	 * If ipath_ib_timer() has already removed it,
	 * it's OK since we hold the QP s_lock and ipath_restart_rc()
	 * just won't find anything to restart if we ACK everything.
	 */
	spin_lock(&dev->pending_lock);
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	spin_unlock(&dev->pending_lock);
	/*
	 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
	 * requests and implicitly NAK RDMA read and atomic requests issued
	 * before the NAK'ed request.  The MSN won't include the NAK'ed
	 * request but will include the ACK'ed request(s).
	 */
	ack_psn = psn;
	if (aeth >> 29)
		ack_psn--;
	wqe = get_swqe_ptr(qp, qp->s_last);

	/*
	 * The MSN might be for a later WQE than the PSN indicates so
	 * only complete WQEs that the PSN finishes.
	 */
	while ((diff = ipath_cmp24(ack_psn, wqe->lpsn)) >= 0) {
		/*
		 * RDMA_READ_RESPONSE_ONLY is a special case since
		 * we want to generate completion events for everything
		 * before the RDMA read, copy the data, then generate
		 * the completion for the read.
		 */
		if (wqe->wr.opcode == IB_WR_RDMA_READ &&
		    opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
		    diff == 0) {
			ret = 1;
			goto bail;
		}
		/*
		 * If this request is an RDMA read or atomic, and the ACK is
		 * for a later operation, this ACK NAKs the RDMA read or
		 * atomic.  In other words, only an RDMA_READ_LAST or ONLY
		 * can ACK an RDMA read and likewise for atomic ops.  Note
		 * that the NAK case can only happen if relaxed ordering is
		 * used and requests are sent after an RDMA read or atomic
		 * is sent but before the response is received.
		 */
		if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
		     (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
		    ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		      wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
		     (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
			/*
			 * The last valid PSN seen is the previous
			 * request's.
			 */
			update_last_psn(qp, wqe->psn - 1);
			/* Retry this request. */
			ipath_restart_rc(qp, wqe->psn, &wc);
			/*
			 * No need to process the ACK/NAK since we are
			 * restarting an earlier request.
			 */
			goto bail;
		}
		if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
			*(u64 *) wqe->sg_list[0].vaddr = val;
		if (qp->s_num_rd_atomic &&
		    (wqe->wr.opcode == IB_WR_RDMA_READ ||
		     wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		     wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
			qp->s_num_rd_atomic--;
			/* Restart sending task if fence is complete */
			if ((qp->s_flags & IPATH_S_FENCE_PENDING) &&
			    !qp->s_num_rd_atomic) {
				qp->s_flags &= ~IPATH_S_FENCE_PENDING;
				tasklet_hi_schedule(&qp->s_task);
			} else if (qp->s_flags & IPATH_S_RDMAR_PENDING) {
				qp->s_flags &= ~IPATH_S_RDMAR_PENDING;
				tasklet_hi_schedule(&qp->s_task);
			}
		}
		/* Post a send completion queue entry if requested. */
		if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) ||
		    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
			wc.wr_id = wqe->wr.wr_id;
			wc.status = IB_WC_SUCCESS;
			wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
			wc.vendor_err = 0;
			wc.byte_len = wqe->length;
			wc.imm_data = 0;
			wc.qp = &qp->ibqp;
			wc.src_qp = qp->remote_qpn;
			wc.wc_flags = 0;
			wc.pkey_index = 0;
			wc.slid = qp->remote_ah_attr.dlid;
			wc.sl = qp->remote_ah_attr.sl;
			wc.dlid_path_bits = 0;
			wc.port_num = 0;
			ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
		}
		qp->s_retry = qp->s_retry_cnt;
		/*
		 * If we are completing a request which is in the process of
		 * being resent, we can stop resending it since we know the
		 * responder has already seen it.
		 */
		if (qp->s_last == qp->s_cur) {
			if (++qp->s_cur >= qp->s_size)
				qp->s_cur = 0;
			qp->s_last = qp->s_cur;
			if (qp->s_last == qp->s_tail)
				break;
			wqe = get_swqe_ptr(qp, qp->s_cur);
			qp->s_state = OP(SEND_LAST);
			qp->s_psn = wqe->psn;
		} else {
			if (++qp->s_last >= qp->s_size)
				qp->s_last = 0;
			if (qp->s_last == qp->s_tail)
				break;
			wqe = get_swqe_ptr(qp, qp->s_last);
		}
	}
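	/*
	 * AETH layout: the top 3 bits select ACK (0), RNR NAK (1), or
	 * NAK (3); bits 28:24 carry the credit count, RNR timer, or NAK
	 * code; the low 24 bits are the MSN.
	 */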
	switch (aeth >> 29) {
	case 0:		/* ACK */
		dev->n_rc_acks++;
		/* If this is a partial ACK, reset the retransmit timer. */
		if (qp->s_last != qp->s_tail) {
			spin_lock(&dev->pending_lock);
			if (list_empty(&qp->timerwait))
				list_add_tail(&qp->timerwait,
					      &dev->pending[dev->pending_index]);
			spin_unlock(&dev->pending_lock);
			/*
			 * If we get a partial ACK for a resent operation,
			 * we can stop resending the earlier packets and
			 * continue with the next packet the receiver wants.
			 */
			if (ipath_cmp24(qp->s_psn, psn) <= 0) {
				reset_psn(qp, psn + 1);
				tasklet_hi_schedule(&qp->s_task);
			}
		} else if (ipath_cmp24(qp->s_psn, psn) <= 0) {
			qp->s_state = OP(SEND_LAST);
			qp->s_psn = psn + 1;
		}
		ipath_get_credit(qp, aeth);
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		qp->s_retry = qp->s_retry_cnt;
		update_last_psn(qp, psn);
		ret = 1;
		goto bail;

	case 1:		/* RNR NAK */
		dev->n_rnr_naks++;
		if (qp->s_last == qp->s_tail)
			goto bail;
		if (qp->s_rnr_retry == 0) {
			wc.status = IB_WC_RNR_RETRY_EXC_ERR;
			goto class_b;
		}
		if (qp->s_rnr_retry_cnt < 7)
			qp->s_rnr_retry--;

		/* The last valid PSN is the previous PSN. */
		update_last_psn(qp, psn - 1);

		if (wqe->wr.opcode == IB_WR_RDMA_READ)
			dev->n_rc_resends++;
		else
			dev->n_rc_resends +=
				(qp->s_psn - psn) & IPATH_PSN_MASK;

		reset_psn(qp, psn);

		qp->s_rnr_timeout =
			ib_ipath_rnr_table[(aeth >> IPATH_AETH_CREDIT_SHIFT) &
					   IPATH_AETH_CREDIT_MASK];
		ipath_insert_rnr_queue(qp);
		goto bail;

	case 3:		/* NAK */
		if (qp->s_last == qp->s_tail)
			goto bail;
		/* The last valid PSN is the previous PSN. */
		update_last_psn(qp, psn - 1);
		switch ((aeth >> IPATH_AETH_CREDIT_SHIFT) &
			IPATH_AETH_CREDIT_MASK) {
		case 0:	/* PSN sequence error */
			dev->n_seq_naks++;
			/*
			 * Back up to the responder's expected PSN.
			 * Note that we might get a NAK in the middle of an
			 * RDMA READ response which terminates the RDMA
			 * READ.
			 */
			ipath_restart_rc(qp, psn, &wc);
			break;

		case 1:	/* Invalid Request */
			wc.status = IB_WC_REM_INV_REQ_ERR;
			dev->n_other_naks++;
			goto class_b;

		case 2:	/* Remote Access Error */
			wc.status = IB_WC_REM_ACCESS_ERR;
			dev->n_other_naks++;
			goto class_b;

		case 3:	/* Remote Operation Error */
			wc.status = IB_WC_REM_OP_ERR;
			dev->n_other_naks++;
		class_b:
			wc.wr_id = wqe->wr.wr_id;
			wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
			wc.vendor_err = 0;
			wc.byte_len = 0;
			wc.qp = &qp->ibqp;
			wc.imm_data = 0;
			wc.src_qp = qp->remote_qpn;
			wc.wc_flags = 0;
			wc.pkey_index = 0;
			wc.slid = qp->remote_ah_attr.dlid;
			wc.sl = qp->remote_ah_attr.sl;
			wc.dlid_path_bits = 0;
			wc.port_num = 0;
			ipath_sqerror_qp(qp, &wc);
			break;

		default:
			/* Ignore other reserved NAK error codes */
			goto reserved;
		}
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		goto bail;

	default:		/* 2: reserved */
	reserved:
		/* Ignore reserved NAK codes. */
		goto bail;
	}

bail:
	return ret;
}

/**
 * ipath_rc_rcv_resp - process an incoming RC response packet
 * @dev: the device this packet came in on
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @hdrsize: the header length
 * @pmtu: the path MTU
 * @header_in_data: true if part of the header data is in the data buffer
 *
 * This is called from ipath_rc_rcv() to process an incoming RC response
 * packet for the given QP.
 * Called at interrupt level.
 */
static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
				     struct ipath_other_headers *ohdr,
				     void *data, u32 tlen,
				     struct ipath_qp *qp,
				     u32 opcode,
				     u32 psn, u32 hdrsize, u32 pmtu,
				     int header_in_data)
{
	struct ipath_swqe *wqe;
	unsigned long flags;
	struct ib_wc wc;
	int diff;
	u32 pad;
	u32 aeth;
	u64 val;

	spin_lock_irqsave(&qp->s_lock, flags);

	/* Ignore invalid responses. */
	if (ipath_cmp24(psn, qp->s_next_psn) >= 0)
		goto ack_done;

	/* Ignore duplicate responses. */
	diff = ipath_cmp24(psn, qp->s_last_psn);
	if (unlikely(diff <= 0)) {
		/* Update credits for "ghost" ACKs */
		if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
			if (!header_in_data)
				aeth = be32_to_cpu(ohdr->u.aeth);
			else {
				aeth = be32_to_cpu(((__be32 *) data)[0]);
				data += sizeof(__be32);
			}
			if ((aeth >> 29) == 0)
				ipath_get_credit(qp, aeth);
		}
		goto ack_done;
	}

	if (unlikely(qp->s_last == qp->s_tail))
		goto ack_done;
	wqe = get_swqe_ptr(qp, qp->s_last);

	switch (opcode) {
	case OP(ACKNOWLEDGE):
	case OP(ATOMIC_ACKNOWLEDGE):
	case OP(RDMA_READ_RESPONSE_FIRST):
		if (!header_in_data)
			aeth = be32_to_cpu(ohdr->u.aeth);
		else {
			aeth = be32_to_cpu(((__be32 *) data)[0]);
			data += sizeof(__be32);
		}
		if (opcode == OP(ATOMIC_ACKNOWLEDGE)) {
			if (!header_in_data) {
				__be32 *p = ohdr->u.at.atomic_ack_eth;

				val = ((u64) be32_to_cpu(p[0]) << 32) |
					be32_to_cpu(p[1]);
			} else
				val = be64_to_cpu(((__be64 *) data)[0]);
		} else
			val = 0;
		if (!do_rc_ack(qp, aeth, psn, opcode, val) ||
		    opcode != OP(RDMA_READ_RESPONSE_FIRST))
			goto ack_done;
		hdrsize += 4;
		wqe = get_swqe_ptr(qp, qp->s_last);
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 * location.
		 */
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
						  wqe, psn, pmtu);
		goto read_middle;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/* no AETH, no ACK */
		if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
			dev->n_rdma_seq++;
			ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
			goto ack_done;
		}
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
	read_middle:
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto ack_len_err;
		if (unlikely(pmtu >= qp->s_rdma_read_len))
			goto ack_len_err;

		/* We got a response so update the timeout. */
		spin_lock(&dev->pending_lock);
		if (qp->s_rnr_timeout == 0 && !list_empty(&qp->timerwait))
			list_move_tail(&qp->timerwait,
				       &dev->pending[dev->pending_index]);
		spin_unlock(&dev->pending_lock);

		if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
			qp->s_retry = qp->s_retry_cnt;

		/*
		 * Update the RDMA receive state but do the copy w/o
		 * holding the locks and blocking interrupts.
		 */
		qp->s_rdma_read_len -= pmtu;
		update_last_psn(qp, psn);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		ipath_copy_sge(&qp->s_rdma_read_sge, data, pmtu);
		goto bail;

	case OP(RDMA_READ_RESPONSE_ONLY):
		if (!header_in_data)
			aeth = be32_to_cpu(ohdr->u.aeth);
		else
			aeth = be32_to_cpu(((__be32 *) data)[0]);
		if (!do_rc_ack(qp, aeth, psn, opcode, 0))
			goto ack_done;
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/*
		 * Check that the data size is >= 0 && <= pmtu.
		 * Remember to account for the AETH header (4) and
		 * ICRC (4).
		 */
		if (unlikely(tlen < (hdrsize + pad + 8)))
			goto ack_len_err;
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 * location.
		 */
		wqe = get_swqe_ptr(qp, qp->s_last);
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
						  wqe, psn, pmtu);
		goto read_last;

	case OP(RDMA_READ_RESPONSE_LAST):
		/* ACKs READ req. */
		if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
			dev->n_rdma_seq++;
			ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
			goto ack_done;
		}
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/*
		 * Check that the data size is >= 1 && <= pmtu.
		 * Remember to account for the AETH header (4) and
		 * ICRC (4).
		 */
		if (unlikely(tlen <= (hdrsize + pad + 8)))
			goto ack_len_err;
	read_last:
		tlen -= hdrsize + pad + 8;
		if (unlikely(tlen != qp->s_rdma_read_len))
			goto ack_len_err;
		if (!header_in_data)
			aeth = be32_to_cpu(ohdr->u.aeth);
		else {
			aeth = be32_to_cpu(((__be32 *) data)[0]);
			data += sizeof(__be32);
		}
		ipath_copy_sge(&qp->s_rdma_read_sge, data, tlen);
		(void) do_rc_ack(qp, aeth, psn,
				 OP(RDMA_READ_RESPONSE_LAST), 0);
		goto ack_done;
	}

ack_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
	goto bail;

ack_op_err:
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto ack_err;

ack_len_err:
	wc.status = IB_WC_LOC_LEN_ERR;
ack_err:
	wc.wr_id = wqe->wr.wr_id;
	wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
	wc.vendor_err = 0;
	wc.byte_len = 0;
	wc.imm_data = 0;
	wc.qp = &qp->ibqp;
	wc.src_qp = qp->remote_qpn;
	wc.wc_flags = 0;
	wc.pkey_index = 0;
	wc.slid = qp->remote_ah_attr.dlid;
	wc.sl = qp->remote_ah_attr.sl;
	wc.dlid_path_bits = 0;
	wc.port_num = 0;
	ipath_sqerror_qp(qp, &wc);
	spin_unlock_irqrestore(&qp->s_lock, flags);
bail:
	return;
}

/**
 * ipath_rc_rcv_error - process an incoming duplicate or error RC packet
 * @dev: the device this packet came in on
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @diff: the difference between the PSN and the expected PSN
 * @header_in_data: true if part of the header data is in the data buffer
 *
 * This is called from ipath_rc_rcv() to process an unexpected
 * incoming RC packet for the given QP.
 * Called at interrupt level.
 * Return 1 if no more processing is needed; otherwise return 0 to
 * schedule a response to be sent.
 */
static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
				     struct ipath_other_headers *ohdr,
				     void *data,
				     struct ipath_qp *qp,
				     u32 opcode,
				     u32 psn,
				     int diff,
				     int header_in_data)
{
	struct ipath_ack_entry *e;
	u8 i, prev;
	int old_req;
	unsigned long flags;

	if (diff > 0) {
		/*
		 * Packet sequence error.
		 * A NAK will ACK earlier sends and RDMA writes.
		 * Don't queue the NAK if we already sent one.
		 */
		if (!qp->r_nak_state) {
			qp->r_nak_state = IB_NAK_PSN_ERROR;
			/* Use the expected PSN. */
			qp->r_ack_psn = qp->r_psn;
			goto send_ack;
		}
		goto done;
	}

	/*
	 * Handle a duplicate request.  Don't re-execute SEND, RDMA
	 * write or atomic op.  Don't NAK errors, just silently drop
	 * the duplicate request.  Note that r_sge, r_len, and
	 * r_rcv_len may be in use so don't modify them.
	 *
	 * We are supposed to ACK the earliest duplicate PSN but we
	 * can coalesce an outstanding duplicate ACK.  We have to
	 * send the earliest so that RDMA reads can be restarted at
	 * the requester's expected PSN.
	 *
	 * First, find where this duplicate PSN falls within the
	 * ACKs previously sent.
	 */
	psn &= IPATH_PSN_MASK;
	e = NULL;
	old_req = 1;
	spin_lock_irqsave(&qp->s_lock, flags);
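	/*
	 * Scan the ACK queue backward, from the newest entry toward the
	 * oldest; old_req is cleared once the scan reaches or passes the
	 * tail, since the send tasklet is already at or before that entry.
	 */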
	for (i = qp->r_head_ack_queue; ; i = prev) {
		if (i == qp->s_tail_ack_queue)
			old_req = 0;
		if (i)
			prev = i - 1;
		else
			prev = IPATH_MAX_RDMA_ATOMIC;
		if (prev == qp->r_head_ack_queue) {
			e = NULL;
			break;
		}
		e = &qp->s_ack_queue[prev];
		if (!e->opcode) {
			e = NULL;
			break;
		}
		if (ipath_cmp24(psn, e->psn) >= 0) {
			if (prev == qp->s_tail_ack_queue)
				old_req = 0;
			break;
		}
	}
	switch (opcode) {
	case OP(RDMA_READ_REQUEST): {
		struct ib_reth *reth;
		u32 offset;
		u32 len;

		/*
		 * If we didn't find the RDMA read request in the ack queue,
		 * or the send tasklet is already backed up to send an
		 * earlier entry, we can ignore this request.
		 */
		if (!e || e->opcode != OP(RDMA_READ_REQUEST) || old_req)
			goto unlock_done;
		/* RETH comes after BTH */
		if (!header_in_data)
			reth = &ohdr->u.rc.reth;
		else {
			reth = (struct ib_reth *)data;
			data += sizeof(*reth);
		}
		/*
		 * Address range must be a subset of the original
		 * request and start on pmtu boundaries.
		 * We reuse the old ack_queue slot since the requester
		 * should not back up and request an earlier PSN for the
		 * same request.
		 */
		offset = ((psn - e->psn) & IPATH_PSN_MASK) *
			ib_mtu_enum_to_int(qp->path_mtu);
		len = be32_to_cpu(reth->length);
		if (unlikely(offset + len > e->rdma_sge.sge.sge_length))
			goto unlock_done;
		if (len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			ok = ipath_rkey_ok(qp, &e->rdma_sge,
					   len, vaddr, rkey,
					   IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto unlock_done;
		} else {
			e->rdma_sge.sg_list = NULL;
			e->rdma_sge.num_sge = 0;
			e->rdma_sge.sge.mr = NULL;
			e->rdma_sge.sge.vaddr = NULL;
			e->rdma_sge.sge.length = 0;
			e->rdma_sge.sge.sge_length = 0;
		}
		e->psn = psn;
		qp->s_ack_state = OP(ACKNOWLEDGE);
		qp->s_tail_ack_queue = prev;
		break;
	}

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		/*
		 * If we didn't find the atomic request in the ack queue
		 * or the send tasklet is already backed up to send an
		 * earlier entry, we can ignore this request.
		 */
		if (!e || e->opcode != (u8) opcode || old_req)
			goto unlock_done;
		qp->s_ack_state = OP(ACKNOWLEDGE);
		qp->s_tail_ack_queue = prev;
		break;
	}

	default:
		if (old_req)
			goto unlock_done;
		/*
		 * Resend the most recent ACK if this request is
		 * after all the previous RDMA reads and atomics.
		 */
		if (i == qp->r_head_ack_queue) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			qp->r_nak_state = 0;
			qp->r_ack_psn = qp->r_psn - 1;
			goto send_ack;
		}
		/*
		 * Try to send a simple ACK to work around a Mellanox bug
		 * which doesn't accept an RDMA read response or atomic
		 * response as an ACK for earlier SENDs or RDMA writes.
		 */
		if (qp->r_head_ack_queue == qp->s_tail_ack_queue &&
		    !(qp->s_flags & IPATH_S_ACK_PENDING) &&
		    qp->s_ack_state == OP(ACKNOWLEDGE)) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			qp->r_nak_state = 0;
			qp->r_ack_psn = qp->s_ack_queue[i].psn - 1;
			goto send_ack;
		}
		/*
		 * Resend the RDMA read or atomic op which
		 * ACKs this duplicate request.
		 */
		qp->s_ack_state = OP(ACKNOWLEDGE);
		qp->s_tail_ack_queue = i;
		break;
	}
	qp->r_nak_state = 0;
	tasklet_hi_schedule(&qp->s_task);

unlock_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
done:
	return 1;

send_ack:
	return 0;
}

static void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err)
{
	unsigned long flags;
	int lastwqe;

	spin_lock_irqsave(&qp->s_lock, flags);
	qp->state = IB_QPS_ERR;
	lastwqe = ipath_error_qp(qp, err);
	spin_unlock_irqrestore(&qp->s_lock, flags);

	if (lastwqe) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}
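
/*
 * Free ACK queue entry n: if the responder is still on it, advance
 * s_tail_ack_queue past it so the slot can be reused.
 */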
  1426. static inline void ipath_update_ack_queue(struct ipath_qp *qp, unsigned n)
  1427. {
  1428. unsigned long flags;
  1429. unsigned next;
  1430. next = n + 1;
  1431. if (next > IPATH_MAX_RDMA_ATOMIC)
  1432. next = 0;
  1433. spin_lock_irqsave(&qp->s_lock, flags);
  1434. if (n == qp->s_tail_ack_queue) {
  1435. qp->s_tail_ack_queue = next;
  1436. qp->s_ack_state = OP(ACKNOWLEDGE);
  1437. }
  1438. spin_unlock_irqrestore(&qp->s_lock, flags);
  1439. }
  1440. /**
  1441. * ipath_rc_rcv - process an incoming RC packet
  1442. * @dev: the device this packet came in on
  1443. * @hdr: the header of this packet
  1444. * @has_grh: true if the header has a GRH
  1445. * @data: the packet data
  1446. * @tlen: the packet length
  1447. * @qp: the QP for this packet
  1448. *
  1449. * This is called from ipath_qp_rcv() to process an incoming RC packet
  1450. * for the given QP.
  1451. * Called at interrupt level.
  1452. */
void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
{
	struct ipath_other_headers *ohdr;
	u32 opcode;
	u32 hdrsize;
	u32 psn;
	u32 pad;
	struct ib_wc wc;
	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
	int diff;
	struct ib_reth *reth;
	int header_in_data;

	/* Validate the SLID. See Ch. 9.6.1.5 */
	if (unlikely(be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid))
		goto done;

	/* Check for GRH */
	if (!has_grh) {
		ohdr = &hdr->u.oth;
		hdrsize = 8 + 12;	/* LRH + BTH */
		psn = be32_to_cpu(ohdr->bth[2]);
		header_in_data = 0;
	} else {
		ohdr = &hdr->u.l.oth;
		hdrsize = 8 + 40 + 12;	/* LRH + GRH + BTH */
		/*
		 * The header with GRH is 60 bytes and the core driver sets
		 * the eager header buffer size to 56 bytes so the last 4
		 * bytes of the BTH header (the PSN) are in the data buffer.
		 */
		header_in_data = dev->dd->ipath_rcvhdrentsize == 16;
		if (header_in_data) {
			psn = be32_to_cpu(((__be32 *) data)[0]);
			data += sizeof(__be32);
		} else
			psn = be32_to_cpu(ohdr->bth[2]);
	}

	/*
	 * Process responses (ACKs) before anything else.  Note that the
	 * packet sequence number will be for something in the send work
	 * queue rather than the expected receive packet sequence number.
	 * In other words, this QP is the requester.
	 */
	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
		ipath_rc_rcv_resp(dev, ohdr, data, tlen, qp, opcode, psn,
				  hdrsize, pmtu, header_in_data);
		goto done;
	}

	/* Compute 24 bits worth of difference. */
	diff = ipath_cmp24(psn, qp->r_psn);
	if (unlikely(diff)) {
		if (ipath_rc_rcv_error(dev, ohdr, data, qp, opcode,
				       psn, diff, header_in_data))
			goto done;
		goto send_ack;
	}

	/* Check for opcode sequence errors. */
	switch (qp->r_state) {
	case OP(SEND_FIRST):
	case OP(SEND_MIDDLE):
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE))
			break;
	nack_inv:
		ipath_rc_error(qp, IB_WC_REM_INV_REQ_ERR);
		qp->r_nak_state = IB_NAK_INVALID_REQUEST;
		qp->r_ack_psn = qp->r_psn;
		goto send_ack;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_MIDDLE):
		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			break;
		goto nack_inv;

	default:
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			goto nack_inv;
		/*
		 * Note that it is up to the requester not to send a new
		 * RDMA read or atomic operation before receiving an ACK
		 * for the previous operation.
		 */
		break;
	}

	wc.imm_data = 0;
	wc.wc_flags = 0;

	/* OK, process the packet. */
	switch (opcode) {
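	/*
	 * If no receive work request is available, reply with an RNR NAK
	 * so the requester backs off and resends the request later.
	 */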
	case OP(SEND_FIRST):
		if (!ipath_get_rwqe(qp, 0)) {
		rnr_nak:
			qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
			qp->r_ack_psn = qp->r_psn;
			goto send_ack;
		}
		qp->r_rcv_len = 0;
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
	case OP(RDMA_WRITE_MIDDLE):
	send_middle:
		/* Check for invalid length PMTU or posted rwqe len. */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto nack_inv;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto nack_inv;
		ipath_copy_sge(&qp->r_sge, data, pmtu);
		break;
	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
		/* consume RWQE */
		if (!ipath_get_rwqe(qp, 1))
			goto rnr_nak;
		goto send_last_imm;

	case OP(SEND_ONLY):
	case OP(SEND_ONLY_WITH_IMMEDIATE):
		if (!ipath_get_rwqe(qp, 0))
			goto rnr_nak;
		qp->r_rcv_len = 0;
		if (opcode == OP(SEND_ONLY))
			goto send_last;
		/* FALLTHROUGH */
	case OP(SEND_LAST_WITH_IMMEDIATE):
	send_last_imm:
		if (header_in_data) {
			wc.imm_data = *(__be32 *) data;
			data += sizeof(__be32);
		} else {
			/* Immediate data comes after BTH */
			wc.imm_data = ohdr->u.imm_data;
		}
		hdrsize += 4;
		wc.wc_flags = IB_WC_WITH_IMM;
		/* FALLTHROUGH */
	case OP(SEND_LAST):
	case OP(RDMA_WRITE_LAST):
	send_last:
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto nack_inv;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		wc.byte_len = tlen + qp->r_rcv_len;
		if (unlikely(wc.byte_len > qp->r_len))
			goto nack_inv;
		ipath_copy_sge(&qp->r_sge, data, tlen);
		qp->r_msn++;
		if (!qp->r_wrid_valid)
			break;
		qp->r_wrid_valid = 0;
		wc.wr_id = qp->r_wr_id;
		wc.status = IB_WC_SUCCESS;
		wc.opcode = IB_WC_RECV;
		wc.vendor_err = 0;
		wc.qp = &qp->ibqp;
		wc.src_qp = qp->remote_qpn;
		wc.pkey_index = 0;
		wc.slid = qp->remote_ah_attr.dlid;
		wc.sl = qp->remote_ah_attr.sl;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		/* Signal completion event if the solicited bit is set. */
		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
			       (ohdr->bth[0] &
				__constant_cpu_to_be32(1 << 23)) != 0);
		break;
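
	/*
	 * RDMA writes validate IB_ACCESS_REMOTE_WRITE and the RETH before
	 * any payload is placed; a bad rkey is NAKed with
	 * IB_NAK_REMOTE_ACCESS_ERROR via nack_acc.
	 */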
	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_ONLY):
	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_WRITE)))
			goto nack_inv;
		/* RETH comes after BTH */
		if (!header_in_data)
			reth = &ohdr->u.rc.reth;
		else {
			reth = (struct ib_reth *)data;
			data += sizeof(*reth);
		}
		hdrsize += sizeof(*reth);
		qp->r_len = be32_to_cpu(reth->length);
		qp->r_rcv_len = 0;
		if (qp->r_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/* Check rkey & NAK */
			ok = ipath_rkey_ok(qp, &qp->r_sge,
					   qp->r_len, vaddr, rkey,
					   IB_ACCESS_REMOTE_WRITE);
			if (unlikely(!ok))
				goto nack_acc;
		} else {
			qp->r_sge.sg_list = NULL;
			qp->r_sge.sge.mr = NULL;
			qp->r_sge.sge.vaddr = NULL;
			qp->r_sge.sge.length = 0;
			qp->r_sge.sge.sge_length = 0;
		}
		if (opcode == OP(RDMA_WRITE_FIRST))
			goto send_middle;
		else if (opcode == OP(RDMA_WRITE_ONLY))
			goto send_last;
		/* consume RWQE for RDMA_WRITE_ONLY_WITH_IMMEDIATE */
		if (!ipath_get_rwqe(qp, 1))
			goto rnr_nak;
		goto send_last_imm;
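
	/*
	 * RDMA READ and atomic requests are answered from the s_ack_queue:
	 * an entry is built here, r_head_ack_queue is advanced, and the
	 * send tasklet generates the response from the tail of the queue.
	 */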
	case OP(RDMA_READ_REQUEST): {
		struct ipath_ack_entry *e;
		u32 len;
		u8 next;

		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_READ)))
			goto nack_inv;
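		/*
		 * s_ack_queue has IPATH_MAX_RDMA_ATOMIC + 1 slots; the
		 * spare slot keeps head != tail while the queue is full,
		 * so wrap the head index rather than masking it.
		 */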
		next = qp->r_head_ack_queue + 1;
		if (next > IPATH_MAX_RDMA_ATOMIC)
			next = 0;
		if (unlikely(next == qp->s_tail_ack_queue)) {
			if (!qp->s_ack_queue[next].sent)
				goto nack_inv;
			ipath_update_ack_queue(qp, next);
		}
		e = &qp->s_ack_queue[qp->r_head_ack_queue];
		/* RETH comes after BTH */
		if (!header_in_data)
			reth = &ohdr->u.rc.reth;
		else {
			reth = (struct ib_reth *)data;
			data += sizeof(*reth);
		}
		len = be32_to_cpu(reth->length);
		if (len) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/* Check rkey & NAK */
			ok = ipath_rkey_ok(qp, &e->rdma_sge, len, vaddr,
					   rkey, IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto nack_acc;
			/*
			 * Update the next expected PSN.  We add 1 later
			 * below, so only add the remainder here.
			 */
			if (len > pmtu)
				qp->r_psn += (len - 1) / pmtu;
		} else {
			e->rdma_sge.sg_list = NULL;
			e->rdma_sge.num_sge = 0;
			e->rdma_sge.sge.mr = NULL;
			e->rdma_sge.sge.vaddr = NULL;
			e->rdma_sge.sge.length = 0;
			e->rdma_sge.sge.sge_length = 0;
		}
		e->opcode = opcode;
		e->sent = 0;
		e->psn = psn;
		/*
		 * We need to increment the MSN here instead of when we
		 * finish sending the result since a duplicate request would
		 * increment it more than once.
		 */
		qp->r_msn++;
		qp->r_psn++;
		qp->r_state = opcode;
		qp->r_nak_state = 0;
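		/*
		 * Order the writes to the queue entry before the update of
		 * r_head_ack_queue so the send tasklet never sees a
		 * partially initialized entry.
		 */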
		barrier();
		qp->r_head_ack_queue = next;

		/* Call ipath_do_rc_send() in another thread. */
		tasklet_hi_schedule(&qp->s_task);

		goto done;
	}
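
	/*
	 * Atomics are executed in place on the target memory and the
	 * prior value is saved in the ACK queue entry, so a duplicate
	 * request can be answered without re-executing the operation.
	 */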
	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		struct ib_atomic_eth *ateth;
		struct ipath_ack_entry *e;
		u64 vaddr;
		atomic64_t *maddr;
		u64 sdata;
		u32 rkey;
		u8 next;

		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_inv;
		next = qp->r_head_ack_queue + 1;
		if (next > IPATH_MAX_RDMA_ATOMIC)
			next = 0;
		if (unlikely(next == qp->s_tail_ack_queue)) {
			if (!qp->s_ack_queue[next].sent)
				goto nack_inv;
			ipath_update_ack_queue(qp, next);
		}
		if (!header_in_data)
			ateth = &ohdr->u.atomic_eth;
		else
			ateth = (struct ib_atomic_eth *)data;
		vaddr = ((u64) be32_to_cpu(ateth->vaddr[0]) << 32) |
			be32_to_cpu(ateth->vaddr[1]);
		if (unlikely(vaddr & (sizeof(u64) - 1)))
			goto nack_inv;
		rkey = be32_to_cpu(ateth->rkey);
		/* Check rkey & NAK */
		if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge,
					    sizeof(u64), vaddr, rkey,
					    IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_acc;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
		sdata = be64_to_cpu(ateth->swap_data);
		e = &qp->s_ack_queue[qp->r_head_ack_queue];
		e->atomic_data = (opcode == OP(FETCH_ADD)) ?
			(u64) atomic64_add_return(sdata, maddr) - sdata :
			(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
				      be64_to_cpu(ateth->compare_data),
				      sdata);
		e->opcode = opcode;
		e->sent = 0;
		e->psn = psn & IPATH_PSN_MASK;
		qp->r_msn++;
		qp->r_psn++;
		qp->r_state = opcode;
		qp->r_nak_state = 0;
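		/* Same write ordering requirement as for RDMA READ above. */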
		barrier();
		qp->r_head_ack_queue = next;

		/* Call ipath_do_rc_send() in another thread. */
		tasklet_hi_schedule(&qp->s_task);

		goto done;
	}

	default:
		/* NAK unknown opcodes. */
		goto nack_inv;
	}

	qp->r_psn++;
	qp->r_state = opcode;
	qp->r_ack_psn = psn;
	qp->r_nak_state = 0;
	/* Send an ACK if requested or required. */
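	/* Bit 31 of the BTH PSN word is the AckReq bit. */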
	if (psn & (1 << 31))
		goto send_ack;
	goto done;

nack_acc:
	ipath_rc_error(qp, IB_WC_REM_ACCESS_ERR);
	qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
	qp->r_ack_psn = qp->r_psn;
send_ack:
	send_rc_ack(qp);

done:
	return;
}