/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/io.h>

#include "qib.h"

/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_RC_##x

static void rc_timeout(unsigned long arg);

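/*
 * restart_sge - set up an SGE to resume sending a WQE at a given PSN
 *
 * Compute how many payload bytes the packets before @psn already
 * covered (the 24-bit PSN delta from wqe->psn times the path MTU),
 * rewind the SGE state to the start of the WQE, skip that many bytes,
 * and return how many bytes remain to be sent.  For example, resuming
 * 3 packets past wqe->psn with a 2048-byte MTU skips 6144 bytes.
 */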
static u32 restart_sge(struct qib_sge_state *ss, struct qib_swqe *wqe,
                       u32 psn, u32 pmtu)
{
        u32 len;

        len = ((psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
        ss->sge = wqe->sg_list[0];
        ss->sg_list = wqe->sg_list + 1;
        ss->num_sge = wqe->wr.num_sge;
        ss->total_len = wqe->length;
        qib_skip_sge(ss, len, 0);
        return wqe->length - len;
}

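/*
 * start_timer - arm the retransmit timer
 *
 * Mark the QP as having the timer armed; rc_timeout() runs if the
 * expected response does not arrive within the QP timeout.
 */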
static void start_timer(struct qib_qp *qp)
{
        qp->s_flags |= QIB_S_TIMER;
        qp->s_timer.function = rc_timeout;
        /* 4.096 usec. * (1 << qp->timeout) */
        qp->s_timer.expires = jiffies + qp->timeout_jiffies;
        add_timer(&qp->s_timer);
}

/**
 * qib_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
 * @dev: the device for this QP
 * @qp: a pointer to the QP
 * @ohdr: a pointer to the IB header being constructed
 * @pmtu: the path MTU
 *
 * Return 1 if constructed; otherwise, return 0.
 * Note that we are in the responder's side of the QP context.
 * Note the QP s_lock must be held.
 */
static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp,
                           struct qib_other_headers *ohdr, u32 pmtu)
{
        struct qib_ack_entry *e;
        u32 hwords;
        u32 len;
        u32 bth0;
        u32 bth2;

        /* Don't send an ACK if we aren't supposed to. */
        if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
                goto bail;

        /* header size in 32-bit words LRH+BTH = (8+12)/4. */
        hwords = 5;

        switch (qp->s_ack_state) {
        case OP(RDMA_READ_RESPONSE_LAST):
        case OP(RDMA_READ_RESPONSE_ONLY):
                e = &qp->s_ack_queue[qp->s_tail_ack_queue];
                if (e->rdma_sge.mr) {
                        atomic_dec(&e->rdma_sge.mr->refcount);
                        e->rdma_sge.mr = NULL;
                }
                /* FALLTHROUGH */
        case OP(ATOMIC_ACKNOWLEDGE):
                /*
                 * We can increment the tail pointer now that the last
                 * response has been sent instead of only being
                 * constructed.
                 */
                if (++qp->s_tail_ack_queue > QIB_MAX_RDMA_ATOMIC)
                        qp->s_tail_ack_queue = 0;
                /* FALLTHROUGH */
        case OP(SEND_ONLY):
        case OP(ACKNOWLEDGE):
                /* Check for no next entry in the queue. */
                if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
                        if (qp->s_flags & QIB_S_ACK_PENDING)
                                goto normal;
                        goto bail;
                }

                e = &qp->s_ack_queue[qp->s_tail_ack_queue];
                if (e->opcode == OP(RDMA_READ_REQUEST)) {
                        /*
                         * If a RDMA read response is being resent and
                         * we haven't seen the duplicate request yet,
                         * then stop sending the remaining responses the
                         * responder has seen until the requester resends it.
                         */
                        len = e->rdma_sge.sge_length;
                        if (len && !e->rdma_sge.mr) {
                                qp->s_tail_ack_queue = qp->r_head_ack_queue;
                                goto bail;
                        }
                        /* Copy SGE state in case we need to resend */
                        qp->s_rdma_mr = e->rdma_sge.mr;
                        if (qp->s_rdma_mr)
                                atomic_inc(&qp->s_rdma_mr->refcount);
                        qp->s_ack_rdma_sge.sge = e->rdma_sge;
                        qp->s_ack_rdma_sge.num_sge = 1;
                        qp->s_cur_sge = &qp->s_ack_rdma_sge;
                        if (len > pmtu) {
                                len = pmtu;
                                qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
                        } else {
                                qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
                                e->sent = 1;
                        }
                        ohdr->u.aeth = qib_compute_aeth(qp);
                        hwords++;
                        qp->s_ack_rdma_psn = e->psn;
                        bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
                } else {
                        /* COMPARE_SWAP or FETCH_ADD */
                        qp->s_cur_sge = NULL;
                        len = 0;
                        qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
                        ohdr->u.at.aeth = qib_compute_aeth(qp);
                        ohdr->u.at.atomic_ack_eth[0] =
                                cpu_to_be32(e->atomic_data >> 32);
                        ohdr->u.at.atomic_ack_eth[1] =
                                cpu_to_be32(e->atomic_data);
                        hwords += sizeof(ohdr->u.at) / sizeof(u32);
                        bth2 = e->psn & QIB_PSN_MASK;
                        e->sent = 1;
                }
                bth0 = qp->s_ack_state << 24;
                break;

        case OP(RDMA_READ_RESPONSE_FIRST):
                qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
                /* FALLTHROUGH */
        case OP(RDMA_READ_RESPONSE_MIDDLE):
                qp->s_cur_sge = &qp->s_ack_rdma_sge;
                qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr;
                if (qp->s_rdma_mr)
                        atomic_inc(&qp->s_rdma_mr->refcount);
                len = qp->s_ack_rdma_sge.sge.sge_length;
                if (len > pmtu)
                        len = pmtu;
                else {
                        ohdr->u.aeth = qib_compute_aeth(qp);
                        hwords++;
                        qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
                        e = &qp->s_ack_queue[qp->s_tail_ack_queue];
                        e->sent = 1;
                }
                bth0 = qp->s_ack_state << 24;
                bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
                break;

        default:
normal:
                /*
                 * Send a regular ACK.
                 * Set the s_ack_state so we wait until after sending
                 * the ACK before setting s_ack_state to ACKNOWLEDGE
                 * (see above).
                 */
                qp->s_ack_state = OP(SEND_ONLY);
                qp->s_flags &= ~QIB_S_ACK_PENDING;
                qp->s_cur_sge = NULL;
                if (qp->s_nak_state)
                        ohdr->u.aeth =
                                cpu_to_be32((qp->r_msn & QIB_MSN_MASK) |
                                            (qp->s_nak_state <<
                                             QIB_AETH_CREDIT_SHIFT));
                else
                        ohdr->u.aeth = qib_compute_aeth(qp);
                hwords++;
                len = 0;
                bth0 = OP(ACKNOWLEDGE) << 24;
                bth2 = qp->s_ack_psn & QIB_PSN_MASK;
        }
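        /*
         * Track responses in flight: qib_send_rc_ack() queues ACKs
         * through this send path while s_rdma_ack_cnt is nonzero.
         */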
        qp->s_rdma_ack_cnt++;
        qp->s_hdrwords = hwords;
        qp->s_cur_size = len;
        qib_make_ruc_header(qp, ohdr, bth0, bth2);
        return 1;

bail:
        qp->s_ack_state = OP(ACKNOWLEDGE);
        qp->s_flags &= ~(QIB_S_RESP_PENDING | QIB_S_ACK_PENDING);
        return 0;
}

/**
 * qib_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
 * @qp: a pointer to the QP
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int qib_make_rc_req(struct qib_qp *qp)
{
        struct qib_ibdev *dev = to_idev(qp->ibqp.device);
        struct qib_other_headers *ohdr;
        struct qib_sge_state *ss;
        struct qib_swqe *wqe;
        u32 hwords;
        u32 len;
        u32 bth0;
        u32 bth2;
        u32 pmtu = qp->pmtu;
        char newreq;
        unsigned long flags;
        int ret = 0;
        int delta;

        ohdr = &qp->s_hdr.u.oth;
        if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
                ohdr = &qp->s_hdr.u.l.oth;

        /*
         * The lock is needed to synchronize between the sending tasklet,
         * the receive interrupt handler, and timeout resends.
         */
        spin_lock_irqsave(&qp->s_lock, flags);

        /* Sending responses takes priority over sending requests. */
        if ((qp->s_flags & QIB_S_RESP_PENDING) &&
            qib_make_rc_ack(dev, qp, ohdr, pmtu))
                goto done;

        if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_SEND_OK)) {
                if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
                        goto bail;
                /* We are in the error state, flush the work request. */
                if (qp->s_last == qp->s_head)
                        goto bail;
                /* If DMAs are in progress, we can't flush immediately. */
                if (atomic_read(&qp->s_dma_busy)) {
                        qp->s_flags |= QIB_S_WAIT_DMA;
                        goto bail;
                }
                wqe = get_swqe_ptr(qp, qp->s_last);
                while (qp->s_last != qp->s_acked) {
                        qib_send_complete(qp, wqe, IB_WC_SUCCESS);
                        if (++qp->s_last >= qp->s_size)
                                qp->s_last = 0;
                        wqe = get_swqe_ptr(qp, qp->s_last);
                }
                qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
                goto done;
        }

        if (qp->s_flags & (QIB_S_WAIT_RNR | QIB_S_WAIT_ACK))
                goto bail;

        if (qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) {
                if (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
                        qp->s_flags |= QIB_S_WAIT_PSN;
                        goto bail;
                }
                qp->s_sending_psn = qp->s_psn;
                qp->s_sending_hpsn = qp->s_psn - 1;
        }

        /* header size in 32-bit words LRH+BTH = (8+12)/4. */
        hwords = 5;
        bth0 = 0;

        /* Send a request. */
        wqe = get_swqe_ptr(qp, qp->s_cur);
        switch (qp->s_state) {
        default:
                if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_NEXT_SEND_OK))
                        goto bail;
                /*
                 * Resend an old request or start a new one.
                 *
                 * We keep track of the current SWQE so that
                 * we don't reset the "furthest progress" state
                 * if we need to back up.
                 */
                newreq = 0;
                if (qp->s_cur == qp->s_tail) {
                        /* Check if send work queue is empty. */
                        if (qp->s_tail == qp->s_head)
                                goto bail;
                        /*
                         * If a fence is requested, wait for previous
                         * RDMA read and atomic operations to finish.
                         */
                        if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
                            qp->s_num_rd_atomic) {
                                qp->s_flags |= QIB_S_WAIT_FENCE;
                                goto bail;
                        }
                        wqe->psn = qp->s_next_psn;
                        newreq = 1;
                }
                /*
                 * Note that we have to be careful not to modify the
                 * original work request since we may need to resend
                 * it.
                 */
                len = wqe->length;
                ss = &qp->s_sge;
                bth2 = qp->s_psn & QIB_PSN_MASK;
                switch (wqe->wr.opcode) {
                case IB_WR_SEND:
                case IB_WR_SEND_WITH_IMM:
                        /* If no credit, return. */
                        if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT) &&
                            qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
                                qp->s_flags |= QIB_S_WAIT_SSN_CREDIT;
                                goto bail;
                        }
                        wqe->lpsn = wqe->psn;
                        if (len > pmtu) {
                                wqe->lpsn += (len - 1) / pmtu;
                                qp->s_state = OP(SEND_FIRST);
                                len = pmtu;
                                break;
                        }
                        if (wqe->wr.opcode == IB_WR_SEND)
                                qp->s_state = OP(SEND_ONLY);
                        else {
                                qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
                                /* Immediate data comes after the BTH */
                                ohdr->u.imm_data = wqe->wr.ex.imm_data;
                                hwords += 1;
                        }
                        if (wqe->wr.send_flags & IB_SEND_SOLICITED)
                                bth0 |= IB_BTH_SOLICITED;
                        bth2 |= IB_BTH_REQ_ACK;
                        if (++qp->s_cur == qp->s_size)
                                qp->s_cur = 0;
                        break;

                case IB_WR_RDMA_WRITE:
                        if (newreq && !(qp->s_flags & QIB_S_UNLIMITED_CREDIT))
                                qp->s_lsn++;
                        /* FALLTHROUGH */
                case IB_WR_RDMA_WRITE_WITH_IMM:
                        /* If no credit, return. */
                        if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT) &&
                            qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
                                qp->s_flags |= QIB_S_WAIT_SSN_CREDIT;
                                goto bail;
                        }
                        ohdr->u.rc.reth.vaddr =
                                cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
                        ohdr->u.rc.reth.rkey =
                                cpu_to_be32(wqe->wr.wr.rdma.rkey);
                        ohdr->u.rc.reth.length = cpu_to_be32(len);
                        hwords += sizeof(struct ib_reth) / sizeof(u32);
                        wqe->lpsn = wqe->psn;
                        if (len > pmtu) {
                                wqe->lpsn += (len - 1) / pmtu;
                                qp->s_state = OP(RDMA_WRITE_FIRST);
                                len = pmtu;
                                break;
                        }
                        if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
                                qp->s_state = OP(RDMA_WRITE_ONLY);
                        else {
                                qp->s_state =
                                        OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
                                /* Immediate data comes after RETH */
                                ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
                                hwords += 1;
                                if (wqe->wr.send_flags & IB_SEND_SOLICITED)
                                        bth0 |= IB_BTH_SOLICITED;
                        }
                        bth2 |= IB_BTH_REQ_ACK;
                        if (++qp->s_cur == qp->s_size)
                                qp->s_cur = 0;
                        break;

                case IB_WR_RDMA_READ:
                        /*
                         * Don't allow more operations to be started
                         * than the QP limits allow.
                         */
                        if (newreq) {
                                if (qp->s_num_rd_atomic >=
                                    qp->s_max_rd_atomic) {
                                        qp->s_flags |= QIB_S_WAIT_RDMAR;
                                        goto bail;
                                }
                                qp->s_num_rd_atomic++;
                                if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT))
                                        qp->s_lsn++;
                                /*
                                 * Adjust s_next_psn to count the
                                 * expected number of responses.
                                 */
                                if (len > pmtu)
                                        qp->s_next_psn += (len - 1) / pmtu;
                                wqe->lpsn = qp->s_next_psn++;
                        }
                        ohdr->u.rc.reth.vaddr =
                                cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
                        ohdr->u.rc.reth.rkey =
                                cpu_to_be32(wqe->wr.wr.rdma.rkey);
                        ohdr->u.rc.reth.length = cpu_to_be32(len);
                        qp->s_state = OP(RDMA_READ_REQUEST);
                        hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
                        ss = NULL;
                        len = 0;
                        bth2 |= IB_BTH_REQ_ACK;
                        if (++qp->s_cur == qp->s_size)
                                qp->s_cur = 0;
                        break;

                case IB_WR_ATOMIC_CMP_AND_SWP:
                case IB_WR_ATOMIC_FETCH_AND_ADD:
                        /*
                         * Don't allow more operations to be started
                         * than the QP limits allow.
                         */
                        if (newreq) {
                                if (qp->s_num_rd_atomic >=
                                    qp->s_max_rd_atomic) {
                                        qp->s_flags |= QIB_S_WAIT_RDMAR;
                                        goto bail;
                                }
                                qp->s_num_rd_atomic++;
                                if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT))
                                        qp->s_lsn++;
                                wqe->lpsn = wqe->psn;
                        }
                        if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
                                qp->s_state = OP(COMPARE_SWAP);
                                ohdr->u.atomic_eth.swap_data = cpu_to_be64(
                                        wqe->wr.wr.atomic.swap);
                                ohdr->u.atomic_eth.compare_data = cpu_to_be64(
                                        wqe->wr.wr.atomic.compare_add);
                        } else {
                                qp->s_state = OP(FETCH_ADD);
                                ohdr->u.atomic_eth.swap_data = cpu_to_be64(
                                        wqe->wr.wr.atomic.compare_add);
                                ohdr->u.atomic_eth.compare_data = 0;
                        }
                        ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32(
                                wqe->wr.wr.atomic.remote_addr >> 32);
                        ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32(
                                wqe->wr.wr.atomic.remote_addr);
                        ohdr->u.atomic_eth.rkey = cpu_to_be32(
                                wqe->wr.wr.atomic.rkey);
                        hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
                        ss = NULL;
                        len = 0;
                        bth2 |= IB_BTH_REQ_ACK;
                        if (++qp->s_cur == qp->s_size)
                                qp->s_cur = 0;
                        break;

                default:
                        goto bail;
                }
                qp->s_sge.sge = wqe->sg_list[0];
                qp->s_sge.sg_list = wqe->sg_list + 1;
                qp->s_sge.num_sge = wqe->wr.num_sge;
                qp->s_sge.total_len = wqe->length;
                qp->s_len = wqe->length;
                if (newreq) {
                        qp->s_tail++;
                        if (qp->s_tail >= qp->s_size)
                                qp->s_tail = 0;
                }
                if (wqe->wr.opcode == IB_WR_RDMA_READ)
                        qp->s_psn = wqe->lpsn + 1;
                else {
                        qp->s_psn++;
                        if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0)
                                qp->s_next_psn = qp->s_psn;
                }
                break;

        case OP(RDMA_READ_RESPONSE_FIRST):
                /*
                 * qp->s_state is normally set to the opcode of the
                 * last packet constructed for new requests and therefore
                 * is never set to RDMA read response.
                 * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
                 * thread to indicate a SEND needs to be restarted from an
                 * earlier PSN without interfering with the sending thread.
                 * See qib_restart_rc().
                 */
                qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
                /* FALLTHROUGH */
        case OP(SEND_FIRST):
                qp->s_state = OP(SEND_MIDDLE);
                /* FALLTHROUGH */
        case OP(SEND_MIDDLE):
                bth2 = qp->s_psn++ & QIB_PSN_MASK;
                if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0)
                        qp->s_next_psn = qp->s_psn;
                ss = &qp->s_sge;
                len = qp->s_len;
                if (len > pmtu) {
                        len = pmtu;
                        break;
                }
                if (wqe->wr.opcode == IB_WR_SEND)
                        qp->s_state = OP(SEND_LAST);
                else {
                        qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
                        /* Immediate data comes after the BTH */
                        ohdr->u.imm_data = wqe->wr.ex.imm_data;
                        hwords += 1;
                }
                if (wqe->wr.send_flags & IB_SEND_SOLICITED)
                        bth0 |= IB_BTH_SOLICITED;
                bth2 |= IB_BTH_REQ_ACK;
                qp->s_cur++;
                if (qp->s_cur >= qp->s_size)
                        qp->s_cur = 0;
                break;

        case OP(RDMA_READ_RESPONSE_LAST):
                /*
                 * qp->s_state is normally set to the opcode of the
                 * last packet constructed for new requests and therefore
                 * is never set to RDMA read response.
                 * RDMA_READ_RESPONSE_LAST is used by the ACK processing
                 * thread to indicate a RDMA write needs to be restarted from
                 * an earlier PSN without interfering with the sending thread.
                 * See qib_restart_rc().
                 */
                qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
                /* FALLTHROUGH */
        case OP(RDMA_WRITE_FIRST):
                qp->s_state = OP(RDMA_WRITE_MIDDLE);
                /* FALLTHROUGH */
        case OP(RDMA_WRITE_MIDDLE):
                bth2 = qp->s_psn++ & QIB_PSN_MASK;
                if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0)
                        qp->s_next_psn = qp->s_psn;
                ss = &qp->s_sge;
                len = qp->s_len;
                if (len > pmtu) {
                        len = pmtu;
                        break;
                }
                if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
                        qp->s_state = OP(RDMA_WRITE_LAST);
                else {
                        qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
                        /* Immediate data comes after the BTH */
                        ohdr->u.imm_data = wqe->wr.ex.imm_data;
                        hwords += 1;
                        if (wqe->wr.send_flags & IB_SEND_SOLICITED)
                                bth0 |= IB_BTH_SOLICITED;
                }
                bth2 |= IB_BTH_REQ_ACK;
                qp->s_cur++;
                if (qp->s_cur >= qp->s_size)
                        qp->s_cur = 0;
                break;

        case OP(RDMA_READ_RESPONSE_MIDDLE):
                /*
                 * qp->s_state is normally set to the opcode of the
                 * last packet constructed for new requests and therefore
                 * is never set to RDMA read response.
                 * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
                 * thread to indicate a RDMA read needs to be restarted from
                 * an earlier PSN without interfering with the sending thread.
                 * See qib_restart_rc().
                 */
                len = ((qp->s_psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
                ohdr->u.rc.reth.vaddr =
                        cpu_to_be64(wqe->wr.wr.rdma.remote_addr + len);
                ohdr->u.rc.reth.rkey =
                        cpu_to_be32(wqe->wr.wr.rdma.rkey);
                ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
                qp->s_state = OP(RDMA_READ_REQUEST);
                hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
                bth2 = (qp->s_psn & QIB_PSN_MASK) | IB_BTH_REQ_ACK;
                qp->s_psn = wqe->lpsn + 1;
                ss = NULL;
                len = 0;
                qp->s_cur++;
                if (qp->s_cur == qp->s_size)
                        qp->s_cur = 0;
                break;
        }
        qp->s_sending_hpsn = bth2;
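        /*
         * Strip the BTH flag bits and sign-extend the 24-bit PSN
         * difference so the test below works across PSN wraparound;
         * request an ACK every QIB_PSN_CREDIT packets within a long
         * request so credit updates come back at regular intervals.
         */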
        delta = (((int) bth2 - (int) wqe->psn) << 8) >> 8;
        if (delta && delta % QIB_PSN_CREDIT == 0)
                bth2 |= IB_BTH_REQ_ACK;
        if (qp->s_flags & QIB_S_SEND_ONE) {
                qp->s_flags &= ~QIB_S_SEND_ONE;
                qp->s_flags |= QIB_S_WAIT_ACK;
                bth2 |= IB_BTH_REQ_ACK;
        }
        qp->s_len -= len;
        qp->s_hdrwords = hwords;
        qp->s_cur_sge = ss;
        qp->s_cur_size = len;
        qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24), bth2);
done:
        ret = 1;
        goto unlock;

bail:
        qp->s_flags &= ~QIB_S_BUSY;
unlock:
        spin_unlock_irqrestore(&qp->s_lock, flags);
        return ret;
}

/**
 * qib_send_rc_ack - Construct an ACK packet and send it
 * @qp: a pointer to the QP
 *
 * This is called from qib_rc_rcv() and qib_kreceive().
 * Note that RDMA reads and atomics are handled in the
 * send side QP state and tasklet.
 */
void qib_send_rc_ack(struct qib_qp *qp)
{
        struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
        struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
        struct qib_pportdata *ppd = ppd_from_ibp(ibp);
        u64 pbc;
        u16 lrh0;
        u32 bth0;
        u32 hwords;
        u32 pbufn;
        u32 __iomem *piobuf;
        struct qib_ib_header hdr;
        struct qib_other_headers *ohdr;
        u32 control;
        unsigned long flags;

        spin_lock_irqsave(&qp->s_lock, flags);

        if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
                goto unlock;

        /* Don't send ACK or NAK if a RDMA read or atomic is pending. */
        if ((qp->s_flags & QIB_S_RESP_PENDING) || qp->s_rdma_ack_cnt)
                goto queue_ack;

        /* Construct the header with s_lock held so APM doesn't change it. */
        ohdr = &hdr.u.oth;
        lrh0 = QIB_LRH_BTH;
        /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
        hwords = 6;
        if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
                hwords += qib_make_grh(ibp, &hdr.u.l.grh,
                                       &qp->remote_ah_attr.grh, hwords, 0);
                ohdr = &hdr.u.l.oth;
                lrh0 = QIB_LRH_GRH;
        }
        /* read pkey_index w/o lock (it's atomic) */
        bth0 = qib_get_pkey(ibp, qp->s_pkey_index) | (OP(ACKNOWLEDGE) << 24);
        if (qp->s_mig_state == IB_MIG_MIGRATED)
                bth0 |= IB_BTH_MIG_REQ;
        if (qp->r_nak_state)
                ohdr->u.aeth = cpu_to_be32((qp->r_msn & QIB_MSN_MASK) |
                                           (qp->r_nak_state <<
                                            QIB_AETH_CREDIT_SHIFT));
        else
                ohdr->u.aeth = qib_compute_aeth(qp);
        lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
                qp->remote_ah_attr.sl << 4;
        hdr.lrh[0] = cpu_to_be16(lrh0);
        hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
        hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
        hdr.lrh[3] = cpu_to_be16(ppd->lid | qp->remote_ah_attr.src_path_bits);
        ohdr->bth[0] = cpu_to_be32(bth0);
        ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
        ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & QIB_PSN_MASK);

        spin_unlock_irqrestore(&qp->s_lock, flags);

        /* Don't try to send ACKs if the link isn't ACTIVE */
        if (!(ppd->lflags & QIBL_LINKACTIVE))
                goto done;

        control = dd->f_setpbc_control(ppd, hwords + SIZE_OF_CRC,
                                       qp->s_srate, lrh0 >> 12);
        /* length is + 1 for the control dword */
        pbc = ((u64) control << 32) | (hwords + 1);

        piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
        if (!piobuf) {
                /*
                 * We are out of PIO buffers at the moment.
                 * Pass responsibility for sending the ACK to the
                 * send tasklet so that when a PIO buffer becomes
                 * available, the ACK is sent ahead of other outgoing
                 * packets.
                 */
                spin_lock_irqsave(&qp->s_lock, flags);
                goto queue_ack;
        }

        /*
         * Write the pbc.
         * We have to flush after the PBC for correctness
         * on some cpus or WC buffer can be written out of order.
         */
        writeq(pbc, piobuf);
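        /*
         * Copy all but the last word of the header, flush write
         * combining, then write the final word separately so the
         * chip does not see the header until it is complete.
         */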
        if (dd->flags & QIB_PIO_FLUSH_WC) {
                u32 *hdrp = (u32 *) &hdr;

                qib_flush_wc();
                qib_pio_copy(piobuf + 2, hdrp, hwords - 1);
                qib_flush_wc();
                __raw_writel(hdrp[hwords - 1], piobuf + hwords + 1);
        } else
                qib_pio_copy(piobuf + 2, (u32 *) &hdr, hwords);

        if (dd->flags & QIB_USE_SPCL_TRIG) {
                u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;

                qib_flush_wc();
                __raw_writel(0xaebecede, piobuf + spcl_off);
        }

        qib_flush_wc();
        qib_sendbuf_done(dd, pbufn);

        ibp->n_unicast_xmit++;
        goto done;

queue_ack:
        if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
                ibp->n_rc_qacks++;
                qp->s_flags |= QIB_S_ACK_PENDING | QIB_S_RESP_PENDING;
                qp->s_nak_state = qp->r_nak_state;
                qp->s_ack_psn = qp->r_ack_psn;

                /* Schedule the send tasklet. */
                qib_schedule_send(qp);
        }
unlock:
        spin_unlock_irqrestore(&qp->s_lock, flags);
done:
        return;
}

/**
 * reset_psn - reset the QP state to send starting from PSN
 * @qp: the QP
 * @psn: the packet sequence number to restart at
 *
 * This is called from qib_rc_rcv() to process an incoming RC ACK
 * for the given QP.
 * Called at interrupt level with the QP s_lock held.
 */
static void reset_psn(struct qib_qp *qp, u32 psn)
{
        u32 n = qp->s_acked;
        struct qib_swqe *wqe = get_swqe_ptr(qp, n);
        u32 opcode;

        qp->s_cur = n;

        /*
         * If we are starting the request from the beginning,
         * let the normal send code handle initialization.
         */
        if (qib_cmp24(psn, wqe->psn) <= 0) {
                qp->s_state = OP(SEND_LAST);
                goto done;
        }

        /* Find the work request opcode corresponding to the given PSN. */
        opcode = wqe->wr.opcode;
        for (;;) {
                int diff;

                if (++n == qp->s_size)
                        n = 0;
                if (n == qp->s_tail)
                        break;
                wqe = get_swqe_ptr(qp, n);
                diff = qib_cmp24(psn, wqe->psn);
                if (diff < 0)
                        break;
                qp->s_cur = n;
                /*
                 * If we are starting the request from the beginning,
                 * let the normal send code handle initialization.
                 */
                if (diff == 0) {
                        qp->s_state = OP(SEND_LAST);
                        goto done;
                }
                opcode = wqe->wr.opcode;
        }

        /*
         * Set the state to restart in the middle of a request.
         * Don't change the s_sge, s_cur_sge, or s_cur_size.
         * See qib_make_rc_req().
         */
        switch (opcode) {
        case IB_WR_SEND:
        case IB_WR_SEND_WITH_IMM:
                qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
                break;

        case IB_WR_RDMA_WRITE:
        case IB_WR_RDMA_WRITE_WITH_IMM:
                qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
                break;

        case IB_WR_RDMA_READ:
                qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
                break;

        default:
                /*
                 * This case shouldn't happen since there is only
                 * one PSN per request.
                 */
                qp->s_state = OP(SEND_LAST);
        }
done:
        qp->s_psn = psn;
        /*
         * Set QIB_S_WAIT_PSN as qib_rc_complete() may start the timer
         * asynchronously before the send tasklet can get scheduled.
         * Doing it in qib_make_rc_req() is too late.
         */
        if ((qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
            (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
                qp->s_flags |= QIB_S_WAIT_PSN;
}

/*
 * Back up requester to resend the last un-ACKed request.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 */
static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait)
{
        struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_acked);
        struct qib_ibport *ibp;

        if (qp->s_retry == 0) {
                if (qp->s_mig_state == IB_MIG_ARMED) {
                        qib_migrate_qp(qp);
                        qp->s_retry = qp->s_retry_cnt;
                } else if (qp->s_last == qp->s_acked) {
                        qib_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
                        qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
                        return;
                } else /* XXX need to handle delayed completion */
                        return;
        } else
                qp->s_retry--;

        ibp = to_iport(qp->ibqp.device, qp->port_num);
        if (wqe->wr.opcode == IB_WR_RDMA_READ)
                ibp->n_rc_resends++;
        else
                ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;

        qp->s_flags &= ~(QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR |
                         QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_PSN |
                         QIB_S_WAIT_ACK);
        if (wait)
                qp->s_flags |= QIB_S_SEND_ONE;
        reset_psn(qp, psn);
}

/*
 * This is called from s_timer for missing responses.
 */
static void rc_timeout(unsigned long arg)
{
        struct qib_qp *qp = (struct qib_qp *)arg;
        struct qib_ibport *ibp;
        unsigned long flags;

        spin_lock_irqsave(&qp->r_lock, flags);
        spin_lock(&qp->s_lock);
        if (qp->s_flags & QIB_S_TIMER) {
                ibp = to_iport(qp->ibqp.device, qp->port_num);
                ibp->n_rc_timeouts++;
                qp->s_flags &= ~QIB_S_TIMER;
                del_timer(&qp->s_timer);
                qib_restart_rc(qp, qp->s_last_psn + 1, 1);
                qib_schedule_send(qp);
        }
        spin_unlock(&qp->s_lock);
        spin_unlock_irqrestore(&qp->r_lock, flags);
}

/*
 * This is called from s_timer for RNR timeouts.
 */
void qib_rc_rnr_retry(unsigned long arg)
{
        struct qib_qp *qp = (struct qib_qp *)arg;
        unsigned long flags;

        spin_lock_irqsave(&qp->s_lock, flags);
        if (qp->s_flags & QIB_S_WAIT_RNR) {
                qp->s_flags &= ~QIB_S_WAIT_RNR;
                del_timer(&qp->s_timer);
                qib_schedule_send(qp);
        }
        spin_unlock_irqrestore(&qp->s_lock, flags);
}

/*
 * Set qp->s_sending_psn to the next PSN after the given one.
 * This would be psn+1 except when RDMA reads are present.
 */
static void reset_sending_psn(struct qib_qp *qp, u32 psn)
{
        struct qib_swqe *wqe;
        u32 n = qp->s_last;

        /* Find the work request corresponding to the given PSN. */
        for (;;) {
                wqe = get_swqe_ptr(qp, n);
                if (qib_cmp24(psn, wqe->lpsn) <= 0) {
                        if (wqe->wr.opcode == IB_WR_RDMA_READ)
                                qp->s_sending_psn = wqe->lpsn + 1;
                        else
                                qp->s_sending_psn = psn + 1;
                        break;
                }
                if (++n == qp->s_size)
                        n = 0;
                if (n == qp->s_tail)
                        break;
        }
}

/*
 * This should be called with the QP s_lock held and interrupts disabled.
 */
void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr)
{
        struct qib_other_headers *ohdr;
        struct qib_swqe *wqe;
        struct ib_wc wc;
        unsigned i;
        u32 opcode;
        u32 psn;

        if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND))
                return;

        /* Find out where the BTH is */
        if ((be16_to_cpu(hdr->lrh[0]) & 3) == QIB_LRH_BTH)
                ohdr = &hdr->u.oth;
        else
                ohdr = &hdr->u.l.oth;

        opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
        if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
            opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
                WARN_ON(!qp->s_rdma_ack_cnt);
                qp->s_rdma_ack_cnt--;
                return;
        }

        psn = be32_to_cpu(ohdr->bth[2]);
        reset_sending_psn(qp, psn);

        /*
         * Start timer after a packet requesting an ACK has been sent and
         * there are still requests that haven't been acked.
         */
        if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
            !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)) &&
            (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
                start_timer(qp);

        while (qp->s_last != qp->s_acked) {
                wqe = get_swqe_ptr(qp, qp->s_last);
                if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) >= 0 &&
                    qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
                        break;
                for (i = 0; i < wqe->wr.num_sge; i++) {
                        struct qib_sge *sge = &wqe->sg_list[i];

                        atomic_dec(&sge->mr->refcount);
                }
                /* Post a send completion queue entry if requested. */
                if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
                    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
                        memset(&wc, 0, sizeof wc);
                        wc.wr_id = wqe->wr.wr_id;
                        wc.status = IB_WC_SUCCESS;
                        wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
                        wc.byte_len = wqe->length;
                        wc.qp = &qp->ibqp;
                        qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
                }
                if (++qp->s_last >= qp->s_size)
                        qp->s_last = 0;
        }

        /*
         * If we were waiting for sends to complete before resending,
         * and they are now complete, restart sending.
         */
        if (qp->s_flags & QIB_S_WAIT_PSN &&
            qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
                qp->s_flags &= ~QIB_S_WAIT_PSN;
                qp->s_sending_psn = qp->s_psn;
                qp->s_sending_hpsn = qp->s_psn - 1;
                qib_schedule_send(qp);
        }
}

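/* Record the PSN of the most recently acknowledged packet. */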
static inline void update_last_psn(struct qib_qp *qp, u32 psn)
{
        qp->s_last_psn = psn;
}

/*
 * Generate a SWQE completion.
 * This is similar to qib_send_complete but has to check to be sure
 * that the SGEs are not being referenced if the SWQE is being resent.
 */
static struct qib_swqe *do_rc_completion(struct qib_qp *qp,
                                         struct qib_swqe *wqe,
                                         struct qib_ibport *ibp)
{
        struct ib_wc wc;
        unsigned i;

        /*
         * Don't decrement refcount and don't generate a
         * completion if the SWQE is being resent until the send
         * is finished.
         */
        if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) < 0 ||
            qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
                for (i = 0; i < wqe->wr.num_sge; i++) {
                        struct qib_sge *sge = &wqe->sg_list[i];

                        atomic_dec(&sge->mr->refcount);
                }
                /* Post a send completion queue entry if requested. */
                if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
                    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
                        memset(&wc, 0, sizeof wc);
                        wc.wr_id = wqe->wr.wr_id;
                        wc.status = IB_WC_SUCCESS;
                        wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
                        wc.byte_len = wqe->length;
                        wc.qp = &qp->ibqp;
                        qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
                }
                if (++qp->s_last >= qp->s_size)
                        qp->s_last = 0;
        } else
                ibp->n_rc_delayed_comp++;

        qp->s_retry = qp->s_retry_cnt;
        update_last_psn(qp, wqe->lpsn);

        /*
         * If we are completing a request which is in the process of
         * being resent, we can stop resending it since we know the
         * responder has already seen it.
         */
        if (qp->s_acked == qp->s_cur) {
                if (++qp->s_cur >= qp->s_size)
                        qp->s_cur = 0;
                qp->s_acked = qp->s_cur;
                wqe = get_swqe_ptr(qp, qp->s_cur);
                if (qp->s_acked != qp->s_tail) {
                        qp->s_state = OP(SEND_LAST);
                        qp->s_psn = wqe->psn;
                }
        } else {
                if (++qp->s_acked >= qp->s_size)
                        qp->s_acked = 0;
                if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
                        qp->s_draining = 0;
                wqe = get_swqe_ptr(qp, qp->s_acked);
        }
        return wqe;
}

/**
 * do_rc_ack - process an incoming RC ACK
 * @qp: the QP the ACK came in on
 * @aeth: the AETH from the ACK packet
 * @psn: the packet sequence number of the ACK
 * @opcode: the opcode of the request that resulted in the ACK
 * @val: the 64-bit value from an atomic acknowledge, if any
 * @rcd: the receive context
 *
 * This is called from qib_rc_rcv_resp() to process an incoming RC ACK
 * for the given QP.
 * Called at interrupt level with the QP s_lock held.
 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
 */
static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode,
                     u64 val, struct qib_ctxtdata *rcd)
{
        struct qib_ibport *ibp;
        enum ib_wc_status status;
        struct qib_swqe *wqe;
        int ret = 0;
        u32 ack_psn;
        int diff;

        /* Remove QP from retry timer */
        if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
                qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
                del_timer(&qp->s_timer);
        }

        /*
         * Note that NAKs implicitly ACK outstanding SEND and RDMA write
         * requests and implicitly NAK RDMA read and atomic requests issued
         * before the NAK'ed request. The MSN won't include the NAK'ed
         * request but will include an ACK'ed request(s).
         */
        ack_psn = psn;
        if (aeth >> 29)
                ack_psn--;
        wqe = get_swqe_ptr(qp, qp->s_acked);
        ibp = to_iport(qp->ibqp.device, qp->port_num);

        /*
         * The MSN might be for a later WQE than the PSN indicates so
         * only complete WQEs that the PSN finishes.
         */
        while ((diff = qib_cmp24(ack_psn, wqe->lpsn)) >= 0) {
                /*
                 * RDMA_READ_RESPONSE_ONLY is a special case since
                 * we want to generate completion events for everything
                 * before the RDMA read, copy the data, then generate
                 * the completion for the read.
                 */
                if (wqe->wr.opcode == IB_WR_RDMA_READ &&
                    opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
                    diff == 0) {
                        ret = 1;
                        goto bail;
                }
                /*
                 * If this request is a RDMA read or atomic, and the ACK is
                 * for a later operation, this ACK NAKs the RDMA read or
                 * atomic. In other words, only a RDMA_READ_LAST or ONLY
                 * can ACK a RDMA read and likewise for atomic ops. Note
                 * that the NAK case can only happen if relaxed ordering is
                 * used and requests are sent after an RDMA read or atomic
                 * is sent but before the response is received.
                 */
                if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
                     (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
                    ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
                      wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
                     (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
                        /* Retry this request. */
                        if (!(qp->r_flags & QIB_R_RDMAR_SEQ)) {
                                qp->r_flags |= QIB_R_RDMAR_SEQ;
                                qib_restart_rc(qp, qp->s_last_psn + 1, 0);
                                if (list_empty(&qp->rspwait)) {
                                        qp->r_flags |= QIB_R_RSP_SEND;
                                        atomic_inc(&qp->refcount);
                                        list_add_tail(&qp->rspwait,
                                                      &rcd->qp_wait_list);
                                }
                        }
                        /*
                         * No need to process the ACK/NAK since we are
                         * restarting an earlier request.
                         */
                        goto bail;
                }
                if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
                    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
                        u64 *vaddr = wqe->sg_list[0].vaddr;
                        *vaddr = val;
                }
                if (qp->s_num_rd_atomic &&
                    (wqe->wr.opcode == IB_WR_RDMA_READ ||
                     wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
                     wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
                        qp->s_num_rd_atomic--;
                        /* Restart sending task if fence is complete */
                        if ((qp->s_flags & QIB_S_WAIT_FENCE) &&
                            !qp->s_num_rd_atomic) {
                                qp->s_flags &= ~(QIB_S_WAIT_FENCE |
                                                 QIB_S_WAIT_ACK);
                                qib_schedule_send(qp);
                        } else if (qp->s_flags & QIB_S_WAIT_RDMAR) {
                                qp->s_flags &= ~(QIB_S_WAIT_RDMAR |
                                                 QIB_S_WAIT_ACK);
                                qib_schedule_send(qp);
                        }
                }
                wqe = do_rc_completion(qp, wqe, ibp);
                if (qp->s_acked == qp->s_tail)
                        break;
        }
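        /*
         * The top three bits of the AETH encode ACK (0), RNR NAK (1)
         * or NAK (3); the value 2 is reserved.
         */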
        switch (aeth >> 29) {
        case 0:         /* ACK */
                ibp->n_rc_acks++;
                if (qp->s_acked != qp->s_tail) {
                        /*
                         * We are expecting more ACKs so
                         * reset the retransmit timer.
                         */
                        start_timer(qp);
                        /*
                         * We can stop resending the earlier packets and
                         * continue with the next packet the receiver wants.
                         */
                        if (qib_cmp24(qp->s_psn, psn) <= 0)
                                reset_psn(qp, psn + 1);
                } else if (qib_cmp24(qp->s_psn, psn) <= 0) {
                        qp->s_state = OP(SEND_LAST);
                        qp->s_psn = psn + 1;
                }
                if (qp->s_flags & QIB_S_WAIT_ACK) {
                        qp->s_flags &= ~QIB_S_WAIT_ACK;
                        qib_schedule_send(qp);
                }
                qib_get_credit(qp, aeth);
                qp->s_rnr_retry = qp->s_rnr_retry_cnt;
                qp->s_retry = qp->s_retry_cnt;
                update_last_psn(qp, psn);
                ret = 1;
                goto bail;

        case 1:         /* RNR NAK */
                ibp->n_rnr_naks++;
                if (qp->s_acked == qp->s_tail)
                        goto bail;
                if (qp->s_flags & QIB_S_WAIT_RNR)
                        goto bail;
                if (qp->s_rnr_retry == 0) {
                        status = IB_WC_RNR_RETRY_EXC_ERR;
                        goto class_b;
                }
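                /* An RNR retry count of 7 means "retry forever". */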
                if (qp->s_rnr_retry_cnt < 7)
                        qp->s_rnr_retry--;

                /* The last valid PSN is the previous PSN. */
                update_last_psn(qp, psn - 1);

                ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;

                reset_psn(qp, psn);

                qp->s_flags &= ~(QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_ACK);
                qp->s_flags |= QIB_S_WAIT_RNR;
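                /* Back off for the RNR delay encoded in the AETH, then retry. */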
                qp->s_timer.function = qib_rc_rnr_retry;
                qp->s_timer.expires = jiffies + usecs_to_jiffies(
                        ib_qib_rnr_table[(aeth >> QIB_AETH_CREDIT_SHIFT) &
                                         QIB_AETH_CREDIT_MASK]);
                add_timer(&qp->s_timer);
                goto bail;

        case 3:         /* NAK */
                if (qp->s_acked == qp->s_tail)
                        goto bail;
                /* The last valid PSN is the previous PSN. */
                update_last_psn(qp, psn - 1);
                switch ((aeth >> QIB_AETH_CREDIT_SHIFT) &
                        QIB_AETH_CREDIT_MASK) {
                case 0: /* PSN sequence error */
                        ibp->n_seq_naks++;
                        /*
                         * Back up to the responder's expected PSN.
                         * Note that we might get a NAK in the middle of an
                         * RDMA READ response which terminates the RDMA
                         * READ.
                         */
                        qib_restart_rc(qp, psn, 0);
                        qib_schedule_send(qp);
                        break;

                case 1: /* Invalid Request */
                        status = IB_WC_REM_INV_REQ_ERR;
                        ibp->n_other_naks++;
                        goto class_b;

                case 2: /* Remote Access Error */
                        status = IB_WC_REM_ACCESS_ERR;
                        ibp->n_other_naks++;
                        goto class_b;

                case 3: /* Remote Operation Error */
                        status = IB_WC_REM_OP_ERR;
                        ibp->n_other_naks++;
class_b:
                        if (qp->s_last == qp->s_acked) {
                                qib_send_complete(qp, wqe, status);
                                qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
                        }
                        break;

                default:
                        /* Ignore other reserved NAK error codes */
                        goto reserved;
                }
                qp->s_retry = qp->s_retry_cnt;
                qp->s_rnr_retry = qp->s_rnr_retry_cnt;
                goto bail;

        default:                /* 2: reserved */
reserved:
                /* Ignore reserved NAK codes. */
                goto bail;
        }

bail:
        return ret;
}

/*
 * We have seen an out of sequence RDMA read middle or last packet.
 * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
 */
static void rdma_seq_err(struct qib_qp *qp, struct qib_ibport *ibp, u32 psn,
                         struct qib_ctxtdata *rcd)
{
        struct qib_swqe *wqe;

        /* Remove QP from retry timer */
        if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
                qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
                del_timer(&qp->s_timer);
        }

        wqe = get_swqe_ptr(qp, qp->s_acked);
        while (qib_cmp24(psn, wqe->lpsn) > 0) {
                if (wqe->wr.opcode == IB_WR_RDMA_READ ||
                    wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
                    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
                        break;
                wqe = do_rc_completion(qp, wqe, ibp);
        }

        ibp->n_rdma_seq++;
        qp->r_flags |= QIB_R_RDMAR_SEQ;
        qib_restart_rc(qp, qp->s_last_psn + 1, 0);
        if (list_empty(&qp->rspwait)) {
                qp->r_flags |= QIB_R_RSP_SEND;
                atomic_inc(&qp->refcount);
                list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
        }
}

/**
 * qib_rc_rcv_resp - process an incoming RC response packet
 * @ibp: the port this packet came in on
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @hdrsize: the header length
 * @pmtu: the path MTU
 * @rcd: the receive context
 *
 * This is called from qib_rc_rcv() to process an incoming RC response
 * packet for the given QP.
 * Called at interrupt level.
 */
static void qib_rc_rcv_resp(struct qib_ibport *ibp,
                            struct qib_other_headers *ohdr,
                            void *data, u32 tlen,
                            struct qib_qp *qp,
                            u32 opcode,
                            u32 psn, u32 hdrsize, u32 pmtu,
                            struct qib_ctxtdata *rcd)
{
        struct qib_swqe *wqe;
        struct qib_pportdata *ppd = ppd_from_ibp(ibp);
        enum ib_wc_status status;
        unsigned long flags;
        int diff;
        u32 pad;
        u32 aeth;
        u64 val;

        if (opcode != OP(RDMA_READ_RESPONSE_MIDDLE)) {
                /*
                 * If ACK'd PSN on SDMA busy list try to make progress to
                 * reclaim SDMA credits.
                 */
                if ((qib_cmp24(psn, qp->s_sending_psn) >= 0) &&
                    (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)) {

                        /*
                         * If send tasklet not running attempt to progress
                         * SDMA queue.
                         */
                        if (!(qp->s_flags & QIB_S_BUSY)) {
                                /* Acquire SDMA Lock */
                                spin_lock_irqsave(&ppd->sdma_lock, flags);
                                /* Invoke sdma make progress */
                                qib_sdma_make_progress(ppd);
                                /* Release SDMA Lock */
                                spin_unlock_irqrestore(&ppd->sdma_lock,
                                                       flags);
                        }
                }
        }

        spin_lock_irqsave(&qp->s_lock, flags);
        if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
                goto ack_done;

        /* Ignore invalid responses. */
        if (qib_cmp24(psn, qp->s_next_psn) >= 0)
                goto ack_done;

        /* Ignore duplicate responses. */
        diff = qib_cmp24(psn, qp->s_last_psn);
        if (unlikely(diff <= 0)) {
                /* Update credits for "ghost" ACKs */
                if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
                        aeth = be32_to_cpu(ohdr->u.aeth);
                        if ((aeth >> 29) == 0)
                                qib_get_credit(qp, aeth);
                }
                goto ack_done;
        }

        /*
         * Skip everything other than the PSN we expect, if we are waiting
         * for a reply to a restarted RDMA read or atomic op.
         */
        if (qp->r_flags & QIB_R_RDMAR_SEQ) {
                if (qib_cmp24(psn, qp->s_last_psn + 1) != 0)
                        goto ack_done;
                qp->r_flags &= ~QIB_R_RDMAR_SEQ;
        }

        if (unlikely(qp->s_acked == qp->s_tail))
                goto ack_done;
        wqe = get_swqe_ptr(qp, qp->s_acked);
        status = IB_WC_SUCCESS;

        switch (opcode) {
        case OP(ACKNOWLEDGE):
        case OP(ATOMIC_ACKNOWLEDGE):
        case OP(RDMA_READ_RESPONSE_FIRST):
                aeth = be32_to_cpu(ohdr->u.aeth);
                if (opcode == OP(ATOMIC_ACKNOWLEDGE)) {
                        __be32 *p = ohdr->u.at.atomic_ack_eth;

                        val = ((u64) be32_to_cpu(p[0]) << 32) |
                              be32_to_cpu(p[1]);
                } else
                        val = 0;
                if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
                    opcode != OP(RDMA_READ_RESPONSE_FIRST))
                        goto ack_done;
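                /* The first read response carries an AETH before the data. */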
  1376. hdrsize += 4;
  1377. wqe = get_swqe_ptr(qp, qp->s_acked);
  1378. if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
  1379. goto ack_op_err;
  1380. /*
  1381. * If this is a response to a resent RDMA read, we
  1382. * have to be careful to copy the data to the right
  1383. * location.
  1384. */
  1385. qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
  1386. wqe, psn, pmtu);
  1387. goto read_middle;
	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/* no AETH, no ACK */
		if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1)))
			goto ack_seq_err;
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
read_middle:
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto ack_len_err;
		if (unlikely(pmtu >= qp->s_rdma_read_len))
			goto ack_len_err;

		/*
		 * We got a response so update the timeout.
		 * 4.096 usec. * (1 << qp->timeout)
		 */
		qp->s_flags |= QIB_S_TIMER;
		mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies);
		if (qp->s_flags & QIB_S_WAIT_ACK) {
			qp->s_flags &= ~QIB_S_WAIT_ACK;
			qib_schedule_send(qp);
		}
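
		/*
		 * FIRST responses had their AETH processed by do_rc_ack()
		 * above, which refreshes the retry budget; MIDDLE
		 * responses carry no AETH, so restore s_retry here on
		 * forward progress instead.
		 */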
		if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
			qp->s_retry = qp->s_retry_cnt;

		/*
		 * Update the RDMA receive state but do the copy w/o
		 * holding the locks and blocking interrupts.
		 */
		qp->s_rdma_read_len -= pmtu;
		update_last_psn(qp, psn);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		qib_copy_sge(&qp->s_rdma_read_sge, data, pmtu, 0);
		goto bail;

	case OP(RDMA_READ_RESPONSE_ONLY):
		aeth = be32_to_cpu(ohdr->u.aeth);
		if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
			goto ack_done;
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/*
		 * Check that the data size is >= 0 && <= pmtu.
		 * Remember to account for the AETH header (4) and
		 * ICRC (4).
		 */
		if (unlikely(tlen < (hdrsize + pad + 8)))
			goto ack_len_err;
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 * location.
		 */
		wqe = get_swqe_ptr(qp, qp->s_acked);
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
						  wqe, psn, pmtu);
		goto read_last;

	case OP(RDMA_READ_RESPONSE_LAST):
		/* ACKs READ req. */
		if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1)))
			goto ack_seq_err;
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/*
		 * Check that the data size is >= 1 && <= pmtu.
		 * Remember to account for the AETH header (4) and
		 * ICRC (4).
		 */
		if (unlikely(tlen <= (hdrsize + pad + 8)))
			goto ack_len_err;
read_last:
		tlen -= hdrsize + pad + 8;
		if (unlikely(tlen != qp->s_rdma_read_len))
			goto ack_len_err;
		aeth = be32_to_cpu(ohdr->u.aeth);
		qib_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0);
		WARN_ON(qp->s_rdma_read_sge.num_sge);
		(void) do_rc_ack(qp, aeth, psn,
				 OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
		goto ack_done;
	}

ack_op_err:
	status = IB_WC_LOC_QP_OP_ERR;
	goto ack_err;

ack_seq_err:
	rdma_seq_err(qp, ibp, psn, rcd);
	goto ack_done;

ack_len_err:
	status = IB_WC_LOC_LEN_ERR;
ack_err:
	if (qp->s_last == qp->s_acked) {
		qib_send_complete(qp, wqe, status);
		qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
	}
ack_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
bail:
	return;
}

/**
 * qib_rc_rcv_error - process an incoming duplicate or error RC packet
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @diff: the difference between the PSN and the expected PSN
 * @rcd: the context pointer
 *
 * This is called from qib_rc_rcv() to process an unexpected
 * incoming RC packet for the given QP.
 * Called at interrupt level.
 * Return 1 if no more processing is needed; otherwise return 0 to
 * schedule a response to be sent.
 */
static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
			    void *data,
			    struct qib_qp *qp,
			    u32 opcode,
			    u32 psn,
			    int diff,
			    struct qib_ctxtdata *rcd)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct qib_ack_entry *e;
	unsigned long flags;
	u8 i, prev;
	int old_req;

	if (diff > 0) {
		/*
		 * Packet sequence error.
		 * A NAK will ACK earlier sends and RDMA writes.
		 * Don't queue the NAK if we already sent one.
		 */
		if (!qp->r_nak_state) {
			ibp->n_rc_seqnak++;
			qp->r_nak_state = IB_NAK_PSN_ERROR;
			/* Use the expected PSN. */
			qp->r_ack_psn = qp->r_psn;
			/*
			 * Wait to send the sequence NAK until all packets
			 * in the receive queue have been processed.
			 * Otherwise, we end up propagating congestion.
			 */
			if (list_empty(&qp->rspwait)) {
				qp->r_flags |= QIB_R_RSP_NAK;
				atomic_inc(&qp->refcount);
				list_add_tail(&qp->rspwait,
					      &rcd->qp_wait_list);
			}
		}
		goto done;
	}

	/*
	 * Handle a duplicate request.  Don't re-execute SEND, RDMA
	 * write or atomic op.  Don't NAK errors, just silently drop
	 * the duplicate request.  Note that r_sge, r_len, and
	 * r_rcv_len may be in use so don't modify them.
	 *
	 * We are supposed to ACK the earliest duplicate PSN but we
	 * can coalesce an outstanding duplicate ACK.  We have to
	 * send the earliest so that RDMA reads can be restarted at
	 * the requester's expected PSN.
	 *
	 * First, find where this duplicate PSN falls within the
	 * ACKs previously sent.
	 * old_req is true if there is an older response that is scheduled
	 * to be sent before sending this one.
	 */
	e = NULL;
	old_req = 1;
	ibp->n_rc_dupreq++;

	spin_lock_irqsave(&qp->s_lock, flags);

	for (i = qp->r_head_ack_queue; ; i = prev) {
		if (i == qp->s_tail_ack_queue)
			old_req = 0;
		if (i)
			prev = i - 1;
		else
			prev = QIB_MAX_RDMA_ATOMIC;
		if (prev == qp->r_head_ack_queue) {
			e = NULL;
			break;
		}
		e = &qp->s_ack_queue[prev];
		if (!e->opcode) {
			e = NULL;
			break;
		}
		if (qib_cmp24(psn, e->psn) >= 0) {
			if (prev == qp->s_tail_ack_queue &&
			    qib_cmp24(psn, e->lpsn) <= 0)
				old_req = 0;
			break;
		}
	}
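
	/*
	 * On exit from the scan, e is either NULL (the duplicate predates
	 * everything still remembered) or points at the newest
	 * s_ack_queue entry whose PSN range could cover psn, with prev as
	 * its index; the loop walks backwards through the
	 * (QIB_MAX_RDMA_ATOMIC + 1)-entry circular queue.
	 */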
	switch (opcode) {
	case OP(RDMA_READ_REQUEST): {
		struct ib_reth *reth;
		u32 offset;
		u32 len;

		/*
		 * If we didn't find the RDMA read request in the ack queue,
		 * we can ignore this request.
		 */
		if (!e || e->opcode != OP(RDMA_READ_REQUEST))
			goto unlock_done;
		/* RETH comes after BTH */
		reth = &ohdr->u.rc.reth;
		/*
		 * Address range must be a subset of the original
		 * request and start on pmtu boundaries.
		 * We reuse the old ack_queue slot since the requester
		 * should not back up and request an earlier PSN for the
		 * same request.
		 */
		offset = ((psn - e->psn) & QIB_PSN_MASK) *
			qp->pmtu;
		len = be32_to_cpu(reth->length);
		if (unlikely(offset + len != e->rdma_sge.sge_length))
			goto unlock_done;
		if (e->rdma_sge.mr) {
			atomic_dec(&e->rdma_sge.mr->refcount);
			e->rdma_sge.mr = NULL;
		}
		if (len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			ok = qib_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
					 IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto unlock_done;
		} else {
			e->rdma_sge.vaddr = NULL;
			e->rdma_sge.length = 0;
			e->rdma_sge.sge_length = 0;
		}
		e->psn = psn;
		if (old_req)
			goto unlock_done;
		qp->s_tail_ack_queue = prev;
		break;
	}

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		/*
		 * If we didn't find the atomic request in the ack queue
		 * or the send tasklet is already backed up to send an
		 * earlier entry, we can ignore this request.
		 */
		if (!e || e->opcode != (u8) opcode || old_req)
			goto unlock_done;
		qp->s_tail_ack_queue = prev;
		break;
	}

	default:
		/*
		 * Ignore this operation if it doesn't request an ACK
		 * or an earlier RDMA read or atomic is going to be resent.
		 */
		if (!(psn & IB_BTH_REQ_ACK) || old_req)
			goto unlock_done;
		/*
		 * Resend the most recent ACK if this request is
		 * after all the previous RDMA reads and atomics.
		 */
		if (i == qp->r_head_ack_queue) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			qp->r_nak_state = 0;
			qp->r_ack_psn = qp->r_psn - 1;
			goto send_ack;
		}
		/*
		 * Try to send a simple ACK to work around a Mellanox bug
		 * which doesn't accept a RDMA read response or atomic
		 * response as an ACK for earlier SENDs or RDMA writes.
		 */
		if (!(qp->s_flags & QIB_S_RESP_PENDING)) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			qp->r_nak_state = 0;
			qp->r_ack_psn = qp->s_ack_queue[i].psn - 1;
			goto send_ack;
		}
		/*
		 * Resend the RDMA read or atomic op which
		 * ACKs this duplicate request.
		 */
		qp->s_tail_ack_queue = i;
		break;
	}
	qp->s_ack_state = OP(ACKNOWLEDGE);
	qp->s_flags |= QIB_S_RESP_PENDING;
	qp->r_nak_state = 0;
	qib_schedule_send(qp);

unlock_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
done:
	return 1;

send_ack:
	return 0;
}
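
/**
 * qib_rc_error - put the QP into the error state
 * @qp: the QP to transition
 * @err: the completion status to flush pending work requests with
 *
 * Takes qp->s_lock around qib_error_qp() and, if the transition retired
 * the last WQE, reports IB_EVENT_QP_LAST_WQE_REACHED to the QP's event
 * handler.
 */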
void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err)
{
	unsigned long flags;
	int lastwqe;

	spin_lock_irqsave(&qp->s_lock, flags);
	lastwqe = qib_error_qp(qp, err);
	spin_unlock_irqrestore(&qp->s_lock, flags);

	if (lastwqe) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}
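
/*
 * Advance s_tail_ack_queue past entry n, wrapping within the
 * (QIB_MAX_RDMA_ATOMIC + 1)-entry ring, and reset the ack state machine
 * so the next response starts clean.
 */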
static inline void qib_update_ack_queue(struct qib_qp *qp, unsigned n)
{
	unsigned next;

	next = n + 1;
	if (next > QIB_MAX_RDMA_ATOMIC)
		next = 0;
	qp->s_tail_ack_queue = next;
	qp->s_ack_state = OP(ACKNOWLEDGE);
}

/**
 * qib_rc_rcv - process an incoming RC packet
 * @rcd: the context pointer
 * @hdr: the header of this packet
 * @has_grh: true if the header has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP for this packet
 *
 * This is called from qib_qp_rcv() to process an incoming RC packet
 * for the given QP.
 * Called at interrupt level.
 */
void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct qib_qp *qp)
{
	struct qib_ibport *ibp = &rcd->ppd->ibport_data;
	struct qib_other_headers *ohdr;
	u32 opcode;
	u32 hdrsize;
	u32 psn;
	u32 pad;
	struct ib_wc wc;
	u32 pmtu = qp->pmtu;
	int diff;
	struct ib_reth *reth;
	unsigned long flags;
	int ret;

	/* Check for GRH */
	if (!has_grh) {
		ohdr = &hdr->u.oth;
		hdrsize = 8 + 12;	/* LRH + BTH */
	} else {
		ohdr = &hdr->u.l.oth;
		hdrsize = 8 + 40 + 12;	/* LRH + GRH + BTH */
	}

	opcode = be32_to_cpu(ohdr->bth[0]);
	if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
		return;

	psn = be32_to_cpu(ohdr->bth[2]);
	opcode >>= 24;
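
	/*
	 * bth[0] carries the opcode in its top byte (hence the shift
	 * above); the same word also holds the 2-bit pad count at bits
	 * 21:20, which the length checks below extract.
	 */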
	/*
	 * Process responses (ACKs) before anything else.  Note that the
	 * packet sequence number will be for something in the send work
	 * queue rather than the expected receive packet sequence number.
	 * In other words, this QP is the requester.
	 */
	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
		qib_rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn,
				hdrsize, pmtu, rcd);
		return;
	}

	/* Compute 24 bits worth of difference. */
	diff = qib_cmp24(psn, qp->r_psn);
	if (unlikely(diff)) {
		if (qib_rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
			return;
		goto send_ack;
	}
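
	/*
	 * IBTA ordering: a MIDDLE or LAST packet is only acceptable when
	 * r_state shows the matching FIRST (or a prior MIDDLE) of the
	 * same message was the last packet accepted, which is what the
	 * switch below enforces.
	 */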
	/* Check for opcode sequence errors. */
	switch (qp->r_state) {
	case OP(SEND_FIRST):
	case OP(SEND_MIDDLE):
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE))
			break;
		goto nack_inv;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_MIDDLE):
		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			break;
		goto nack_inv;

	default:
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			goto nack_inv;
		/*
		 * Note that it is up to the requester to not send a new
		 * RDMA read or atomic operation before receiving an ACK
		 * for the previous operation.
		 */
		break;
	}

	if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) {
		qp->r_flags |= QIB_R_COMM_EST;
		if (qp->ibqp.event_handler) {
			struct ib_event ev;

			ev.device = qp->ibqp.device;
			ev.element.qp = &qp->ibqp;
			ev.event = IB_EVENT_COMM_EST;
			qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
		}
	}

	/* OK, process the packet. */
	switch (opcode) {
	case OP(SEND_FIRST):
		ret = qib_get_rwqe(qp, 0);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		qp->r_rcv_len = 0;
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
	case OP(RDMA_WRITE_MIDDLE):
send_middle:
		/* Check for invalid length PMTU or posted rwqe len. */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto nack_inv;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto nack_inv;
		qib_copy_sge(&qp->r_sge, data, pmtu, 1);
		break;

	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
		/* consume RWQE */
		ret = qib_get_rwqe(qp, 1);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		goto send_last_imm;

	case OP(SEND_ONLY):
	case OP(SEND_ONLY_WITH_IMMEDIATE):
		ret = qib_get_rwqe(qp, 0);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		qp->r_rcv_len = 0;
		if (opcode == OP(SEND_ONLY))
			goto no_immediate_data;
		/* FALLTHROUGH for SEND_ONLY_WITH_IMMEDIATE */
	case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
		wc.ex.imm_data = ohdr->u.imm_data;
		hdrsize += 4;
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;

	case OP(SEND_LAST):
	case OP(RDMA_WRITE_LAST):
no_immediate_data:
		wc.wc_flags = 0;
		wc.ex.imm_data = 0;
send_last:
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto nack_inv;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		wc.byte_len = tlen + qp->r_rcv_len;
		if (unlikely(wc.byte_len > qp->r_len))
			goto nack_inv;
		qib_copy_sge(&qp->r_sge, data, tlen, 1);
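		/*
		 * The payload is copied out; drop the MR references held
		 * by the receive SGL.
		 */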
		while (qp->r_sge.num_sge) {
			atomic_dec(&qp->r_sge.sge.mr->refcount);
			if (--qp->r_sge.num_sge)
				qp->r_sge.sge = *qp->r_sge.sg_list++;
		}
		qp->r_msn++;
		if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
			break;
		wc.wr_id = qp->r_wr_id;
		wc.status = IB_WC_SUCCESS;
		if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
			wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
		else
			wc.opcode = IB_WC_RECV;
		wc.qp = &qp->ibqp;
		wc.src_qp = qp->remote_qpn;
		wc.slid = qp->remote_ah_attr.dlid;
		wc.sl = qp->remote_ah_attr.sl;
		/* zero fields that are N/A */
		wc.vendor_err = 0;
		wc.pkey_index = 0;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		wc.csum_ok = 0;
		/* Signal completion event if the solicited bit is set. */
		qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
			     (ohdr->bth[0] &
			      cpu_to_be32(IB_BTH_SOLICITED)) != 0);
		break;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_ONLY):
	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto nack_inv;
		/* consume RWQE */
		reth = &ohdr->u.rc.reth;
		hdrsize += sizeof(*reth);
		qp->r_len = be32_to_cpu(reth->length);
		qp->r_rcv_len = 0;
		qp->r_sge.sg_list = NULL;
		if (qp->r_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/* Check rkey & NAK */
			ok = qib_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
					 rkey, IB_ACCESS_REMOTE_WRITE);
			if (unlikely(!ok))
				goto nack_acc;
			qp->r_sge.num_sge = 1;
		} else {
			qp->r_sge.num_sge = 0;
			qp->r_sge.sge.mr = NULL;
			qp->r_sge.sge.vaddr = NULL;
			qp->r_sge.sge.length = 0;
			qp->r_sge.sge.sge_length = 0;
		}
		if (opcode == OP(RDMA_WRITE_FIRST))
			goto send_middle;
		else if (opcode == OP(RDMA_WRITE_ONLY))
			goto no_immediate_data;
		ret = qib_get_rwqe(qp, 1);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		wc.ex.imm_data = ohdr->u.rc.imm_data;
		hdrsize += 4;
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;

	case OP(RDMA_READ_REQUEST): {
		struct qib_ack_entry *e;
		u32 len;
		u8 next;

		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto nack_inv;
		next = qp->r_head_ack_queue + 1;
		/* s_ack_queue is size QIB_MAX_RDMA_ATOMIC+1 so use > not >= */
		if (next > QIB_MAX_RDMA_ATOMIC)
			next = 0;
		spin_lock_irqsave(&qp->s_lock, flags);
		if (unlikely(next == qp->s_tail_ack_queue)) {
			if (!qp->s_ack_queue[next].sent)
				goto nack_inv_unlck;
			qib_update_ack_queue(qp, next);
		}
		e = &qp->s_ack_queue[qp->r_head_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
			atomic_dec(&e->rdma_sge.mr->refcount);
			e->rdma_sge.mr = NULL;
		}
		reth = &ohdr->u.rc.reth;
		len = be32_to_cpu(reth->length);
		if (len) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/* Check rkey & NAK */
			ok = qib_rkey_ok(qp, &e->rdma_sge, len, vaddr,
					 rkey, IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto nack_acc_unlck;
			/*
			 * Update the next expected PSN.  We add 1 later
			 * below, so only add the remainder here.
			 */
			if (len > pmtu)
				qp->r_psn += (len - 1) / pmtu;
		} else {
			e->rdma_sge.mr = NULL;
			e->rdma_sge.vaddr = NULL;
			e->rdma_sge.length = 0;
			e->rdma_sge.sge_length = 0;
		}
		e->opcode = opcode;
		e->sent = 0;
		e->psn = psn;
		e->lpsn = qp->r_psn;
		/*
		 * We need to increment the MSN here instead of when we
		 * finish sending the result since a duplicate request would
		 * increment it more than once.
		 */
		qp->r_msn++;
		qp->r_psn++;
		qp->r_state = opcode;
		qp->r_nak_state = 0;
		qp->r_head_ack_queue = next;

		/* Schedule the send tasklet. */
		qp->s_flags |= QIB_S_RESP_PENDING;
		qib_schedule_send(qp);

		goto sunlock;
	}
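
	/*
	 * Note that for both RDMA reads (above) and atomics (below) the
	 * response packets themselves are generated later, by the send
	 * engine, once QIB_S_RESP_PENDING is set and the entry is queued
	 * on s_ack_queue.
	 */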
	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		struct ib_atomic_eth *ateth;
		struct qib_ack_entry *e;
		u64 vaddr;
		atomic64_t *maddr;
		u64 sdata;
		u32 rkey;
		u8 next;

		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_inv;
		next = qp->r_head_ack_queue + 1;
		if (next > QIB_MAX_RDMA_ATOMIC)
			next = 0;
		spin_lock_irqsave(&qp->s_lock, flags);
		if (unlikely(next == qp->s_tail_ack_queue)) {
			if (!qp->s_ack_queue[next].sent)
				goto nack_inv_unlck;
			qib_update_ack_queue(qp, next);
		}
		e = &qp->s_ack_queue[qp->r_head_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
			atomic_dec(&e->rdma_sge.mr->refcount);
			e->rdma_sge.mr = NULL;
		}
		ateth = &ohdr->u.atomic_eth;
		vaddr = ((u64) be32_to_cpu(ateth->vaddr[0]) << 32) |
			be32_to_cpu(ateth->vaddr[1]);
		if (unlikely(vaddr & (sizeof(u64) - 1)))
			goto nack_inv_unlck;
		rkey = be32_to_cpu(ateth->rkey);
		/* Check rkey & NAK */
		if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  vaddr, rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_acc_unlck;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
		sdata = be64_to_cpu(ateth->swap_data);
		e->atomic_data = (opcode == OP(FETCH_ADD)) ?
			(u64) atomic64_add_return(sdata, maddr) - sdata :
			(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
				      be64_to_cpu(ateth->compare_data),
				      sdata);
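		/*
		 * Both arms yield the *prior* contents of the target:
		 * atomic64_add_return() returns the new value, so sdata is
		 * subtracted back out for FETCH_ADD, while cmpxchg()
		 * returns the old value directly for COMPARE_SWAP.  That
		 * prior value is what goes back in the atomic ACK.
		 */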
		atomic_dec(&qp->r_sge.sge.mr->refcount);
		qp->r_sge.num_sge = 0;
		e->opcode = opcode;
		e->sent = 0;
		e->psn = psn;
		e->lpsn = psn;
		qp->r_msn++;
		qp->r_psn++;
		qp->r_state = opcode;
		qp->r_nak_state = 0;
		qp->r_head_ack_queue = next;

		/* Schedule the send tasklet. */
		qp->s_flags |= QIB_S_RESP_PENDING;
		qib_schedule_send(qp);

		goto sunlock;
	}

	default:
		/* NAK unknown opcodes. */
		goto nack_inv;
	}
	qp->r_psn++;
	qp->r_state = opcode;
	qp->r_ack_psn = psn;
	qp->r_nak_state = 0;
	/* Send an ACK if requested or required. */
	if (psn & (1 << 31))
		goto send_ack;
	return;

rnr_nak:
	qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
	qp->r_ack_psn = qp->r_psn;
	/* Queue RNR NAK for later */
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= QIB_R_RSP_NAK;
		atomic_inc(&qp->refcount);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
	return;

nack_op_err:
	qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
	qp->r_ack_psn = qp->r_psn;
	/* Queue NAK for later */
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= QIB_R_RSP_NAK;
		atomic_inc(&qp->refcount);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
	return;

nack_inv_unlck:
	spin_unlock_irqrestore(&qp->s_lock, flags);
nack_inv:
	qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	qp->r_nak_state = IB_NAK_INVALID_REQUEST;
	qp->r_ack_psn = qp->r_psn;
	/* Queue NAK for later */
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= QIB_R_RSP_NAK;
		atomic_inc(&qp->refcount);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
	return;

nack_acc_unlck:
	spin_unlock_irqrestore(&qp->s_lock, flags);
nack_acc:
	qib_rc_error(qp, IB_WC_LOC_PROT_ERR);
	qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
	qp->r_ack_psn = qp->r_psn;
send_ack:
	qib_send_rc_ack(qp);
	return;

sunlock:
	spin_unlock_irqrestore(&qp->s_lock, flags);
}