  1. /*
  2. * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  3. * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  4. *
  5. * This software is available to you under a choice of one of two
  6. * licenses. You may choose to be licensed under the terms of the GNU
  7. * General Public License (GPL) Version 2, available from the file
  8. * COPYING in the main directory of this source tree, or the
  9. * OpenIB.org BSD license below:
  10. *
  11. * Redistribution and use in source and binary forms, with or
  12. * without modification, are permitted provided that the following
  13. * conditions are met:
  14. *
  15. * - Redistributions of source code must retain the above
  16. * copyright notice, this list of conditions and the following
  17. * disclaimer.
  18. *
  19. * - Redistributions in binary form must reproduce the above
  20. * copyright notice, this list of conditions and the following
  21. * disclaimer in the documentation and/or other materials
  22. * provided with the distribution.
  23. *
  24. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  25. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  26. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  27. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  28. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  29. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  30. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  31. * SOFTWARE.
  32. */
  33. #include "ipath_verbs.h"
  34. #include "ipath_kernel.h"
  35. /* cut down ridiculously long IB macro names */
  36. #define OP(x) IB_OPCODE_RC_##x
  37. /**
  38. * ipath_init_restart- initialize the qp->s_sge after a restart
  39. * @qp: the QP who's SGE we're restarting
  40. * @wqe: the work queue to initialize the QP's SGE from
  41. *
  42. * The QP s_lock should be held and interrupts disabled.
  43. */
  44. static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe)
  45. {
  46. struct ipath_ibdev *dev;
  47. u32 len;
  48. len = ((qp->s_psn - wqe->psn) & IPATH_PSN_MASK) *
  49. ib_mtu_enum_to_int(qp->path_mtu);
  50. qp->s_sge.sge = wqe->sg_list[0];
  51. qp->s_sge.sg_list = wqe->sg_list + 1;
  52. qp->s_sge.num_sge = wqe->wr.num_sge;
  53. ipath_skip_sge(&qp->s_sge, len);
  54. qp->s_len = wqe->length - len;
  55. dev = to_idev(qp->ibqp.device);
  56. spin_lock(&dev->pending_lock);
  57. if (list_empty(&qp->timerwait))
  58. list_add_tail(&qp->timerwait,
  59. &dev->pending[dev->pending_index]);
  60. spin_unlock(&dev->pending_lock);
  61. }
/**
 * ipath_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
 * @qp: a pointer to the QP
 * @ohdr: a pointer to the IB header being constructed
 * @pmtu: the path MTU
 *
 * Return bth0 if constructed; otherwise, return 0.
 * Note the QP s_lock must be held.
 */
u32 ipath_make_rc_ack(struct ipath_qp *qp,
		      struct ipath_other_headers *ohdr,
		      u32 pmtu)
{
	u32 hwords;	/* header length in 32-bit words */
	u32 len;	/* payload bytes for this packet */
	u32 bth0;	/* BTH word 0 (opcode in bits 31:24) */

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;
	/*
	 * Send a response.  Note that we are in the responder's
	 * side of the QP context.
	 */
	switch (qp->s_ack_state) {
	case OP(RDMA_READ_REQUEST):
		/* First response packet of an RDMA read: send data from
		 * s_rdma_sge, FIRST if it needs segmenting, else ONLY. */
		qp->s_cur_sge = &qp->s_rdma_sge;
		len = qp->s_rdma_len;
		if (len > pmtu) {
			len = pmtu;
			qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
		} else
			qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
		qp->s_rdma_len -= len;
		bth0 = qp->s_ack_state << 24;
		/* FIRST/ONLY responses carry an AETH. */
		ohdr->u.aeth = ipath_compute_aeth(qp);
		hwords++;
		break;
	case OP(RDMA_READ_RESPONSE_FIRST):
		qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/* Continue an in-progress RDMA read response; the final
		 * (LAST) packet carries the AETH. */
		qp->s_cur_sge = &qp->s_rdma_sge;
		len = qp->s_rdma_len;
		if (len > pmtu)
			len = pmtu;
		else {
			ohdr->u.aeth = ipath_compute_aeth(qp);
			hwords++;
			qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
		}
		qp->s_rdma_len -= len;
		bth0 = qp->s_ack_state << 24;
		break;
	case OP(RDMA_READ_RESPONSE_LAST):
	case OP(RDMA_READ_RESPONSE_ONLY):
		/*
		 * We have to prevent new requests from changing
		 * the r_sge state while a ipath_verbs_send()
		 * is in progress.
		 */
		qp->s_ack_state = OP(ACKNOWLEDGE);
		bth0 = 0;
		goto bail;	/* nothing to send; return 0 */
	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD):
		/* Atomic response: no payload, result goes in the ATETH. */
		qp->s_cur_sge = NULL;
		len = 0;
		/*
		 * Set the s_ack_state so the receive interrupt handler
		 * won't try to send an ACK (out of order) until this one
		 * is actually sent.
		 */
		qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
		bth0 = OP(ATOMIC_ACKNOWLEDGE) << 24;
		ohdr->u.at.aeth = ipath_compute_aeth(qp);
		/* r_atomic_data holds the pre-operation value to return. */
		ohdr->u.at.atomic_ack_eth = cpu_to_be64(qp->r_atomic_data);
		hwords += sizeof(ohdr->u.at) / 4;
		break;
	default:
		/* Send a regular ACK. */
		qp->s_cur_sge = NULL;
		len = 0;
		/*
		 * Set the s_ack_state so the receive interrupt handler
		 * won't try to send an ACK (out of order) until this one
		 * is actually sent.
		 */
		qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
		bth0 = OP(ACKNOWLEDGE) << 24;
		if (qp->s_nak_state)
			/* NAK: encode the NAK code in the AETH credit field. */
			ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
						   (qp->s_nak_state <<
						    IPATH_AETH_CREDIT_SHIFT));
		else
			ohdr->u.aeth = ipath_compute_aeth(qp);
		hwords++;
	}
	/* Publish header/payload sizes for the actual send. */
	qp->s_hdrwords = hwords;
	qp->s_cur_size = len;
bail:
	return bth0;
}
/**
 * ipath_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
 * @qp: a pointer to the QP
 * @ohdr: a pointer to the IB header being constructed
 * @pmtu: the path MTU
 * @bth0p: pointer to the BTH opcode word
 * @bth2p: pointer to the BTH PSN word
 *
 * Return 1 if constructed; otherwise, return 0.
 * Note the QP s_lock must be held and interrupts disabled.
 */
int ipath_make_rc_req(struct ipath_qp *qp,
		      struct ipath_other_headers *ohdr,
		      u32 pmtu, u32 *bth0p, u32 *bth2p)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ipath_sge_state *ss;	/* gather state for the payload */
	struct ipath_swqe *wqe;		/* work request being (re)sent */
	u32 hwords;			/* header length in 32-bit words */
	u32 len;			/* payload bytes in this packet */
	u32 bth0;
	u32 bth2;
	char newreq;			/* nonzero if starting a new WQE */

	/* Bail if the QP can't send, or an RNR timeout is in progress. */
	if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) ||
	    qp->s_rnr_timeout)
		goto done;

	/* Limit the number of packets sent without an ACK. */
	if (ipath_cmp24(qp->s_psn, qp->s_last_psn + IPATH_PSN_CREDIT) > 0) {
		/* Stall; update_last_psn() reschedules us when ACKed. */
		qp->s_wait_credit = 1;
		dev->n_rc_stalls++;
		spin_lock(&dev->pending_lock);
		if (list_empty(&qp->timerwait))
			list_add_tail(&qp->timerwait,
				      &dev->pending[dev->pending_index]);
		spin_unlock(&dev->pending_lock);
		goto done;
	}

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;
	bth0 = 0;

	/* Send a request. */
	wqe = get_swqe_ptr(qp, qp->s_cur);
	switch (qp->s_state) {
	default:
		/*
		 * Resend an old request or start a new one.
		 *
		 * We keep track of the current SWQE so that
		 * we don't reset the "furthest progress" state
		 * if we need to back up.
		 */
		newreq = 0;
		if (qp->s_cur == qp->s_tail) {
			/* Check if send work queue is empty. */
			if (qp->s_tail == qp->s_head)
				goto done;
			/* Starting a new request: assign its first PSN. */
			wqe->psn = qp->s_next_psn;
			newreq = 1;
		}
		/*
		 * Note that we have to be careful not to modify the
		 * original work request since we may need to resend
		 * it.
		 */
		len = wqe->length;
		ss = &qp->s_sge;
		bth2 = 0;
		switch (wqe->wr.opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
			/* If no credit, return. */
			if (qp->s_lsn != (u32) -1 &&
			    ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0)
				goto done;
			/* lpsn = PSN of the last packet of this request. */
			wqe->lpsn = wqe->psn;
			if (len > pmtu) {
				wqe->lpsn += (len - 1) / pmtu;
				qp->s_state = OP(SEND_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_SEND)
				qp->s_state = OP(SEND_ONLY);
			else {
				qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the BTH */
				ohdr->u.imm_data = wqe->wr.imm_data;
				hwords += 1;
			}
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= 1 << 23;	/* SE bit */
			bth2 = 1 << 31;	/* Request ACK. */
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;
		case IB_WR_RDMA_WRITE:
			/* RDMA writes without immediate consume no
			 * responder receive WQE, so take credit now. */
			if (newreq && qp->s_lsn != (u32) -1)
				qp->s_lsn++;
			/* FALLTHROUGH */
		case IB_WR_RDMA_WRITE_WITH_IMM:
			/* If no credit, return. */
			if (qp->s_lsn != (u32) -1 &&
			    ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0)
				goto done;
			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->wr.wr.rdma.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / 4;
			wqe->lpsn = wqe->psn;
			if (len > pmtu) {
				wqe->lpsn += (len - 1) / pmtu;
				qp->s_state = OP(RDMA_WRITE_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
				qp->s_state = OP(RDMA_WRITE_ONLY);
			else {
				qp->s_state =
					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after RETH */
				ohdr->u.rc.imm_data = wqe->wr.imm_data;
				hwords += 1;
				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
					bth0 |= 1 << 23;
			}
			bth2 = 1 << 31;	/* Request ACK. */
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;
		case IB_WR_RDMA_READ:
			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->wr.wr.rdma.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			qp->s_state = OP(RDMA_READ_REQUEST);
			hwords += sizeof(ohdr->u.rc.reth) / 4;
			if (newreq) {
				if (qp->s_lsn != (u32) -1)
					qp->s_lsn++;
				/*
				 * Adjust s_next_psn to count the
				 * expected number of responses.
				 */
				if (len > pmtu)
					qp->s_next_psn += (len - 1) / pmtu;
				wqe->lpsn = qp->s_next_psn++;
			}
			/* The read request itself carries no payload. */
			ss = NULL;
			len = 0;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP)
				qp->s_state = OP(COMPARE_SWAP);
			else
				qp->s_state = OP(FETCH_ADD);
			ohdr->u.atomic_eth.vaddr = cpu_to_be64(
				wqe->wr.wr.atomic.remote_addr);
			ohdr->u.atomic_eth.rkey = cpu_to_be32(
				wqe->wr.wr.atomic.rkey);
			ohdr->u.atomic_eth.swap_data = cpu_to_be64(
				wqe->wr.wr.atomic.swap);
			ohdr->u.atomic_eth.compare_data = cpu_to_be64(
				wqe->wr.wr.atomic.compare_add);
			hwords += sizeof(struct ib_atomic_eth) / 4;
			if (newreq) {
				if (qp->s_lsn != (u32) -1)
					qp->s_lsn++;
				/* Atomics are a single packet. */
				wqe->lpsn = wqe->psn;
			}
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			/* No payload; result comes back in the ack. */
			ss = NULL;
			len = 0;
			break;
		default:
			goto done;
		}
		/* Initialize the gather state for this WQE's payload. */
		qp->s_sge.sge = wqe->sg_list[0];
		qp->s_sge.sg_list = wqe->sg_list + 1;
		qp->s_sge.num_sge = wqe->wr.num_sge;
		qp->s_len = wqe->length;
		if (newreq) {
			qp->s_tail++;
			if (qp->s_tail >= qp->s_size)
				qp->s_tail = 0;
		}
		bth2 |= qp->s_psn & IPATH_PSN_MASK;
		if (wqe->wr.opcode == IB_WR_RDMA_READ)
			/* Skip the PSNs consumed by the read responses. */
			qp->s_psn = wqe->lpsn + 1;
		else {
			qp->s_psn++;
			if ((int)(qp->s_psn - qp->s_next_psn) > 0)
				qp->s_next_psn = qp->s_psn;
		}
		/*
		 * Put the QP on the pending list so lost ACKs will cause
		 * a retry.  More than one request can be pending so the
		 * QP may already be on the dev->pending list.
		 */
		spin_lock(&dev->pending_lock);
		if (list_empty(&qp->timerwait))
			list_add_tail(&qp->timerwait,
				      &dev->pending[dev->pending_index]);
		spin_unlock(&dev->pending_lock);
		break;
	case OP(RDMA_READ_RESPONSE_FIRST):
		/*
		 * This case can only happen if a send is restarted.
		 * See ipath_restart_rc().
		 */
		ipath_init_restart(qp, wqe);
		/* FALLTHROUGH */
	case OP(SEND_FIRST):
		qp->s_state = OP(SEND_MIDDLE);
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		bth2 = qp->s_psn++ & IPATH_PSN_MASK;
		if ((int)(qp->s_psn - qp->s_next_psn) > 0)
			qp->s_next_psn = qp->s_psn;
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			/* More MIDDLE packets to follow; stay in state. */
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_SEND)
			qp->s_state = OP(SEND_LAST);
		else {
			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.imm_data;
			hwords += 1;
		}
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= 1 << 23;
		bth2 |= 1 << 31;	/* Request ACK. */
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;
	case OP(RDMA_READ_RESPONSE_LAST):
		/*
		 * This case can only happen if a RDMA write is restarted.
		 * See ipath_restart_rc().
		 */
		ipath_init_restart(qp, wqe);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_FIRST):
		qp->s_state = OP(RDMA_WRITE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		bth2 = qp->s_psn++ & IPATH_PSN_MASK;
		if ((int)(qp->s_psn - qp->s_next_psn) > 0)
			qp->s_next_psn = qp->s_psn;
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
			qp->s_state = OP(RDMA_WRITE_LAST);
		else {
			qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.imm_data;
			hwords += 1;
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= 1 << 23;
		}
		bth2 |= 1 << 31;	/* Request ACK. */
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;
	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/*
		 * This case can only happen if a RDMA read is restarted.
		 * See ipath_restart_rc().
		 */
		ipath_init_restart(qp, wqe);
		/* Re-issue the read starting at the first un-ACKed offset. */
		len = ((qp->s_psn - wqe->psn) & IPATH_PSN_MASK) * pmtu;
		ohdr->u.rc.reth.vaddr =
			cpu_to_be64(wqe->wr.wr.rdma.remote_addr + len);
		ohdr->u.rc.reth.rkey =
			cpu_to_be32(wqe->wr.wr.rdma.rkey);
		ohdr->u.rc.reth.length = cpu_to_be32(qp->s_len);
		qp->s_state = OP(RDMA_READ_REQUEST);
		hwords += sizeof(ohdr->u.rc.reth) / 4;
		bth2 = qp->s_psn++ & IPATH_PSN_MASK;
		if ((int)(qp->s_psn - qp->s_next_psn) > 0)
			qp->s_next_psn = qp->s_psn;
		ss = NULL;
		len = 0;
		qp->s_cur++;
		if (qp->s_cur == qp->s_size)
			qp->s_cur = 0;
		break;
	case OP(RDMA_READ_REQUEST):
	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD):
		/*
		 * We shouldn't start anything new until this request is
		 * finished.  The ACK will handle rescheduling us.  XXX The
		 * number of outstanding ones is negotiated at connection
		 * setup time (see pg. 258,289)?  XXX Also, if we support
		 * multiple outstanding requests, we need to check the WQE
		 * IB_SEND_FENCE flag and not send a new request if a RDMA
		 * read or atomic is pending.
		 */
		goto done;
	}
	/* Request an ACK when close to exhausting the PSN credit window. */
	if (ipath_cmp24(qp->s_psn, qp->s_last_psn + IPATH_PSN_CREDIT - 1) >= 0)
		bth2 |= 1 << 31;	/* Request ACK. */
	qp->s_len -= len;
	qp->s_hdrwords = hwords;
	qp->s_cur_sge = ss;
	qp->s_cur_size = len;
	*bth0p = bth0 | (qp->s_state << 24);
	*bth2p = bth2;
	return 1;

done:
	return 0;
}
/**
 * send_rc_ack - Construct an ACK packet and send it
 * @qp: a pointer to the QP
 *
 * This is called from ipath_rc_rcv() and only uses the receive
 * side QP state.
 * Note that RDMA reads are handled in the send side QP state and tasklet.
 */
static void send_rc_ack(struct ipath_qp *qp)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	u16 lrh0;
	u32 bth0;
	u32 hwords;
	struct ipath_ib_header hdr;	/* built on the stack, sent inline */
	struct ipath_other_headers *ohdr;

	/* Construct the header. */
	ohdr = &hdr.u.oth;
	lrh0 = IPATH_LRH_BTH;
	/* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
	hwords = 6;
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
		/* Insert a GRH and switch to the long-header layout. */
		hwords += ipath_make_grh(dev, &hdr.u.l.grh,
					 &qp->remote_ah_attr.grh,
					 hwords, 0);
		ohdr = &hdr.u.l.oth;
		lrh0 = IPATH_LRH_GRH;
	}
	/* read pkey_index w/o lock (its atomic) */
	bth0 = ipath_get_pkey(dev->dd, qp->s_pkey_index);
	if (qp->r_nak_state)
		/* NAK: encode the NAK code in the AETH credit field. */
		ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
					   (qp->r_nak_state <<
					    IPATH_AETH_CREDIT_SHIFT));
	else
		ohdr->u.aeth = ipath_compute_aeth(qp);
	if (qp->r_ack_state >= OP(COMPARE_SWAP)) {
		/* Atomic ack: append the 64-bit original value. */
		bth0 |= OP(ATOMIC_ACKNOWLEDGE) << 24;
		ohdr->u.at.atomic_ack_eth = cpu_to_be64(qp->r_atomic_data);
		hwords += sizeof(ohdr->u.at.atomic_ack_eth) / 4;
	} else
		bth0 |= OP(ACKNOWLEDGE) << 24;
	lrh0 |= qp->remote_ah_attr.sl << 4;
	hdr.lrh[0] = cpu_to_be16(lrh0);
	hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
	hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid);
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & IPATH_PSN_MASK);

	/*
	 * If we can send the ACK, clear the ACK state.
	 */
	if (ipath_verbs_send(dev->dd, hwords, (u32 *) &hdr, 0, NULL) == 0) {
		qp->r_ack_state = OP(ACKNOWLEDGE);
		dev->n_unicast_xmit++;
	} else {
		/*
		 * We are out of PIO buffers at the moment.
		 * Pass responsibility for sending the ACK to the
		 * send tasklet so that when a PIO buffer becomes
		 * available, the ACK is sent ahead of other outgoing
		 * packets.
		 */
		dev->n_rc_qacks++;
		spin_lock_irq(&qp->s_lock);
		/* Don't coalesce if a RDMA read or atomic is pending. */
		if (qp->s_ack_state == OP(ACKNOWLEDGE) ||
		    qp->s_ack_state < OP(RDMA_READ_REQUEST)) {
			/* Hand the receive-side ack state to the sender. */
			qp->s_ack_state = qp->r_ack_state;
			qp->s_nak_state = qp->r_nak_state;
			qp->s_ack_psn = qp->r_ack_psn;
			qp->r_ack_state = OP(ACKNOWLEDGE);
		}
		spin_unlock_irq(&qp->s_lock);

		/* Call ipath_do_rc_send() in another thread. */
		tasklet_hi_schedule(&qp->s_task);
	}
}
/**
 * reset_psn - reset the QP state to send starting from PSN
 * @qp: the QP
 * @psn: the packet sequence number to restart at
 *
 * This is called from ipath_rc_rcv() to process an incoming RC ACK
 * for the given QP.
 * Called at interrupt level with the QP s_lock held.
 */
static void reset_psn(struct ipath_qp *qp, u32 psn)
{
	u32 n = qp->s_last;	/* start scanning at the oldest WQE */
	struct ipath_swqe *wqe = get_swqe_ptr(qp, n);
	u32 opcode;

	qp->s_cur = n;

	/*
	 * If we are starting the request from the beginning,
	 * let the normal send code handle initialization.
	 */
	if (ipath_cmp24(psn, wqe->psn) <= 0) {
		qp->s_state = OP(SEND_LAST);
		goto done;
	}

	/* Find the work request opcode corresponding to the given PSN. */
	opcode = wqe->wr.opcode;
	for (;;) {
		int diff;

		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;	/* psn is inside the last pending WQE */
		wqe = get_swqe_ptr(qp, n);
		diff = ipath_cmp24(psn, wqe->psn);
		if (diff < 0)
			break;	/* psn is inside the previous WQE */
		qp->s_cur = n;
		/*
		 * If we are starting the request from the beginning,
		 * let the normal send code handle initialization.
		 */
		if (diff == 0) {
			qp->s_state = OP(SEND_LAST);
			goto done;
		}
		opcode = wqe->wr.opcode;
	}

	/*
	 * Set the state to restart in the middle of a request.
	 * Don't change the s_sge, s_cur_sge, or s_cur_size.
	 * See ipath_do_rc_send().
	 */
	switch (opcode) {
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
		/* These "response" states are reused by ipath_make_rc_req()
		 * as restart markers; see its corresponding cases. */
		qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
		break;

	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
		break;

	case IB_WR_RDMA_READ:
		qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		break;

	default:
		/*
		 * This case shouldn't happen since its only
		 * one PSN per req.
		 */
		qp->s_state = OP(SEND_LAST);
	}
done:
	qp->s_psn = psn;
}
/**
 * ipath_restart_rc - back up requester to resend the last un-ACKed request
 * @qp: the QP to restart
 * @psn: packet sequence number for the request
 * @wc: the work completion request
 *
 * The QP s_lock should be held and interrupts disabled.
 */
void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
{
	struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
	struct ipath_ibdev *dev;

	if (qp->s_retry == 0) {
		/* Retries exhausted: complete the WQE in error and move
		 * the QP to the send-queue-error state. */
		wc->wr_id = wqe->wr.wr_id;
		wc->status = IB_WC_RETRY_EXC_ERR;
		wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
		wc->vendor_err = 0;
		wc->byte_len = 0;
		wc->qp_num = qp->ibqp.qp_num;
		wc->src_qp = qp->remote_qpn;
		wc->pkey_index = 0;
		wc->slid = qp->remote_ah_attr.dlid;
		wc->sl = qp->remote_ah_attr.sl;
		wc->dlid_path_bits = 0;
		wc->port_num = 0;
		ipath_sqerror_qp(qp, wc);
		goto bail;
	}
	qp->s_retry--;

	/*
	 * Remove the QP from the timeout queue.
	 * Note: it may already have been removed by ipath_ib_timer().
	 */
	dev = to_idev(qp->ibqp.device);
	spin_lock(&dev->pending_lock);
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	spin_unlock(&dev->pending_lock);

	if (wqe->wr.opcode == IB_WR_RDMA_READ)
		dev->n_rc_resends++;
	else
		/* Count every packet being retransmitted. */
		dev->n_rc_resends += (int)qp->s_psn - (int)psn;

	/* Rewind the send state, then kick the send tasklet. */
	reset_psn(qp, psn);
	tasklet_hi_schedule(&qp->s_task);

bail:
	return;
}
  693. static inline void update_last_psn(struct ipath_qp *qp, u32 psn)
  694. {
  695. if (qp->s_wait_credit) {
  696. qp->s_wait_credit = 0;
  697. tasklet_hi_schedule(&qp->s_task);
  698. }
  699. qp->s_last_psn = psn;
  700. }
  701. /**
  702. * do_rc_ack - process an incoming RC ACK
  703. * @qp: the QP the ACK came in on
  704. * @psn: the packet sequence number of the ACK
  705. * @opcode: the opcode of the request that resulted in the ACK
  706. *
  707. * This is called from ipath_rc_rcv_resp() to process an incoming RC ACK
  708. * for the given QP.
  709. * Called at interrupt level with the QP s_lock held and interrupts disabled.
  710. * Returns 1 if OK, 0 if current operation should be aborted (NAK).
  711. */
  712. static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
  713. {
  714. struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
  715. struct ib_wc wc;
  716. struct ipath_swqe *wqe;
  717. int ret = 0;
  718. u32 ack_psn;
  719. /*
  720. * Remove the QP from the timeout queue (or RNR timeout queue).
  721. * If ipath_ib_timer() has already removed it,
  722. * it's OK since we hold the QP s_lock and ipath_restart_rc()
  723. * just won't find anything to restart if we ACK everything.
  724. */
  725. spin_lock(&dev->pending_lock);
  726. if (!list_empty(&qp->timerwait))
  727. list_del_init(&qp->timerwait);
  728. spin_unlock(&dev->pending_lock);
  729. /* Nothing is pending to ACK/NAK. */
  730. if (unlikely(qp->s_last == qp->s_tail))
  731. goto bail;
  732. /*
  733. * Note that NAKs implicitly ACK outstanding SEND and RDMA write
  734. * requests and implicitly NAK RDMA read and atomic requests issued
  735. * before the NAK'ed request. The MSN won't include the NAK'ed
  736. * request but will include an ACK'ed request(s).
  737. */
  738. ack_psn = psn;
  739. if (aeth >> 29)
  740. ack_psn--;
  741. wqe = get_swqe_ptr(qp, qp->s_last);
  742. /*
  743. * The MSN might be for a later WQE than the PSN indicates so
  744. * only complete WQEs that the PSN finishes.
  745. */
  746. while (ipath_cmp24(ack_psn, wqe->lpsn) >= 0) {
  747. /*
  748. * If this request is a RDMA read or atomic, and the ACK is
  749. * for a later operation, this ACK NAKs the RDMA read or
  750. * atomic. In other words, only a RDMA_READ_LAST or ONLY
  751. * can ACK a RDMA read and likewise for atomic ops. Note
  752. * that the NAK case can only happen if relaxed ordering is
  753. * used and requests are sent after an RDMA read or atomic
  754. * is sent but before the response is received.
  755. */
  756. if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
  757. (opcode != OP(RDMA_READ_RESPONSE_LAST) ||
  758. ipath_cmp24(ack_psn, wqe->lpsn) != 0)) ||
  759. ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
  760. wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
  761. (opcode != OP(ATOMIC_ACKNOWLEDGE) ||
  762. ipath_cmp24(wqe->psn, psn) != 0))) {
  763. /*
  764. * The last valid PSN seen is the previous
  765. * request's.
  766. */
  767. update_last_psn(qp, wqe->psn - 1);
  768. /* Retry this request. */
  769. ipath_restart_rc(qp, wqe->psn, &wc);
  770. /*
  771. * No need to process the ACK/NAK since we are
  772. * restarting an earlier request.
  773. */
  774. goto bail;
  775. }
  776. if (wqe->wr.opcode == IB_WR_RDMA_READ ||
  777. wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
  778. wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
  779. tasklet_hi_schedule(&qp->s_task);
  780. /* Post a send completion queue entry if requested. */
  781. if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &qp->s_flags) ||
  782. (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
  783. wc.wr_id = wqe->wr.wr_id;
  784. wc.status = IB_WC_SUCCESS;
  785. wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
  786. wc.vendor_err = 0;
  787. wc.byte_len = wqe->length;
  788. wc.qp_num = qp->ibqp.qp_num;
  789. wc.src_qp = qp->remote_qpn;
  790. wc.pkey_index = 0;
  791. wc.slid = qp->remote_ah_attr.dlid;
  792. wc.sl = qp->remote_ah_attr.sl;
  793. wc.dlid_path_bits = 0;
  794. wc.port_num = 0;
  795. ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
  796. }
  797. qp->s_retry = qp->s_retry_cnt;
  798. /*
  799. * If we are completing a request which is in the process of
  800. * being resent, we can stop resending it since we know the
  801. * responder has already seen it.
  802. */
  803. if (qp->s_last == qp->s_cur) {
  804. if (++qp->s_cur >= qp->s_size)
  805. qp->s_cur = 0;
  806. wqe = get_swqe_ptr(qp, qp->s_cur);
  807. qp->s_state = OP(SEND_LAST);
  808. qp->s_psn = wqe->psn;
  809. }
  810. if (++qp->s_last >= qp->s_size)
  811. qp->s_last = 0;
  812. wqe = get_swqe_ptr(qp, qp->s_last);
  813. if (qp->s_last == qp->s_tail)
  814. break;
  815. }
  816. switch (aeth >> 29) {
  817. case 0: /* ACK */
  818. dev->n_rc_acks++;
  819. /* If this is a partial ACK, reset the retransmit timer. */
  820. if (qp->s_last != qp->s_tail) {
  821. spin_lock(&dev->pending_lock);
  822. list_add_tail(&qp->timerwait,
  823. &dev->pending[dev->pending_index]);
  824. spin_unlock(&dev->pending_lock);
  825. }
  826. ipath_get_credit(qp, aeth);
  827. qp->s_rnr_retry = qp->s_rnr_retry_cnt;
  828. qp->s_retry = qp->s_retry_cnt;
  829. update_last_psn(qp, psn);
  830. ret = 1;
  831. goto bail;
  832. case 1: /* RNR NAK */
  833. dev->n_rnr_naks++;
  834. if (qp->s_rnr_retry == 0) {
  835. if (qp->s_last == qp->s_tail)
  836. goto bail;
  837. wc.status = IB_WC_RNR_RETRY_EXC_ERR;
  838. goto class_b;
  839. }
  840. if (qp->s_rnr_retry_cnt < 7)
  841. qp->s_rnr_retry--;
  842. if (qp->s_last == qp->s_tail)
  843. goto bail;
  844. /* The last valid PSN is the previous PSN. */
  845. update_last_psn(qp, psn - 1);
  846. dev->n_rc_resends += (int)qp->s_psn - (int)psn;
  847. reset_psn(qp, psn);
  848. qp->s_rnr_timeout =
  849. ib_ipath_rnr_table[(aeth >> IPATH_AETH_CREDIT_SHIFT) &
  850. IPATH_AETH_CREDIT_MASK];
  851. ipath_insert_rnr_queue(qp);
  852. goto bail;
  853. case 3: /* NAK */
  854. /* The last valid PSN seen is the previous request's. */
  855. if (qp->s_last != qp->s_tail)
  856. update_last_psn(qp, wqe->psn - 1);
  857. switch ((aeth >> IPATH_AETH_CREDIT_SHIFT) &
  858. IPATH_AETH_CREDIT_MASK) {
  859. case 0: /* PSN sequence error */
  860. dev->n_seq_naks++;
  861. /*
  862. * Back up to the responder's expected PSN. XXX
  863. * Note that we might get a NAK in the middle of an
  864. * RDMA READ response which terminates the RDMA
  865. * READ.
  866. */
  867. if (qp->s_last == qp->s_tail)
  868. break;
  869. if (ipath_cmp24(psn, wqe->psn) < 0)
  870. break;
  871. /* Retry the request. */
  872. ipath_restart_rc(qp, psn, &wc);
  873. break;
  874. case 1: /* Invalid Request */
  875. wc.status = IB_WC_REM_INV_REQ_ERR;
  876. dev->n_other_naks++;
  877. goto class_b;
  878. case 2: /* Remote Access Error */
  879. wc.status = IB_WC_REM_ACCESS_ERR;
  880. dev->n_other_naks++;
  881. goto class_b;
  882. case 3: /* Remote Operation Error */
  883. wc.status = IB_WC_REM_OP_ERR;
  884. dev->n_other_naks++;
  885. class_b:
  886. wc.wr_id = wqe->wr.wr_id;
  887. wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
  888. wc.vendor_err = 0;
  889. wc.byte_len = 0;
  890. wc.qp_num = qp->ibqp.qp_num;
  891. wc.src_qp = qp->remote_qpn;
  892. wc.pkey_index = 0;
  893. wc.slid = qp->remote_ah_attr.dlid;
  894. wc.sl = qp->remote_ah_attr.sl;
  895. wc.dlid_path_bits = 0;
  896. wc.port_num = 0;
  897. ipath_sqerror_qp(qp, &wc);
  898. break;
  899. default:
  900. /* Ignore other reserved NAK error codes */
  901. goto reserved;
  902. }
  903. qp->s_rnr_retry = qp->s_rnr_retry_cnt;
  904. goto bail;
  905. default: /* 2: reserved */
  906. reserved:
  907. /* Ignore reserved NAK codes. */
  908. goto bail;
  909. }
  910. bail:
  911. return ret;
  912. }
/**
 * ipath_rc_rcv_resp - process an incoming RC response packet
 * @dev: the device this packet came in on
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @hdrsize: the header length
 * @pmtu: the path MTU
 * @header_in_data: true if part of the header data is in the data buffer
 *
 * This is called from ipath_rc_rcv() to process an incoming RC response
 * packet for the given QP.
 * Called at interrupt level.
 */
static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
				     struct ipath_other_headers *ohdr,
				     void *data, u32 tlen,
				     struct ipath_qp *qp,
				     u32 opcode,
				     u32 psn, u32 hdrsize, u32 pmtu,
				     int header_in_data)
{
	unsigned long flags;
	struct ib_wc wc;
	int diff;
	u32 pad;
	u32 aeth;

	/*
	 * All send-side state (s_*) is protected by s_lock; it is held
	 * across most of this function and explicitly dropped before the
	 * potentially long ipath_copy_sge() data copies below.
	 */
	spin_lock_irqsave(&qp->s_lock, flags);

	/* Ignore invalid responses: PSNs at/after s_next_psn were never sent. */
	if (ipath_cmp24(psn, qp->s_next_psn) >= 0)
		goto ack_done;

	/* Ignore duplicate responses. */
	diff = ipath_cmp24(psn, qp->s_last_psn);
	if (unlikely(diff <= 0)) {
		/*
		 * Update credits for "ghost" ACKs: a re-ACK of the last
		 * acknowledged PSN may still carry new flow-control credits
		 * in its AETH, so extract them even though the ACK itself
		 * is a duplicate.
		 */
		if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
			if (!header_in_data)
				aeth = be32_to_cpu(ohdr->u.aeth);
			else {
				aeth = be32_to_cpu(((__be32 *) data)[0]);
				data += sizeof(__be32);
			}
			/* aeth >> 29 == 0 means a plain ACK (not RNR/NAK). */
			if ((aeth >> 29) == 0)
				ipath_get_credit(qp, aeth);
		}
		goto ack_done;
	}

	switch (opcode) {
	case OP(ACKNOWLEDGE):
	case OP(ATOMIC_ACKNOWLEDGE):
	case OP(RDMA_READ_RESPONSE_FIRST):
		/* All three opcodes carry an AETH immediately after the BTH. */
		if (!header_in_data)
			aeth = be32_to_cpu(ohdr->u.aeth);
		else {
			aeth = be32_to_cpu(((__be32 *) data)[0]);
			data += sizeof(__be32);
		}
		if (opcode == OP(ATOMIC_ACKNOWLEDGE))
			/*
			 * Store the 64-bit atomic result directly into the
			 * requester's destination buffer.
			 */
			*(u64 *) qp->s_sge.sge.vaddr = *(u64 *) data;
		if (!do_rc_ack(qp, aeth, psn, opcode) ||
		    opcode != OP(RDMA_READ_RESPONSE_FIRST))
			goto ack_done;
		/* Skip over the AETH when computing the expected length. */
		hdrsize += 4;
		/*
		 * do_rc_ack() has already checked the PSN so skip
		 * the sequence check.
		 */
		goto rdma_read;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/* no AETH, no ACK */
		if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
			/* Out of sequence: restart the RDMA read request. */
			dev->n_rdma_seq++;
			if (qp->s_last != qp->s_tail)
				ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
			goto ack_done;
		}
	rdma_read:
		/* Sanity-check that an RDMA read is actually outstanding. */
		if (unlikely(qp->s_state != OP(RDMA_READ_REQUEST)))
			goto ack_done;
		/* FIRST/MIDDLE segments must be exactly one PMTU of payload. */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto ack_done;
		/* More data must remain after this segment (not the last). */
		if (unlikely(pmtu >= qp->s_len))
			goto ack_done;
		/* We got a response so update the timeout. */
		if (unlikely(qp->s_last == qp->s_tail ||
			     get_swqe_ptr(qp, qp->s_last)->wr.opcode !=
			     IB_WR_RDMA_READ))
			goto ack_done;
		spin_lock(&dev->pending_lock);
		if (qp->s_rnr_timeout == 0 && !list_empty(&qp->timerwait))
			/* Re-arm the retransmit timer for this QP. */
			list_move_tail(&qp->timerwait,
				       &dev->pending[dev->pending_index]);
		spin_unlock(&dev->pending_lock);
		/*
		 * Update the RDMA receive state but do the copy w/o
		 * holding the locks and blocking interrupts.
		 * XXX Yet another place that affects relaxed RDMA order
		 * since we don't want s_sge modified.
		 */
		qp->s_len -= pmtu;
		update_last_psn(qp, psn);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		ipath_copy_sge(&qp->s_sge, data, pmtu);
		goto bail;

	case OP(RDMA_READ_RESPONSE_LAST):
		/* ACKs READ req. */
		if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
			dev->n_rdma_seq++;
			if (qp->s_last != qp->s_tail)
				ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
			goto ack_done;
		}
		/* FALLTHROUGH */
	case OP(RDMA_READ_RESPONSE_ONLY):
		if (unlikely(qp->s_state != OP(RDMA_READ_REQUEST)))
			goto ack_done;
		/*
		 * Get the number of bytes the message was padded by.
		 */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/*
		 * Check that the data size is >= 1 && <= pmtu.
		 * Remember to account for the AETH header (4) and
		 * ICRC (4).
		 */
		if (unlikely(tlen <= (hdrsize + pad + 8))) {
			/* XXX Need to generate an error CQ entry. */
			goto ack_done;
		}
		tlen -= hdrsize + pad + 8;
		/* The final segment must deliver exactly the bytes left. */
		if (unlikely(tlen != qp->s_len)) {
			/* XXX Need to generate an error CQ entry. */
			goto ack_done;
		}
		if (!header_in_data)
			aeth = be32_to_cpu(ohdr->u.aeth);
		else {
			aeth = be32_to_cpu(((__be32 *) data)[0]);
			data += sizeof(__be32);
		}
		ipath_copy_sge(&qp->s_sge, data, tlen);
		if (do_rc_ack(qp, aeth, psn, OP(RDMA_READ_RESPONSE_LAST))) {
			/*
			 * Change the state so we continue
			 * processing new requests and wake up the
			 * tasklet if there are posted sends.
			 */
			qp->s_state = OP(SEND_LAST);
			if (qp->s_tail != qp->s_head)
				tasklet_hi_schedule(&qp->s_task);
		}
		goto ack_done;
	}

ack_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
bail:
	return;
}
/**
 * ipath_rc_rcv_error - process an incoming duplicate or error RC packet
 * @dev: the device this packet came in on
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @diff: the difference between the PSN and the expected PSN
 * @header_in_data: true if part of the header data is in the data buffer
 *
 * This is called from ipath_rc_rcv() to process an unexpected
 * incoming RC packet for the given QP.
 * Called at interrupt level.
 * Return 1 if no more processing is needed; otherwise return 0 to
 * schedule a response to be sent and the s_lock unlocked.
 */
static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
				     struct ipath_other_headers *ohdr,
				     void *data,
				     struct ipath_qp *qp,
				     u32 opcode,
				     u32 psn,
				     int diff,
				     int header_in_data)
{
	struct ib_reth *reth;

	if (diff > 0) {
		/*
		 * Packet sequence error.
		 * A NAK will ACK earlier sends and RDMA writes.
		 * Don't queue the NAK if a RDMA read, atomic, or
		 * NAK is pending though.
		 */
		if (qp->s_ack_state != OP(ACKNOWLEDGE) ||
		    qp->r_nak_state != 0)
			goto done;
		/*
		 * r_ack_state >= OP(COMPARE_SWAP) marks a pending RDMA
		 * read/atomic response which must not be overwritten.
		 */
		if (qp->r_ack_state < OP(COMPARE_SWAP)) {
			qp->r_ack_state = OP(SEND_ONLY);
			qp->r_nak_state = IB_NAK_PSN_ERROR;
			/* Use the expected PSN. */
			qp->r_ack_psn = qp->r_psn;
		}
		goto send_ack;
	}

	/*
	 * Handle a duplicate request. Don't re-execute SEND, RDMA
	 * write or atomic op. Don't NAK errors, just silently drop
	 * the duplicate request. Note that r_sge, r_len, and
	 * r_rcv_len may be in use so don't modify them.
	 *
	 * We are supposed to ACK the earliest duplicate PSN but we
	 * can coalesce an outstanding duplicate ACK. We have to
	 * send the earliest so that RDMA reads can be restarted at
	 * the requester's expected PSN.
	 */
	if (opcode == OP(RDMA_READ_REQUEST)) {
		/* RETH comes after BTH */
		if (!header_in_data)
			reth = &ohdr->u.rc.reth;
		else {
			reth = (struct ib_reth *)data;
			data += sizeof(*reth);
		}
		/*
		 * If we receive a duplicate RDMA request, it means the
		 * requester saw a sequence error and needs to restart
		 * from an earlier point. We can abort the current
		 * RDMA read send in that case.
		 */
		spin_lock_irq(&qp->s_lock);
		if (qp->s_ack_state != OP(ACKNOWLEDGE) &&
		    (qp->s_hdrwords || ipath_cmp24(psn, qp->s_ack_psn) >= 0)) {
			/*
			 * We are already sending earlier requested data.
			 * Don't abort it to send later out of sequence data.
			 */
			spin_unlock_irq(&qp->s_lock);
			goto done;
		}
		qp->s_rdma_len = be32_to_cpu(reth->length);
		if (qp->s_rdma_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/*
			 * Address range must be a subset of the original
			 * request and start on pmtu boundaries.
			 */
			ok = ipath_rkey_ok(qp, &qp->s_rdma_sge,
					   qp->s_rdma_len, vaddr, rkey,
					   IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok)) {
				/* Bad rkey: silently drop the duplicate. */
				spin_unlock_irq(&qp->s_lock);
				goto done;
			}
		} else {
			/* Zero-length read: clear the scatter/gather state. */
			qp->s_rdma_sge.sg_list = NULL;
			qp->s_rdma_sge.num_sge = 0;
			qp->s_rdma_sge.sge.mr = NULL;
			qp->s_rdma_sge.sge.vaddr = NULL;
			qp->s_rdma_sge.sge.length = 0;
			qp->s_rdma_sge.sge.sge_length = 0;
		}
		qp->s_ack_state = opcode;
		qp->s_ack_psn = psn;
		spin_unlock_irq(&qp->s_lock);
		/* Let the send tasklet (re)send the read response. */
		tasklet_hi_schedule(&qp->s_task);
		goto send_ack;
	}

	/*
	 * A pending RDMA read will ACK anything before it so
	 * ignore earlier duplicate requests.
	 */
	if (qp->s_ack_state != OP(ACKNOWLEDGE))
		goto done;

	/*
	 * If an ACK is pending, don't replace the pending ACK
	 * with an earlier one since the later one will ACK the earlier.
	 * Also, if we already have a pending atomic, send it.
	 */
	if (qp->r_ack_state != OP(ACKNOWLEDGE) &&
	    (ipath_cmp24(psn, qp->r_ack_psn) <= 0 ||
	     qp->r_ack_state >= OP(COMPARE_SWAP)))
		goto send_ack;
	switch (opcode) {
	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD):
		/*
		 * Check for the PSN of the last atomic operation
		 * performed and resend the result if found.
		 */
		if ((psn & IPATH_PSN_MASK) != qp->r_atomic_psn)
			goto done;
		break;
	}
	/* Record the duplicate so an ACK is (re)sent for it. */
	qp->r_ack_state = opcode;
	qp->r_nak_state = 0;
	qp->r_ack_psn = psn;
send_ack:
	return 0;

done:
	return 1;
}
/*
 * ipath_rc_error - put the QP into the IB error state
 * @qp: the QP to transition
 * @err: the work completion status to report via ipath_error_qp()
 *
 * Takes s_lock so the state change and error processing are atomic with
 * respect to the send side.  NOTE(review): ipath_error_qp() presumably
 * flushes outstanding work requests with @err — confirm against its
 * definition elsewhere in the driver.
 */
static void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err)
{
	spin_lock_irq(&qp->s_lock);
	qp->state = IB_QPS_ERR;
	ipath_error_qp(qp, err);
	spin_unlock_irq(&qp->s_lock);
}
/**
 * ipath_rc_rcv - process an incoming RC packet
 * @dev: the device this packet came in on
 * @hdr: the header of this packet
 * @has_grh: true if the header has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP for this packet
 *
 * This is called from ipath_qp_rcv() to process an incoming RC packet
 * for the given QP.
 * Called at interrupt level.
 */
void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
{
	struct ipath_other_headers *ohdr;
	u32 opcode;
	u32 hdrsize;
	u32 psn;
	u32 pad;
	struct ib_wc wc;
	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
	int diff;
	struct ib_reth *reth;
	int header_in_data;

	/* Validate the SLID. See Ch. 9.6.1.5 */
	if (unlikely(be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid))
		goto done;

	/* Check for GRH */
	if (!has_grh) {
		ohdr = &hdr->u.oth;
		hdrsize = 8 + 12;	/* LRH + BTH */
		psn = be32_to_cpu(ohdr->bth[2]);
		header_in_data = 0;
	} else {
		ohdr = &hdr->u.l.oth;
		hdrsize = 8 + 40 + 12;	/* LRH + GRH + BTH */
		/*
		 * The header with GRH is 60 bytes and the core driver sets
		 * the eager header buffer size to 56 bytes so the last 4
		 * bytes of the BTH header (PSN) is in the data buffer.
		 */
		header_in_data = dev->dd->ipath_rcvhdrentsize == 16;
		if (header_in_data) {
			psn = be32_to_cpu(((__be32 *) data)[0]);
			data += sizeof(__be32);
		} else
			psn = be32_to_cpu(ohdr->bth[2]);
	}

	/*
	 * Process responses (ACKs) before anything else. Note that the
	 * packet sequence number will be for something in the send work
	 * queue rather than the expected receive packet sequence number.
	 * In other words, this QP is the requester.
	 */
	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
		ipath_rc_rcv_resp(dev, ohdr, data, tlen, qp, opcode, psn,
				  hdrsize, pmtu, header_in_data);
		goto done;
	}

	/* Compute 24 bits worth of difference. */
	diff = ipath_cmp24(psn, qp->r_psn);
	if (unlikely(diff)) {
		/* Out-of-sequence or duplicate request. */
		if (ipath_rc_rcv_error(dev, ohdr, data, qp, opcode,
				       psn, diff, header_in_data))
			goto done;
		goto send_ack;
	}

	/* Check for opcode sequence errors. */
	switch (qp->r_state) {
	case OP(SEND_FIRST):
	case OP(SEND_MIDDLE):
		/* Mid-SEND: only a SEND continuation is legal. */
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE))
			break;
	nack_inv:
		/*
		 * A NAK will ACK earlier sends and RDMA writes.
		 * Don't queue the NAK if a RDMA read, atomic, or NAK
		 * is pending though.
		 */
		if (qp->r_ack_state >= OP(COMPARE_SWAP))
			goto send_ack;
		ipath_rc_error(qp, IB_WC_REM_INV_REQ_ERR);
		qp->r_ack_state = OP(SEND_ONLY);
		qp->r_nak_state = IB_NAK_INVALID_REQUEST;
		qp->r_ack_psn = qp->r_psn;
		goto send_ack;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_MIDDLE):
		/* Mid-RDMA-write: only a write continuation is legal. */
		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			break;
		goto nack_inv;

	default:
		/* Not mid-message: MIDDLE/LAST opcodes are invalid here. */
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			goto nack_inv;
		/*
		 * Note that it is up to the requester to not send a new
		 * RDMA read or atomic operation before receiving an ACK
		 * for the previous operation.
		 */
		break;
	}

	wc.imm_data = 0;
	wc.wc_flags = 0;

	/* OK, process the packet. */
	switch (opcode) {
	case OP(SEND_FIRST):
		if (!ipath_get_rwqe(qp, 0)) {
		rnr_nak:
			/*
			 * No receive WQE available: RNR NAK.
			 * A RNR NAK will ACK earlier sends and RDMA writes.
			 * Don't queue the NAK if a RDMA read or atomic
			 * is pending though.
			 */
			if (qp->r_ack_state >= OP(COMPARE_SWAP))
				goto send_ack;
			qp->r_ack_state = OP(SEND_ONLY);
			qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
			qp->r_ack_psn = qp->r_psn;
			goto send_ack;
		}
		qp->r_rcv_len = 0;
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
	case OP(RDMA_WRITE_MIDDLE):
	send_middle:
		/* Check for invalid length PMTU or posted rwqe len. */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto nack_inv;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto nack_inv;
		ipath_copy_sge(&qp->r_sge, data, pmtu);
		break;

	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
		/* consume RWQE */
		if (!ipath_get_rwqe(qp, 1))
			goto rnr_nak;
		goto send_last_imm;

	case OP(SEND_ONLY):
	case OP(SEND_ONLY_WITH_IMMEDIATE):
		if (!ipath_get_rwqe(qp, 0))
			goto rnr_nak;
		qp->r_rcv_len = 0;
		if (opcode == OP(SEND_ONLY))
			goto send_last;
		/* FALLTHROUGH */
	case OP(SEND_LAST_WITH_IMMEDIATE):
	send_last_imm:
		if (header_in_data) {
			wc.imm_data = *(__be32 *) data;
			data += sizeof(__be32);
		} else {
			/* Immediate data comes after BTH */
			wc.imm_data = ohdr->u.imm_data;
		}
		hdrsize += 4;
		wc.wc_flags = IB_WC_WITH_IMM;
		/* FALLTHROUGH */
	case OP(SEND_LAST):
	case OP(RDMA_WRITE_LAST):
	send_last:
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto nack_inv;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		wc.byte_len = tlen + qp->r_rcv_len;
		if (unlikely(wc.byte_len > qp->r_len))
			goto nack_inv;
		ipath_copy_sge(&qp->r_sge, data, tlen);
		qp->r_msn++;
		/* RDMA writes with no consumed RWQE generate no completion. */
		if (!qp->r_wrid_valid)
			break;
		qp->r_wrid_valid = 0;
		wc.wr_id = qp->r_wr_id;
		wc.status = IB_WC_SUCCESS;
		wc.opcode = IB_WC_RECV;
		wc.vendor_err = 0;
		wc.qp_num = qp->ibqp.qp_num;
		wc.src_qp = qp->remote_qpn;
		wc.pkey_index = 0;
		wc.slid = qp->remote_ah_attr.dlid;
		wc.sl = qp->remote_ah_attr.sl;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		/* Signal completion event if the solicited bit is set. */
		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
			       (ohdr->bth[0] &
				__constant_cpu_to_be32(1 << 23)) != 0);
		break;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_ONLY):
	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
		/* consume RWQE */
		/* RETH comes after BTH */
		if (!header_in_data)
			reth = &ohdr->u.rc.reth;
		else {
			reth = (struct ib_reth *)data;
			data += sizeof(*reth);
		}
		hdrsize += sizeof(*reth);
		qp->r_len = be32_to_cpu(reth->length);
		qp->r_rcv_len = 0;
		if (qp->r_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/* Check rkey & NAK */
			ok = ipath_rkey_ok(qp, &qp->r_sge,
					   qp->r_len, vaddr, rkey,
					   IB_ACCESS_REMOTE_WRITE);
			if (unlikely(!ok))
				goto nack_acc;
		} else {
			/* Zero-length write: clear the scatter/gather state. */
			qp->r_sge.sg_list = NULL;
			qp->r_sge.sge.mr = NULL;
			qp->r_sge.sge.vaddr = NULL;
			qp->r_sge.sge.length = 0;
			qp->r_sge.sge.sge_length = 0;
		}
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_WRITE)))
			goto nack_acc;
		if (opcode == OP(RDMA_WRITE_FIRST))
			goto send_middle;
		else if (opcode == OP(RDMA_WRITE_ONLY))
			goto send_last;
		/* RDMA_WRITE_ONLY_WITH_IMMEDIATE consumes a RWQE. */
		if (!ipath_get_rwqe(qp, 1))
			goto rnr_nak;
		goto send_last_imm;

	case OP(RDMA_READ_REQUEST):
		/* RETH comes after BTH */
		if (!header_in_data)
			reth = &ohdr->u.rc.reth;
		else {
			reth = (struct ib_reth *)data;
			data += sizeof(*reth);
		}
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_READ)))
			goto nack_acc;
		/* s_lock protects the send-side read-response state. */
		spin_lock_irq(&qp->s_lock);
		qp->s_rdma_len = be32_to_cpu(reth->length);
		if (qp->s_rdma_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/* Check rkey & NAK */
			ok = ipath_rkey_ok(qp, &qp->s_rdma_sge,
					   qp->s_rdma_len, vaddr, rkey,
					   IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok)) {
				spin_unlock_irq(&qp->s_lock);
				goto nack_acc;
			}
			/*
			 * Update the next expected PSN. We add 1 later
			 * below, so only add the remainder here.
			 */
			if (qp->s_rdma_len > pmtu)
				qp->r_psn += (qp->s_rdma_len - 1) / pmtu;
		} else {
			/* Zero-length read: clear the scatter/gather state. */
			qp->s_rdma_sge.sg_list = NULL;
			qp->s_rdma_sge.num_sge = 0;
			qp->s_rdma_sge.sge.mr = NULL;
			qp->s_rdma_sge.sge.vaddr = NULL;
			qp->s_rdma_sge.sge.length = 0;
			qp->s_rdma_sge.sge.sge_length = 0;
		}
		/*
		 * We need to increment the MSN here instead of when we
		 * finish sending the result since a duplicate request would
		 * increment it more than once.
		 */
		qp->r_msn++;
		qp->s_ack_state = opcode;
		qp->s_ack_psn = psn;
		spin_unlock_irq(&qp->s_lock);

		qp->r_psn++;
		qp->r_state = opcode;
		qp->r_nak_state = 0;

		/* Call ipath_do_rc_send() in another thread. */
		tasklet_hi_schedule(&qp->s_task);

		goto done;

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		struct ib_atomic_eth *ateth;
		u64 vaddr;
		u64 sdata;
		u32 rkey;

		if (!header_in_data)
			ateth = &ohdr->u.atomic_eth;
		else {
			ateth = (struct ib_atomic_eth *)data;
			data += sizeof(*ateth);
		}
		vaddr = be64_to_cpu(ateth->vaddr);
		/* Atomic target must be naturally aligned to 8 bytes. */
		if (unlikely(vaddr & (sizeof(u64) - 1)))
			goto nack_inv;
		rkey = be32_to_cpu(ateth->rkey);
		/* Check rkey & NAK */
		if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge,
					    sizeof(u64), vaddr, rkey,
					    IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_acc;
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_acc;
		/*
		 * Perform atomic OP and save result.
		 * NOTE(review): the read-modify-write is serialized with
		 * dev->pending_lock — presumably the driver-wide lock used
		 * to make atomics atomic across QPs; confirm against the
		 * rest of the driver.
		 */
		sdata = be64_to_cpu(ateth->swap_data);
		spin_lock_irq(&dev->pending_lock);
		qp->r_atomic_data = *(u64 *) qp->r_sge.sge.vaddr;
		if (opcode == OP(FETCH_ADD))
			*(u64 *) qp->r_sge.sge.vaddr =
				qp->r_atomic_data + sdata;
		else if (qp->r_atomic_data ==
			 be64_to_cpu(ateth->compare_data))
			*(u64 *) qp->r_sge.sge.vaddr = sdata;
		spin_unlock_irq(&dev->pending_lock);
		qp->r_msn++;
		qp->r_atomic_psn = psn & IPATH_PSN_MASK;
		/* Force the AckReq path below so the result is returned. */
		psn |= 1 << 31;
		break;
	}

	default:
		/* Drop packet for unknown opcodes. */
		goto done;
	}
	qp->r_psn++;
	qp->r_state = opcode;
	qp->r_nak_state = 0;
	/* Send an ACK if requested or required (BTH bit 31 = AckReq). */
	if (psn & (1 << 31)) {
		/*
		 * Coalesce ACKs unless there is a RDMA READ or
		 * ATOMIC pending.
		 */
		if (qp->r_ack_state < OP(COMPARE_SWAP)) {
			qp->r_ack_state = opcode;
			qp->r_ack_psn = psn;
		}
		goto send_ack;
	}
	goto done;

nack_acc:
	/*
	 * A NAK will ACK earlier sends and RDMA writes.
	 * Don't queue the NAK if a RDMA read, atomic, or NAK
	 * is pending though.
	 */
	if (qp->r_ack_state < OP(COMPARE_SWAP)) {
		ipath_rc_error(qp, IB_WC_REM_ACCESS_ERR);
		qp->r_ack_state = OP(RDMA_WRITE_ONLY);
		qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
		qp->r_ack_psn = qp->r_psn;
	}
send_ack:
	/* Send ACK right away unless the send tasklet has a pending ACK. */
	if (qp->s_ack_state == OP(ACKNOWLEDGE))
		send_rc_ack(qp);

done:
	return;
}