/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <rdma/rdma_cm.h>

#include "rds.h"
#include "ib.h"

static struct kmem_cache *rds_ib_incoming_slab;
static struct kmem_cache *rds_ib_frag_slab;
static atomic_t rds_ib_allocation = ATOMIC_INIT(0);

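/*
 * Fragment teardown happens in two steps: rds_ib_frag_drop_page() releases
 * the frag's reference on its page and clears f_page, while
 * rds_ib_frag_free() returns the frag itself to its slab.  The BUG_ON() in
 * the latter checks that the page reference was dropped first.
 */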
static void rds_ib_frag_drop_page(struct rds_page_frag *frag)
{
        rdsdebug("frag %p page %p\n", frag, frag->f_page);
        __free_page(frag->f_page);
        frag->f_page = NULL;
}

static void rds_ib_frag_free(struct rds_page_frag *frag)
{
        rdsdebug("frag %p page %p\n", frag, frag->f_page);
        BUG_ON(frag->f_page != NULL);
        kmem_cache_free(rds_ib_frag_slab, frag);
}

/*
 * We map a page at a time.  Its fragments are posted in order.  This
 * is called in fragment order as the fragments get recv completion events.
 * Only the last frag in the page performs the unmapping.
 *
 * It's OK for ring cleanup to call this in whatever order it likes because
 * DMA is not in flight and so we can unmap while other ring entries still
 * hold page references in their frags.
 */
static void rds_ib_recv_unmap_page(struct rds_ib_connection *ic,
                                   struct rds_ib_recv_work *recv)
{
        struct rds_page_frag *frag = recv->r_frag;

        rdsdebug("recv %p frag %p page %p\n", recv, frag, frag->f_page);
        if (frag->f_mapped)
                ib_dma_unmap_page(ic->i_cm_id->device,
                                  frag->f_mapped,
                                  RDS_FRAG_SIZE, DMA_FROM_DEVICE);
        frag->f_mapped = 0;
}

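/*
 * Set up the constant parts of every recv work request once per connection:
 * wr_id is the ring index, and each entry gets two SGEs - one for the data
 * fragment and one pointing at this entry's slot in the DMA-mapped header
 * array.  The data SGE's address is filled in later, when the entry is
 * refilled with a freshly mapped fragment.
 */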
void rds_ib_recv_init_ring(struct rds_ib_connection *ic)
{
        struct rds_ib_recv_work *recv;
        u32 i;

        for (i = 0, recv = ic->i_recvs; i < ic->i_recv_ring.w_nr; i++, recv++) {
                struct ib_sge *sge;

                recv->r_ibinc = NULL;
                recv->r_frag = NULL;

                recv->r_wr.next = NULL;
                recv->r_wr.wr_id = i;
                recv->r_wr.sg_list = recv->r_sge;
                recv->r_wr.num_sge = RDS_IB_RECV_SGE;

                sge = rds_ib_data_sge(ic, recv->r_sge);
                sge->addr = 0;
                sge->length = RDS_FRAG_SIZE;
                sge->lkey = ic->i_mr->lkey;

                sge = rds_ib_header_sge(ic, recv->r_sge);
                sge->addr = ic->i_recv_hdrs_dma + (i * sizeof(struct rds_header));
                sge->length = sizeof(struct rds_header);
                sge->lkey = ic->i_mr->lkey;
        }
}

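/*
 * Tear down a single ring entry: put the incoming message it holds, unmap
 * its fragment's DMA mapping, drop the page reference if one is still
 * attached, and return the frag to the slab.
 */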
static void rds_ib_recv_clear_one(struct rds_ib_connection *ic,
                                  struct rds_ib_recv_work *recv)
{
        if (recv->r_ibinc) {
                rds_inc_put(&recv->r_ibinc->ii_inc);
                recv->r_ibinc = NULL;
        }
        if (recv->r_frag) {
                rds_ib_recv_unmap_page(ic, recv);
                if (recv->r_frag->f_page)
                        rds_ib_frag_drop_page(recv->r_frag);
                rds_ib_frag_free(recv->r_frag);
                recv->r_frag = NULL;
        }
}

void rds_ib_recv_clear_ring(struct rds_ib_connection *ic)
{
        u32 i;

        for (i = 0; i < ic->i_recv_ring.w_nr; i++)
                rds_ib_recv_clear_one(ic, &ic->i_recvs[i]);

        if (ic->i_frag.f_page)
                rds_ib_frag_drop_page(&ic->i_frag);
}

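/*
 * Prepare one ring entry for posting.  The connection carves receive
 * fragments out of a single page cached in ic->i_frag: each refilled entry
 * maps RDS_FRAG_SIZE bytes at the current offset, takes a page reference,
 * and advances the offset.  Once the offset passes RDS_PAGE_LAST_OFF the
 * page is released from ic->i_frag and a fresh one is allocated on the next
 * call, so several recvs may share references on the same page when
 * RDS_FRAG_SIZE is smaller than PAGE_SIZE.
 */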
static int rds_ib_recv_refill_one(struct rds_connection *conn,
                                  struct rds_ib_recv_work *recv,
                                  gfp_t kptr_gfp, gfp_t page_gfp)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        dma_addr_t dma_addr;
        struct ib_sge *sge;
        int ret = -ENOMEM;

        if (recv->r_ibinc == NULL) {
                if (atomic_read(&rds_ib_allocation) >= rds_ib_sysctl_max_recv_allocation) {
                        rds_ib_stats_inc(s_ib_rx_alloc_limit);
                        goto out;
                }
                recv->r_ibinc = kmem_cache_alloc(rds_ib_incoming_slab,
                                                 kptr_gfp);
                if (recv->r_ibinc == NULL)
                        goto out;
                atomic_inc(&rds_ib_allocation);
                INIT_LIST_HEAD(&recv->r_ibinc->ii_frags);
                rds_inc_init(&recv->r_ibinc->ii_inc, conn, conn->c_faddr);
        }

        if (recv->r_frag == NULL) {
                recv->r_frag = kmem_cache_alloc(rds_ib_frag_slab, kptr_gfp);
                if (recv->r_frag == NULL)
                        goto out;
                INIT_LIST_HEAD(&recv->r_frag->f_item);
                recv->r_frag->f_page = NULL;
        }

        if (ic->i_frag.f_page == NULL) {
                ic->i_frag.f_page = alloc_page(page_gfp);
                if (ic->i_frag.f_page == NULL)
                        goto out;
                ic->i_frag.f_offset = 0;
        }

        dma_addr = ib_dma_map_page(ic->i_cm_id->device,
                                   ic->i_frag.f_page,
                                   ic->i_frag.f_offset,
                                   RDS_FRAG_SIZE,
                                   DMA_FROM_DEVICE);
        if (ib_dma_mapping_error(ic->i_cm_id->device, dma_addr))
                goto out;

        /*
         * Once we get the RDS_PAGE_LAST_OFF frag then rds_ib_recv_unmap_page()
         * must be called on this recv.  This happens as completions hit
         * in order or on connection shutdown.
         */
        recv->r_frag->f_page = ic->i_frag.f_page;
        recv->r_frag->f_offset = ic->i_frag.f_offset;
        recv->r_frag->f_mapped = dma_addr;

        sge = rds_ib_data_sge(ic, recv->r_sge);
        sge->addr = dma_addr;
        sge->length = RDS_FRAG_SIZE;

        sge = rds_ib_header_sge(ic, recv->r_sge);
        sge->addr = ic->i_recv_hdrs_dma + (recv - ic->i_recvs) * sizeof(struct rds_header);
        sge->length = sizeof(struct rds_header);

        get_page(recv->r_frag->f_page);

        if (ic->i_frag.f_offset < RDS_PAGE_LAST_OFF) {
                ic->i_frag.f_offset += RDS_FRAG_SIZE;
        } else {
                put_page(ic->i_frag.f_page);
                ic->i_frag.f_page = NULL;
                ic->i_frag.f_offset = 0;
        }

        ret = 0;
out:
        return ret;
}

/*
 * This tries to allocate and post unused work requests after making sure that
 * they have all the allocations they need to queue received fragments into
 * sockets.  The i_recv_mutex is held here so that ring_alloc and _unalloc
 * pairs don't go unmatched.
 *
 * -1 is returned if posting fails due to temporary resource exhaustion.
 */
int rds_ib_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
                       gfp_t page_gfp, int prefill)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct rds_ib_recv_work *recv;
        struct ib_recv_wr *failed_wr;
        unsigned int posted = 0;
        int ret = 0;
        u32 pos;

        while ((prefill || rds_conn_up(conn))
                        && rds_ib_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
                if (pos >= ic->i_recv_ring.w_nr) {
                        printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n",
                               pos);
                        ret = -EINVAL;
                        break;
                }

                recv = &ic->i_recvs[pos];
                ret = rds_ib_recv_refill_one(conn, recv, kptr_gfp, page_gfp);
                if (ret) {
                        ret = -1;
                        break;
                }

                /* XXX when can this fail? */
                ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr);
                rdsdebug("recv %p ibinc %p page %p addr %lu ret %d\n", recv,
                         recv->r_ibinc, recv->r_frag->f_page,
                         (long) recv->r_frag->f_mapped, ret);
                if (ret) {
                        rds_ib_conn_error(conn, "recv post on "
                               "%pI4 returned %d, disconnecting and "
                               "reconnecting\n", &conn->c_faddr,
                               ret);
                        ret = -1;
                        break;
                }

                posted++;
        }

        /* We're doing flow control - update the window. */
        if (ic->i_flowctl && posted)
                rds_ib_advertise_credits(conn, posted);

        if (ret)
                rds_ib_ring_unalloc(&ic->i_recv_ring, 1);

        return ret;
}

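/*
 * Each frag queued on an inc still holds the page reference taken when it
 * was posted; purging an inc walks that list, dropping the references and
 * returning the frags to the slab.
 */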
void rds_ib_inc_purge(struct rds_incoming *inc)
{
        struct rds_ib_incoming *ibinc;
        struct rds_page_frag *frag;
        struct rds_page_frag *pos;

        ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);
        rdsdebug("purging ibinc %p inc %p\n", ibinc, inc);

        list_for_each_entry_safe(frag, pos, &ibinc->ii_frags, f_item) {
                list_del_init(&frag->f_item);
                rds_ib_frag_drop_page(frag);
                rds_ib_frag_free(frag);
        }
}

void rds_ib_inc_free(struct rds_incoming *inc)
{
        struct rds_ib_incoming *ibinc;

        ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);

        rds_ib_inc_purge(inc);
        rdsdebug("freeing ibinc %p inc %p\n", ibinc, inc);
        BUG_ON(!list_empty(&ibinc->ii_frags));
        kmem_cache_free(rds_ib_incoming_slab, ibinc);
        atomic_dec(&rds_ib_allocation);
        BUG_ON(atomic_read(&rds_ib_allocation) < 0);
}

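/*
 * Copy a received message into a userspace iovec.  The fragment list and the
 * iovec array are walked in lockstep; each pass copies the largest run that
 * fits in both the current iovec and the current fragment, bounded by the
 * caller's size and by the message length from the header.
 */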
int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
                            size_t size)
{
        struct rds_ib_incoming *ibinc;
        struct rds_page_frag *frag;
        struct iovec *iov = first_iov;
        unsigned long to_copy;
        unsigned long frag_off = 0;
        unsigned long iov_off = 0;
        int copied = 0;
        int ret;
        u32 len;

        ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);
        frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
        len = be32_to_cpu(inc->i_hdr.h_len);

        while (copied < size && copied < len) {
                if (frag_off == RDS_FRAG_SIZE) {
                        frag = list_entry(frag->f_item.next,
                                          struct rds_page_frag, f_item);
                        frag_off = 0;
                }
                while (iov_off == iov->iov_len) {
                        iov_off = 0;
                        iov++;
                }

                to_copy = min(iov->iov_len - iov_off, RDS_FRAG_SIZE - frag_off);
                to_copy = min_t(size_t, to_copy, size - copied);
                to_copy = min_t(unsigned long, to_copy, len - copied);

                rdsdebug("%lu bytes to user [%p, %zu] + %lu from frag "
                         "[%p, %lu] + %lu\n",
                         to_copy, iov->iov_base, iov->iov_len, iov_off,
                         frag->f_page, frag->f_offset, frag_off);

                /* XXX needs + offset for multiple recvs per page */
                ret = rds_page_copy_to_user(frag->f_page,
                                            frag->f_offset + frag_off,
                                            iov->iov_base + iov_off,
                                            to_copy);
                if (ret) {
                        copied = ret;
                        break;
                }

                iov_off += to_copy;
                frag_off += to_copy;
                copied += to_copy;
        }

        return copied;
}

/* ic starts out kzalloc()ed */
void rds_ib_recv_init_ack(struct rds_ib_connection *ic)
{
        struct ib_send_wr *wr = &ic->i_ack_wr;
        struct ib_sge *sge = &ic->i_ack_sge;

        sge->addr = ic->i_ack_dma;
        sge->length = sizeof(struct rds_header);
        sge->lkey = ic->i_mr->lkey;

        wr->sg_list = sge;
        wr->num_sge = 1;
        wr->opcode = IB_WR_SEND;
        wr->wr_id = RDS_IB_ACK_WR_ID;
        wr->send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED;
}

/*
 * You'd think that with reliable IB connections you wouldn't need to ack
 * messages that have been received.  The problem is that IB hardware generates
 * an ack message before it has DMAed the message into memory.  This creates a
 * potential message loss if the HCA is disabled for any reason between when it
 * sends the ack and before the message is DMAed and processed.  This is only a
 * potential issue if another HCA is available for fail-over.
 *
 * When the remote host receives our ack they'll free the sent message from
 * their send queue.  To decrease the latency of this we always send an ack
 * immediately after we've received messages.
 *
 * For simplicity, we only have one ack in flight at a time.  This puts
 * pressure on senders to have deep enough send queues to absorb the latency of
 * a single ack frame being in flight.  This might not be good enough.
 *
 * This is implemented by having a long-lived send_wr and sge which point to a
 * statically allocated ack frame.  This ack wr does not fall under the ring
 * accounting that the tx and rx wrs do.  The QP attribute specifically makes
 * room for it beyond the ring size.  Send completion notices its special
 * wr_id and avoids working with the ring in that case.
 */
static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
                           int ack_required)
{
        rds_ib_set_64bit(&ic->i_ack_next, seq);
        if (ack_required) {
                smp_mb__before_clear_bit();
                set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
        }
}

static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
{
        clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
        smp_mb__after_clear_bit();

        return ic->i_ack_next;
}

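/*
 * Build an ACK-only frame in the connection's preregistered ack buffer and
 * post it with the long-lived i_ack_wr described above.  The header carries
 * the latest sequence number to acknowledge plus any credits we're
 * advertising back to the peer.
 */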
static void rds_ib_send_ack(struct rds_ib_connection *ic, unsigned int adv_credits)
{
        struct rds_header *hdr = ic->i_ack;
        struct ib_send_wr *failed_wr;
        u64 seq;
        int ret;

        seq = rds_ib_get_ack(ic);

        rdsdebug("send_ack: ic %p ack %llu\n", ic, (unsigned long long) seq);
        rds_message_populate_header(hdr, 0, 0, 0);
        hdr->h_ack = cpu_to_be64(seq);
        hdr->h_credit = adv_credits;
        rds_message_make_checksum(hdr);
        ic->i_ack_queued = jiffies;

        ret = ib_post_send(ic->i_cm_id->qp, &ic->i_ack_wr, &failed_wr);
        if (unlikely(ret)) {
                /* Failed to send. Release the WR, and
                 * force another ACK.
                 */
                clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
                set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);

                rds_ib_stats_inc(s_ib_ack_send_failure);
                /* Need to finesse this later. */
                BUG();
        } else
                rds_ib_stats_inc(s_ib_ack_sent);
}

/*
 * There are 3 ways of getting acknowledgements to the peer:
 *  1. We call rds_ib_attempt_ack from the recv completion handler
 *     to send an ACK-only frame.
 *     However, there can be only one such frame in the send queue
 *     at any time, so we may have to postpone it.
 *  2. When another (data) packet is transmitted while there's
 *     an ACK in the queue, we piggyback the ACK sequence number
 *     on the data packet.
 *  3. If the ACK WR is done sending, we get called from the
 *     send queue completion handler, and check whether there's
 *     another ACK pending (postponed because the WR was on the
 *     queue). If so, we transmit it.
 *
 * We maintain 2 variables:
 *  - i_ack_flags, which keeps track of whether the ACK WR
 *    is currently in the send queue or not (IB_ACK_IN_FLIGHT)
 *  - i_ack_next, which is the last sequence number we received
 *
 * Potentially, send queue and receive queue handlers can run concurrently.
 *
 * Reconnecting complicates this picture just slightly. When we
 * reconnect, we may be seeing duplicate packets. The peer
 * is retransmitting them, because it hasn't seen an ACK for
 * them. It is important that we ACK these.
 *
 * ACK mitigation adds a header flag "ACK_REQUIRED"; any packet with
 * this flag set *MUST* be acknowledged immediately.
 */
/*
 * When we get here, we're called from the recv queue handler.
 * Check whether we ought to transmit an ACK.
 */
void rds_ib_attempt_ack(struct rds_ib_connection *ic)
{
        unsigned int adv_credits;

        if (!test_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
                return;

        if (test_and_set_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags)) {
                rds_ib_stats_inc(s_ib_ack_send_delayed);
                return;
        }

        /* Can we get a send credit? */
        if (!rds_ib_send_grab_credits(ic, 1, &adv_credits, 0)) {
                rds_ib_stats_inc(s_ib_tx_throttle);
                clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
                return;
        }

        clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
        rds_ib_send_ack(ic, adv_credits);
}

/*
 * We get here from the send completion handler, when the
 * adapter tells us the ACK frame was sent.
 */
void rds_ib_ack_send_complete(struct rds_ib_connection *ic)
{
        clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
        rds_ib_attempt_ack(ic);
}

/*
 * This is called by the regular xmit code when it wants to piggyback
 * an ACK on an outgoing frame.
 */
u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic)
{
        if (test_and_clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
                rds_ib_stats_inc(s_ib_ack_send_piggybacked);
        return rds_ib_get_ack(ic);
}

/*
 * It's kind of lame that we're copying from the posted receive pages into
 * long-lived bitmaps.  We could have posted the bitmaps and rdma written into
 * them.  But receiving new congestion bitmaps should be a *rare* event, so
 * hopefully we won't need to invest that complexity in making it more
 * efficient.  By copying we can share a simpler core with TCP which has to
 * copy.
 */
static void rds_ib_cong_recv(struct rds_connection *conn,
                             struct rds_ib_incoming *ibinc)
{
        struct rds_cong_map *map;
        unsigned int map_off;
        unsigned int map_page;
        struct rds_page_frag *frag;
        unsigned long frag_off;
        unsigned long to_copy;
        unsigned long copied;
        uint64_t uncongested = 0;
        void *addr;

        /* catch completely corrupt packets */
        if (be32_to_cpu(ibinc->ii_inc.i_hdr.h_len) != RDS_CONG_MAP_BYTES)
                return;

        map = conn->c_fcong;
        map_page = 0;
        map_off = 0;

        frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
        frag_off = 0;

        copied = 0;

        while (copied < RDS_CONG_MAP_BYTES) {
                uint64_t *src, *dst;
                unsigned int k;

                to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
                BUG_ON(to_copy & 7); /* Must be 64bit aligned. */

                addr = kmap_atomic(frag->f_page, KM_SOFTIRQ0);

                src = addr + frag_off;
                dst = (void *)map->m_page_addrs[map_page] + map_off;
                for (k = 0; k < to_copy; k += 8) {
                        /* Record ports that became uncongested, ie
                         * bits that changed from 0 to 1. */
                        uncongested |= ~(*src) & *dst;
                        *dst++ = *src++;
                }
                kunmap_atomic(addr, KM_SOFTIRQ0);

                copied += to_copy;

                map_off += to_copy;
                if (map_off == PAGE_SIZE) {
                        map_off = 0;
                        map_page++;
                }

                frag_off += to_copy;
                if (frag_off == RDS_FRAG_SIZE) {
                        frag = list_entry(frag->f_item.next,
                                          struct rds_page_frag, f_item);
                        frag_off = 0;
                }
        }

        /* the congestion map is in little endian order */
        uncongested = le64_to_cpu(uncongested);

        rds_cong_map_updated(map, uncongested);
}

/*
 * Rings are posted with all the allocations they'll need to queue the
 * incoming message to the receiving socket so this can't fail.
 * All fragments start with a header, so we can make sure we're not receiving
 * garbage, and we can tell a small 8 byte fragment from an ACK frame.
 */
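/*
 * ACK decisions are accumulated in this state while the completion handler
 * drains the CQ, and acted on once at the end: ack_next is the sequence
 * number we should acknowledge next (valid when ack_next_valid is set),
 * ack_recv is the most recent ack value seen from the peer, and ack_required
 * notes that a frame carried RDS_FLAG_ACK_REQUIRED.
 */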
struct rds_ib_ack_state {
        u64             ack_next;
        u64             ack_recv;
        unsigned int    ack_required:1;
        unsigned int    ack_next_valid:1;
        unsigned int    ack_recv_valid:1;
};

static void rds_ib_process_recv(struct rds_connection *conn,
                                struct rds_ib_recv_work *recv, u32 byte_len,
                                struct rds_ib_ack_state *state)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct rds_ib_incoming *ibinc = ic->i_ibinc;
        struct rds_header *ihdr, *hdr;

        /* XXX shut down the connection if port 0,0 are seen? */

        rdsdebug("ic %p ibinc %p recv %p byte len %u\n", ic, ibinc, recv,
                 byte_len);

        if (byte_len < sizeof(struct rds_header)) {
                rds_ib_conn_error(conn, "incoming message "
                       "from %pI4 didn't include a "
                       "header, disconnecting and "
                       "reconnecting\n",
                       &conn->c_faddr);
                return;
        }

        byte_len -= sizeof(struct rds_header);

        ihdr = &ic->i_recv_hdrs[recv - ic->i_recvs];

        /* Validate the checksum. */
        if (!rds_message_verify_checksum(ihdr)) {
                rds_ib_conn_error(conn, "incoming message "
                       "from %pI4 has corrupted header - "
                       "forcing a reconnect\n",
                       &conn->c_faddr);
                rds_stats_inc(s_recv_drop_bad_checksum);
                return;
        }

        /* Process the ACK sequence which comes with every packet */
        state->ack_recv = be64_to_cpu(ihdr->h_ack);
        state->ack_recv_valid = 1;

        /* Process the credits update if there was one */
        if (ihdr->h_credit)
                rds_ib_send_add_credits(conn, ihdr->h_credit);

        if (ihdr->h_sport == 0 && ihdr->h_dport == 0 && byte_len == 0) {
                /* This is an ACK-only packet. It gets special treatment
                 * here because, historically, ACKs have been rather
                 * special beasts.
                 */
                rds_ib_stats_inc(s_ib_ack_received);

                /*
                 * Usually the frags make their way on to incs and are then freed as
                 * the inc is freed.  We don't go that route, so we have to drop the
                 * page ref ourselves.  We can't just leave the page on the recv
                 * because that confuses the dma mapping of pages and each recv's use
                 * of a partial page.  We can leave the frag, though, it will be
                 * reused.
                 *
                 * FIXME: Fold this into the code path below.
                 */
                rds_ib_frag_drop_page(recv->r_frag);
                return;
        }

        /*
         * If we don't already have an inc on the connection then this
         * fragment has a header and starts a message.  Copy its header
         * into the inc and save the inc so we can hang upcoming fragments
         * off its list.
         */
        if (ibinc == NULL) {
                ibinc = recv->r_ibinc;
                recv->r_ibinc = NULL;
                ic->i_ibinc = ibinc;

                hdr = &ibinc->ii_inc.i_hdr;
                memcpy(hdr, ihdr, sizeof(*hdr));
                ic->i_recv_data_rem = be32_to_cpu(hdr->h_len);

                rdsdebug("ic %p ibinc %p rem %u flag 0x%x\n", ic, ibinc,
                         ic->i_recv_data_rem, hdr->h_flags);
        } else {
                hdr = &ibinc->ii_inc.i_hdr;
                /* We can't just use memcmp here; fragments of a
                 * single message may carry different ACKs */
                if (hdr->h_sequence != ihdr->h_sequence
                 || hdr->h_len != ihdr->h_len
                 || hdr->h_sport != ihdr->h_sport
                 || hdr->h_dport != ihdr->h_dport) {
                        rds_ib_conn_error(conn,
                                "fragment header mismatch; forcing reconnect\n");
                        return;
                }
        }

        list_add_tail(&recv->r_frag->f_item, &ibinc->ii_frags);
        recv->r_frag = NULL;

        if (ic->i_recv_data_rem > RDS_FRAG_SIZE)
                ic->i_recv_data_rem -= RDS_FRAG_SIZE;
        else {
                ic->i_recv_data_rem = 0;
                ic->i_ibinc = NULL;

                if (ibinc->ii_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP)
                        rds_ib_cong_recv(conn, ibinc);
                else {
                        rds_recv_incoming(conn, conn->c_faddr, conn->c_laddr,
                                          &ibinc->ii_inc, GFP_ATOMIC,
                                          KM_SOFTIRQ0);
                        state->ack_next = be64_to_cpu(hdr->h_sequence);
                        state->ack_next_valid = 1;
                }

                /* Evaluate the ACK_REQUIRED flag *after* we received
                 * the complete frame, and after bumping the next_rx
                 * sequence. */
                if (hdr->h_flags & RDS_FLAG_ACK_REQUIRED) {
                        rds_stats_inc(s_recv_ack_required);
                        state->ack_required = 1;
                }

                rds_inc_put(&ibinc->ii_inc);
        }
}

/*
 * Plucking the oldest entry from the ring can be done concurrently with
 * the thread refilling the ring.  Each ring operation is protected by
 * spinlocks and the transient state of refilling doesn't change the
 * recording of which entry is oldest.
 *
 * This relies on IB only calling one cq comp_handler for each cq so that
 * there will only be one caller of rds_recv_incoming() per RDS connection.
 */
void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context)
{
        struct rds_connection *conn = context;
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct ib_wc wc;
        struct rds_ib_ack_state state = { 0, };
        struct rds_ib_recv_work *recv;

        rdsdebug("conn %p cq %p\n", conn, cq);

        rds_ib_stats_inc(s_ib_rx_cq_call);

        ib_req_notify_cq(cq, IB_CQ_SOLICITED);

        while (ib_poll_cq(cq, 1, &wc) > 0) {
                rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
                         (unsigned long long)wc.wr_id, wc.status, wc.byte_len,
                         be32_to_cpu(wc.ex.imm_data));
                rds_ib_stats_inc(s_ib_rx_cq_event);

                recv = &ic->i_recvs[rds_ib_ring_oldest(&ic->i_recv_ring)];

                rds_ib_recv_unmap_page(ic, recv);

                /*
                 * Also process recvs in connecting state because it is possible
                 * to get a recv completion _before_ the rdmacm ESTABLISHED
                 * event is processed.
                 */
                if (rds_conn_up(conn) || rds_conn_connecting(conn)) {
                        /* We expect errors as the qp is drained during shutdown */
                        if (wc.status == IB_WC_SUCCESS) {
                                rds_ib_process_recv(conn, recv, wc.byte_len, &state);
                        } else {
                                rds_ib_conn_error(conn, "recv completion on "
                                       "%pI4 had status %u, disconnecting and "
                                       "reconnecting\n", &conn->c_faddr,
                                       wc.status);
                        }
                }

                rds_ib_ring_free(&ic->i_recv_ring, 1);
        }

        if (state.ack_next_valid)
                rds_ib_set_ack(ic, state.ack_next, state.ack_required);
        if (state.ack_recv_valid && state.ack_recv > ic->i_ack_recv) {
                rds_send_drop_acked(conn, state.ack_recv, NULL);
                ic->i_ack_recv = state.ack_recv;
        }
        if (rds_conn_up(conn))
                rds_ib_attempt_ack(ic);

        /* If we ever end up with a really empty receive ring, we're
         * in deep trouble, as the sender will definitely see RNR
         * timeouts. */
        if (rds_ib_ring_empty(&ic->i_recv_ring))
                rds_ib_stats_inc(s_ib_rx_ring_empty);

        /*
         * If the ring is running low, then schedule the thread to refill.
         */
        if (rds_ib_ring_low(&ic->i_recv_ring))
                queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
}

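/*
 * Thread-context entry point for topping up the receive ring; the completion
 * handler above queues the connection's recv work when the ring runs low.
 * i_recv_mutex keeps ring_alloc/unalloc pairs from different thread-context
 * refills from interleaving (see the comment above rds_ib_recv_refill()).
 */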
int rds_ib_recv(struct rds_connection *conn)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        int ret = 0;

        rdsdebug("conn %p\n", conn);

        /*
         * If we get a temporary posting failure in this context then
         * we're really low and we want the caller to back off for a bit.
         */
        mutex_lock(&ic->i_recv_mutex);
        if (rds_ib_recv_refill(conn, GFP_KERNEL, GFP_HIGHUSER, 0))
                ret = -ENOMEM;
        else
                rds_ib_stats_inc(s_ib_rx_refill_from_thread);
        mutex_unlock(&ic->i_recv_mutex);

        if (rds_conn_up(conn))
                rds_ib_attempt_ack(ic);

        return ret;
}

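/*
 * Module init/exit: create the slabs backing incoming messages and page
 * frags, and derive the default receive allocation ceiling from total RAM.
 * If the frag slab can't be created, the incoming slab is destroyed again
 * and -ENOMEM falls through to the caller.
 */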
int __init rds_ib_recv_init(void)
{
        struct sysinfo si;
        int ret = -ENOMEM;

        /* Default to roughly a third of all available RAM for recv memory */
        si_meminfo(&si);
        rds_ib_sysctl_max_recv_allocation = si.totalram / 3 * PAGE_SIZE / RDS_FRAG_SIZE;

        rds_ib_incoming_slab = kmem_cache_create("rds_ib_incoming",
                                        sizeof(struct rds_ib_incoming),
                                        0, 0, NULL);
        if (rds_ib_incoming_slab == NULL)
                goto out;

        rds_ib_frag_slab = kmem_cache_create("rds_ib_frag",
                                        sizeof(struct rds_page_frag),
                                        0, 0, NULL);
        if (rds_ib_frag_slab == NULL)
                kmem_cache_destroy(rds_ib_incoming_slab);
        else
                ret = 0;
out:
        return ret;
}

void rds_ib_recv_exit(void)
{
        kmem_cache_destroy(rds_ib_incoming_slab);
        kmem_cache_destroy(rds_ib_frag_slab);
}