/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/list.h>

#include "rds.h"
#include "rdma.h"
/* When transmitting messages in rds_send_xmit, we need to emerge from
 * time to time and briefly release the CPU. Otherwise the softlock watchdog
 * will kick our shin.
 * Also, it seems fairer to not let one busy connection stall all the
 * others.
 *
 * send_batch_count is the number of times we'll loop in send_xmit. Setting
 * it to 0 will restore the old behavior (where we looped until we had
 * drained the queue).
 */
static int send_batch_count = 64;
module_param(send_batch_count, int, 0444);
MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");
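/*
 * Note: with 0444 permissions the current value is exposed read-only through
 * sysfs, typically at /sys/module/rds/parameters/send_batch_count, and a
 * different batch size can be requested at load time, e.g.
 * "modprobe rds send_batch_count=128" (path and module name assume the
 * usual rds.ko build).
 */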
/*
 * Reset the send state. Caller must hold c_send_lock when calling here.
 */
void rds_send_reset(struct rds_connection *conn)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;

	if (conn->c_xmit_rm) {
		/* Tell the user the RDMA op is no longer mapped by the
		 * transport. This isn't entirely true (it's flushed out
		 * independently) but as the connection is down, there's
		 * no ongoing RDMA to/from that memory */
		rds_message_unmapped(conn->c_xmit_rm);
		rds_message_put(conn->c_xmit_rm);
		conn->c_xmit_rm = NULL;
	}
	conn->c_xmit_sg = 0;
	conn->c_xmit_hdr_off = 0;
	conn->c_xmit_data_off = 0;
	conn->c_xmit_rdma_sent = 0;

	conn->c_map_queued = 0;

	conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
	conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;

	/* Mark messages as retransmissions, and move them to the send q */
	spin_lock_irqsave(&conn->c_lock, flags);
	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
		set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
	}
	list_splice_init(&conn->c_retrans, &conn->c_send_queue);
	spin_unlock_irqrestore(&conn->c_lock, flags);
}
/*
 * We're making the conscious trade-off here to only send one message
 * down the connection at a time.
 *   Pro:
 *      - tx queueing is a simple fifo list
 *      - reassembly is optional and easily done by transports per conn
 *      - no per flow rx lookup at all, straight to the socket
 *      - less per-frag memory and wire overhead
 *   Con:
 *      - queued acks can be delayed behind large messages
 *   Depends:
 *      - small message latency is higher behind queued large messages
 *      - large message latency isn't starved by intervening small sends
 */
int rds_send_xmit(struct rds_connection *conn)
{
	struct rds_message *rm;
	unsigned long flags;
	unsigned int tmp;
	unsigned int send_quota = send_batch_count;
	struct scatterlist *sg;
	int ret = 0;
	int was_empty = 0;
	LIST_HEAD(to_be_dropped);

	/*
	 * sendmsg calls here after having queued its message on the send
	 * queue. We only have one task feeding the connection at a time. If
	 * another thread is already feeding the queue then we back off. This
	 * avoids blocking the caller and trading per-connection data between
	 * caches per message.
	 *
	 * The sem holder will issue a retry if they notice that someone queued
	 * a message after they stopped walking the send queue but before they
	 * dropped the sem.
	 */
	if (!mutex_trylock(&conn->c_send_lock)) {
		rds_stats_inc(s_send_sem_contention);
		ret = -ENOMEM;
		goto out;
	}

	if (conn->c_trans->xmit_prepare)
		conn->c_trans->xmit_prepare(conn);
	/*
	 * spin trying to push headers and data down the connection until
	 * the connection doesn't make forward progress.
	 */
	while (--send_quota) {
		/*
		 * See if we need to send a congestion map update if we're
		 * between sending messages. The send_sem protects our sole
		 * use of c_map_offset and _bytes.
		 * Note this is used only by transports that define a special
		 * xmit_cong_map function. For all others, we allocate
		 * a cong_map message and treat it just like any other send.
		 */
		if (conn->c_map_bytes) {
			ret = conn->c_trans->xmit_cong_map(conn, conn->c_lcong,
							   conn->c_map_offset);
			if (ret <= 0)
				break;

			conn->c_map_offset += ret;
			conn->c_map_bytes -= ret;
			if (conn->c_map_bytes)
				continue;
		}

		/* If we're done sending the current message, clear the
		 * offset and S/G temporaries.
		 */
		rm = conn->c_xmit_rm;
		if (rm != NULL &&
		    conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
		    conn->c_xmit_sg == rm->m_nents) {
			conn->c_xmit_rm = NULL;
			conn->c_xmit_sg = 0;
			conn->c_xmit_hdr_off = 0;
			conn->c_xmit_data_off = 0;
			conn->c_xmit_rdma_sent = 0;

			/* Release the reference to the previous message. */
			rds_message_put(rm);
			rm = NULL;
		}

		/* If we're asked to send a cong map update, do so.
		 */
		if (rm == NULL && test_and_clear_bit(0, &conn->c_map_queued)) {
			if (conn->c_trans->xmit_cong_map != NULL) {
				conn->c_map_offset = 0;
				conn->c_map_bytes = sizeof(struct rds_header) +
					RDS_CONG_MAP_BYTES;
				continue;
			}

			rm = rds_cong_update_alloc(conn);
			if (IS_ERR(rm)) {
				ret = PTR_ERR(rm);
				break;
			}

			conn->c_xmit_rm = rm;
		}
		/*
		 * Grab the next message from the send queue, if there is one.
		 *
		 * c_xmit_rm holds a ref while we're sending this message down
		 * the connection. We can use this ref while holding the
		 * send_sem.. rds_send_reset() is serialized with it.
		 */
		if (rm == NULL) {
			unsigned int len;

			spin_lock_irqsave(&conn->c_lock, flags);

			if (!list_empty(&conn->c_send_queue)) {
				rm = list_entry(conn->c_send_queue.next,
						struct rds_message,
						m_conn_item);
				rds_message_addref(rm);

				/*
				 * Move the message from the send queue to the retransmit
				 * list right away.
				 */
				list_move_tail(&rm->m_conn_item, &conn->c_retrans);
			}

			spin_unlock_irqrestore(&conn->c_lock, flags);

			if (rm == NULL) {
				was_empty = 1;
				break;
			}
			/* Unfortunately, the way Infiniband deals with
			 * RDMA to a bad MR key is by moving the entire
			 * queue pair to error state. We could possibly
			 * recover from that, but right now we drop the
			 * connection.
			 * Therefore, we never retransmit messages with RDMA ops.
			 */
			if (rm->m_rdma_op
			    && test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
				spin_lock_irqsave(&conn->c_lock, flags);
				if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
					list_move(&rm->m_conn_item, &to_be_dropped);
				spin_unlock_irqrestore(&conn->c_lock, flags);
				rds_message_put(rm);
				continue;
			}

			/* Require an ACK every once in a while */
			len = ntohl(rm->m_inc.i_hdr.h_len);
			if (conn->c_unacked_packets == 0
			    || conn->c_unacked_bytes < len) {
				__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

				conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
				conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
				rds_stats_inc(s_send_ack_required);
			} else {
				conn->c_unacked_bytes -= len;
				conn->c_unacked_packets--;
			}

			conn->c_xmit_rm = rm;
		}
		/*
		 * Try and send an rdma message. Let's see if we can
		 * keep this simple and require that the transport either
		 * send the whole rdma or none of it.
		 */
		if (rm->m_rdma_op && !conn->c_xmit_rdma_sent) {
			ret = conn->c_trans->xmit_rdma(conn, rm->m_rdma_op);
			if (ret)
				break;
			conn->c_xmit_rdma_sent = 1;
			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
		}
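		/*
		 * Below, ->xmit() returns the number of bytes it consumed.
		 * Header bytes are credited first against c_xmit_hdr_off;
		 * whatever remains is walked off against the scatterlist,
		 * advancing c_xmit_data_off within an entry and c_xmit_sg
		 * across entries, so a partial send resumes at the same
		 * spot on the next pass through the loop.
		 */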
		if (conn->c_xmit_hdr_off < sizeof(struct rds_header) ||
		    conn->c_xmit_sg < rm->m_nents) {
			ret = conn->c_trans->xmit(conn, rm,
						  conn->c_xmit_hdr_off,
						  conn->c_xmit_sg,
						  conn->c_xmit_data_off);
			if (ret <= 0)
				break;

			if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) {
				tmp = min_t(int, ret,
					    sizeof(struct rds_header) -
					    conn->c_xmit_hdr_off);
				conn->c_xmit_hdr_off += tmp;
				ret -= tmp;
			}

			sg = &rm->m_sg[conn->c_xmit_sg];
			while (ret) {
				tmp = min_t(int, ret, sg->length -
						      conn->c_xmit_data_off);
				conn->c_xmit_data_off += tmp;
				ret -= tmp;
				if (conn->c_xmit_data_off == sg->length) {
					conn->c_xmit_data_off = 0;
					sg++;
					conn->c_xmit_sg++;
					BUG_ON(ret != 0 &&
					       conn->c_xmit_sg == rm->m_nents);
				}
			}
		}
	}
	/* Nuke any messages we decided not to retransmit. */
	if (!list_empty(&to_be_dropped))
		rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);

	if (conn->c_trans->xmit_complete)
		conn->c_trans->xmit_complete(conn);

	/*
	 * We might be racing with another sender who queued a message but
	 * backed off on noticing that we held the c_send_lock. If we check
	 * for queued messages after dropping the sem then either we'll
	 * see the queued message or the queuer will get the sem. If we
	 * notice the queued message then we trigger an immediate retry.
	 *
	 * We need to be careful only to do this when we stopped processing
	 * the send queue because it was empty. It's the only way we
	 * stop processing the loop when the transport hasn't taken
	 * responsibility for forward progress.
	 */
	mutex_unlock(&conn->c_send_lock);

	if (conn->c_map_bytes || (send_quota == 0 && !was_empty)) {
		/* We exhausted the send quota, but there's work left to
		 * do. Return and (re-)schedule the send worker.
		 */
		ret = -EAGAIN;
	}

	if (ret == 0 && was_empty) {
		/* A simple bit test would be way faster than taking the
		 * spin lock */
		spin_lock_irqsave(&conn->c_lock, flags);
		if (!list_empty(&conn->c_send_queue)) {
			rds_stats_inc(s_send_sem_queue_raced);
			ret = -EAGAIN;
		}
		spin_unlock_irqrestore(&conn->c_lock, flags);
	}
out:
	return ret;
}
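/*
 * Subtract a message's payload from the socket's send-buffer accounting.
 * Called with rs_lock held; bumps the queue-empty stat when the last
 * queued byte is released.
 */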
static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
{
	u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	assert_spin_locked(&rs->rs_lock);

	BUG_ON(rs->rs_snd_bytes < len);
	rs->rs_snd_bytes -= len;

	if (rs->rs_snd_bytes == 0)
		rds_stats_inc(s_send_queue_empty);
}
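/*
 * Decide whether 'ack' covers this message: transports that supply an
 * is_acked callback get the final say, otherwise we simply compare the
 * message's sequence number against the acked sequence number.
 */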
static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
				    is_acked_func is_acked)
{
	if (is_acked)
		return is_acked(rm, ack);
	return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
}
/*
 * Returns true if there are no messages on the send and retransmit queues
 * which have a sequence number greater than or equal to the given sequence
 * number.
 */
int rds_send_acked_before(struct rds_connection *conn, u64 seq)
{
	struct rds_message *rm, *tmp;
	int ret = 1;

	spin_lock(&conn->c_lock);

	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (be64_to_cpu(rm->m_inc.i_hdr.h_sequence) < seq)
			ret = 0;
		break;
	}

	list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
		if (be64_to_cpu(rm->m_inc.i_hdr.h_sequence) < seq)
			ret = 0;
		break;
	}

	spin_unlock(&conn->c_lock);

	return ret;
}
/*
 * This is pretty similar to what happens below in the ACK
 * handling code - except that we call here as soon as we get
 * the IB send completion on the RDMA op and the accompanying
 * message.
 */
void rds_rdma_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rds_rdma_op *ro;
	struct rds_notifier *notifier;

	spin_lock(&rm->m_rs_lock);

	ro = rm->m_rdma_op;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)
	    && ro && ro->r_notify && ro->r_notifier) {
		notifier = ro->r_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ro->r_notifier = NULL;
	}

	spin_unlock(&rm->m_rs_lock);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_rdma_send_complete);
/*
 * This is the same as rds_rdma_send_complete except we
 * don't do any locking - we have all the ingredients (message,
 * socket, socket lock) and can just move the notifier.
 */
static inline void
__rds_rdma_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
{
	struct rds_rdma_op *ro;

	ro = rm->m_rdma_op;
	if (ro && ro->r_notify && ro->r_notifier) {
		ro->r_notifier->n_status = status;
		list_add_tail(&ro->r_notifier->n_list, &rs->rs_notify_queue);
		ro->r_notifier = NULL;
	}

	/* No need to wake the app - caller does this */
}
/*
 * This is called from the IB send completion when we detect
 * an RDMA operation that failed with remote access error.
 * So speed is not an issue here.
 */
struct rds_message *rds_send_get_message(struct rds_connection *conn,
					 struct rds_rdma_op *op)
{
	struct rds_message *rm, *tmp, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&conn->c_lock, flags);

	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (rm->m_rdma_op == op) {
			atomic_inc(&rm->m_refcount);
			found = rm;
			goto out;
		}
	}

	list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
		if (rm->m_rdma_op == op) {
			atomic_inc(&rm->m_refcount);
			found = rm;
			break;
		}
	}

out:
	spin_unlock_irqrestore(&conn->c_lock, flags);

	return found;
}
EXPORT_SYMBOL_GPL(rds_send_get_message);
/*
 * This removes messages from the socket's list if they're on it. The list
 * argument must be private to the caller, we must be able to modify it
 * without locks. The messages must have a reference held for their
 * position on the list. This function will drop that reference after
 * removing the messages from the 'messages' list regardless of if it found
 * the messages on the socket list or not.
 */
void rds_send_remove_from_sock(struct list_head *messages, int status)
{
	unsigned long flags = 0; /* silence gcc :P */
	struct rds_sock *rs = NULL;
	struct rds_message *rm;

	local_irq_save(flags);
	while (!list_empty(messages)) {
		rm = list_entry(messages->next, struct rds_message,
				m_conn_item);
		list_del_init(&rm->m_conn_item);

		/*
		 * If we see this flag cleared then we're *sure* that someone
		 * else beat us to removing it from the sock. If we race
		 * with their flag update we'll get the lock and then really
		 * see that the flag has been cleared.
		 *
		 * The message spinlock makes sure nobody clears rm->m_rs
		 * while we're messing with it. It does not prevent the
		 * message from being removed from the socket, though.
		 */
		spin_lock(&rm->m_rs_lock);
		if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
			goto unlock_and_drop;

		if (rs != rm->m_rs) {
			if (rs) {
				spin_unlock(&rs->rs_lock);
				rds_wake_sk_sleep(rs);
				sock_put(rds_rs_to_sk(rs));
			}
			rs = rm->m_rs;
			spin_lock(&rs->rs_lock);
			sock_hold(rds_rs_to_sk(rs));
		}

		if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
			struct rds_rdma_op *ro = rm->m_rdma_op;
			struct rds_notifier *notifier;

			list_del_init(&rm->m_sock_item);
			rds_send_sndbuf_remove(rs, rm);

			if (ro && ro->r_notifier
			    && (status || ro->r_notify)) {
				notifier = ro->r_notifier;
				list_add_tail(&notifier->n_list,
					      &rs->rs_notify_queue);
				if (!notifier->n_status)
					notifier->n_status = status;
				rm->m_rdma_op->r_notifier = NULL;
			}
			rds_message_put(rm);
			rm->m_rs = NULL;
		}

unlock_and_drop:
		spin_unlock(&rm->m_rs_lock);
		rds_message_put(rm);
	}

	if (rs) {
		spin_unlock(&rs->rs_lock);
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}

	local_irq_restore(flags);
}
/*
 * Transports call here when they've determined that the receiver queued
 * messages up to, and including, the given sequence number. Messages are
 * moved to the retrans queue when rds_send_xmit picks them off the send
 * queue. This means that in the TCP case, the message may not have been
 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
 * checks the RDS_MSG_HAS_ACK_SEQ bit.
 *
 * XXX It's not clear to me how this is safely serialized with socket
 * destruction. Maybe it should bail if it sees SOCK_DEAD.
 */
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&conn->c_lock, flags);

	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (!rds_send_is_acked(rm, ack, is_acked))
			break;

		list_move(&rm->m_conn_item, &list);
		clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	}

	/* order flag updates with spin locks */
	if (!list_empty(&list))
		smp_mb__after_clear_bit();

	spin_unlock_irqrestore(&conn->c_lock, flags);

	/* now remove the messages from the sock list as needed */
	rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
}
EXPORT_SYMBOL_GPL(rds_send_drop_acked);
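/*
 * Drop messages queued on this socket: every message when dest is NULL,
 * otherwise only those addressed to the given address/port (as done for
 * RDS_CANCEL_SENT_TO). Each dropped message has any pending RDMA
 * notification completed with RDS_RDMA_CANCELED before it is unlinked
 * from its connection and released.
 */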
void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
{
	struct rds_message *rm, *tmp;
	struct rds_connection *conn;
	unsigned long flags, flags2;
	LIST_HEAD(list);
	int wake = 0;

	/* get all the messages we're dropping under the rs lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
		if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
			     dest->sin_port != rm->m_inc.i_hdr.h_dport))
			continue;

		wake = 1;
		list_move(&rm->m_sock_item, &list);
		rds_send_sndbuf_remove(rs, rm);
		clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);

		/* If this is a RDMA operation, notify the app. */
		__rds_rdma_send_complete(rs, rm, RDS_RDMA_CANCELED);
	}

	/* order flag updates with the rs lock */
	if (wake)
		smp_mb__after_clear_bit();

	spin_unlock_irqrestore(&rs->rs_lock, flags);

	if (wake)
		rds_wake_sk_sleep(rs);

	conn = NULL;

	/* now remove the messages from the conn list as needed */
	list_for_each_entry(rm, &list, m_sock_item) {
		/* We do this here rather than in the loop above, so that
		 * we don't have to nest m_rs_lock under rs->rs_lock */
		spin_lock_irqsave(&rm->m_rs_lock, flags2);
		rm->m_rs = NULL;
		spin_unlock_irqrestore(&rm->m_rs_lock, flags2);

		/*
		 * If we see this flag cleared then we're *sure* that someone
		 * else beat us to removing it from the conn. If we race
		 * with their flag update we'll get the lock and then really
		 * see that the flag has been cleared.
		 */
		if (!test_bit(RDS_MSG_ON_CONN, &rm->m_flags))
			continue;

		if (conn != rm->m_inc.i_conn) {
			if (conn)
				spin_unlock_irqrestore(&conn->c_lock, flags);
			conn = rm->m_inc.i_conn;
			spin_lock_irqsave(&conn->c_lock, flags);
		}

		if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
			list_del_init(&rm->m_conn_item);
			rds_message_put(rm);
		}
	}

	if (conn)
		spin_unlock_irqrestore(&conn->c_lock, flags);

	while (!list_empty(&list)) {
		rm = list_entry(list.next, struct rds_message, m_sock_item);
		list_del_init(&rm->m_sock_item);

		rds_message_wait(rm);
		rds_message_put(rm);
	}
}
/*
 * we only want this to fire once so we use the caller's 'queued'. It's
 * possible that another thread can race with us and remove the
 * message from the flow with RDS_CANCEL_SENT_TO.
 */
static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
			     struct rds_message *rm, __be16 sport,
			     __be16 dport, int *queued)
{
	unsigned long flags;
	u32 len;

	if (*queued)
		goto out;

	len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	/* this is the only place which holds both the socket's rs_lock
	 * and the connection's c_lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	/*
	 * If there is a little space in sndbuf, we don't queue anything,
	 * and userspace gets -EAGAIN. But poll() indicates there's send
	 * room. This can lead to bad behavior (spinning) if snd_bytes isn't
	 * freed up by incoming acks. So we check the *old* value of
	 * rs_snd_bytes here to allow the last msg to exceed the buffer,
	 * and poll() now knows no more data can be sent.
	 */
	if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
		rs->rs_snd_bytes += len;

		/* let recv side know we are close to send space exhaustion.
		 * This is probably not the optimal way to do it, as this
		 * means we set the flag on *all* messages as soon as our
		 * throughput hits a certain threshold.
		 */
		if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
			__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

		list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
		set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
		rds_message_addref(rm);
		rm->m_rs = rs;

		/* The code ordering is a little weird, but we're
		   trying to minimize the time we hold c_lock */
		rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
		rm->m_inc.i_conn = conn;
		rds_message_addref(rm);

		spin_lock(&conn->c_lock);
		rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++);
		list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
		set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
		spin_unlock(&conn->c_lock);

		rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
			 rm, len, rs, rs->rs_snd_bytes,
			 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));

		*queued = 1;
	}

	spin_unlock_irqrestore(&rs->rs_lock, flags);
out:
	return *queued;
}
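/*
 * Walk the SOL_RDS control messages attached to a sendmsg() call and apply
 * them to the outgoing message: RDMA_ARGS sets up an RDMA operation, while
 * RDMA_DEST and RDMA_MAP fill in rm->m_rdma_cookie / rm->m_rdma_mr.
 * *allocated_mr tells the caller that an MR was created on the fly so it
 * can be torn down again if the send fails later.
 */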
static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
			 struct msghdr *msg, int *allocated_mr)
{
	struct cmsghdr *cmsg;
	int ret = 0;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		/* As a side effect, RDMA_DEST and RDMA_MAP will set
		 * rm->m_rdma_cookie and rm->m_rdma_mr.
		 */
		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			ret = rds_cmsg_rdma_args(rs, rm, cmsg);
			break;

		case RDS_CMSG_RDMA_DEST:
			ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
			break;

		case RDS_CMSG_RDMA_MAP:
			ret = rds_cmsg_rdma_map(rs, rm, cmsg);
			if (!ret)
				*allocated_mr = 1;
			break;

		default:
			return -EINVAL;
		}

		if (ret)
			break;
	}

	return ret;
}
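/*
 * sendmsg() entry point. Copies the payload out of userspace, resolves (or
 * creates) the connection for the destination, applies any RDMA control
 * messages, then queues the message on both the socket and connection send
 * queues, blocking (unless MSG_DONTWAIT) while the send buffer is full.
 * Finally it kicks the transmit path and returns the payload length.
 */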
int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
		size_t payload_len)
{
	struct sock *sk = sock->sk;
	struct rds_sock *rs = rds_sk_to_rs(sk);
	struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
	__be32 daddr;
	__be16 dport;
	struct rds_message *rm = NULL;
	struct rds_connection *conn;
	int ret = 0;
	int queued = 0, allocated_mr = 0;
	int nonblock = msg->msg_flags & MSG_DONTWAIT;
	long timeo = sock_sndtimeo(sk, nonblock);
	/* Mirror Linux UDP mirror of BSD error message compatibility */
	/* XXX: Perhaps MSG_MORE someday */
	if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
		printk(KERN_INFO "msg_flags 0x%08X\n", msg->msg_flags);
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (msg->msg_namelen) {
		/* XXX fail non-unicast destination IPs? */
		if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
			ret = -EINVAL;
			goto out;
		}
		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
	} else {
		/* We only care about consistency with ->connect() */
		lock_sock(sk);
		daddr = rs->rs_conn_addr;
		dport = rs->rs_conn_port;
		release_sock(sk);
	}

	/* racing with another thread binding seems ok here */
	if (daddr == 0 || rs->rs_bound_addr == 0) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
	}

	rm = rds_message_copy_from_user(msg->msg_iov, payload_len);
	if (IS_ERR(rm)) {
		ret = PTR_ERR(rm);
		rm = NULL;
		goto out;
	}

	rm->m_daddr = daddr;

	/* rds_conn_create has a spinlock that runs with IRQ off.
	 * Caching the conn in the socket helps a lot. */
	if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
		conn = rs->rs_conn;
	else {
		conn = rds_conn_create_outgoing(rs->rs_bound_addr, daddr,
					rs->rs_transport,
					sock->sk->sk_allocation);
		if (IS_ERR(conn)) {
			ret = PTR_ERR(conn);
			goto out;
		}
		rs->rs_conn = conn;
	}

	/* Parse any control messages the user may have included. */
	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
	if (ret)
		goto out;

	if ((rm->m_rdma_cookie || rm->m_rdma_op)
	    && conn->c_trans->xmit_rdma == NULL) {
		if (printk_ratelimit())
			printk(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
				rm->m_rdma_op, conn->c_trans->xmit_rdma);
		ret = -EOPNOTSUPP;
		goto out;
	}

	/* If the connection is down, trigger a connect. We may
	 * have scheduled a delayed reconnect however - in this case
	 * we should not interfere.
	 */
	if (rds_conn_state(conn) == RDS_CONN_DOWN
	    && !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_conn_w, 0);

	ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
	if (ret)
		goto out;
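	/*
	 * Below: keep trying to queue the message until it fits in the send
	 * buffer. Oversized payloads fail with -EMSGSIZE and non-blocking
	 * sockets with -EAGAIN; otherwise we sleep on sk_sleep until
	 * rds_send_queue_rm() succeeds, a signal arrives, or the send
	 * timeout expires.
	 */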
	while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
				  dport, &queued)) {
		rds_stats_inc(s_send_queue_full);
		/* XXX make sure this is reasonable */
		if (payload_len > rds_sk_sndbuf(rs)) {
			ret = -EMSGSIZE;
			goto out;
		}
		if (nonblock) {
			ret = -EAGAIN;
			goto out;
		}

		timeo = wait_event_interruptible_timeout(*sk->sk_sleep,
					rds_send_queue_rm(rs, conn, rm,
							  rs->rs_bound_port,
							  dport,
							  &queued),
					timeo);
		rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
		if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
			continue;

		ret = timeo;
		if (ret == 0)
			ret = -ETIMEDOUT;
		goto out;
	}

	/*
	 * By now we've committed to the send. We reuse rds_send_worker()
	 * to retry sends in the rds thread if the transport asks us to.
	 */
	rds_stats_inc(s_send_queued);

	if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		rds_send_worker(&conn->c_send_w.work);

	rds_message_put(rm);
	return payload_len;

out:
	/* If the user included a RDMA_MAP cmsg, we allocated a MR on the fly.
	 * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
	 * or in any other way, we need to destroy the MR again */
	if (allocated_mr)
		rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);

	if (rm)
		rds_message_put(rm);
	return ret;
}
/*
 * Reply to a ping packet.
 */
int
rds_send_pong(struct rds_connection *conn, __be16 dport)
{
	struct rds_message *rm;
	unsigned long flags;
	int ret = 0;

	rm = rds_message_alloc(0, GFP_ATOMIC);
	if (rm == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	rm->m_daddr = conn->c_faddr;

	/* If the connection is down, trigger a connect. We may
	 * have scheduled a delayed reconnect however - in this case
	 * we should not interfere.
	 */
	if (rds_conn_state(conn) == RDS_CONN_DOWN
	    && !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_conn_w, 0);

	ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
	if (ret)
		goto out;

	spin_lock_irqsave(&conn->c_lock, flags);
	list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
	set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	rds_message_addref(rm);
	rm->m_inc.i_conn = conn;

	rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport,
				    conn->c_next_tx_seq);
	conn->c_next_tx_seq++;
	spin_unlock_irqrestore(&conn->c_lock, flags);

	rds_stats_inc(s_send_queued);
	rds_stats_inc(s_send_pong);

	queue_delayed_work(rds_wq, &conn->c_send_w, 0);

	rds_message_put(rm);
	return 0;

out:
	if (rm)
		rds_message_put(rm);
	return ret;
}