ar-ack.c

/* Management of Tx window, Tx resend, ACKs and out-of-sequence reception
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/udp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

static unsigned rxrpc_ack_defer = 1;

static const char *const rxrpc_acks[] = {
	"---", "REQ", "DUP", "OOS", "WIN", "MEM", "PNG", "PNR", "DLY", "IDL",
	"-?-"
};

static const s8 rxrpc_ack_priority[] = {
	[0]				= 0,
	[RXRPC_ACK_DELAY]		= 1,
	[RXRPC_ACK_REQUESTED]		= 2,
	[RXRPC_ACK_IDLE]		= 3,
	[RXRPC_ACK_PING_RESPONSE]	= 4,
	[RXRPC_ACK_DUPLICATE]		= 5,
	[RXRPC_ACK_OUT_OF_SEQUENCE]	= 6,
	[RXRPC_ACK_EXCEEDS_WINDOW]	= 7,
	[RXRPC_ACK_NOSPACE]		= 8,
};
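
/* Higher priority values take precedence: a newly proposed ACK reason only
 * displaces the one currently pending on a call if it ranks higher in the
 * table above. */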

/*
 * propose an ACK be sent
 */
void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
			 __be32 serial, bool immediate)
{
	unsigned long expiry;
	s8 prior = rxrpc_ack_priority[ack_reason];

	ASSERTCMP(prior, >, 0);

	_enter("{%d},%s,%%%x,%u",
	       call->debug_id, rxrpc_acks[ack_reason], ntohl(serial),
	       immediate);

	if (prior < rxrpc_ack_priority[call->ackr_reason]) {
		if (immediate)
			goto cancel_timer;
		return;
	}

	/* update DELAY, IDLE, REQUESTED and PING_RESPONSE ACK serial
	 * numbers */
	if (prior == rxrpc_ack_priority[call->ackr_reason]) {
		if (prior <= 4)
			call->ackr_serial = serial;
		if (immediate)
			goto cancel_timer;
		return;
	}

	call->ackr_reason = ack_reason;
	call->ackr_serial = serial;

	switch (ack_reason) {
	case RXRPC_ACK_DELAY:
		_debug("run delay timer");
		call->ack_timer.expires = jiffies + rxrpc_ack_timeout * HZ;
		add_timer(&call->ack_timer);
		return;

	case RXRPC_ACK_IDLE:
		if (!immediate) {
			_debug("run defer timer");
			expiry = 1;
			goto run_timer;
		}
		goto cancel_timer;

	case RXRPC_ACK_REQUESTED:
		if (!rxrpc_ack_defer)
			goto cancel_timer;
		if (!immediate || serial == cpu_to_be32(1)) {
			_debug("run defer timer");
			expiry = rxrpc_ack_defer;
			goto run_timer;
		}

	default:
		_debug("immediate ACK");
		goto cancel_timer;
	}

run_timer:
	expiry += jiffies;
	if (!timer_pending(&call->ack_timer) ||
	    time_after(call->ack_timer.expires, expiry))
		mod_timer(&call->ack_timer, expiry);
	return;

cancel_timer:
	_debug("cancel timer %%%u", ntohl(serial));
	try_to_del_timer_sync(&call->ack_timer);
	read_lock_bh(&call->state_lock);
	if (call->state <= RXRPC_CALL_COMPLETE &&
	    !test_and_set_bit(RXRPC_CALL_ACK, &call->events))
		rxrpc_queue_call(call);
	read_unlock_bh(&call->state_lock);
}

/*
 * propose an ACK be sent, locking the call structure
 */
void rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
		       __be32 serial, bool immediate)
{
	s8 prior = rxrpc_ack_priority[ack_reason];

	if (prior > rxrpc_ack_priority[call->ackr_reason]) {
		spin_lock_bh(&call->lock);
		__rxrpc_propose_ACK(call, ack_reason, serial, immediate);
		spin_unlock_bh(&call->lock);
	}
}
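
/* In the resend bitmask passed to rxrpc_set_resend(), bit 0 means "packets
 * need resending now" (raises the RXRPC_CALL_RESEND event) and bit 1 means
 * "a future deadline exists" (arms the resend timer for the earliest one). */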

/*
 * set the resend timer
 */
static void rxrpc_set_resend(struct rxrpc_call *call, u8 resend,
			     unsigned long resend_at)
{
	read_lock_bh(&call->state_lock);
	if (call->state >= RXRPC_CALL_COMPLETE)
		resend = 0;

	if (resend & 1) {
		_debug("SET RESEND");
		set_bit(RXRPC_CALL_RESEND, &call->events);
	}

	if (resend & 2) {
		_debug("MODIFY RESEND TIMER");
		set_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
		mod_timer(&call->resend_timer, resend_at);
	} else {
		_debug("KILL RESEND TIMER");
		del_timer_sync(&call->resend_timer);
		clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events);
		clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
	}
	read_unlock_bh(&call->state_lock);
}
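
/* Note on the Tx window: each acks_window[] slot holds a packet pointer with
 * bit 0 used as a tag that is set once the packet has been soft-ACK'd;
 * tagged slots are skipped when scanning for packets to retransmit. */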

/*
 * resend packets
 */
static void rxrpc_resend(struct rxrpc_call *call)
{
	struct rxrpc_skb_priv *sp;
	struct rxrpc_header *hdr;
	struct sk_buff *txb;
	unsigned long *p_txb, resend_at;
	int loop, stop;
	u8 resend;

	_enter("{%d,%d,%d,%d},",
	       call->acks_hard, call->acks_unacked,
	       atomic_read(&call->sequence),
	       CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));

	stop = 0;
	resend = 0;
	resend_at = 0;

	for (loop = call->acks_tail;
	     loop != call->acks_head && !stop;
	     loop = (loop + 1) & (call->acks_winsz - 1)
	     ) {
		p_txb = call->acks_window + loop;
		smp_read_barrier_depends();
		if (*p_txb & 1)
			continue;

		txb = (struct sk_buff *) *p_txb;
		sp = rxrpc_skb(txb);

		if (sp->need_resend) {
			sp->need_resend = 0;

			/* each Tx packet has a new serial number */
			sp->hdr.serial =
				htonl(atomic_inc_return(&call->conn->serial));

			hdr = (struct rxrpc_header *) txb->head;
			hdr->serial = sp->hdr.serial;

			_proto("Tx DATA %%%u { #%d }",
			       ntohl(sp->hdr.serial), ntohl(sp->hdr.seq));
			if (rxrpc_send_packet(call->conn->trans, txb) < 0) {
				/* the transmit failed: stop this resend pass
				 * and retry the packet again shortly */
				stop = 1;
				sp->resend_at = jiffies + 3;
			} else {
				sp->resend_at =
					jiffies + rxrpc_resend_timeout * HZ;
			}
		}

		if (time_after_eq(jiffies + 1, sp->resend_at)) {
			sp->need_resend = 1;
			resend |= 1;
		} else if (resend & 2) {
			if (time_before(sp->resend_at, resend_at))
				resend_at = sp->resend_at;
		} else {
			resend_at = sp->resend_at;
			resend |= 2;
		}
	}

	rxrpc_set_resend(call, resend, resend_at);
	_leave("");
}

/*
 * handle resend timer expiry
 */
static void rxrpc_resend_timer(struct rxrpc_call *call)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *txb;
	unsigned long *p_txb, resend_at;
	int loop;
	u8 resend;

	_enter("%d,%d,%d",
	       call->acks_tail, call->acks_unacked, call->acks_head);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	resend = 0;
	resend_at = 0;

	for (loop = call->acks_unacked;
	     loop != call->acks_head;
	     loop = (loop + 1) & (call->acks_winsz - 1)
	     ) {
		p_txb = call->acks_window + loop;
		smp_read_barrier_depends();
		txb = (struct sk_buff *) (*p_txb & ~1);
		sp = rxrpc_skb(txb);

		ASSERT(!(*p_txb & 1));

		if (sp->need_resend) {
			;
		} else if (time_after_eq(jiffies + 1, sp->resend_at)) {
			sp->need_resend = 1;
			resend |= 1;
		} else if (resend & 2) {
			if (time_before(sp->resend_at, resend_at))
				resend_at = sp->resend_at;
		} else {
			resend_at = sp->resend_at;
			resend |= 2;
		}
	}

	rxrpc_set_resend(call, resend, resend_at);
	_leave("");
}

/*
 * process soft ACKs of our transmitted packets
 * - these indicate packets the peer has or has not received, but hasn't yet
 *   given to the consumer, and so can still be discarded and re-requested
 */
static int rxrpc_process_soft_ACKs(struct rxrpc_call *call,
				   struct rxrpc_ackpacket *ack,
				   struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *txb;
	unsigned long *p_txb, resend_at;
	int loop;
	u8 sacks[RXRPC_MAXACKS], resend;

	_enter("{%d,%d},{%d},",
	       call->acks_hard,
	       CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz),
	       ack->nAcks);

	if (skb_copy_bits(skb, 0, sacks, ack->nAcks) < 0)
		goto protocol_error;

	resend = 0;
	resend_at = 0;
	for (loop = 0; loop < ack->nAcks; loop++) {
		p_txb = call->acks_window;
		p_txb += (call->acks_tail + loop) & (call->acks_winsz - 1);
		smp_read_barrier_depends();
		txb = (struct sk_buff *) (*p_txb & ~1);
		sp = rxrpc_skb(txb);

		switch (sacks[loop]) {
		case RXRPC_ACK_TYPE_ACK:
			sp->need_resend = 0;
			*p_txb |= 1;
			break;
		case RXRPC_ACK_TYPE_NACK:
			sp->need_resend = 1;
			*p_txb &= ~1;
			resend = 1;
			break;
		default:
			_debug("Unsupported ACK type %d", sacks[loop]);
			goto protocol_error;
		}
	}

	smp_mb();
	call->acks_unacked = (call->acks_tail + loop) & (call->acks_winsz - 1);

	/* anything not explicitly ACK'd is implicitly NACK'd, but may just not
	 * have been received or processed yet by the far end */
	for (loop = call->acks_unacked;
	     loop != call->acks_head;
	     loop = (loop + 1) & (call->acks_winsz - 1)
	     ) {
		p_txb = call->acks_window + loop;
		smp_read_barrier_depends();
		txb = (struct sk_buff *) (*p_txb & ~1);
		sp = rxrpc_skb(txb);

		if (*p_txb & 1) {
			/* packet must have been discarded */
			sp->need_resend = 1;
			*p_txb &= ~1;
			resend |= 1;
		} else if (sp->need_resend) {
			;
		} else if (time_after_eq(jiffies + 1, sp->resend_at)) {
			sp->need_resend = 1;
			resend |= 1;
		} else if (resend & 2) {
			if (time_before(sp->resend_at, resend_at))
				resend_at = sp->resend_at;
		} else {
			resend_at = sp->resend_at;
			resend |= 2;
		}
	}

	rxrpc_set_resend(call, resend, resend_at);
	_leave(" = 0");
	return 0;

protocol_error:
	_leave(" = -EPROTO");
	return -EPROTO;
}
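
/* A hard ACK (everything below ack.firstPacket) indicates the receiver has
 * passed the data to its consumer, so those packets can be freed for good;
 * a soft ACK only says they arrived, and they may still be re-requested. */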

/*
 * discard hard-ACK'd packets from the Tx window
 */
static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
{
	struct rxrpc_skb_priv *sp;
	unsigned long _skb;
	int tail = call->acks_tail, old_tail;
	int win = CIRC_CNT(call->acks_head, tail, call->acks_winsz);

	_enter("{%u,%u},%u", call->acks_hard, win, hard);

	ASSERTCMP(hard - call->acks_hard, <=, win);

	while (call->acks_hard < hard) {
		smp_read_barrier_depends();
		_skb = call->acks_window[tail] & ~1;
		sp = rxrpc_skb((struct sk_buff *) _skb);
		rxrpc_free_skb((struct sk_buff *) _skb);
		old_tail = tail;
		tail = (tail + 1) & (call->acks_winsz - 1);
		call->acks_tail = tail;
		if (call->acks_unacked == old_tail)
			call->acks_unacked = tail;
		call->acks_hard++;
	}

	wake_up(&call->tx_waitq);
}

/*
 * clear the Tx window in the event of a failure
 */
static void rxrpc_clear_tx_window(struct rxrpc_call *call)
{
	rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
}

/*
 * drain the out of sequence received packet queue into the packet Rx queue
 */
static int rxrpc_drain_rx_oos_queue(struct rxrpc_call *call)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	bool terminal;
	int ret;

	_enter("{%d,%d}", call->rx_data_post, call->rx_first_oos);

	spin_lock_bh(&call->lock);

	ret = -ECONNRESET;
	if (test_bit(RXRPC_CALL_RELEASED, &call->flags))
		goto socket_unavailable;

	skb = skb_dequeue(&call->rx_oos_queue);
	if (skb) {
		sp = rxrpc_skb(skb);

		_debug("drain OOS packet %d [%d]",
		       ntohl(sp->hdr.seq), call->rx_first_oos);

		if (ntohl(sp->hdr.seq) != call->rx_first_oos) {
			skb_queue_head(&call->rx_oos_queue, skb);
			call->rx_first_oos = ntohl(rxrpc_skb(skb)->hdr.seq);
			_debug("requeue %p {%u}", skb, call->rx_first_oos);
		} else {
			skb->mark = RXRPC_SKB_MARK_DATA;
			terminal = ((sp->hdr.flags & RXRPC_LAST_PACKET) &&
				    !(sp->hdr.flags & RXRPC_CLIENT_INITIATED));
			ret = rxrpc_queue_rcv_skb(call, skb, true, terminal);
			BUG_ON(ret < 0);
			_debug("drain #%u", call->rx_data_post);
			call->rx_data_post++;

			/* find out what the next packet is */
			skb = skb_peek(&call->rx_oos_queue);
			if (skb)
				call->rx_first_oos =
					ntohl(rxrpc_skb(skb)->hdr.seq);
			else
				call->rx_first_oos = 0;
			_debug("peek %p {%u}", skb, call->rx_first_oos);
		}
	}

	ret = 0;
socket_unavailable:
	spin_unlock_bh(&call->lock);
	_leave(" = %d", ret);
	return ret;
}

/*
 * insert an out of sequence packet into the buffer
 */
static void rxrpc_insert_oos_packet(struct rxrpc_call *call,
				    struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp, *psp;
	struct sk_buff *p;
	u32 seq;

	sp = rxrpc_skb(skb);
	seq = ntohl(sp->hdr.seq);
	_enter(",,{%u}", seq);

	skb->destructor = rxrpc_packet_destructor;
	ASSERTCMP(sp->call, ==, NULL);
	sp->call = call;
	rxrpc_get_call(call);

	/* insert into the buffer in sequence order */
	spin_lock_bh(&call->lock);

	skb_queue_walk(&call->rx_oos_queue, p) {
		psp = rxrpc_skb(p);
		if (ntohl(psp->hdr.seq) > seq) {
			_debug("insert oos #%u before #%u",
			       seq, ntohl(psp->hdr.seq));
			skb_insert(p, skb, &call->rx_oos_queue);
			goto inserted;
		}
	}

	_debug("append oos #%u", seq);
	skb_queue_tail(&call->rx_oos_queue, skb);
inserted:

	/* we might now have a new front to the queue */
	if (call->rx_first_oos == 0 || seq < call->rx_first_oos)
		call->rx_first_oos = seq;

	read_lock(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE &&
	    call->rx_data_post == call->rx_first_oos) {
		_debug("drain rx oos now");
		set_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events);
	}
	read_unlock(&call->state_lock);

	spin_unlock_bh(&call->lock);
	_leave(" [stored #%u]", call->rx_first_oos);
}

/*
 * clear the Tx window on final ACK reception
 */
static void rxrpc_zap_tx_window(struct rxrpc_call *call)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	unsigned long _skb, *acks_window;
	u8 winsz = call->acks_winsz;
	int tail;

	acks_window = call->acks_window;
	call->acks_window = NULL;

	while (CIRC_CNT(call->acks_head, call->acks_tail, winsz) > 0) {
		tail = call->acks_tail;
		smp_read_barrier_depends();
		_skb = acks_window[tail] & ~1;
		smp_mb();
		call->acks_tail = (call->acks_tail + 1) & (winsz - 1);

		skb = (struct sk_buff *) _skb;
		sp = rxrpc_skb(skb);
		_debug("+++ clear Tx %u", ntohl(sp->hdr.seq));
		rxrpc_free_skb(skb);
	}

	kfree(acks_window);
}
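
/* Note for rxrpc_extract_ackinfo() below: by the time it is called, the fixed
 * ACK header has already been pulled off the skb, so the ackinfo trailer sits
 * after the nAcks soft-ACK bytes plus three bytes of padding - hence the
 * "nAcks + 3" offset. */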

/*
 * process the extra information that may be appended to an ACK packet
 */
static void rxrpc_extract_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
				  unsigned latest, int nAcks)
{
	struct rxrpc_ackinfo ackinfo;
	struct rxrpc_peer *peer;
	unsigned mtu;

	if (skb_copy_bits(skb, nAcks + 3, &ackinfo, sizeof(ackinfo)) < 0) {
		_leave(" [no ackinfo]");
		return;
	}

	_proto("Rx ACK %%%u Info { rx=%u max=%u rwin=%u jm=%u }",
	       latest,
	       ntohl(ackinfo.rxMTU), ntohl(ackinfo.maxMTU),
	       ntohl(ackinfo.rwind), ntohl(ackinfo.jumbo_max));

	mtu = min(ntohl(ackinfo.rxMTU), ntohl(ackinfo.maxMTU));

	peer = call->conn->trans->peer;
	if (mtu < peer->maxdata) {
		spin_lock_bh(&peer->lock);
		peer->maxdata = mtu;
		peer->mtu = mtu + peer->hdrsize;
		spin_unlock_bh(&peer->lock);
		_net("Net MTU %u (maxdata %u)", peer->mtu, peer->maxdata);
	}
}

/*
 * process packets in the reception queue
 */
static int rxrpc_process_rx_queue(struct rxrpc_call *call,
				  u32 *_abort_code)
{
	struct rxrpc_ackpacket ack;
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	bool post_ACK;
	int latest;
	u32 hard, tx;

	_enter("");

process_further:
	skb = skb_dequeue(&call->rx_queue);
	if (!skb)
		return -EAGAIN;

	_net("deferred skb %p", skb);

	sp = rxrpc_skb(skb);

	_debug("process %s [st %d]", rxrpc_pkts[sp->hdr.type], call->state);

	post_ACK = false;

	switch (sp->hdr.type) {
		/* data packets that wind up here have been received out of
		 * order, need security processing or are jumbo packets */
	case RXRPC_PACKET_TYPE_DATA:
		_proto("OOSQ DATA %%%u { #%u }",
		       ntohl(sp->hdr.serial), ntohl(sp->hdr.seq));

		/* secured packets must be verified and possibly decrypted */
		if (rxrpc_verify_packet(call, skb, _abort_code) < 0)
			goto protocol_error;

		rxrpc_insert_oos_packet(call, skb);
		goto process_further;

		/* partial ACK to process */
	case RXRPC_PACKET_TYPE_ACK:
		if (skb_copy_bits(skb, 0, &ack, sizeof(ack)) < 0) {
			_debug("extraction failure");
			goto protocol_error;
		}
		if (!skb_pull(skb, sizeof(ack)))
			BUG();

		latest = ntohl(sp->hdr.serial);
		hard = ntohl(ack.firstPacket);
		tx = atomic_read(&call->sequence);

		_proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
		       latest,
		       ntohs(ack.maxSkew),
		       hard,
		       ntohl(ack.previousPacket),
		       ntohl(ack.serial),
		       rxrpc_acks[ack.reason],
		       ack.nAcks);

		rxrpc_extract_ackinfo(call, skb, latest, ack.nAcks);

		if (ack.reason == RXRPC_ACK_PING) {
			_proto("Rx ACK %%%u PING Request", latest);
			rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE,
					  sp->hdr.serial, true);
		}

		/* discard any out-of-order or duplicate ACKs */
		if (latest - call->acks_latest <= 0) {
			_debug("discard ACK %d <= %d",
			       latest, call->acks_latest);
			goto discard;
		}
		call->acks_latest = latest;

		if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
		    call->state != RXRPC_CALL_CLIENT_AWAIT_REPLY &&
		    call->state != RXRPC_CALL_SERVER_SEND_REPLY &&
		    call->state != RXRPC_CALL_SERVER_AWAIT_ACK)
			goto discard;

		_debug("Tx=%d H=%u S=%d", tx, call->acks_hard, call->state);

		if (hard > 0) {
			if (hard - 1 > tx) {
				_debug("hard-ACK'd packet %d not transmitted"
				       " (%d top)",
				       hard - 1, tx);
				goto protocol_error;
			}

			if ((call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY ||
			     call->state == RXRPC_CALL_SERVER_AWAIT_ACK) &&
			    hard > tx)
				goto all_acked;

			smp_rmb();
			rxrpc_rotate_tx_window(call, hard - 1);
		}

		if (ack.nAcks > 0) {
			if (hard - 1 + ack.nAcks > tx) {
				_debug("soft-ACK'd packet %d+%d not"
				       " transmitted (%d top)",
				       hard - 1, ack.nAcks, tx);
				goto protocol_error;
			}

			if (rxrpc_process_soft_ACKs(call, &ack, skb) < 0)
				goto protocol_error;
		}
		goto discard;

		/* complete ACK to process */
	case RXRPC_PACKET_TYPE_ACKALL:
		goto all_acked;

		/* abort and busy are handled elsewhere */
	case RXRPC_PACKET_TYPE_BUSY:
	case RXRPC_PACKET_TYPE_ABORT:
		BUG();

		/* connection level events - also handled elsewhere */
	case RXRPC_PACKET_TYPE_CHALLENGE:
	case RXRPC_PACKET_TYPE_RESPONSE:
	case RXRPC_PACKET_TYPE_DEBUG:
		BUG();
	}

	/* if we've had a hard ACK that covers all the packets we've sent, then
	 * that ends that phase of the operation */
all_acked:
	write_lock_bh(&call->state_lock);
	_debug("ack all %d", call->state);

	switch (call->state) {
	case RXRPC_CALL_CLIENT_AWAIT_REPLY:
		call->state = RXRPC_CALL_CLIENT_RECV_REPLY;
		break;
	case RXRPC_CALL_SERVER_AWAIT_ACK:
		_debug("srv complete");
		call->state = RXRPC_CALL_COMPLETE;
		post_ACK = true;
		break;
	case RXRPC_CALL_CLIENT_SEND_REQUEST:
	case RXRPC_CALL_SERVER_RECV_REQUEST:
		goto protocol_error_unlock; /* can't occur yet */
	default:
		write_unlock_bh(&call->state_lock);
		goto discard; /* assume packet left over from earlier phase */
	}

	write_unlock_bh(&call->state_lock);

	/* if all the packets we sent are hard-ACK'd, then we can discard
	 * whatever we've got left */
	_debug("clear Tx %d",
	       CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));

	del_timer_sync(&call->resend_timer);
	clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
	clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events);

	if (call->acks_window)
		rxrpc_zap_tx_window(call);

	if (post_ACK) {
		/* post the final ACK message for userspace to pick up */
		_debug("post ACK");
		skb->mark = RXRPC_SKB_MARK_FINAL_ACK;
		sp->call = call;
		rxrpc_get_call(call);
		spin_lock_bh(&call->lock);
		if (rxrpc_queue_rcv_skb(call, skb, true, true) < 0)
			BUG();
		spin_unlock_bh(&call->lock);
		goto process_further;
	}

discard:
	rxrpc_free_skb(skb);
	goto process_further;

protocol_error_unlock:
	write_unlock_bh(&call->state_lock);
protocol_error:
	rxrpc_free_skb(skb);
	_leave(" = -EPROTO");
	return -EPROTO;
}

/*
 * post a message to the socket Rx queue for recvmsg() to pick up
 */
static int rxrpc_post_message(struct rxrpc_call *call, u32 mark, u32 error,
			      bool fatal)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	int ret;

	_enter("{%d,%lx},%u,%u,%d",
	       call->debug_id, call->flags, mark, error, fatal);

	/* remove timers and things for fatal messages */
	if (fatal) {
		del_timer_sync(&call->resend_timer);
		del_timer_sync(&call->ack_timer);
		clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
	}

	if (mark != RXRPC_SKB_MARK_NEW_CALL &&
	    !test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		_leave("[no userid]");
		return 0;
	}

	if (!test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags)) {
		skb = alloc_skb(0, GFP_NOFS);
		if (!skb)
			return -ENOMEM;

		rxrpc_new_skb(skb);

		skb->mark = mark;

		sp = rxrpc_skb(skb);
		memset(sp, 0, sizeof(*sp));
		sp->error = error;
		sp->call = call;
		rxrpc_get_call(call);

		spin_lock_bh(&call->lock);
		ret = rxrpc_queue_rcv_skb(call, skb, true, fatal);
		spin_unlock_bh(&call->lock);
		BUG_ON(ret < 0);
	}

	return 0;
}
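
/* In rxrpc_process_call() below, "genbit" records which event bit caused an
 * outgoing packet to be generated so that the bit can be cleared once
 * kernel_sendmsg() has accepted the message. */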

/*
 * handle background processing of incoming call packets and ACK / abort
 * generation
 */
void rxrpc_process_call(struct work_struct *work)
{
	struct rxrpc_call *call =
		container_of(work, struct rxrpc_call, processor);
	struct rxrpc_ackpacket ack;
	struct rxrpc_ackinfo ackinfo;
	struct rxrpc_header hdr;
	struct msghdr msg;
	struct kvec iov[5];
	unsigned long bits;
	__be32 data, pad;
	size_t len;
	int genbit, loop, nbit, ioc, ret, mtu;
	u32 abort_code = RX_PROTOCOL_ERROR;
	u8 *acks = NULL;

	//printk("\n--------------------\n");
	_enter("{%d,%s,%lx} [%lu]",
	       call->debug_id, rxrpc_call_states[call->state], call->events,
	       (jiffies - call->creation_jif) / (HZ / 10));

	if (test_and_set_bit(RXRPC_CALL_PROC_BUSY, &call->flags)) {
		_debug("XXXXXXXXXXXXX RUNNING ON MULTIPLE CPUS XXXXXXXXXXXXX");
		return;
	}

	/* there's a good chance we're going to have to send a message, so set
	 * one up in advance */
	msg.msg_name	= &call->conn->trans->peer->srx.transport.sin;
	msg.msg_namelen	= sizeof(call->conn->trans->peer->srx.transport.sin);
	msg.msg_control	= NULL;
	msg.msg_controllen = 0;
	msg.msg_flags	= 0;

	hdr.epoch	= call->conn->epoch;
	hdr.cid		= call->cid;
	hdr.callNumber	= call->call_id;
	hdr.seq		= 0;
	hdr.type	= RXRPC_PACKET_TYPE_ACK;
	hdr.flags	= call->conn->out_clientflag;
	hdr.userStatus	= 0;
	hdr.securityIndex = call->conn->security_ix;
	hdr._rsvd	= 0;
	hdr.serviceId	= call->conn->service_id;

	memset(iov, 0, sizeof(iov));
	iov[0].iov_base	= &hdr;
	iov[0].iov_len	= sizeof(hdr);

	/* deal with events of a final nature */
	if (test_bit(RXRPC_CALL_RELEASE, &call->events)) {
		rxrpc_release_call(call);
		clear_bit(RXRPC_CALL_RELEASE, &call->events);
	}

	if (test_bit(RXRPC_CALL_RCVD_ERROR, &call->events)) {
		int error;

		clear_bit(RXRPC_CALL_CONN_ABORT, &call->events);
		clear_bit(RXRPC_CALL_REJECT_BUSY, &call->events);
		clear_bit(RXRPC_CALL_ABORT, &call->events);

		error = call->conn->trans->peer->net_error;
		_debug("post net error %d", error);

		if (rxrpc_post_message(call, RXRPC_SKB_MARK_NET_ERROR,
				       error, true) < 0)
			goto no_mem;
		clear_bit(RXRPC_CALL_RCVD_ERROR, &call->events);
		goto kill_ACKs;
	}

	if (test_bit(RXRPC_CALL_CONN_ABORT, &call->events)) {
		ASSERTCMP(call->state, >, RXRPC_CALL_COMPLETE);

		clear_bit(RXRPC_CALL_REJECT_BUSY, &call->events);
		clear_bit(RXRPC_CALL_ABORT, &call->events);

		_debug("post conn abort");

		if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR,
				       call->conn->error, true) < 0)
			goto no_mem;
		clear_bit(RXRPC_CALL_CONN_ABORT, &call->events);
		goto kill_ACKs;
	}

	if (test_bit(RXRPC_CALL_REJECT_BUSY, &call->events)) {
		hdr.type = RXRPC_PACKET_TYPE_BUSY;
		genbit = RXRPC_CALL_REJECT_BUSY;
		goto send_message;
	}

	if (test_bit(RXRPC_CALL_ABORT, &call->events)) {
		ASSERTCMP(call->state, >, RXRPC_CALL_COMPLETE);

		if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR,
				       ECONNABORTED, true) < 0)
			goto no_mem;
		hdr.type = RXRPC_PACKET_TYPE_ABORT;
		data = htonl(call->abort_code);
		iov[1].iov_base = &data;
		iov[1].iov_len = sizeof(data);
		genbit = RXRPC_CALL_ABORT;
		goto send_message;
	}

	if (test_bit(RXRPC_CALL_ACK_FINAL, &call->events)) {
		genbit = RXRPC_CALL_ACK_FINAL;

		ack.bufferSpace	= htons(8);
		ack.maxSkew	= 0;
		ack.serial	= 0;
		ack.reason	= RXRPC_ACK_IDLE;
		ack.nAcks	= 0;
		call->ackr_reason = 0;

		spin_lock_bh(&call->lock);
		ack.serial = call->ackr_serial;
		ack.previousPacket = call->ackr_prev_seq;
		ack.firstPacket = htonl(call->rx_data_eaten + 1);
		spin_unlock_bh(&call->lock);

		pad = 0;

		iov[1].iov_base = &ack;
		iov[1].iov_len	= sizeof(ack);
		iov[2].iov_base = &pad;
		iov[2].iov_len	= 3;
		iov[3].iov_base = &ackinfo;
		iov[3].iov_len	= sizeof(ackinfo);
		goto send_ACK;
	}
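
	/* Wire format note: an ACK packet is assembled from separate iovs -
	 * the rxrpc header, the ackpacket body, the soft-ACK array (empty for
	 * the final ACK above), three bytes of padding, then the ackinfo
	 * trailer. */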
	if (call->events & ((1 << RXRPC_CALL_RCVD_BUSY) |
			    (1 << RXRPC_CALL_RCVD_ABORT))
	    ) {
		u32 mark;

		if (test_bit(RXRPC_CALL_RCVD_ABORT, &call->events))
			mark = RXRPC_SKB_MARK_REMOTE_ABORT;
		else
			mark = RXRPC_SKB_MARK_BUSY;

		_debug("post abort/busy");
		rxrpc_clear_tx_window(call);
		if (rxrpc_post_message(call, mark, ECONNABORTED, true) < 0)
			goto no_mem;

		clear_bit(RXRPC_CALL_RCVD_BUSY, &call->events);
		clear_bit(RXRPC_CALL_RCVD_ABORT, &call->events);
		goto kill_ACKs;
	}

	if (test_and_clear_bit(RXRPC_CALL_RCVD_ACKALL, &call->events)) {
		_debug("do implicit ackall");
		rxrpc_clear_tx_window(call);
	}

	if (test_bit(RXRPC_CALL_LIFE_TIMER, &call->events)) {
		write_lock_bh(&call->state_lock);
		if (call->state <= RXRPC_CALL_COMPLETE) {
			call->state = RXRPC_CALL_LOCALLY_ABORTED;
			call->abort_code = RX_CALL_TIMEOUT;
			set_bit(RXRPC_CALL_ABORT, &call->events);
		}
		write_unlock_bh(&call->state_lock);

		_debug("post timeout");
		if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR,
				       ETIME, true) < 0)
			goto no_mem;

		clear_bit(RXRPC_CALL_LIFE_TIMER, &call->events);
		goto kill_ACKs;
	}

	/* deal with assorted inbound messages */
	if (!skb_queue_empty(&call->rx_queue)) {
		switch (rxrpc_process_rx_queue(call, &abort_code)) {
		case 0:
		case -EAGAIN:
			break;
		case -ENOMEM:
			goto no_mem;
		case -EKEYEXPIRED:
		case -EKEYREJECTED:
		case -EPROTO:
			rxrpc_abort_call(call, abort_code);
			goto kill_ACKs;
		}
	}

	/* handle resending */
	if (test_and_clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events))
		rxrpc_resend_timer(call);
	if (test_and_clear_bit(RXRPC_CALL_RESEND, &call->events))
		rxrpc_resend(call);

	/* consider sending an ordinary ACK */
	if (test_bit(RXRPC_CALL_ACK, &call->events)) {
		_debug("send ACK: window: %d - %d { %lx }",
		       call->rx_data_eaten, call->ackr_win_top,
		       call->ackr_window[0]);

		if (call->state > RXRPC_CALL_SERVER_ACK_REQUEST &&
		    call->ackr_reason != RXRPC_ACK_PING_RESPONSE) {
			/* ACK by sending reply DATA packet in this state */
			clear_bit(RXRPC_CALL_ACK, &call->events);
			goto maybe_reschedule;
		}

		genbit = RXRPC_CALL_ACK;

		acks = kzalloc(call->ackr_win_top - call->rx_data_eaten,
			       GFP_NOFS);
		if (!acks)
			goto no_mem;

		//hdr.flags	= RXRPC_SLOW_START_OK;
		ack.bufferSpace	= htons(8);
		ack.maxSkew	= 0;
		ack.serial	= 0;
		ack.reason	= 0;

		spin_lock_bh(&call->lock);
		ack.reason = call->ackr_reason;
		ack.serial = call->ackr_serial;
		ack.previousPacket = call->ackr_prev_seq;
		ack.firstPacket = htonl(call->rx_data_eaten + 1);

		ack.nAcks = 0;
		for (loop = 0; loop < RXRPC_ACKR_WINDOW_ASZ; loop++) {
			nbit = loop * BITS_PER_LONG;
			for (bits = call->ackr_window[loop]; bits; bits >>= 1
			     ) {
				_debug("- l=%d n=%d b=%lx", loop, nbit, bits);
				if (bits & 1) {
					acks[nbit] = RXRPC_ACK_TYPE_ACK;
					ack.nAcks = nbit + 1;
				}
				nbit++;
			}
		}
		call->ackr_reason = 0;
		spin_unlock_bh(&call->lock);

		pad = 0;

		iov[1].iov_base = &ack;
		iov[1].iov_len	= sizeof(ack);
		iov[2].iov_base = acks;
		iov[2].iov_len	= ack.nAcks;
		iov[3].iov_base = &pad;
		iov[3].iov_len	= 3;
		iov[4].iov_base = &ackinfo;
		iov[4].iov_len	= sizeof(ackinfo);

		switch (ack.reason) {
		case RXRPC_ACK_REQUESTED:
		case RXRPC_ACK_DUPLICATE:
		case RXRPC_ACK_OUT_OF_SEQUENCE:
		case RXRPC_ACK_EXCEEDS_WINDOW:
		case RXRPC_ACK_NOSPACE:
		case RXRPC_ACK_PING:
		case RXRPC_ACK_PING_RESPONSE:
			goto send_ACK_with_skew;
		case RXRPC_ACK_DELAY:
		case RXRPC_ACK_IDLE:
			goto send_ACK;
		}
	}

	/* handle completion of security negotiations on an incoming
	 * connection */
	if (test_and_clear_bit(RXRPC_CALL_SECURED, &call->events)) {
		_debug("secured");
		spin_lock_bh(&call->lock);

		if (call->state == RXRPC_CALL_SERVER_SECURING) {
			_debug("securing");
			write_lock(&call->conn->lock);
			if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
			    !test_bit(RXRPC_CALL_RELEASE, &call->events)) {
				_debug("not released");
				call->state = RXRPC_CALL_SERVER_ACCEPTING;
				list_move_tail(&call->accept_link,
					       &call->socket->acceptq);
			}
			write_unlock(&call->conn->lock);
			read_lock(&call->state_lock);
			if (call->state < RXRPC_CALL_COMPLETE)
				set_bit(RXRPC_CALL_POST_ACCEPT, &call->events);
			read_unlock(&call->state_lock);
		}

		spin_unlock_bh(&call->lock);
		if (!test_bit(RXRPC_CALL_POST_ACCEPT, &call->events))
			goto maybe_reschedule;
	}

	/* post a notification of an acceptable connection to the app */
	if (test_bit(RXRPC_CALL_POST_ACCEPT, &call->events)) {
		_debug("post accept");
		if (rxrpc_post_message(call, RXRPC_SKB_MARK_NEW_CALL,
				       0, false) < 0)
			goto no_mem;
		clear_bit(RXRPC_CALL_POST_ACCEPT, &call->events);
		goto maybe_reschedule;
	}

	/* handle incoming call acceptance */
	if (test_and_clear_bit(RXRPC_CALL_ACCEPTED, &call->events)) {
		_debug("accepted");
		ASSERTCMP(call->rx_data_post, ==, 0);
		call->rx_data_post = 1;
		read_lock_bh(&call->state_lock);
		if (call->state < RXRPC_CALL_COMPLETE)
			set_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events);
		read_unlock_bh(&call->state_lock);
	}

	/* drain the out of sequence received packet queue into the packet Rx
	 * queue */
	if (test_and_clear_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events)) {
		while (call->rx_data_post == call->rx_first_oos)
			if (rxrpc_drain_rx_oos_queue(call) < 0)
				break;
		goto maybe_reschedule;
	}

	/* other events may have been raised since we started checking */
	goto maybe_reschedule;
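
	/* maxSkew advertises how far the highest serial number received on
	 * this connection (hi_serial) has advanced beyond the serial being
	 * acknowledged. */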
send_ACK_with_skew:
	ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
			    ntohl(ack.serial));

send_ACK:
	mtu = call->conn->trans->peer->if_mtu;
	mtu -= call->conn->trans->peer->hdrsize;
	ackinfo.maxMTU	= htonl(mtu);
	ackinfo.rwind	= htonl(32);

	/* permit the peer to send us jumbo packets if it wants to */
	ackinfo.rxMTU	= htonl(5692);
	ackinfo.jumbo_max = htonl(4);

	hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
	_proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
	       ntohl(hdr.serial),
	       ntohs(ack.maxSkew),
	       ntohl(ack.firstPacket),
	       ntohl(ack.previousPacket),
	       ntohl(ack.serial),
	       rxrpc_acks[ack.reason],
	       ack.nAcks);

	del_timer_sync(&call->ack_timer);
	if (ack.nAcks > 0)
		set_bit(RXRPC_CALL_TX_SOFT_ACK, &call->flags);
	goto send_message_2;

send_message:
	_debug("send message");

	hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
	_proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
send_message_2:
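	/* total up however many of the five iovs are populated; the array was
	 * zeroed above, so an unused slot has zero length */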
	len = iov[0].iov_len;
	ioc = 1;
	if (iov[4].iov_len) {
		ioc = 5;
		len += iov[4].iov_len;
		len += iov[3].iov_len;
		len += iov[2].iov_len;
		len += iov[1].iov_len;
	} else if (iov[3].iov_len) {
		ioc = 4;
		len += iov[3].iov_len;
		len += iov[2].iov_len;
		len += iov[1].iov_len;
	} else if (iov[2].iov_len) {
		ioc = 3;
		len += iov[2].iov_len;
		len += iov[1].iov_len;
	} else if (iov[1].iov_len) {
		ioc = 2;
		len += iov[1].iov_len;
	}

	ret = kernel_sendmsg(call->conn->trans->local->socket,
			     &msg, iov, ioc, len);
	if (ret < 0) {
		_debug("sendmsg failed: %d", ret);
		read_lock_bh(&call->state_lock);
		if (call->state < RXRPC_CALL_DEAD)
			rxrpc_queue_call(call);
		read_unlock_bh(&call->state_lock);
		goto error;
	}

	switch (genbit) {
	case RXRPC_CALL_ABORT:
		clear_bit(genbit, &call->events);
		clear_bit(RXRPC_CALL_RCVD_ABORT, &call->events);
		goto kill_ACKs;

	case RXRPC_CALL_ACK_FINAL:
		write_lock_bh(&call->state_lock);
		if (call->state == RXRPC_CALL_CLIENT_FINAL_ACK)
			call->state = RXRPC_CALL_COMPLETE;
		write_unlock_bh(&call->state_lock);
		goto kill_ACKs;

	default:
		clear_bit(genbit, &call->events);
		switch (call->state) {
		case RXRPC_CALL_CLIENT_AWAIT_REPLY:
		case RXRPC_CALL_CLIENT_RECV_REPLY:
		case RXRPC_CALL_SERVER_RECV_REQUEST:
		case RXRPC_CALL_SERVER_ACK_REQUEST:
			_debug("start ACK timer");
			rxrpc_propose_ACK(call, RXRPC_ACK_DELAY,
					  call->ackr_serial, false);
		default:
			break;
		}
		goto maybe_reschedule;
	}

kill_ACKs:
	del_timer_sync(&call->ack_timer);
	if (test_and_clear_bit(RXRPC_CALL_ACK_FINAL, &call->events))
		rxrpc_put_call(call);
	clear_bit(RXRPC_CALL_ACK, &call->events);

maybe_reschedule:
	if (call->events || !skb_queue_empty(&call->rx_queue)) {
		read_lock_bh(&call->state_lock);
		if (call->state < RXRPC_CALL_DEAD)
			rxrpc_queue_call(call);
		read_unlock_bh(&call->state_lock);
	}

	/* don't leave aborted connections on the accept queue */
	if (call->state >= RXRPC_CALL_COMPLETE &&
	    !list_empty(&call->accept_link)) {
		_debug("X unlinking once-pending call %p { e=%lx f=%lx c=%x }",
		       call, call->events, call->flags,
		       ntohl(call->conn->cid));

		read_lock_bh(&call->state_lock);
		if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
		    !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events))
			rxrpc_queue_call(call);
		read_unlock_bh(&call->state_lock);
	}

error:
	clear_bit(RXRPC_CALL_PROC_BUSY, &call->flags);
	kfree(acks);

	/* because we don't want two CPUs both processing the work item for one
	 * call at the same time, we use a flag to note when it's busy; however
	 * this means there's a race between clearing the flag and setting the
	 * work pending bit and the work item being processed again */
	if (call->events && !work_pending(&call->processor)) {
		_debug("jumpstart %x", ntohl(call->conn->cid));
		rxrpc_queue_call(call);
	}

	_leave("");
	return;

no_mem:
	_debug("out of memory");
	goto maybe_reschedule;
}