
/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/circ_buf.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

struct kmem_cache *rxrpc_call_jar;
LIST_HEAD(rxrpc_calls);
DEFINE_RWLOCK(rxrpc_call_lock);
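
/* maximum lifetime of a call, in seconds (multiplied by HZ when the lifetime
 * timer is armed) */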
static unsigned rxrpc_call_max_lifetime = 60;
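
/* time to wait, in seconds, after a call is released before the dead-call
 * timer reaps it */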
static unsigned rxrpc_dead_call_timeout = 10;

static void rxrpc_destroy_call(struct work_struct *work);
static void rxrpc_call_life_expired(unsigned long _call);
static void rxrpc_dead_call_expired(unsigned long _call);
static void rxrpc_ack_time_expired(unsigned long _call);
static void rxrpc_resend_time_expired(unsigned long _call);

/*
 * allocate a new call
 */
static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
{
	struct rxrpc_call *call;

	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
	if (!call)
		return NULL;

	call->acks_winsz = 16;
	call->acks_window = kmalloc(call->acks_winsz * sizeof(unsigned long),
				    gfp);
	if (!call->acks_window) {
		kmem_cache_free(rxrpc_call_jar, call);
		return NULL;
	}

	setup_timer(&call->lifetimer, &rxrpc_call_life_expired,
		    (unsigned long) call);
	setup_timer(&call->deadspan, &rxrpc_dead_call_expired,
		    (unsigned long) call);
	setup_timer(&call->ack_timer, &rxrpc_ack_time_expired,
		    (unsigned long) call);
	setup_timer(&call->resend_timer, &rxrpc_resend_time_expired,
		    (unsigned long) call);
	INIT_WORK(&call->destroyer, &rxrpc_destroy_call);
	INIT_WORK(&call->processor, &rxrpc_process_call);
	INIT_LIST_HEAD(&call->accept_link);
	skb_queue_head_init(&call->rx_queue);
	skb_queue_head_init(&call->rx_oos_queue);
	init_waitqueue_head(&call->tx_waitq);
	spin_lock_init(&call->lock);
	rwlock_init(&call->state_lock);
	atomic_set(&call->usage, 1);
	call->debug_id = atomic_inc_return(&rxrpc_debug_id);
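
	/* assume a client call until rxrpc_incoming_call() overrides this */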
	call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;

	memset(&call->sock_node, 0xed, sizeof(call->sock_node));

	call->rx_data_expect = 1;
	call->rx_data_eaten = 0;
	call->rx_first_oos = 0;
	call->ackr_win_top = call->rx_data_eaten + 1 + RXRPC_MAXACKS;
	call->creation_jif = jiffies;
	return call;
}

/*
 * allocate a new client call and attempt to get a connection slot for it
 */
static struct rxrpc_call *rxrpc_alloc_client_call(
	struct rxrpc_sock *rx,
	struct rxrpc_transport *trans,
	struct rxrpc_conn_bundle *bundle,
	gfp_t gfp)
{
	struct rxrpc_call *call;
	int ret;

	_enter("");

	ASSERT(rx != NULL);
	ASSERT(trans != NULL);
	ASSERT(bundle != NULL);

	call = rxrpc_alloc_call(gfp);
	if (!call)
		return ERR_PTR(-ENOMEM);

	sock_hold(&rx->sk);
	call->socket = rx;
	call->rx_data_post = 1;

	ret = rxrpc_connect_call(rx, trans, bundle, call, gfp);
	if (ret < 0) {
		kmem_cache_free(rxrpc_call_jar, call);
		return ERR_PTR(ret);
	}

	spin_lock(&call->conn->trans->peer->lock);
	list_add(&call->error_link, &call->conn->trans->peer->error_targets);
	spin_unlock(&call->conn->trans->peer->lock);

	call->lifetimer.expires = jiffies + rxrpc_call_max_lifetime * HZ;
	add_timer(&call->lifetimer);

	_leave(" = %p", call);
	return call;
}

/*
 * set up a call for the given data
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_get_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_transport *trans,
					 struct rxrpc_conn_bundle *bundle,
					 unsigned long user_call_ID,
					 int create,
					 gfp_t gfp)
{
	struct rxrpc_call *call, *candidate;
	struct rb_node *p, *parent, **pp;

	_enter("%p,%d,%d,%lx,%d",
	       rx, trans ? trans->debug_id : -1, bundle ? bundle->debug_id : -1,
	       user_call_ID, create);

	/* search the extant calls first for one that matches the specified
	 * user ID */
	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);

	if (!create || !trans)
		return ERR_PTR(-EBADSLT);

	/* not yet present - create a candidate for a new record and then
	 * redo the search */
	candidate = rxrpc_alloc_client_call(rx, trans, bundle, gfp);
	if (IS_ERR(candidate)) {
		_leave(" = %ld", PTR_ERR(candidate));
		return candidate;
	}

	candidate->user_call_ID = user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &candidate->flags);

	write_lock(&rx->call_lock);
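
	/* the call lock was dropped after the first search, so another thread
	 * may have added a call with the same user ID in the meantime - hence
	 * the re-search */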
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto found_extant_second;
	}

	/* second search also failed; add the new call */
	call = candidate;
	candidate = NULL;
	rxrpc_get_call(call);

	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	_leave(" = %p [new]", call);
	return call;

	/* we found the call in the list immediately */
found_extant_call:
	rxrpc_get_call(call);
	read_unlock(&rx->call_lock);
	_leave(" = %p [extant %d]", call, atomic_read(&call->usage));
	return call;

	/* we found the call on the second time through the list */
found_extant_second:
	rxrpc_get_call(call);
	write_unlock(&rx->call_lock);
	rxrpc_put_call(candidate);
	_leave(" = %p [second %d]", call, atomic_read(&call->usage));
	return call;
}

/*
 * set up an incoming call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
				       struct rxrpc_connection *conn,
				       struct rxrpc_header *hdr,
				       gfp_t gfp)
{
	struct rxrpc_call *call, *candidate;
	struct rb_node **p, *parent;
	__be32 call_id;

	_enter(",%d,,%x", conn->debug_id, gfp);

	ASSERT(rx != NULL);

	candidate = rxrpc_alloc_call(gfp);
	if (!candidate)
		return ERR_PTR(-EBUSY);

	candidate->socket = rx;
	candidate->conn = conn;
	candidate->cid = hdr->cid;
	candidate->call_id = hdr->callNumber;
	candidate->channel = ntohl(hdr->cid) & RXRPC_CHANNELMASK;
	candidate->rx_data_post = 0;
	candidate->state = RXRPC_CALL_SERVER_ACCEPTING;
	if (conn->security_ix > 0)
		candidate->state = RXRPC_CALL_SERVER_SECURING;

	write_lock_bh(&conn->lock);

	/* set the channel for this call */
	call = conn->channels[candidate->channel];
	_debug("channel[%u] is %p", candidate->channel, call);
	if (call && call->call_id == hdr->callNumber) {
		/* already set; must've been a duplicate packet */
		_debug("extant call [%d]", call->state);
		ASSERTCMP(call->conn, ==, conn);

		read_lock(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_LOCALLY_ABORTED:
			if (!test_and_set_bit(RXRPC_CALL_ABORT, &call->events))
				schedule_work(&call->processor);
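			/* fall through */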
		case RXRPC_CALL_REMOTELY_ABORTED:
			read_unlock(&call->state_lock);
			goto aborted_call;
		default:
			rxrpc_get_call(call);
			read_unlock(&call->state_lock);
			goto extant_call;
		}
	}

	if (call) {
		/* it seems the channel is still in use from the previous call
		 * - ditch the old binding if its call is now complete */
		_debug("CALL: %u { %s }",
		       call->debug_id, rxrpc_call_states[call->state]);

		if (call->state >= RXRPC_CALL_COMPLETE) {
			conn->channels[call->channel] = NULL;
		} else {
			write_unlock_bh(&conn->lock);
			kmem_cache_free(rxrpc_call_jar, candidate);
			_leave(" = -EBUSY");
			return ERR_PTR(-EBUSY);
		}
	}

	/* check the call number isn't duplicate */
	_debug("check dup");
	call_id = hdr->callNumber;
	p = &conn->calls.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		call = rb_entry(parent, struct rxrpc_call, conn_node);

		if (call_id < call->call_id)
			p = &(*p)->rb_left;
		else if (call_id > call->call_id)
			p = &(*p)->rb_right;
		else
			goto old_call;
	}

	/* make the call available */
	_debug("new call");
	call = candidate;
	candidate = NULL;
	rb_link_node(&call->conn_node, parent, p);
	rb_insert_color(&call->conn_node, &conn->calls);
	conn->channels[call->channel] = call;
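
	/* the new call pins both the socket and the connection */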
	sock_hold(&rx->sk);
	atomic_inc(&conn->usage);
	write_unlock_bh(&conn->lock);

	spin_lock(&conn->trans->peer->lock);
	list_add(&call->error_link, &conn->trans->peer->error_targets);
	spin_unlock(&conn->trans->peer->lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

	call->lifetimer.expires = jiffies + rxrpc_call_max_lifetime * HZ;
	add_timer(&call->lifetimer);
	_leave(" = %p {%d} [new]", call, call->debug_id);
	return call;

extant_call:
	write_unlock_bh(&conn->lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = %p {%d} [extant]", call, call ? call->debug_id : -1);
	return call;

aborted_call:
	write_unlock_bh(&conn->lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNABORTED");
	return ERR_PTR(-ECONNABORTED);

old_call:
	write_unlock_bh(&conn->lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNRESET [old]");
	return ERR_PTR(-ECONNRESET);
}

/*
 * find an extant server call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_server_call(struct rxrpc_sock *rx,
					  unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p,%lx", rx, user_call_ID);

	/* search the extant calls for one that matches the specified user
	 * ID */
	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);
	_leave(" = NULL");
	return NULL;

	/* we found the call in the list immediately */
found_extant_call:
	rxrpc_get_call(call);
	read_unlock(&rx->call_lock);
	_leave(" = %p [%d]", call, atomic_read(&call->usage));
	return call;
}

/*
 * detach a call from a socket and set up for release
 */
void rxrpc_release_call(struct rxrpc_call *call)
{
	struct rxrpc_sock *rx = call->socket;

	_enter("{%d,%d,%d,%d}",
	       call->debug_id, atomic_read(&call->usage),
	       atomic_read(&call->ackr_not_idle),
	       call->rx_first_oos);

	spin_lock_bh(&call->lock);
	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();
	spin_unlock_bh(&call->lock);

	/* dissociate from the socket
	 * - the socket's ref on the call is passed to the death timer
	 */
	_debug("RELEASE CALL %p (%d CONN %p)",
	       call, call->debug_id, call->conn);

	write_lock_bh(&rx->call_lock);
	if (!list_empty(&call->accept_link)) {
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		ASSERT(!test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
		list_del_init(&call->accept_link);
		sk_acceptq_removed(&rx->sk);
	} else if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}
	write_unlock_bh(&rx->call_lock);

	if (call->conn->out_clientflag)
		spin_lock(&call->conn->trans->client_lock);
	write_lock_bh(&call->conn->lock);

	/* free up the channel for reuse */
	if (call->conn->out_clientflag) {
		call->conn->avail_calls++;
		if (call->conn->avail_calls == RXRPC_MAXCALLS)
			list_move_tail(&call->conn->bundle_link,
				       &call->conn->bundle->unused_conns);
		else if (call->conn->avail_calls == 1)
			list_move_tail(&call->conn->bundle_link,
				       &call->conn->bundle->avail_conns);
	}

	write_lock(&call->state_lock);
	if (call->conn->channels[call->channel] == call)
		call->conn->channels[call->channel] = NULL;

	if (call->state < RXRPC_CALL_COMPLETE &&
	    call->state != RXRPC_CALL_CLIENT_FINAL_ACK) {
		_debug("+++ ABORTING STATE %d +++\n", call->state);
		call->state = RXRPC_CALL_LOCALLY_ABORTED;
		call->abort_code = RX_CALL_DEAD;
		set_bit(RXRPC_CALL_ABORT, &call->events);
		schedule_work(&call->processor);
	}
	write_unlock(&call->state_lock);
	write_unlock_bh(&call->conn->lock);
	if (call->conn->out_clientflag)
		spin_unlock(&call->conn->trans->client_lock);

	if (!skb_queue_empty(&call->rx_queue) ||
	    !skb_queue_empty(&call->rx_oos_queue)) {
		struct rxrpc_skb_priv *sp;
		struct sk_buff *skb;

		_debug("purge Rx queues");

		spin_lock_bh(&call->lock);
		while ((skb = skb_dequeue(&call->rx_queue)) ||
		       (skb = skb_dequeue(&call->rx_oos_queue))) {
			sp = rxrpc_skb(skb);
			if (sp->call) {
				ASSERTCMP(sp->call, ==, call);
				rxrpc_put_call(call);
				sp->call = NULL;
			}
			skb->destructor = NULL;
			spin_unlock_bh(&call->lock);

			_debug("- zap %s %%%u #%u",
			       rxrpc_pkts[sp->hdr.type],
			       ntohl(sp->hdr.serial),
			       ntohl(sp->hdr.seq));
			rxrpc_free_skb(skb);
			spin_lock_bh(&call->lock);
		}
		spin_unlock_bh(&call->lock);

		ASSERTCMP(call->state, !=, RXRPC_CALL_COMPLETE);
	}

	del_timer_sync(&call->resend_timer);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->lifetimer);
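
	/* arm the dead-call timer; its expiry drops the ref that the socket
	 * formerly held on this call */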
	call->deadspan.expires = jiffies + rxrpc_dead_call_timeout * HZ;
	add_timer(&call->deadspan);

	_leave("");
}

/*
 * handle a dead call being ready for reaping
 */
static void rxrpc_dead_call_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	write_lock_bh(&call->state_lock);
	call->state = RXRPC_CALL_DEAD;
	write_unlock_bh(&call->state_lock);
	rxrpc_put_call(call);
}

/*
 * mark a call as to be released, aborting it if it's still in progress
 * - called with softirqs disabled
 */
static void rxrpc_mark_call_released(struct rxrpc_call *call)
{
	bool sched;

	write_lock(&call->state_lock);
	if (call->state < RXRPC_CALL_DEAD) {
		sched = false;
		if (call->state < RXRPC_CALL_COMPLETE) {
			_debug("abort call %p", call);
			call->state = RXRPC_CALL_LOCALLY_ABORTED;
			call->abort_code = RX_CALL_DEAD;
			if (!test_and_set_bit(RXRPC_CALL_ABORT, &call->events))
				sched = true;
		}
		if (!test_and_set_bit(RXRPC_CALL_RELEASE, &call->events))
			sched = true;
		if (sched)
			schedule_work(&call->processor);
	}
	write_unlock(&call->state_lock);
}

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p", rx);

	read_lock_bh(&rx->call_lock);

	/* mark all the calls as no longer wanting incoming packets */
	for (p = rb_first(&rx->calls); p; p = rb_next(p)) {
		call = rb_entry(p, struct rxrpc_call, sock_node);
		rxrpc_mark_call_released(call);
	}

	/* kill the not-yet-accepted incoming calls */
	list_for_each_entry(call, &rx->secureq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	list_for_each_entry(call, &rx->acceptq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	read_unlock_bh(&rx->call_lock);
	_leave("");
}

/*
 * release a call
 */
void __rxrpc_put_call(struct rxrpc_call *call)
{
	ASSERT(call != NULL);

	_enter("%p{u=%d}", call, atomic_read(&call->usage));

	ASSERTCMP(atomic_read(&call->usage), >, 0);

	if (atomic_dec_and_test(&call->usage)) {
		_debug("call %d dead", call->debug_id);
		ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
		schedule_work(&call->destroyer);
	}
	_leave("");
}

/*
 * clean up a call
 */
static void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	_net("DESTROY CALL %d", call->debug_id);

	ASSERT(call->socket);

	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	del_timer_sync(&call->lifetimer);
	del_timer_sync(&call->deadspan);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->resend_timer);

	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
	ASSERTCMP(call->events, ==, 0);
	if (work_pending(&call->processor)) {
		_debug("defer destroy");
		schedule_work(&call->destroyer);
		return;
	}

	if (call->conn) {
		spin_lock(&call->conn->trans->peer->lock);
		list_del(&call->error_link);
		spin_unlock(&call->conn->trans->peer->lock);

		write_lock_bh(&call->conn->lock);
		rb_erase(&call->conn_node, &call->conn->calls);
		write_unlock_bh(&call->conn->lock);
		rxrpc_put_connection(call->conn);
	}

	if (call->acks_window) {
		_debug("kill Tx window %d",
		       CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz));
		smp_mb();
		while (CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz) > 0) {
			struct rxrpc_skb_priv *sp;
			unsigned long _skb;
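
			/* the bottom bit of each window slot is used as a
			 * marker bit, so mask it off to recover the sk_buff
			 * pointer */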
			_skb = call->acks_window[call->acks_tail] & ~1;
			sp = rxrpc_skb((struct sk_buff *) _skb);
			_debug("+++ clear Tx %u", ntohl(sp->hdr.seq));
			rxrpc_free_skb((struct sk_buff *) _skb);
			call->acks_tail =
				(call->acks_tail + 1) & (call->acks_winsz - 1);
		}

		kfree(call->acks_window);
	}

	rxrpc_free_skb(call->tx_pending);

	rxrpc_purge_queue(&call->rx_queue);
	ASSERT(skb_queue_empty(&call->rx_oos_queue));
	sock_put(&call->socket->sk);
	kmem_cache_free(rxrpc_call_jar, call);
}

/*
 * destroy a call
 */
static void rxrpc_destroy_call(struct work_struct *work)
{
	struct rxrpc_call *call =
		container_of(work, struct rxrpc_call, destroyer);

	_enter("%p{%d,%d,%p}",
	       call, atomic_read(&call->usage), call->channel, call->conn);

	ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);

	rxrpc_cleanup_call(call);
	_leave("");
}

/*
 * preemptively destroy all the call records from a transport endpoint rather
 * than waiting for them to time out
 */
void __exit rxrpc_destroy_all_calls(void)
{
	struct rxrpc_call *call;

	_enter("");
	write_lock_bh(&rxrpc_call_lock);

	while (!list_empty(&rxrpc_calls)) {
		call = list_entry(rxrpc_calls.next, struct rxrpc_call, link);
		_debug("Zapping call %p", call);

		list_del_init(&call->link);

		switch (atomic_read(&call->usage)) {
		case 0:
			ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
			break;
		case 1:
			if (del_timer_sync(&call->deadspan) != 0 &&
			    call->state != RXRPC_CALL_DEAD)
				rxrpc_dead_call_expired((unsigned long) call);
			if (call->state != RXRPC_CALL_DEAD)
				break;
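			/* fall through - complain about the call below */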
		default:
			printk(KERN_ERR "RXRPC:"
			       " Call %p still in use (%d,%d,%s,%lx,%lx)!\n",
			       call, atomic_read(&call->usage),
			       atomic_read(&call->ackr_not_idle),
			       rxrpc_call_states[call->state],
			       call->flags, call->events);
			if (!skb_queue_empty(&call->rx_queue))
				printk(KERN_ERR "RXRPC: Rx queue occupied\n");
			if (!skb_queue_empty(&call->rx_oos_queue))
				printk(KERN_ERR "RXRPC: OOS queue occupied\n");
			break;
		}

		write_unlock_bh(&rxrpc_call_lock);
		cond_resched();
		write_lock_bh(&rxrpc_call_lock);
	}

	write_unlock_bh(&rxrpc_call_lock);
	_leave("");
}

/*
 * handle call lifetime being exceeded
 */
static void rxrpc_call_life_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;
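
	/* cheap unlocked check first; re-checked under the state lock below */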
	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	_enter("{%d}", call->debug_id);
	read_lock_bh(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE) {
		set_bit(RXRPC_CALL_LIFE_TIMER, &call->events);
		schedule_work(&call->processor);
	}
	read_unlock_bh(&call->state_lock);
}

/*
 * handle resend timer expiry
 */
static void rxrpc_resend_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	read_lock_bh(&call->state_lock);
	clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
	if (call->state < RXRPC_CALL_COMPLETE &&
	    !test_and_set_bit(RXRPC_CALL_RESEND_TIMER, &call->events))
		schedule_work(&call->processor);
	read_unlock_bh(&call->state_lock);
}

/*
 * handle ACK timer expiry
 */
static void rxrpc_ack_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	read_lock_bh(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE &&
	    !test_and_set_bit(RXRPC_CALL_ACK, &call->events))
		schedule_work(&call->processor);
	read_unlock_bh(&call->state_lock);
}