ar-call.c

/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

struct kmem_cache *rxrpc_call_jar;
LIST_HEAD(rxrpc_calls);
DEFINE_RWLOCK(rxrpc_call_lock);

static unsigned rxrpc_call_max_lifetime = 60;	/* maximum lifetime of a call (seconds) */
static unsigned rxrpc_dead_call_timeout = 2;	/* grace period before reaping a dead call (seconds) */

static void rxrpc_destroy_call(struct work_struct *work);
static void rxrpc_call_life_expired(unsigned long _call);
static void rxrpc_dead_call_expired(unsigned long _call);
static void rxrpc_ack_time_expired(unsigned long _call);
static void rxrpc_resend_time_expired(unsigned long _call);
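
/*
 * Note on call lifetime (summarising the code below): each call carries four
 * timers - lifetimer (a hard cap on call duration), ack_timer and
 * resend_timer (protocol housekeeping), and deadspan, which holds the
 * socket's final reference for a grace period after release before the call
 * is reaped.
 */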
/*
 * allocate a new call
 */
static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
{
	struct rxrpc_call *call;

	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
	if (!call)
		return NULL;

	call->acks_winsz = 16;
	call->acks_window = kmalloc(call->acks_winsz * sizeof(unsigned long),
				    gfp);
	if (!call->acks_window) {
		kmem_cache_free(rxrpc_call_jar, call);
		return NULL;
	}

	setup_timer(&call->lifetimer, &rxrpc_call_life_expired,
		    (unsigned long) call);
	setup_timer(&call->deadspan, &rxrpc_dead_call_expired,
		    (unsigned long) call);
	setup_timer(&call->ack_timer, &rxrpc_ack_time_expired,
		    (unsigned long) call);
	setup_timer(&call->resend_timer, &rxrpc_resend_time_expired,
		    (unsigned long) call);
	INIT_WORK(&call->destroyer, &rxrpc_destroy_call);
	INIT_WORK(&call->processor, &rxrpc_process_call);
	INIT_LIST_HEAD(&call->accept_link);
	skb_queue_head_init(&call->rx_queue);
	skb_queue_head_init(&call->rx_oos_queue);
	init_waitqueue_head(&call->tx_waitq);
	spin_lock_init(&call->lock);
	rwlock_init(&call->state_lock);
	atomic_set(&call->usage, 1);
	call->debug_id = atomic_inc_return(&rxrpc_debug_id);
	call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;

	/* poison the tree node until the call is actually inserted */
	memset(&call->sock_node, 0xed, sizeof(call->sock_node));

	call->rx_data_expect = 1;
	call->rx_data_eaten = 0;
	call->rx_first_oos = 0;
	call->ackr_win_top = call->rx_data_eaten + 1 + RXRPC_MAXACKS;
	call->creation_jif = jiffies;
	return call;
}
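
/*
 * Note: acks_winsz must remain a power of two - rxrpc_cleanup_call() below
 * advances the ring tail with "& (call->acks_winsz - 1)", which is only
 * correct for power-of-two window sizes.
 */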
/*
 * allocate a new client call and attempt to get a connection slot for it
 */
static struct rxrpc_call *rxrpc_alloc_client_call(
	struct rxrpc_sock *rx,
	struct rxrpc_transport *trans,
	struct rxrpc_conn_bundle *bundle,
	gfp_t gfp)
{
	struct rxrpc_call *call;
	int ret;

	_enter("");

	ASSERT(rx != NULL);
	ASSERT(trans != NULL);
	ASSERT(bundle != NULL);

	call = rxrpc_alloc_call(gfp);
	if (!call)
		return ERR_PTR(-ENOMEM);

	sock_hold(&rx->sk);
	call->socket = rx;
	call->rx_data_post = 1;

	ret = rxrpc_connect_call(rx, trans, bundle, call, gfp);
	if (ret < 0) {
		/* give back everything taken above before discarding */
		sock_put(&rx->sk);
		kfree(call->acks_window);
		kmem_cache_free(rxrpc_call_jar, call);
		return ERR_PTR(ret);
	}

	spin_lock(&call->conn->trans->peer->lock);
	list_add(&call->error_link, &call->conn->trans->peer->error_targets);
	spin_unlock(&call->conn->trans->peer->lock);

	call->lifetimer.expires = jiffies + rxrpc_call_max_lifetime * HZ;
	add_timer(&call->lifetimer);

	_leave(" = %p", call);
	return call;
}
/*
 * set up a call for the given data
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_get_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_transport *trans,
					 struct rxrpc_conn_bundle *bundle,
					 unsigned long user_call_ID,
					 int create,
					 gfp_t gfp)
{
	struct rxrpc_call *call, *candidate;
	struct rb_node *p, *parent, **pp;

	_enter("%p,%d,%d,%lx,%d",
	       rx, trans ? trans->debug_id : -1, bundle ? bundle->debug_id : -1,
	       user_call_ID, create);

	/* search the extant calls first for one that matches the specified
	 * user ID */
	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);

	if (!create || !trans)
		return ERR_PTR(-EBADSLT);

	/* not yet present - create a candidate for a new record and then
	 * redo the search */
	candidate = rxrpc_alloc_client_call(rx, trans, bundle, gfp);
	if (IS_ERR(candidate)) {
		_leave(" = %ld", PTR_ERR(candidate));
		return candidate;
	}

	candidate->user_call_ID = user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &candidate->flags);

	write_lock(&rx->call_lock);

	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto found_extant_second;
	}

	/* second search also failed; add the new call */
	call = candidate;
	candidate = NULL;
	rxrpc_get_call(call);

	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	_leave(" = %p [new]", call);
	return call;

	/* we found the call in the tree immediately */
found_extant_call:
	rxrpc_get_call(call);
	read_unlock(&rx->call_lock);
	_leave(" = %p [extant %d]", call, atomic_read(&call->usage));
	return call;

	/* we found the call on the second pass through the tree */
found_extant_second:
	rxrpc_get_call(call);
	write_unlock(&rx->call_lock);
	rxrpc_put_call(candidate);
	_leave(" = %p [second %d]", call, atomic_read(&call->usage));
	return call;
}
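
/*
 * Typical caller pattern (a sketch only - the in-tree caller is presumably
 * the sendmsg path, and its flag/ID plumbing differs in detail):
 *
 *	call = rxrpc_get_client_call(rx, trans, bundle, user_call_ID,
 *				     1, GFP_KERNEL);
 *	if (IS_ERR(call))
 *		return PTR_ERR(call);
 *	... use the call ...
 *	rxrpc_put_call(call);	// drop the ref taken by the lookup
 */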
/*
 * set up an incoming call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
				       struct rxrpc_connection *conn,
				       struct rxrpc_header *hdr,
				       gfp_t gfp)
{
	struct rxrpc_call *call, *candidate;
	struct rb_node **p, *parent;
	__be32 call_id;

	_enter(",%d,,%x", conn->debug_id, gfp);

	ASSERT(rx != NULL);

	candidate = rxrpc_alloc_call(gfp);
	if (!candidate)
		return ERR_PTR(-EBUSY);

	candidate->socket = rx;
	candidate->conn = conn;
	candidate->cid = hdr->cid;
	candidate->call_id = hdr->callNumber;
	candidate->channel = ntohl(hdr->cid) & RXRPC_CHANNELMASK;
	candidate->rx_data_post = 0;
	candidate->state = RXRPC_CALL_SERVER_ACCEPTING;
	if (conn->security_ix > 0)
		candidate->state = RXRPC_CALL_SERVER_SECURING;

	write_lock_bh(&conn->lock);

	/* set the channel for this call */
	call = conn->channels[candidate->channel];
	_debug("channel[%u] is %p", candidate->channel, call);
	if (call && call->call_id == hdr->callNumber) {
		/* already set; must've been a duplicate packet */
		_debug("extant call [%d]", call->state);
		ASSERTCMP(call->conn, ==, conn);

		read_lock(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_LOCALLY_ABORTED:
			if (!test_and_set_bit(RXRPC_CALL_ABORT, &call->events))
				rxrpc_queue_call(call);
			/* fall through */
		case RXRPC_CALL_REMOTELY_ABORTED:
			read_unlock(&call->state_lock);
			goto aborted_call;
		default:
			rxrpc_get_call(call);
			read_unlock(&call->state_lock);
			goto extant_call;
		}
	}

	if (call) {
		/* it seems the channel is still in use from the previous call
		 * - ditch the old binding if its call is now complete */
		_debug("CALL: %u { %s }",
		       call->debug_id, rxrpc_call_states[call->state]);

		if (call->state >= RXRPC_CALL_COMPLETE) {
			conn->channels[call->channel] = NULL;
		} else {
			write_unlock_bh(&conn->lock);
			kfree(candidate->acks_window);
			kmem_cache_free(rxrpc_call_jar, candidate);
			_leave(" = -EBUSY");
			return ERR_PTR(-EBUSY);
		}
	}

	/* check that the call number isn't a duplicate */
	_debug("check dup");
	call_id = hdr->callNumber;
	p = &conn->calls.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		call = rb_entry(parent, struct rxrpc_call, conn_node);

		if (call_id < call->call_id)
			p = &(*p)->rb_left;
		else if (call_id > call->call_id)
			p = &(*p)->rb_right;
		else
			goto old_call;
	}

	/* make the call available */
	_debug("new call");
	call = candidate;
	candidate = NULL;
	rb_link_node(&call->conn_node, parent, p);
	rb_insert_color(&call->conn_node, &conn->calls);
	conn->channels[call->channel] = call;
	sock_hold(&rx->sk);
	atomic_inc(&conn->usage);
	write_unlock_bh(&conn->lock);

	spin_lock(&conn->trans->peer->lock);
	list_add(&call->error_link, &conn->trans->peer->error_targets);
	spin_unlock(&conn->trans->peer->lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

	call->lifetimer.expires = jiffies + rxrpc_call_max_lifetime * HZ;
	add_timer(&call->lifetimer);

	_leave(" = %p {%d} [new]", call, call->debug_id);
	return call;

extant_call:
	write_unlock_bh(&conn->lock);
	kfree(candidate->acks_window);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = %p {%d} [extant]", call, call ? call->debug_id : -1);
	return call;

aborted_call:
	write_unlock_bh(&conn->lock);
	kfree(candidate->acks_window);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNABORTED");
	return ERR_PTR(-ECONNABORTED);

old_call:
	write_unlock_bh(&conn->lock);
	kfree(candidate->acks_window);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNRESET [old]");
	return ERR_PTR(-ECONNRESET);
}
/*
 * find an extant server call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_server_call(struct rxrpc_sock *rx,
					  unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p,%lx", rx, user_call_ID);

	/* search the extant calls for one that matches the specified user
	 * ID */
	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);
	_leave(" = NULL");
	return NULL;

	/* we found the call in the tree immediately */
found_extant_call:
	rxrpc_get_call(call);
	read_unlock(&rx->call_lock);
	_leave(" = %p [%d]", call, atomic_read(&call->usage));
	return call;
}
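
/*
 * Example server-side lookup (a sketch; a real caller would also need to
 * check the call state before using it):
 *
 *	call = rxrpc_find_server_call(rx, user_call_ID);
 *	if (!call)
 *		return -EBADSLT;	// no call with that user ID
 *	... operate on the call ...
 *	rxrpc_put_call(call);		// drop the ref taken by the lookup
 */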
/*
 * detach a call from a socket and set up for release
 */
void rxrpc_release_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_sock *rx = call->socket;

	_enter("{%d,%d,%d,%d}",
	       call->debug_id, atomic_read(&call->usage),
	       atomic_read(&call->ackr_not_idle),
	       call->rx_first_oos);

	spin_lock_bh(&call->lock);
	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();
	spin_unlock_bh(&call->lock);

	/* dissociate from the socket
	 * - the socket's ref on the call is passed to the death timer
	 */
	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

	write_lock_bh(&rx->call_lock);
	if (!list_empty(&call->accept_link)) {
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		ASSERT(!test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
		list_del_init(&call->accept_link);
		sk_acceptq_removed(&rx->sk);
	} else if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}
	write_unlock_bh(&rx->call_lock);

	/* free up the channel for reuse */
	spin_lock(&conn->trans->client_lock);
	write_lock_bh(&conn->lock);
	write_lock(&call->state_lock);

	if (conn->channels[call->channel] == call)
		conn->channels[call->channel] = NULL;

	if (conn->out_clientflag && conn->bundle) {
		conn->avail_calls++;
		switch (conn->avail_calls) {
		case 1:
			list_move_tail(&conn->bundle_link,
				       &conn->bundle->avail_conns);
			/* fall through */
		case 2 ... RXRPC_MAXCALLS - 1:
			ASSERT(conn->channels[0] == NULL ||
			       conn->channels[1] == NULL ||
			       conn->channels[2] == NULL ||
			       conn->channels[3] == NULL);
			break;
		case RXRPC_MAXCALLS:
			list_move_tail(&conn->bundle_link,
				       &conn->bundle->unused_conns);
			ASSERT(conn->channels[0] == NULL &&
			       conn->channels[1] == NULL &&
			       conn->channels[2] == NULL &&
			       conn->channels[3] == NULL);
			break;
		default:
			printk(KERN_ERR "RxRPC: conn->avail_calls=%d\n",
			       conn->avail_calls);
			BUG();
		}
	}

	spin_unlock(&conn->trans->client_lock);

	if (call->state < RXRPC_CALL_COMPLETE &&
	    call->state != RXRPC_CALL_CLIENT_FINAL_ACK) {
		_debug("+++ ABORTING STATE %d +++\n", call->state);
		call->state = RXRPC_CALL_LOCALLY_ABORTED;
		call->abort_code = RX_CALL_DEAD;
		set_bit(RXRPC_CALL_ABORT, &call->events);
		rxrpc_queue_call(call);
	}
	write_unlock(&call->state_lock);
	write_unlock_bh(&conn->lock);

	/* clean up the Rx queue */
	if (!skb_queue_empty(&call->rx_queue) ||
	    !skb_queue_empty(&call->rx_oos_queue)) {
		struct rxrpc_skb_priv *sp;
		struct sk_buff *skb;

		_debug("purge Rx queues");

		spin_lock_bh(&call->lock);
		while ((skb = skb_dequeue(&call->rx_queue)) ||
		       (skb = skb_dequeue(&call->rx_oos_queue))) {
			sp = rxrpc_skb(skb);
			if (sp->call) {
				ASSERTCMP(sp->call, ==, call);
				rxrpc_put_call(call);
				sp->call = NULL;
			}
			skb->destructor = NULL;
			spin_unlock_bh(&call->lock);

			_debug("- zap %s %%%u #%u",
			       rxrpc_pkts[sp->hdr.type],
			       ntohl(sp->hdr.serial),
			       ntohl(sp->hdr.seq));
			rxrpc_free_skb(skb);
			spin_lock_bh(&call->lock);
		}
		spin_unlock_bh(&call->lock);

		ASSERTCMP(call->state, !=, RXRPC_CALL_COMPLETE);
	}

	del_timer_sync(&call->resend_timer);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->lifetimer);
	call->deadspan.expires = jiffies + rxrpc_dead_call_timeout * HZ;
	add_timer(&call->deadspan);

	_leave("");
}
/*
 * handle a dead call being ready for reaping
 * - the put here drops the socket's ref that rxrpc_release_call() passed to
 *   the death timer
 */
static void rxrpc_dead_call_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	write_lock_bh(&call->state_lock);
	call->state = RXRPC_CALL_DEAD;
	write_unlock_bh(&call->state_lock);
	rxrpc_put_call(call);
}
/*
 * mark a call as to be released, aborting it if it's still in progress
 * - called with softirqs disabled
 */
static void rxrpc_mark_call_released(struct rxrpc_call *call)
{
	bool sched;

	write_lock(&call->state_lock);
	if (call->state < RXRPC_CALL_DEAD) {
		sched = false;
		if (call->state < RXRPC_CALL_COMPLETE) {
			_debug("abort call %p", call);
			call->state = RXRPC_CALL_LOCALLY_ABORTED;
			call->abort_code = RX_CALL_DEAD;
			if (!test_and_set_bit(RXRPC_CALL_ABORT, &call->events))
				sched = true;
		}
		if (!test_and_set_bit(RXRPC_CALL_RELEASE, &call->events))
			sched = true;
		if (sched)
			rxrpc_queue_call(call);
	}
	write_unlock(&call->state_lock);
}
/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p", rx);

	read_lock_bh(&rx->call_lock);

	/* mark all the calls as no longer wanting incoming packets */
	for (p = rb_first(&rx->calls); p; p = rb_next(p)) {
		call = rb_entry(p, struct rxrpc_call, sock_node);
		rxrpc_mark_call_released(call);
	}

	/* kill the not-yet-accepted incoming calls */
	list_for_each_entry(call, &rx->secureq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	list_for_each_entry(call, &rx->acceptq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	read_unlock_bh(&rx->call_lock);
	_leave("");
}
/*
 * release a call
 */
void __rxrpc_put_call(struct rxrpc_call *call)
{
	ASSERT(call != NULL);

	_enter("%p{u=%d}", call, atomic_read(&call->usage));

	ASSERTCMP(atomic_read(&call->usage), >, 0);

	if (atomic_dec_and_test(&call->usage)) {
		_debug("call %d dead", call->debug_id);
		ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
		rxrpc_queue_work(&call->destroyer);
	}
	_leave("");
}
/*
 * clean up a call
 */
static void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	_net("DESTROY CALL %d", call->debug_id);

	ASSERT(call->socket);

	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	del_timer_sync(&call->lifetimer);
	del_timer_sync(&call->deadspan);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->resend_timer);

	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
	ASSERTCMP(call->events, ==, 0);
	if (work_pending(&call->processor)) {
		/* the call processor is still queued; requeue the destroyer
		 * so destruction retries after the processor has run */
		_debug("defer destroy");
		rxrpc_queue_work(&call->destroyer);
		return;
	}

	if (call->conn) {
		spin_lock(&call->conn->trans->peer->lock);
		list_del(&call->error_link);
		spin_unlock(&call->conn->trans->peer->lock);

		write_lock_bh(&call->conn->lock);
		rb_erase(&call->conn_node, &call->conn->calls);
		write_unlock_bh(&call->conn->lock);
		rxrpc_put_connection(call->conn);
	}

	if (call->acks_window) {
		_debug("kill Tx window %d",
		       CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz));
		smp_mb();
		while (CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz) > 0) {
			struct rxrpc_skb_priv *sp;
			unsigned long _skb;

			/* bit 0 of each window slot is used as a flag, so
			 * mask it off to recover the sk_buff pointer */
			_skb = call->acks_window[call->acks_tail] & ~1;
			sp = rxrpc_skb((struct sk_buff *) _skb);
			_debug("+++ clear Tx %u", ntohl(sp->hdr.seq));
			rxrpc_free_skb((struct sk_buff *) _skb);
			call->acks_tail =
				(call->acks_tail + 1) & (call->acks_winsz - 1);
		}

		kfree(call->acks_window);
	}

	rxrpc_free_skb(call->tx_pending);

	rxrpc_purge_queue(&call->rx_queue);
	ASSERT(skb_queue_empty(&call->rx_oos_queue));
	sock_put(&call->socket->sk);
	kmem_cache_free(rxrpc_call_jar, call);
}
/*
 * destroy a call
 */
static void rxrpc_destroy_call(struct work_struct *work)
{
	struct rxrpc_call *call =
		container_of(work, struct rxrpc_call, destroyer);

	_enter("%p{%d,%d,%p}",
	       call, atomic_read(&call->usage), call->channel, call->conn);

	ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);

	rxrpc_cleanup_call(call);
	_leave("");
}
/*
 * preemptively destroy all the call records from a transport endpoint rather
 * than waiting for them to time out
 */
void __exit rxrpc_destroy_all_calls(void)
{
	struct rxrpc_call *call;

	_enter("");
	write_lock_bh(&rxrpc_call_lock);

	while (!list_empty(&rxrpc_calls)) {
		call = list_entry(rxrpc_calls.next, struct rxrpc_call, link);
		_debug("Zapping call %p", call);

		list_del_init(&call->link);

		switch (atomic_read(&call->usage)) {
		case 0:
			ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
			break;
		case 1:
			if (del_timer_sync(&call->deadspan) != 0 &&
			    call->state != RXRPC_CALL_DEAD)
				rxrpc_dead_call_expired((unsigned long) call);
			if (call->state != RXRPC_CALL_DEAD)
				break;
			/* fall through */
		default:
			printk(KERN_ERR "RXRPC:"
			       " Call %p still in use (%d,%d,%s,%lx,%lx)!\n",
			       call, atomic_read(&call->usage),
			       atomic_read(&call->ackr_not_idle),
			       rxrpc_call_states[call->state],
			       call->flags, call->events);
			if (!skb_queue_empty(&call->rx_queue))
				printk(KERN_ERR "RXRPC: Rx queue occupied\n");
			if (!skb_queue_empty(&call->rx_oos_queue))
				printk(KERN_ERR "RXRPC: OOS queue occupied\n");
			break;
		}

		write_unlock_bh(&rxrpc_call_lock);
		cond_resched();
		write_lock_bh(&rxrpc_call_lock);
	}

	write_unlock_bh(&rxrpc_call_lock);
	_leave("");
}
/*
 * handle call lifetime being exceeded
 */
static void rxrpc_call_life_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	_enter("{%d}", call->debug_id);
	read_lock_bh(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE) {
		set_bit(RXRPC_CALL_LIFE_TIMER, &call->events);
		rxrpc_queue_call(call);
	}
	read_unlock_bh(&call->state_lock);
}
/*
 * handle resend timer expiry
 */
static void rxrpc_resend_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	read_lock_bh(&call->state_lock);
	clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
	if (call->state < RXRPC_CALL_COMPLETE &&
	    !test_and_set_bit(RXRPC_CALL_RESEND_TIMER, &call->events))
		rxrpc_queue_call(call);
	read_unlock_bh(&call->state_lock);
}
/*
 * handle ACK timer expiry
 */
static void rxrpc_ack_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	read_lock_bh(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE &&
	    !test_and_set_bit(RXRPC_CALL_ACK, &call->events))
		rxrpc_queue_call(call);
	read_unlock_bh(&call->state_lock);
}