/* net/rxrpc/ar-connection.c */
/* RxRPC virtual connection handler
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/crypto.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/* forward declaration: the reaper runs off the shared rxrpc workqueue */
static void rxrpc_connection_reaper(struct work_struct *work);

/* list of all extant connections, guarded by rxrpc_connection_lock */
LIST_HEAD(rxrpc_connections);
DEFINE_RWLOCK(rxrpc_connection_lock);

/* time in seconds an unused (zero-usage) connection lingers before being
 * reaped; zeroed at module unload to force immediate reaping */
static unsigned long rxrpc_connection_timeout = 10 * 60;

static DECLARE_DELAYED_WORK(rxrpc_connection_reap, rxrpc_connection_reaper);
  23. /*
  24. * allocate a new client connection bundle
  25. */
  26. static struct rxrpc_conn_bundle *rxrpc_alloc_bundle(gfp_t gfp)
  27. {
  28. struct rxrpc_conn_bundle *bundle;
  29. _enter("");
  30. bundle = kzalloc(sizeof(struct rxrpc_conn_bundle), gfp);
  31. if (bundle) {
  32. INIT_LIST_HEAD(&bundle->unused_conns);
  33. INIT_LIST_HEAD(&bundle->avail_conns);
  34. INIT_LIST_HEAD(&bundle->busy_conns);
  35. init_waitqueue_head(&bundle->chanwait);
  36. atomic_set(&bundle->usage, 1);
  37. }
  38. _leave(" = %p", bundle);
  39. return bundle;
  40. }
  41. /*
  42. * compare bundle parameters with what we're looking for
  43. * - return -ve, 0 or +ve
  44. */
  45. static inline
  46. int rxrpc_cmp_bundle(const struct rxrpc_conn_bundle *bundle,
  47. struct key *key, __be16 service_id)
  48. {
  49. return (bundle->service_id - service_id) ?:
  50. ((unsigned long) bundle->key - (unsigned long) key);
  51. }
  52. /*
  53. * get bundle of client connections that a client socket can make use of
  54. */
  55. struct rxrpc_conn_bundle *rxrpc_get_bundle(struct rxrpc_sock *rx,
  56. struct rxrpc_transport *trans,
  57. struct key *key,
  58. __be16 service_id,
  59. gfp_t gfp)
  60. {
  61. struct rxrpc_conn_bundle *bundle, *candidate;
  62. struct rb_node *p, *parent, **pp;
  63. _enter("%p{%x},%x,%hx,",
  64. rx, key_serial(key), trans->debug_id, ntohs(service_id));
  65. if (rx->trans == trans && rx->bundle) {
  66. atomic_inc(&rx->bundle->usage);
  67. return rx->bundle;
  68. }
  69. /* search the extant bundles first for one that matches the specified
  70. * user ID */
  71. spin_lock(&trans->client_lock);
  72. p = trans->bundles.rb_node;
  73. while (p) {
  74. bundle = rb_entry(p, struct rxrpc_conn_bundle, node);
  75. if (rxrpc_cmp_bundle(bundle, key, service_id) < 0)
  76. p = p->rb_left;
  77. else if (rxrpc_cmp_bundle(bundle, key, service_id) > 0)
  78. p = p->rb_right;
  79. else
  80. goto found_extant_bundle;
  81. }
  82. spin_unlock(&trans->client_lock);
  83. /* not yet present - create a candidate for a new record and then
  84. * redo the search */
  85. candidate = rxrpc_alloc_bundle(gfp);
  86. if (!candidate) {
  87. _leave(" = -ENOMEM");
  88. return ERR_PTR(-ENOMEM);
  89. }
  90. candidate->key = key_get(key);
  91. candidate->service_id = service_id;
  92. spin_lock(&trans->client_lock);
  93. pp = &trans->bundles.rb_node;
  94. parent = NULL;
  95. while (*pp) {
  96. parent = *pp;
  97. bundle = rb_entry(parent, struct rxrpc_conn_bundle, node);
  98. if (rxrpc_cmp_bundle(bundle, key, service_id) < 0)
  99. pp = &(*pp)->rb_left;
  100. else if (rxrpc_cmp_bundle(bundle, key, service_id) > 0)
  101. pp = &(*pp)->rb_right;
  102. else
  103. goto found_extant_second;
  104. }
  105. /* second search also failed; add the new bundle */
  106. bundle = candidate;
  107. candidate = NULL;
  108. rb_link_node(&bundle->node, parent, pp);
  109. rb_insert_color(&bundle->node, &trans->bundles);
  110. spin_unlock(&trans->client_lock);
  111. _net("BUNDLE new on trans %d", trans->debug_id);
  112. if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) {
  113. atomic_inc(&bundle->usage);
  114. rx->bundle = bundle;
  115. }
  116. _leave(" = %p [new]", bundle);
  117. return bundle;
  118. /* we found the bundle in the list immediately */
  119. found_extant_bundle:
  120. atomic_inc(&bundle->usage);
  121. spin_unlock(&trans->client_lock);
  122. _net("BUNDLE old on trans %d", trans->debug_id);
  123. if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) {
  124. atomic_inc(&bundle->usage);
  125. rx->bundle = bundle;
  126. }
  127. _leave(" = %p [extant %d]", bundle, atomic_read(&bundle->usage));
  128. return bundle;
  129. /* we found the bundle on the second time through the list */
  130. found_extant_second:
  131. atomic_inc(&bundle->usage);
  132. spin_unlock(&trans->client_lock);
  133. kfree(candidate);
  134. _net("BUNDLE old2 on trans %d", trans->debug_id);
  135. if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) {
  136. atomic_inc(&bundle->usage);
  137. rx->bundle = bundle;
  138. }
  139. _leave(" = %p [second %d]", bundle, atomic_read(&bundle->usage));
  140. return bundle;
  141. }
/*
 * Release a reference on a bundle.
 *
 * When the last reference goes, the bundle is unlinked from the
 * transport's rb-tree under trans->client_lock and freed; by then no
 * connections may remain attached to it (asserted below).
 */
void rxrpc_put_bundle(struct rxrpc_transport *trans,
		      struct rxrpc_conn_bundle *bundle)
{
	_enter("%p,%p{%d}",trans, bundle, atomic_read(&bundle->usage));

	/* only take the lock if the count actually hits zero, so the
	 * common-case put is lock-free */
	if (atomic_dec_and_lock(&bundle->usage, &trans->client_lock)) {
		_debug("Destroy bundle");
		rb_erase(&bundle->node, &trans->bundles);
		spin_unlock(&trans->client_lock);
		ASSERT(list_empty(&bundle->unused_conns));
		ASSERT(list_empty(&bundle->avail_conns));
		ASSERT(list_empty(&bundle->busy_conns));
		ASSERTCMP(bundle->num_conns, ==, 0);
		key_put(bundle->key);
		kfree(bundle);
	}

	_leave("");
}
/*
 * Allocate a new connection record.
 *
 * The connection starts with a usage count of one and a fresh debug ID.
 * Returns NULL on allocation failure - NOT an ERR_PTR; callers must test
 * with !conn, not IS_ERR().
 */
static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
{
	struct rxrpc_connection *conn;

	_enter("");

	conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
	if (conn) {
		INIT_WORK(&conn->processor, &rxrpc_process_connection);
		INIT_LIST_HEAD(&conn->bundle_link);
		conn->calls = RB_ROOT;
		skb_queue_head_init(&conn->rx_queue);
		rwlock_init(&conn->lock);
		spin_lock_init(&conn->state_lock);
		atomic_set(&conn->usage, 1);
		conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
		conn->avail_calls = RXRPC_MAXCALLS;
		conn->size_align = 4;
		conn->header_size = sizeof(struct rxrpc_header);
	}

	_leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
	return conn;
}
/*
 * Assign a connection ID to a client connection and add it to the
 * transport's connection lookup tree.
 * - called with the transport's client lock held
 *
 * The transport keeps a rolling ID counter; the tree is keyed on
 * (epoch, connection ID), so if the proposed ID collides we can walk
 * forward from the clashing node to the first gap in the ID sequence.
 */
static void rxrpc_assign_connection_id(struct rxrpc_connection *conn)
{
	struct rxrpc_connection *xconn;
	struct rb_node *parent, **p;
	__be32 epoch;
	u32 real_conn_id;

	_enter("");

	epoch = conn->epoch;

	write_lock_bh(&conn->trans->conn_lock);

	/* advance the counter, skipping the reserved low values on wrap */
	conn->trans->conn_idcounter += RXRPC_CID_INC;
	if (conn->trans->conn_idcounter < RXRPC_CID_INC)
		conn->trans->conn_idcounter = RXRPC_CID_INC;
	real_conn_id = conn->trans->conn_idcounter;

attempt_insertion:
	parent = NULL;
	p = &conn->trans->client_conns.rb_node;

	while (*p) {
		parent = *p;
		xconn = rb_entry(parent, struct rxrpc_connection, node);

		if (epoch < xconn->epoch)
			p = &(*p)->rb_left;
		else if (epoch > xconn->epoch)
			p = &(*p)->rb_right;
		else if (real_conn_id < xconn->real_conn_id)
			p = &(*p)->rb_left;
		else if (real_conn_id > xconn->real_conn_id)
			p = &(*p)->rb_right;
		else
			goto id_exists;
	}

	/* we've found a suitable hole - arrange for this connection to occupy
	 * it */
	rb_link_node(&conn->node, parent, p);
	rb_insert_color(&conn->node, &conn->trans->client_conns);

	conn->real_conn_id = real_conn_id;
	conn->cid = htonl(real_conn_id);
	write_unlock_bh(&conn->trans->conn_lock);
	_leave(" [CONNID %x CID %x]", real_conn_id, ntohl(conn->cid));
	return;

	/* we found a connection with the proposed ID - walk the tree from that
	 * point looking for the next unused ID */
id_exists:
	for (;;) {
		real_conn_id += RXRPC_CID_INC;
		if (real_conn_id < RXRPC_CID_INC) {
			/* the ID space wrapped: restart from the bottom and
			 * redo the whole search */
			real_conn_id = RXRPC_CID_INC;
			conn->trans->conn_idcounter = real_conn_id;
			goto attempt_insertion;
		}

		/* in-order successor; every node from here on has a key >=
		 * the clashing one, so the first gap found is usable */
		parent = rb_next(parent);
		if (!parent)
			goto attempt_insertion;

		xconn = rb_entry(parent, struct rxrpc_connection, node);
		if (epoch < xconn->epoch ||
		    real_conn_id < xconn->real_conn_id)
			goto attempt_insertion;
	}
}
  249. /*
  250. * add a call to a connection's call-by-ID tree
  251. */
  252. static void rxrpc_add_call_ID_to_conn(struct rxrpc_connection *conn,
  253. struct rxrpc_call *call)
  254. {
  255. struct rxrpc_call *xcall;
  256. struct rb_node *parent, **p;
  257. __be32 call_id;
  258. write_lock_bh(&conn->lock);
  259. call_id = call->call_id;
  260. p = &conn->calls.rb_node;
  261. parent = NULL;
  262. while (*p) {
  263. parent = *p;
  264. xcall = rb_entry(parent, struct rxrpc_call, conn_node);
  265. if (call_id < xcall->call_id)
  266. p = &(*p)->rb_left;
  267. else if (call_id > xcall->call_id)
  268. p = &(*p)->rb_right;
  269. else
  270. BUG();
  271. }
  272. rb_link_node(&call->conn_node, parent, p);
  273. rb_insert_color(&call->conn_node, &conn->calls);
  274. write_unlock_bh(&conn->lock);
  275. }
  276. /*
  277. * connect a call on an exclusive connection
  278. */
  279. static int rxrpc_connect_exclusive(struct rxrpc_sock *rx,
  280. struct rxrpc_transport *trans,
  281. __be16 service_id,
  282. struct rxrpc_call *call,
  283. gfp_t gfp)
  284. {
  285. struct rxrpc_connection *conn;
  286. int chan, ret;
  287. _enter("");
  288. conn = rx->conn;
  289. if (!conn) {
  290. /* not yet present - create a candidate for a new connection
  291. * and then redo the check */
  292. conn = rxrpc_alloc_connection(gfp);
  293. if (IS_ERR(conn)) {
  294. _leave(" = %ld", PTR_ERR(conn));
  295. return PTR_ERR(conn);
  296. }
  297. conn->trans = trans;
  298. conn->bundle = NULL;
  299. conn->service_id = service_id;
  300. conn->epoch = rxrpc_epoch;
  301. conn->in_clientflag = 0;
  302. conn->out_clientflag = RXRPC_CLIENT_INITIATED;
  303. conn->cid = 0;
  304. conn->state = RXRPC_CONN_CLIENT;
  305. conn->avail_calls = RXRPC_MAXCALLS - 1;
  306. conn->security_level = rx->min_sec_level;
  307. conn->key = key_get(rx->key);
  308. ret = rxrpc_init_client_conn_security(conn);
  309. if (ret < 0) {
  310. key_put(conn->key);
  311. kfree(conn);
  312. _leave(" = %d [key]", ret);
  313. return ret;
  314. }
  315. write_lock_bh(&rxrpc_connection_lock);
  316. list_add_tail(&conn->link, &rxrpc_connections);
  317. write_unlock_bh(&rxrpc_connection_lock);
  318. spin_lock(&trans->client_lock);
  319. atomic_inc(&trans->usage);
  320. _net("CONNECT EXCL new %d on TRANS %d",
  321. conn->debug_id, conn->trans->debug_id);
  322. rxrpc_assign_connection_id(conn);
  323. rx->conn = conn;
  324. }
  325. /* we've got a connection with a free channel and we can now attach the
  326. * call to it
  327. * - we're holding the transport's client lock
  328. * - we're holding a reference on the connection
  329. */
  330. for (chan = 0; chan < RXRPC_MAXCALLS; chan++)
  331. if (!conn->channels[chan])
  332. goto found_channel;
  333. goto no_free_channels;
  334. found_channel:
  335. atomic_inc(&conn->usage);
  336. conn->channels[chan] = call;
  337. call->conn = conn;
  338. call->channel = chan;
  339. call->cid = conn->cid | htonl(chan);
  340. call->call_id = htonl(++conn->call_counter);
  341. _net("CONNECT client on conn %d chan %d as call %x",
  342. conn->debug_id, chan, ntohl(call->call_id));
  343. spin_unlock(&trans->client_lock);
  344. rxrpc_add_call_ID_to_conn(conn, call);
  345. _leave(" = 0");
  346. return 0;
  347. no_free_channels:
  348. spin_unlock(&trans->client_lock);
  349. _leave(" = -ENOSR");
  350. return -ENOSR;
  351. }
  352. /*
  353. * find a connection for a call
  354. * - called in process context with IRQs enabled
  355. */
  356. int rxrpc_connect_call(struct rxrpc_sock *rx,
  357. struct rxrpc_transport *trans,
  358. struct rxrpc_conn_bundle *bundle,
  359. struct rxrpc_call *call,
  360. gfp_t gfp)
  361. {
  362. struct rxrpc_connection *conn, *candidate;
  363. int chan, ret;
  364. DECLARE_WAITQUEUE(myself, current);
  365. _enter("%p,%lx,", rx, call->user_call_ID);
  366. if (test_bit(RXRPC_SOCK_EXCLUSIVE_CONN, &rx->flags))
  367. return rxrpc_connect_exclusive(rx, trans, bundle->service_id,
  368. call, gfp);
  369. spin_lock(&trans->client_lock);
  370. for (;;) {
  371. /* see if the bundle has a call slot available */
  372. if (!list_empty(&bundle->avail_conns)) {
  373. _debug("avail");
  374. conn = list_entry(bundle->avail_conns.next,
  375. struct rxrpc_connection,
  376. bundle_link);
  377. if (--conn->avail_calls == 0)
  378. list_move(&conn->bundle_link,
  379. &bundle->busy_conns);
  380. ASSERTCMP(conn->avail_calls, <, RXRPC_MAXCALLS);
  381. ASSERT(conn->channels[0] == NULL ||
  382. conn->channels[1] == NULL ||
  383. conn->channels[2] == NULL ||
  384. conn->channels[3] == NULL);
  385. atomic_inc(&conn->usage);
  386. break;
  387. }
  388. if (!list_empty(&bundle->unused_conns)) {
  389. _debug("unused");
  390. conn = list_entry(bundle->unused_conns.next,
  391. struct rxrpc_connection,
  392. bundle_link);
  393. ASSERTCMP(conn->avail_calls, ==, RXRPC_MAXCALLS);
  394. conn->avail_calls = RXRPC_MAXCALLS - 1;
  395. ASSERT(conn->channels[0] == NULL &&
  396. conn->channels[1] == NULL &&
  397. conn->channels[2] == NULL &&
  398. conn->channels[3] == NULL);
  399. atomic_inc(&conn->usage);
  400. list_move(&conn->bundle_link, &bundle->avail_conns);
  401. break;
  402. }
  403. /* need to allocate a new connection */
  404. _debug("get new conn [%d]", bundle->num_conns);
  405. spin_unlock(&trans->client_lock);
  406. if (signal_pending(current))
  407. goto interrupted;
  408. if (bundle->num_conns >= 20) {
  409. _debug("too many conns");
  410. if (!(gfp & __GFP_WAIT)) {
  411. _leave(" = -EAGAIN");
  412. return -EAGAIN;
  413. }
  414. add_wait_queue(&bundle->chanwait, &myself);
  415. for (;;) {
  416. set_current_state(TASK_INTERRUPTIBLE);
  417. if (bundle->num_conns < 20 ||
  418. !list_empty(&bundle->unused_conns) ||
  419. !list_empty(&bundle->avail_conns))
  420. break;
  421. if (signal_pending(current))
  422. goto interrupted_dequeue;
  423. schedule();
  424. }
  425. remove_wait_queue(&bundle->chanwait, &myself);
  426. __set_current_state(TASK_RUNNING);
  427. spin_lock(&trans->client_lock);
  428. continue;
  429. }
  430. /* not yet present - create a candidate for a new connection and then
  431. * redo the check */
  432. candidate = rxrpc_alloc_connection(gfp);
  433. if (IS_ERR(candidate)) {
  434. _leave(" = %ld", PTR_ERR(candidate));
  435. return PTR_ERR(candidate);
  436. }
  437. candidate->trans = trans;
  438. candidate->bundle = bundle;
  439. candidate->service_id = bundle->service_id;
  440. candidate->epoch = rxrpc_epoch;
  441. candidate->in_clientflag = 0;
  442. candidate->out_clientflag = RXRPC_CLIENT_INITIATED;
  443. candidate->cid = 0;
  444. candidate->state = RXRPC_CONN_CLIENT;
  445. candidate->avail_calls = RXRPC_MAXCALLS;
  446. candidate->security_level = rx->min_sec_level;
  447. candidate->key = key_get(bundle->key);
  448. ret = rxrpc_init_client_conn_security(candidate);
  449. if (ret < 0) {
  450. key_put(candidate->key);
  451. kfree(candidate);
  452. _leave(" = %d [key]", ret);
  453. return ret;
  454. }
  455. write_lock_bh(&rxrpc_connection_lock);
  456. list_add_tail(&candidate->link, &rxrpc_connections);
  457. write_unlock_bh(&rxrpc_connection_lock);
  458. spin_lock(&trans->client_lock);
  459. list_add(&candidate->bundle_link, &bundle->unused_conns);
  460. bundle->num_conns++;
  461. atomic_inc(&bundle->usage);
  462. atomic_inc(&trans->usage);
  463. _net("CONNECT new %d on TRANS %d",
  464. candidate->debug_id, candidate->trans->debug_id);
  465. rxrpc_assign_connection_id(candidate);
  466. if (candidate->security)
  467. candidate->security->prime_packet_security(candidate);
  468. /* leave the candidate lurking in zombie mode attached to the
  469. * bundle until we're ready for it */
  470. rxrpc_put_connection(candidate);
  471. candidate = NULL;
  472. }
  473. /* we've got a connection with a free channel and we can now attach the
  474. * call to it
  475. * - we're holding the transport's client lock
  476. * - we're holding a reference on the connection
  477. * - we're holding a reference on the bundle
  478. */
  479. for (chan = 0; chan < RXRPC_MAXCALLS; chan++)
  480. if (!conn->channels[chan])
  481. goto found_channel;
  482. ASSERT(conn->channels[0] == NULL ||
  483. conn->channels[1] == NULL ||
  484. conn->channels[2] == NULL ||
  485. conn->channels[3] == NULL);
  486. BUG();
  487. found_channel:
  488. conn->channels[chan] = call;
  489. call->conn = conn;
  490. call->channel = chan;
  491. call->cid = conn->cid | htonl(chan);
  492. call->call_id = htonl(++conn->call_counter);
  493. _net("CONNECT client on conn %d chan %d as call %x",
  494. conn->debug_id, chan, ntohl(call->call_id));
  495. ASSERTCMP(conn->avail_calls, <, RXRPC_MAXCALLS);
  496. spin_unlock(&trans->client_lock);
  497. rxrpc_add_call_ID_to_conn(conn, call);
  498. _leave(" = 0");
  499. return 0;
  500. interrupted_dequeue:
  501. remove_wait_queue(&bundle->chanwait, &myself);
  502. __set_current_state(TASK_RUNNING);
  503. interrupted:
  504. _leave(" = -ERESTARTSYS");
  505. return -ERESTARTSYS;
  506. }
/*
 * Get a record of an incoming (server) connection.
 *
 * Looks the connection up in the transport's server tree by
 * (epoch, connection ID); if not found, allocates a record from the
 * packet header's details and inserts it, re-walking the tree under the
 * write lock to close the race with a concurrent inserter.
 *
 * Returns the connection with a usage ref held, ERR_PTR(-ENOMEM) on
 * allocation failure, or ERR_PTR(-EKEYREJECTED) if an existing connection
 * has a different security index to the packet.
 */
struct rxrpc_connection *
rxrpc_incoming_connection(struct rxrpc_transport *trans,
			  struct rxrpc_header *hdr,
			  gfp_t gfp)
{
	struct rxrpc_connection *conn, *candidate = NULL;
	struct rb_node *p, **pp;
	const char *new = "old";
	__be32 epoch;
	u32 conn_id;

	_enter("");

	ASSERT(hdr->flags & RXRPC_CLIENT_INITIATED);

	epoch = hdr->epoch;
	conn_id = ntohl(hdr->cid) & RXRPC_CIDMASK;

	/* search the connection list first */
	read_lock_bh(&trans->conn_lock);

	p = trans->server_conns.rb_node;
	while (p) {
		conn = rb_entry(p, struct rxrpc_connection, node);

		_debug("maybe %x", conn->real_conn_id);

		if (epoch < conn->epoch)
			p = p->rb_left;
		else if (epoch > conn->epoch)
			p = p->rb_right;
		else if (conn_id < conn->real_conn_id)
			p = p->rb_left;
		else if (conn_id > conn->real_conn_id)
			p = p->rb_right;
		else
			goto found_extant_connection;
	}
	read_unlock_bh(&trans->conn_lock);

	/* not yet present - create a candidate for a new record and then
	 * redo the search */
	candidate = rxrpc_alloc_connection(gfp);
	if (!candidate) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	candidate->trans = trans;
	candidate->epoch = hdr->epoch;
	candidate->cid = hdr->cid & cpu_to_be32(RXRPC_CIDMASK);
	candidate->service_id = hdr->serviceId;
	candidate->security_ix = hdr->securityIndex;
	candidate->in_clientflag = RXRPC_CLIENT_INITIATED;
	candidate->out_clientflag = 0;
	candidate->real_conn_id = conn_id;
	candidate->state = RXRPC_CONN_SERVER;
	/* a non-zero service ID means security negotiation is still to
	 * come, so the connection starts unsecured */
	if (candidate->service_id)
		candidate->state = RXRPC_CONN_SERVER_UNSECURED;

	write_lock_bh(&trans->conn_lock);

	/* second walk, this time under the write lock; p doubles as the
	 * parent for the eventual insertion */
	pp = &trans->server_conns.rb_node;
	p = NULL;
	while (*pp) {
		p = *pp;
		conn = rb_entry(p, struct rxrpc_connection, node);

		if (epoch < conn->epoch)
			pp = &(*pp)->rb_left;
		else if (epoch > conn->epoch)
			pp = &(*pp)->rb_right;
		else if (conn_id < conn->real_conn_id)
			pp = &(*pp)->rb_left;
		else if (conn_id > conn->real_conn_id)
			pp = &(*pp)->rb_right;
		else
			goto found_extant_second;
	}

	/* we can now add the new candidate to the list */
	conn = candidate;
	candidate = NULL;
	rb_link_node(&conn->node, p, pp);
	rb_insert_color(&conn->node, &trans->server_conns);
	atomic_inc(&conn->trans->usage);

	write_unlock_bh(&trans->conn_lock);

	write_lock_bh(&rxrpc_connection_lock);
	list_add_tail(&conn->link, &rxrpc_connections);
	write_unlock_bh(&rxrpc_connection_lock);

	new = "new";

success:
	_net("CONNECTION %s %d {%x}", new, conn->debug_id, conn->real_conn_id);

	_leave(" = %p {u=%d}", conn, atomic_read(&conn->usage));
	return conn;

	/* we found the connection in the list immediately */
found_extant_connection:
	if (hdr->securityIndex != conn->security_ix) {
		read_unlock_bh(&trans->conn_lock);
		goto security_mismatch;		/* candidate is NULL here */
	}
	atomic_inc(&conn->usage);
	read_unlock_bh(&trans->conn_lock);
	goto success;

	/* we found the connection on the second time through the list */
found_extant_second:
	if (hdr->securityIndex != conn->security_ix) {
		write_unlock_bh(&trans->conn_lock);
		goto security_mismatch;
	}
	atomic_inc(&conn->usage);
	write_unlock_bh(&trans->conn_lock);
	kfree(candidate);
	goto success;

security_mismatch:
	kfree(candidate);	/* no-op when candidate is still NULL */
	_leave(" = -EKEYREJECTED");
	return ERR_PTR(-EKEYREJECTED);
}
  616. /*
  617. * find a connection based on transport and RxRPC connection ID for an incoming
  618. * packet
  619. */
  620. struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *trans,
  621. struct rxrpc_header *hdr)
  622. {
  623. struct rxrpc_connection *conn;
  624. struct rb_node *p;
  625. __be32 epoch;
  626. u32 conn_id;
  627. _enter(",{%x,%x}", ntohl(hdr->cid), hdr->flags);
  628. read_lock_bh(&trans->conn_lock);
  629. conn_id = ntohl(hdr->cid) & RXRPC_CIDMASK;
  630. epoch = hdr->epoch;
  631. if (hdr->flags & RXRPC_CLIENT_INITIATED)
  632. p = trans->server_conns.rb_node;
  633. else
  634. p = trans->client_conns.rb_node;
  635. while (p) {
  636. conn = rb_entry(p, struct rxrpc_connection, node);
  637. _debug("maybe %x", conn->real_conn_id);
  638. if (epoch < conn->epoch)
  639. p = p->rb_left;
  640. else if (epoch > conn->epoch)
  641. p = p->rb_right;
  642. else if (conn_id < conn->real_conn_id)
  643. p = p->rb_left;
  644. else if (conn_id > conn->real_conn_id)
  645. p = p->rb_right;
  646. else
  647. goto found;
  648. }
  649. read_unlock_bh(&trans->conn_lock);
  650. _leave(" = NULL");
  651. return NULL;
  652. found:
  653. atomic_inc(&conn->usage);
  654. read_unlock_bh(&trans->conn_lock);
  655. _leave(" = %p", conn);
  656. return conn;
  657. }
/*
 * Release a reference on a virtual connection.
 *
 * Connections are not freed here: when the count hits zero the reaper is
 * kicked and the connection lingers (and may be resurrected) until
 * rxrpc_connection_timeout seconds after the recorded put time.
 */
void rxrpc_put_connection(struct rxrpc_connection *conn)
{
	_enter("%p{u=%d,d=%d}",
	       conn, atomic_read(&conn->usage), conn->debug_id);

	ASSERTCMP(atomic_read(&conn->usage), >, 0);

	/* NOTE(review): put_time is written on every put, before the
	 * decrement and without a lock; concurrent puts can race, which
	 * only skews the reap deadline - confirm this is intentional */
	conn->put_time = get_seconds();
	if (atomic_dec_and_test(&conn->usage)) {
		_debug("zombie");
		rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
	}

	_leave("");
}
/*
 * Destroy a virtual connection whose usage count has reached zero.
 *
 * Called from the reaper after the connection has been unlinked from all
 * lookup structures.  Drops the refs the connection holds on its bundle
 * and transport, purges queued packets and frees the record; all calls
 * must already be gone (asserted below).
 */
static void rxrpc_destroy_connection(struct rxrpc_connection *conn)
{
	_enter("%p{%d}", conn, atomic_read(&conn->usage));

	ASSERTCMP(atomic_read(&conn->usage), ==, 0);

	_net("DESTROY CONN %d", conn->debug_id);

	if (conn->bundle)
		rxrpc_put_bundle(conn->trans, conn->bundle);

	ASSERT(RB_EMPTY_ROOT(&conn->calls));
	rxrpc_purge_queue(&conn->rx_queue);

	rxrpc_clear_conn_security(conn);
	rxrpc_put_transport(conn->trans);
	kfree(conn);
	_leave("");
}
/*
 * Reap dead connections.
 *
 * Walks the global connection list and moves any connection whose usage
 * count has been zero for at least rxrpc_connection_timeout seconds onto
 * a local graveyard list, then destroys the gravestones outside all the
 * locks.  If some zero-usage connections are not yet old enough, the work
 * item is rescheduled for the earliest expiry time.
 */
static void rxrpc_connection_reaper(struct work_struct *work)
{
	struct rxrpc_connection *conn, *_p;
	unsigned long now, earliest, reap_time;

	LIST_HEAD(graveyard);

	_enter("");

	now = get_seconds();
	earliest = ULONG_MAX;

	write_lock_bh(&rxrpc_connection_lock);
	list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
		_debug("reap CONN %d { u=%d,t=%ld }",
		       conn->debug_id, atomic_read(&conn->usage),
		       (long) now - (long) conn->put_time);

		/* unlocked fast-path check; re-verified under the locks */
		if (likely(atomic_read(&conn->usage) > 0))
			continue;

		/* lock order: trans->client_lock, then trans->conn_lock -
		 * removal may touch both the bundle lists and a conn tree */
		spin_lock(&conn->trans->client_lock);
		write_lock(&conn->trans->conn_lock);
		reap_time = conn->put_time + rxrpc_connection_timeout;

		if (atomic_read(&conn->usage) > 0) {
			;	/* resurrected by a concurrent lookup */
		} else if (reap_time <= now) {
			list_move_tail(&conn->link, &graveyard);
			/* out_clientflag distinguishes which tree holds us */
			if (conn->out_clientflag)
				rb_erase(&conn->node,
					 &conn->trans->client_conns);
			else
				rb_erase(&conn->node,
					 &conn->trans->server_conns);
			if (conn->bundle) {
				list_del_init(&conn->bundle_link);
				conn->bundle->num_conns--;
			}

		} else if (reap_time < earliest) {
			earliest = reap_time;
		}

		write_unlock(&conn->trans->conn_lock);
		spin_unlock(&conn->trans->client_lock);
	}
	write_unlock_bh(&rxrpc_connection_lock);

	if (earliest != ULONG_MAX) {
		_debug("reschedule reaper %ld", (long) earliest - now);
		ASSERTCMP(earliest, >, now);
		rxrpc_queue_delayed_work(&rxrpc_connection_reap,
					 (earliest - now) * HZ);
	}

	/* then destroy all those pulled out */
	while (!list_empty(&graveyard)) {
		conn = list_entry(graveyard.next, struct rxrpc_connection,
				  link);
		list_del_init(&conn->link);

		ASSERTCMP(atomic_read(&conn->usage), ==, 0);
		rxrpc_destroy_connection(conn);
	}

	_leave("");
}
/*
 * Preemptively destroy all the connection records rather than waiting for
 * them to time out - called on module unload.
 */
void __exit rxrpc_destroy_all_connections(void)
{
	_enter("");

	/* zero the linger timeout so every zombie connection is immediately
	 * reapable, then run the reaper at once */
	rxrpc_connection_timeout = 0;

	cancel_delayed_work(&rxrpc_connection_reap);
	rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);

	_leave("");
}