af_rxrpc.c

/* AF_RXRPC implementation
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/key-type.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

MODULE_DESCRIPTION("RxRPC network protocol");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_RXRPC);

unsigned rxrpc_debug; // = RXRPC_DEBUG_KPROTO;
module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(debug, "RxRPC debugging mask");

static int sysctl_rxrpc_max_qlen __read_mostly = 10;

static struct proto rxrpc_proto;
static const struct proto_ops rxrpc_rpc_ops;

/* local epoch for detecting local-end reset */
__be32 rxrpc_epoch;

/* current debugging ID */
atomic_t rxrpc_debug_id;

/* count of skbs currently in use */
atomic_t rxrpc_n_skbs;

struct workqueue_struct *rxrpc_workqueue;

static void rxrpc_sock_destructor(struct sock *);

/*
 * see if an RxRPC socket is currently writable
 */
static inline int rxrpc_writable(struct sock *sk)
{
        return atomic_read(&sk->sk_wmem_alloc) < (size_t) sk->sk_sndbuf;
}

/*
 * wait for write bufferage to become available
 */
static void rxrpc_write_space(struct sock *sk)
{
        _enter("%p", sk);
        read_lock(&sk->sk_callback_lock);
        if (rxrpc_writable(sk)) {
                if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
                        wake_up_interruptible(sk->sk_sleep);
                sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
        }
        read_unlock(&sk->sk_callback_lock);
}

/*
 * validate an RxRPC address
 */
static int rxrpc_validate_address(struct rxrpc_sock *rx,
                                  struct sockaddr_rxrpc *srx,
                                  int len)
{
        if (len < sizeof(struct sockaddr_rxrpc))
                return -EINVAL;

        if (srx->srx_family != AF_RXRPC)
                return -EAFNOSUPPORT;

        if (srx->transport_type != SOCK_DGRAM)
                return -ESOCKTNOSUPPORT;

        len -= offsetof(struct sockaddr_rxrpc, transport);
        if (srx->transport_len < sizeof(sa_family_t) ||
            srx->transport_len > len)
                return -EINVAL;

        if (srx->transport.family != rx->proto)
                return -EAFNOSUPPORT;

        switch (srx->transport.family) {
        case AF_INET:
                _debug("INET: %x @ %u.%u.%u.%u",
                       ntohs(srx->transport.sin.sin_port),
                       NIPQUAD(srx->transport.sin.sin_addr));
                if (srx->transport_len > 8)
                        memset((void *)&srx->transport + 8, 0,
                               srx->transport_len - 8);
                break;

        case AF_INET6:
        default:
                return -EAFNOSUPPORT;
        }

        return 0;
}

/*
 * bind a local address to an RxRPC socket
 */
static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
        struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) saddr;
        struct sock *sk = sock->sk;
        struct rxrpc_local *local;
        struct rxrpc_sock *rx = rxrpc_sk(sk), *prx;
        __be16 service_id;
        int ret;

        _enter("%p,%p,%d", rx, saddr, len);

        ret = rxrpc_validate_address(rx, srx, len);
        if (ret < 0)
                goto error;

        lock_sock(&rx->sk);

        if (rx->sk.sk_state != RXRPC_UNCONNECTED) {
                ret = -EINVAL;
                goto error_unlock;
        }

        memcpy(&rx->srx, srx, sizeof(rx->srx));

        /* find a local transport endpoint if we don't have one already */
        local = rxrpc_lookup_local(&rx->srx);
        if (IS_ERR(local)) {
                ret = PTR_ERR(local);
                goto error_unlock;
        }

        rx->local = local;
        if (srx->srx_service) {
                service_id = htons(srx->srx_service);
                write_lock_bh(&local->services_lock);
                list_for_each_entry(prx, &local->services, listen_link) {
                        if (prx->service_id == service_id)
                                goto service_in_use;
                }

                rx->service_id = service_id;
                list_add_tail(&rx->listen_link, &local->services);
                write_unlock_bh(&local->services_lock);

                rx->sk.sk_state = RXRPC_SERVER_BOUND;
        } else {
                rx->sk.sk_state = RXRPC_CLIENT_BOUND;
        }

        release_sock(&rx->sk);
        _leave(" = 0");
        return 0;

service_in_use:
        ret = -EADDRINUSE;
        write_unlock_bh(&local->services_lock);
error_unlock:
        release_sock(&rx->sk);
error:
        _leave(" = %d", ret);
        return ret;
}
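/*
 * Illustrative sketch (not part of the original source): roughly how a
 * userspace caller might fill in the sockaddr_rxrpc that rxrpc_bind() and
 * rxrpc_validate_address() above expect for an IPv4 transport.  The field
 * names are taken from the checks above; the service number and port are
 * made-up example values.
 *
 *	struct sockaddr_rxrpc srx = {
 *		.srx_family     = AF_RXRPC,
 *		.srx_service    = 456,		// 0 for a pure client socket
 *		.transport_type = SOCK_DGRAM,
 *		.transport_len  = sizeof(srx.transport.sin),
 *		.transport.sin  = {
 *			.sin_family = AF_INET,
 *			.sin_port   = htons(7001),
 *			.sin_addr   = { .s_addr = htonl(INADDR_ANY) },
 *		},
 *	};
 *
 *	if (bind(fd, (struct sockaddr *) &srx, sizeof(srx)) < 0)
 *		perror("bind");
 */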
/*
 * set the number of pending calls permitted on a listening socket
 */
static int rxrpc_listen(struct socket *sock, int backlog)
{
        struct sock *sk = sock->sk;
        struct rxrpc_sock *rx = rxrpc_sk(sk);
        int ret;

        _enter("%p,%d", rx, backlog);

        lock_sock(&rx->sk);

        switch (rx->sk.sk_state) {
        case RXRPC_UNCONNECTED:
                ret = -EADDRNOTAVAIL;
                break;
        case RXRPC_CLIENT_BOUND:
        case RXRPC_CLIENT_CONNECTED:
        default:
                ret = -EBUSY;
                break;
        case RXRPC_SERVER_BOUND:
                ASSERT(rx->local != NULL);
                sk->sk_max_ack_backlog = backlog;
                rx->sk.sk_state = RXRPC_SERVER_LISTENING;
                ret = 0;
                break;
        }

        release_sock(&rx->sk);
        _leave(" = %d", ret);
        return ret;
}

/*
 * find a transport by address
 */
static struct rxrpc_transport *rxrpc_name_to_transport(struct socket *sock,
                                                       struct sockaddr *addr,
                                                       int addr_len, int flags,
                                                       gfp_t gfp)
{
        struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) addr;
        struct rxrpc_transport *trans;
        struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
        struct rxrpc_peer *peer;

        _enter("%p,%p,%d,%d", rx, addr, addr_len, flags);

        ASSERT(rx->local != NULL);
        ASSERT(rx->sk.sk_state > RXRPC_UNCONNECTED);

        if (rx->srx.transport_type != srx->transport_type)
                return ERR_PTR(-ESOCKTNOSUPPORT);
        if (rx->srx.transport.family != srx->transport.family)
                return ERR_PTR(-EAFNOSUPPORT);

        /* find a remote transport endpoint from the local one */
        peer = rxrpc_get_peer(srx, gfp);
        if (IS_ERR(peer))
                return ERR_CAST(peer);

        /* find a transport */
        trans = rxrpc_get_transport(rx->local, peer, gfp);
        rxrpc_put_peer(peer);
        _leave(" = %p", trans);
        return trans;
}

/**
 * rxrpc_kernel_begin_call - Allow a kernel service to begin a call
 * @sock: The socket on which to make the call
 * @srx: The address of the peer to contact (defaults to socket setting)
 * @key: The security context to use (defaults to socket setting)
 * @user_call_ID: The ID to use
 *
 * Allow a kernel service to begin a call on the nominated socket. This just
 * sets up all the internal tracking structures and allocates connection and
 * call IDs as appropriate. The call to be used is returned.
 *
 * The default socket destination address and security may be overridden by
 * supplying @srx and @key.
 */
struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
                                           struct sockaddr_rxrpc *srx,
                                           struct key *key,
                                           unsigned long user_call_ID,
                                           gfp_t gfp)
{
        struct rxrpc_conn_bundle *bundle;
        struct rxrpc_transport *trans;
        struct rxrpc_call *call;
        struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
        __be16 service_id;

        _enter(",,%x,%lx", key_serial(key), user_call_ID);

        lock_sock(&rx->sk);

        if (srx) {
                trans = rxrpc_name_to_transport(sock, (struct sockaddr *) srx,
                                                sizeof(*srx), 0, gfp);
                if (IS_ERR(trans)) {
                        call = ERR_CAST(trans);
                        trans = NULL;
                        goto out_notrans;
                }
        } else {
                trans = rx->trans;
                if (!trans) {
                        call = ERR_PTR(-ENOTCONN);
                        goto out_notrans;
                }
                atomic_inc(&trans->usage);
        }

        service_id = rx->service_id;
        if (srx)
                service_id = htons(srx->srx_service);

        if (!key)
                key = rx->key;
        if (key && !key->payload.data)
                key = NULL; /* a no-security key */

        bundle = rxrpc_get_bundle(rx, trans, key, service_id, gfp);
        if (IS_ERR(bundle)) {
                call = ERR_CAST(bundle);
                goto out;
        }

        call = rxrpc_get_client_call(rx, trans, bundle, user_call_ID, true,
                                     gfp);
        rxrpc_put_bundle(trans, bundle);
out:
        rxrpc_put_transport(trans);
out_notrans:
        release_sock(&rx->sk);
        _leave(" = %p", call);
        return call;
}
EXPORT_SYMBOL(rxrpc_kernel_begin_call);
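/*
 * Illustrative sketch (not part of the original source): the shape of a
 * kernel-side user of rxrpc_kernel_begin_call() above and
 * rxrpc_kernel_end_call() below.  The socket is assumed to have been created
 * and bound by the kernel service beforehand, and the data-exchange step is
 * elided; srx, key and call_id are whatever the service supplies.
 *
 *	struct rxrpc_call *call;
 *
 *	call = rxrpc_kernel_begin_call(socket, srx, key, call_id, GFP_KERNEL);
 *	if (IS_ERR(call))
 *		return PTR_ERR(call);
 *
 *	// ... send the request and collect the reply on the call ...
 *
 *	rxrpc_kernel_end_call(call);
 */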
/**
 * rxrpc_kernel_end_call - Allow a kernel service to end a call it was using
 * @call: The call to end
 *
 * Allow a kernel service to end a call it was using. The call must be
 * complete before this is called (the call should be aborted if necessary).
 */
void rxrpc_kernel_end_call(struct rxrpc_call *call)
{
        _enter("%d{%d}", call->debug_id, atomic_read(&call->usage));
        rxrpc_remove_user_ID(call->socket, call);
        rxrpc_put_call(call);
}
EXPORT_SYMBOL(rxrpc_kernel_end_call);

/**
 * rxrpc_kernel_intercept_rx_messages - Intercept received RxRPC messages
 * @sock: The socket to intercept received messages on
 * @interceptor: The function to pass the messages to
 *
 * Allow a kernel service to intercept messages heading for the Rx queue on an
 * RxRPC socket. They get passed to the specified function instead.
 * @interceptor should free the socket buffers it is given. @interceptor is
 * called with the socket receive queue spinlock held and softirqs disabled -
 * this ensures that the messages will be delivered in the right order.
 */
void rxrpc_kernel_intercept_rx_messages(struct socket *sock,
                                        rxrpc_interceptor_t interceptor)
{
        struct rxrpc_sock *rx = rxrpc_sk(sock->sk);

        _enter("");
        rx->interceptor = interceptor;
}
EXPORT_SYMBOL(rxrpc_kernel_intercept_rx_messages);
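/*
 * Illustrative sketch (not part of the original source): an interceptor is
 * installed once, typically straight after socket creation, and must take
 * ownership of (and eventually free) every skb it is handed; since it runs
 * with the receive queue lock held and softirqs disabled, it must not sleep.
 * The signature assumed here follows the rxrpc_interceptor_t typedef in
 * <net/af_rxrpc.h>.
 *
 *	static void my_rx_interceptor(struct sock *sk,
 *				      unsigned long user_call_ID,
 *				      struct sk_buff *skb)
 *	{
 *		// stash the skb for process-context handling, or free it
 *	}
 *
 *	rxrpc_kernel_intercept_rx_messages(socket, my_rx_interceptor);
 */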
/*
 * connect an RxRPC socket
 * - this just targets it at a specific destination; no actual connection
 *   negotiation takes place
 */
static int rxrpc_connect(struct socket *sock, struct sockaddr *addr,
                         int addr_len, int flags)
{
        struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) addr;
        struct sock *sk = sock->sk;
        struct rxrpc_transport *trans;
        struct rxrpc_local *local;
        struct rxrpc_sock *rx = rxrpc_sk(sk);
        int ret;

        _enter("%p,%p,%d,%d", rx, addr, addr_len, flags);

        ret = rxrpc_validate_address(rx, srx, addr_len);
        if (ret < 0) {
                _leave(" = %d [bad addr]", ret);
                return ret;
        }

        lock_sock(&rx->sk);

        switch (rx->sk.sk_state) {
        case RXRPC_UNCONNECTED:
                /* find a local transport endpoint if we don't have one already */
                ASSERTCMP(rx->local, ==, NULL);
                rx->srx.srx_family = AF_RXRPC;
                rx->srx.srx_service = 0;
                rx->srx.transport_type = srx->transport_type;
                rx->srx.transport_len = sizeof(sa_family_t);
                rx->srx.transport.family = srx->transport.family;
                local = rxrpc_lookup_local(&rx->srx);
                if (IS_ERR(local)) {
                        release_sock(&rx->sk);
                        return PTR_ERR(local);
                }
                rx->local = local;
                rx->sk.sk_state = RXRPC_CLIENT_BOUND;
        case RXRPC_CLIENT_BOUND:
                break;
        case RXRPC_CLIENT_CONNECTED:
                release_sock(&rx->sk);
                return -EISCONN;
        default:
                release_sock(&rx->sk);
                return -EBUSY; /* server sockets can't connect as well */
        }

        trans = rxrpc_name_to_transport(sock, addr, addr_len, flags,
                                        GFP_KERNEL);
        if (IS_ERR(trans)) {
                release_sock(&rx->sk);
                _leave(" = %ld", PTR_ERR(trans));
                return PTR_ERR(trans);
        }

        rx->trans = trans;
        rx->service_id = htons(srx->srx_service);
        rx->sk.sk_state = RXRPC_CLIENT_CONNECTED;

        release_sock(&rx->sk);
        return 0;
}

/*
 * send a message through an RxRPC socket
 * - in a client this does a number of things:
 *   - finds/sets up a connection for the security specified (if any)
 *   - initiates a call (ID in control data)
 *   - ends the request phase of a call (if MSG_MORE is not set)
 *   - sends a call data packet
 *   - may send an abort (abort code in control data)
 */
static int rxrpc_sendmsg(struct kiocb *iocb, struct socket *sock,
                         struct msghdr *m, size_t len)
{
        struct rxrpc_transport *trans;
        struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
        int ret;

        _enter(",{%d},,%zu", rx->sk.sk_state, len);

        if (m->msg_flags & MSG_OOB)
                return -EOPNOTSUPP;

        if (m->msg_name) {
                ret = rxrpc_validate_address(rx, m->msg_name, m->msg_namelen);
                if (ret < 0) {
                        _leave(" = %d [bad addr]", ret);
                        return ret;
                }
        }

        trans = NULL;
        lock_sock(&rx->sk);

        if (m->msg_name) {
                ret = -EISCONN;
                trans = rxrpc_name_to_transport(sock, m->msg_name,
                                                m->msg_namelen, 0, GFP_KERNEL);
                if (IS_ERR(trans)) {
                        ret = PTR_ERR(trans);
                        trans = NULL;
                        goto out;
                }
        } else {
                trans = rx->trans;
                if (trans)
                        atomic_inc(&trans->usage);
        }

        switch (rx->sk.sk_state) {
        case RXRPC_SERVER_LISTENING:
                if (!m->msg_name) {
                        ret = rxrpc_server_sendmsg(iocb, rx, m, len);
                        break;
                }
        case RXRPC_SERVER_BOUND:
        case RXRPC_CLIENT_BOUND:
                if (!m->msg_name) {
                        ret = -ENOTCONN;
                        break;
                }
        case RXRPC_CLIENT_CONNECTED:
                ret = rxrpc_client_sendmsg(iocb, rx, trans, m, len);
                break;
        default:
                ret = -ENOTCONN;
                break;
        }

out:
        release_sock(&rx->sk);
        if (trans)
                rxrpc_put_transport(trans);
        _leave(" = %d", ret);
        return ret;
}
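/*
 * Illustrative sketch (not part of the original source): how a userspace
 * client might drive the sendmsg() path above.  The call is identified by an
 * RXRPC_USER_CALL_ID control message (an unsigned long chosen by the caller)
 * and the request phase stays open while MSG_MORE is set; the constant names
 * are assumed to come from the rxrpc userspace header.
 *
 *	unsigned long call_id = 1;
 *	char ctrl[CMSG_SPACE(sizeof(call_id))];
 *	struct iovec iov = { .iov_base = request, .iov_len = request_len };
 *	struct msghdr msg = {
 *		.msg_iov	= &iov,
 *		.msg_iovlen	= 1,
 *		.msg_control	= ctrl,
 *		.msg_controllen	= sizeof(ctrl),
 *	};
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *	cmsg->cmsg_level = SOL_RXRPC;
 *	cmsg->cmsg_type  = RXRPC_USER_CALL_ID;
 *	cmsg->cmsg_len   = CMSG_LEN(sizeof(call_id));
 *	memcpy(CMSG_DATA(cmsg), &call_id, sizeof(call_id));
 *
 *	sendmsg(fd, &msg, 0);		// last fragment, so no MSG_MORE
 */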
/*
 * set RxRPC socket options
 */
static int rxrpc_setsockopt(struct socket *sock, int level, int optname,
                            char __user *optval, int optlen)
{
        struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
        unsigned min_sec_level;
        int ret;

        _enter(",%d,%d,,%d", level, optname, optlen);

        lock_sock(&rx->sk);
        ret = -EOPNOTSUPP;

        if (level == SOL_RXRPC) {
                switch (optname) {
                case RXRPC_EXCLUSIVE_CONNECTION:
                        ret = -EINVAL;
                        if (optlen != 0)
                                goto error;
                        ret = -EISCONN;
                        if (rx->sk.sk_state != RXRPC_UNCONNECTED)
                                goto error;
                        set_bit(RXRPC_SOCK_EXCLUSIVE_CONN, &rx->flags);
                        goto success;

                case RXRPC_SECURITY_KEY:
                        ret = -EINVAL;
                        if (rx->key)
                                goto error;
                        ret = -EISCONN;
                        if (rx->sk.sk_state != RXRPC_UNCONNECTED)
                                goto error;
                        ret = rxrpc_request_key(rx, optval, optlen);
                        goto error;

                case RXRPC_SECURITY_KEYRING:
                        ret = -EINVAL;
                        if (rx->key)
                                goto error;
                        ret = -EISCONN;
                        if (rx->sk.sk_state != RXRPC_UNCONNECTED)
                                goto error;
                        ret = rxrpc_server_keyring(rx, optval, optlen);
                        goto error;

                case RXRPC_MIN_SECURITY_LEVEL:
                        ret = -EINVAL;
                        if (optlen != sizeof(unsigned))
                                goto error;
                        ret = -EISCONN;
                        if (rx->sk.sk_state != RXRPC_UNCONNECTED)
                                goto error;
                        ret = get_user(min_sec_level,
                                       (unsigned __user *) optval);
                        if (ret < 0)
                                goto error;
                        ret = -EINVAL;
                        if (min_sec_level > RXRPC_SECURITY_MAX)
                                goto error;
                        rx->min_sec_level = min_sec_level;
                        goto success;

                default:
                        break;
                }
        }

success:
        ret = 0;
error:
        release_sock(&rx->sk);
        return ret;
}
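/*
 * Illustrative sketch (not part of the original source): the options handled
 * above must be set from userspace before the socket is bound or connected,
 * for example (RXRPC_SECURITY_ENCRYPT is assumed to come from the rxrpc
 * userspace header):
 *
 *	unsigned int level = RXRPC_SECURITY_ENCRYPT;
 *
 *	setsockopt(fd, SOL_RXRPC, RXRPC_MIN_SECURITY_LEVEL,
 *		   &level, sizeof(level));
 *	setsockopt(fd, SOL_RXRPC, RXRPC_EXCLUSIVE_CONNECTION, NULL, 0);
 */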
/*
 * permit an RxRPC socket to be polled
 */
static unsigned int rxrpc_poll(struct file *file, struct socket *sock,
                               poll_table *wait)
{
        unsigned int mask;
        struct sock *sk = sock->sk;

        poll_wait(file, sk->sk_sleep, wait);
        mask = 0;

        /* the socket is readable if there are any messages waiting on the Rx
         * queue */
        if (!skb_queue_empty(&sk->sk_receive_queue))
                mask |= POLLIN | POLLRDNORM;

        /* the socket is writable if there is space to add new data to the
         * socket; there is no guarantee that any particular call in progress
         * on the socket may have space in the Tx ACK window */
        if (rxrpc_writable(sk))
                mask |= POLLOUT | POLLWRNORM;

        return mask;
}

/*
 * create an RxRPC socket
 */
static int rxrpc_create(struct net *net, struct socket *sock, int protocol)
{
        struct rxrpc_sock *rx;
        struct sock *sk;

        _enter("%p,%d", sock, protocol);

        if (net != &init_net)
                return -EAFNOSUPPORT;

        /* we support transport protocol UDP only */
        if (protocol != PF_INET)
                return -EPROTONOSUPPORT;

        if (sock->type != SOCK_DGRAM)
                return -ESOCKTNOSUPPORT;

        sock->ops = &rxrpc_rpc_ops;
        sock->state = SS_UNCONNECTED;

        sk = sk_alloc(net, PF_RXRPC, GFP_KERNEL, &rxrpc_proto);
        if (!sk)
                return -ENOMEM;

        sock_init_data(sock, sk);
        sk->sk_state = RXRPC_UNCONNECTED;
        sk->sk_write_space = rxrpc_write_space;
        sk->sk_max_ack_backlog = sysctl_rxrpc_max_qlen;
        sk->sk_destruct = rxrpc_sock_destructor;

        rx = rxrpc_sk(sk);
        rx->proto = protocol;
        rx->calls = RB_ROOT;

        INIT_LIST_HEAD(&rx->listen_link);
        INIT_LIST_HEAD(&rx->secureq);
        INIT_LIST_HEAD(&rx->acceptq);
        rwlock_init(&rx->call_lock);
        memset(&rx->srx, 0, sizeof(rx->srx));

        _leave(" = 0 [%p]", rx);
        return 0;
}
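/*
 * Illustrative sketch (not part of the original source): the checks above
 * mean an AF_RXRPC socket is created from userspace with the transport
 * protocol family as the third argument, e.g.:
 *
 *	int fd = socket(AF_RXRPC, SOCK_DGRAM, PF_INET);
 */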
/*
 * RxRPC socket destructor
 */
static void rxrpc_sock_destructor(struct sock *sk)
{
        _enter("%p", sk);

        rxrpc_purge_queue(&sk->sk_receive_queue);

        WARN_ON(atomic_read(&sk->sk_wmem_alloc));
        WARN_ON(!sk_unhashed(sk));
        WARN_ON(sk->sk_socket);

        if (!sock_flag(sk, SOCK_DEAD)) {
                printk("Attempt to release alive rxrpc socket: %p\n", sk);
                return;
        }
}

/*
 * release an RxRPC socket
 */
static int rxrpc_release_sock(struct sock *sk)
{
        struct rxrpc_sock *rx = rxrpc_sk(sk);

        _enter("%p{%d,%d}", sk, sk->sk_state, atomic_read(&sk->sk_refcnt));

        /* declare the socket closed for business */
        sock_orphan(sk);
        sk->sk_shutdown = SHUTDOWN_MASK;

        spin_lock_bh(&sk->sk_receive_queue.lock);
        sk->sk_state = RXRPC_CLOSE;
        spin_unlock_bh(&sk->sk_receive_queue.lock);

        ASSERTCMP(rx->listen_link.next, !=, LIST_POISON1);

        if (!list_empty(&rx->listen_link)) {
                write_lock_bh(&rx->local->services_lock);
                list_del(&rx->listen_link);
                write_unlock_bh(&rx->local->services_lock);
        }

        /* try to flush out this socket */
        rxrpc_release_calls_on_socket(rx);
        flush_workqueue(rxrpc_workqueue);
        rxrpc_purge_queue(&sk->sk_receive_queue);

        if (rx->conn) {
                rxrpc_put_connection(rx->conn);
                rx->conn = NULL;
        }

        if (rx->bundle) {
                rxrpc_put_bundle(rx->trans, rx->bundle);
                rx->bundle = NULL;
        }
        if (rx->trans) {
                rxrpc_put_transport(rx->trans);
                rx->trans = NULL;
        }
        if (rx->local) {
                rxrpc_put_local(rx->local);
                rx->local = NULL;
        }

        key_put(rx->key);
        rx->key = NULL;
        key_put(rx->securities);
        rx->securities = NULL;
        sock_put(sk);

        _leave(" = 0");
        return 0;
}

/*
 * release an RxRPC BSD socket on close() or equivalent
 */
static int rxrpc_release(struct socket *sock)
{
        struct sock *sk = sock->sk;

        _enter("%p{%p}", sock, sk);

        if (!sk)
                return 0;

        sock->sk = NULL;

        return rxrpc_release_sock(sk);
}

/*
 * RxRPC network protocol
 */
static const struct proto_ops rxrpc_rpc_ops = {
        .family = PF_UNIX,
        .owner = THIS_MODULE,
        .release = rxrpc_release,
        .bind = rxrpc_bind,
        .connect = rxrpc_connect,
        .socketpair = sock_no_socketpair,
        .accept = sock_no_accept,
        .getname = sock_no_getname,
        .poll = rxrpc_poll,
        .ioctl = sock_no_ioctl,
        .listen = rxrpc_listen,
        .shutdown = sock_no_shutdown,
        .setsockopt = rxrpc_setsockopt,
        .getsockopt = sock_no_getsockopt,
        .sendmsg = rxrpc_sendmsg,
        .recvmsg = rxrpc_recvmsg,
        .mmap = sock_no_mmap,
        .sendpage = sock_no_sendpage,
};

static struct proto rxrpc_proto = {
        .name = "RXRPC",
        .owner = THIS_MODULE,
        .obj_size = sizeof(struct rxrpc_sock),
        .max_header = sizeof(struct rxrpc_header),
};

static struct net_proto_family rxrpc_family_ops = {
        .family = PF_RXRPC,
        .create = rxrpc_create,
        .owner = THIS_MODULE,
};

/*
 * initialise and register the RxRPC protocol
 */
static int __init af_rxrpc_init(void)
{
        struct sk_buff *dummy_skb;
        int ret = -1;

        BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > sizeof(dummy_skb->cb));

        rxrpc_epoch = htonl(get_seconds());

        ret = -ENOMEM;
        rxrpc_call_jar = kmem_cache_create(
                "rxrpc_call_jar", sizeof(struct rxrpc_call), 0,
                SLAB_HWCACHE_ALIGN, NULL);
        if (!rxrpc_call_jar) {
                printk(KERN_NOTICE "RxRPC: Failed to allocate call jar\n");
                goto error_call_jar;
        }

        rxrpc_workqueue = create_workqueue("krxrpcd");
        if (!rxrpc_workqueue) {
                printk(KERN_NOTICE "RxRPC: Failed to allocate work queue\n");
                goto error_work_queue;
        }

        ret = proto_register(&rxrpc_proto, 1);
        if (ret < 0) {
                printk(KERN_CRIT "RxRPC: Cannot register protocol\n");
                goto error_proto;
        }

        ret = sock_register(&rxrpc_family_ops);
        if (ret < 0) {
                printk(KERN_CRIT "RxRPC: Cannot register socket family\n");
                goto error_sock;
        }

        ret = register_key_type(&key_type_rxrpc);
        if (ret < 0) {
                printk(KERN_CRIT "RxRPC: Cannot register client key type\n");
                goto error_key_type;
        }

        ret = register_key_type(&key_type_rxrpc_s);
        if (ret < 0) {
                printk(KERN_CRIT "RxRPC: Cannot register server key type\n");
                goto error_key_type_s;
        }

#ifdef CONFIG_PROC_FS
        proc_net_fops_create(&init_net, "rxrpc_calls", 0, &rxrpc_call_seq_fops);
        proc_net_fops_create(&init_net, "rxrpc_conns", 0, &rxrpc_connection_seq_fops);
#endif
        return 0;

error_key_type_s:
        unregister_key_type(&key_type_rxrpc);
error_key_type:
        sock_unregister(PF_RXRPC);
error_sock:
        proto_unregister(&rxrpc_proto);
error_proto:
        destroy_workqueue(rxrpc_workqueue);
error_work_queue:
        kmem_cache_destroy(rxrpc_call_jar);
error_call_jar:
        return ret;
}

/*
 * unregister the RxRPC protocol
 */
static void __exit af_rxrpc_exit(void)
{
        _enter("");
        unregister_key_type(&key_type_rxrpc_s);
        unregister_key_type(&key_type_rxrpc);
        sock_unregister(PF_RXRPC);
        proto_unregister(&rxrpc_proto);
        rxrpc_destroy_all_calls();
        rxrpc_destroy_all_connections();
        rxrpc_destroy_all_transports();
        rxrpc_destroy_all_peers();
        rxrpc_destroy_all_locals();

        ASSERTCMP(atomic_read(&rxrpc_n_skbs), ==, 0);

        _debug("flush scheduled work");
        flush_workqueue(rxrpc_workqueue);
        proc_net_remove(&init_net, "rxrpc_conns");
        proc_net_remove(&init_net, "rxrpc_calls");
        destroy_workqueue(rxrpc_workqueue);
        kmem_cache_destroy(rxrpc_call_jar);
        _leave("");
}

module_init(af_rxrpc_init);
module_exit(af_rxrpc_exit);