af_rxrpc.c

/* AF_RXRPC implementation
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

MODULE_DESCRIPTION("RxRPC network protocol");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_RXRPC);

unsigned rxrpc_debug; // = RXRPC_DEBUG_KPROTO;
module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(rxrpc_debug, "RxRPC debugging mask");

static int sysctl_rxrpc_max_qlen __read_mostly = 10;

static struct proto rxrpc_proto;
static const struct proto_ops rxrpc_rpc_ops;

/* local epoch for detecting local-end reset */
__be32 rxrpc_epoch;

/* current debugging ID */
atomic_t rxrpc_debug_id;

/* count of skbs currently in use */
atomic_t rxrpc_n_skbs;

struct workqueue_struct *rxrpc_workqueue;

static void rxrpc_sock_destructor(struct sock *);

/*
 * see if an RxRPC socket is currently writable
 */
static inline int rxrpc_writable(struct sock *sk)
{
	return atomic_read(&sk->sk_wmem_alloc) < (size_t) sk->sk_sndbuf;
}

/*
 * wait for write bufferage to become available
 */
static void rxrpc_write_space(struct sock *sk)
{
	_enter("%p", sk);
	read_lock(&sk->sk_callback_lock);
	if (rxrpc_writable(sk)) {
		if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
			wake_up_interruptible(sk->sk_sleep);
		sk_wake_async(sk, 2, POLL_OUT);
	}
	read_unlock(&sk->sk_callback_lock);
}

/*
 * validate an RxRPC address
 */
static int rxrpc_validate_address(struct rxrpc_sock *rx,
				  struct sockaddr_rxrpc *srx,
				  int len)
{
	if (len < sizeof(struct sockaddr_rxrpc))
		return -EINVAL;

	if (srx->srx_family != AF_RXRPC)
		return -EAFNOSUPPORT;

	if (srx->transport_type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	len -= offsetof(struct sockaddr_rxrpc, transport);
	if (srx->transport_len < sizeof(sa_family_t) ||
	    srx->transport_len > len)
		return -EINVAL;

	if (srx->transport.family != rx->proto)
		return -EAFNOSUPPORT;

	switch (srx->transport.family) {
	case AF_INET:
		_debug("INET: %x @ %u.%u.%u.%u",
		       ntohs(srx->transport.sin.sin_port),
		       NIPQUAD(srx->transport.sin.sin_addr));
		if (srx->transport_len > 8)
			memset((void *)&srx->transport + 8, 0,
			       srx->transport_len - 8);
		break;

	case AF_INET6:
	default:
		return -EAFNOSUPPORT;
	}

	return 0;
}

/*
 * bind a local address to an RxRPC socket
 */
static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) saddr;
	struct sock *sk = sock->sk;
	struct rxrpc_local *local;
	struct rxrpc_sock *rx = rxrpc_sk(sk), *prx;
	__be16 service_id;
	int ret;

	_enter("%p,%p,%d", rx, saddr, len);

	ret = rxrpc_validate_address(rx, srx, len);
	if (ret < 0)
		goto error;

	lock_sock(&rx->sk);

	if (rx->sk.sk_state != RXRPC_UNCONNECTED) {
		ret = -EINVAL;
		goto error_unlock;
	}

	memcpy(&rx->srx, srx, sizeof(rx->srx));

	/* find a local transport endpoint if we don't have one already */
	local = rxrpc_lookup_local(&rx->srx);
	if (IS_ERR(local)) {
		ret = PTR_ERR(local);
		goto error_unlock;
	}

	rx->local = local;
	if (srx->srx_service) {
		service_id = htons(srx->srx_service);
		write_lock_bh(&local->services_lock);
		list_for_each_entry(prx, &local->services, listen_link) {
			if (prx->service_id == service_id)
				goto service_in_use;
		}

		rx->service_id = service_id;
		list_add_tail(&rx->listen_link, &local->services);
		write_unlock_bh(&local->services_lock);

		rx->sk.sk_state = RXRPC_SERVER_BOUND;
	} else {
		rx->sk.sk_state = RXRPC_CLIENT_BOUND;
	}

	release_sock(&rx->sk);
	_leave(" = 0");
	return 0;

service_in_use:
	ret = -EADDRINUSE;
	write_unlock_bh(&local->services_lock);
error_unlock:
	release_sock(&rx->sk);
error:
	_leave(" = %d", ret);
	return ret;
}

/*
 * set the number of pending calls permitted on a listening socket
 */
static int rxrpc_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	struct rxrpc_sock *rx = rxrpc_sk(sk);
	int ret;

	_enter("%p,%d", rx, backlog);

	lock_sock(&rx->sk);

	switch (rx->sk.sk_state) {
	case RXRPC_UNCONNECTED:
		ret = -EADDRNOTAVAIL;
		break;
	case RXRPC_CLIENT_BOUND:
	case RXRPC_CLIENT_CONNECTED:
	default:
		ret = -EBUSY;
		break;
	case RXRPC_SERVER_BOUND:
		ASSERT(rx->local != NULL);
		sk->sk_max_ack_backlog = backlog;
		rx->sk.sk_state = RXRPC_SERVER_LISTENING;
		ret = 0;
		break;
	}

	release_sock(&rx->sk);
	_leave(" = %d", ret);
	return ret;
}
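
/*
 * Editor's sketch (not part of the original file): how a userspace server
 * might exercise rxrpc_bind() and rxrpc_listen() above.  The field layout
 * follows what rxrpc_validate_address() checks; the service ID, port and
 * backlog values are illustrative assumptions, not values taken from this
 * file.
 *
 *	struct sockaddr_rxrpc srx = {
 *		.srx_family	= AF_RXRPC,
 *		.srx_service	= 52,			// example service ID
 *		.transport_type	= SOCK_DGRAM,
 *		.transport_len	= sizeof(srx.transport.sin),
 *		.transport.sin.sin_family	= AF_INET,
 *		.transport.sin.sin_port		= htons(7001),	// example port
 *		.transport.sin.sin_addr.s_addr	= INADDR_ANY,
 *	};
 *	int server = socket(AF_RXRPC, SOCK_DGRAM, PF_INET);
 *
 *	bind(server, (struct sockaddr *) &srx, sizeof(srx));
 *	listen(server, 100);
 */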
/*
 * find a transport by address
 */
static struct rxrpc_transport *rxrpc_name_to_transport(struct socket *sock,
							struct sockaddr *addr,
							int addr_len, int flags,
							gfp_t gfp)
{
	struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) addr;
	struct rxrpc_transport *trans;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct rxrpc_peer *peer;

	_enter("%p,%p,%d,%d", rx, addr, addr_len, flags);

	ASSERT(rx->local != NULL);
	ASSERT(rx->sk.sk_state > RXRPC_UNCONNECTED);

	if (rx->srx.transport_type != srx->transport_type)
		return ERR_PTR(-ESOCKTNOSUPPORT);
	if (rx->srx.transport.family != srx->transport.family)
		return ERR_PTR(-EAFNOSUPPORT);

	/* find a remote transport endpoint from the local one */
	peer = rxrpc_get_peer(srx, gfp);
	if (IS_ERR(peer))
		return ERR_PTR(PTR_ERR(peer));

	/* find a transport */
	trans = rxrpc_get_transport(rx->local, peer, gfp);
	rxrpc_put_peer(peer);
	_leave(" = %p", trans);
	return trans;
}

/**
 * rxrpc_kernel_begin_call - Allow a kernel service to begin a call
 * @sock: The socket on which to make the call
 * @srx: The address of the peer to contact (defaults to socket setting)
 * @key: The security context to use (defaults to socket setting)
 * @user_call_ID: The ID to use
 *
 * Allow a kernel service to begin a call on the nominated socket.  This just
 * sets up all the internal tracking structures and allocates connection and
 * call IDs as appropriate.  The call to be used is returned.
 *
 * The default socket destination address and security may be overridden by
 * supplying @srx and @key.
 */
struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
					   struct sockaddr_rxrpc *srx,
					   struct key *key,
					   unsigned long user_call_ID,
					   gfp_t gfp)
{
	struct rxrpc_conn_bundle *bundle;
	struct rxrpc_transport *trans;
	struct rxrpc_call *call;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	__be16 service_id;

	_enter(",,%x,%lx", key_serial(key), user_call_ID);

	lock_sock(&rx->sk);

	if (srx) {
		trans = rxrpc_name_to_transport(sock, (struct sockaddr *) srx,
						sizeof(*srx), 0, gfp);
		if (IS_ERR(trans)) {
			call = ERR_PTR(PTR_ERR(trans));
			trans = NULL;
			goto out;
		}
	} else {
		trans = rx->trans;
		if (!trans) {
			call = ERR_PTR(-ENOTCONN);
			goto out;
		}
		atomic_inc(&trans->usage);
	}

	service_id = rx->service_id;
	if (srx)
		service_id = htons(srx->srx_service);

	if (!key)
		key = rx->key;
	if (key && !key->payload.data)
		key = NULL; /* a no-security key */

	bundle = rxrpc_get_bundle(rx, trans, key, service_id, gfp);
	if (IS_ERR(bundle)) {
		call = ERR_PTR(PTR_ERR(bundle));
		goto out;
	}

	call = rxrpc_get_client_call(rx, trans, bundle, user_call_ID, true,
				     gfp);
	rxrpc_put_bundle(trans, bundle);
out:
	rxrpc_put_transport(trans);
	release_sock(&rx->sk);
	_leave(" = %p", call);
	return call;
}

EXPORT_SYMBOL(rxrpc_kernel_begin_call);

/**
 * rxrpc_kernel_end_call - Allow a kernel service to end a call it was using
 * @call: The call to end
 *
 * Allow a kernel service to end a call it was using.  The call must be
 * complete before this is called (the call should be aborted if necessary).
 */
void rxrpc_kernel_end_call(struct rxrpc_call *call)
{
	_enter("%d{%d}", call->debug_id, atomic_read(&call->usage));
	rxrpc_remove_user_ID(call->socket, call);
	rxrpc_put_call(call);
}

EXPORT_SYMBOL(rxrpc_kernel_end_call);
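
/*
 * Editor's sketch (not taken from this file): how an in-kernel service such
 * as a filesystem client might use the two exported calls above.  The socket
 * is assumed to have been created with sock_create_kern() and bound or
 * connected elsewhere; my_send_request() stands in for the service's own
 * sendmsg-based marshalling and is hypothetical.
 *
 *	struct rxrpc_call *call;
 *
 *	call = rxrpc_kernel_begin_call(sock, &srx, key, (unsigned long) op,
 *				       GFP_NOFS);
 *	if (IS_ERR(call))
 *		return PTR_ERR(call);
 *
 *	ret = my_send_request(call, op);	// hypothetical marshalling step
 *	...
 *	rxrpc_kernel_end_call(call);		// drop the user ID and our ref
 */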
/**
 * rxrpc_kernel_intercept_rx_messages - Intercept received RxRPC messages
 * @sock: The socket to intercept received messages on
 * @interceptor: The function to pass the messages to
 *
 * Allow a kernel service to intercept messages heading for the Rx queue on an
 * RxRPC socket.  They get passed to the specified function instead.
 * @interceptor should free the socket buffers it is given.  @interceptor is
 * called with the socket receive queue spinlock held and softirqs disabled -
 * this ensures that the messages will be delivered in the right order.
 */
void rxrpc_kernel_intercept_rx_messages(struct socket *sock,
					rxrpc_interceptor_t interceptor)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);

	_enter("");
	rx->interceptor = interceptor;
}

EXPORT_SYMBOL(rxrpc_kernel_intercept_rx_messages);
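
/*
 * Editor's sketch: a minimal interceptor a kernel service might register via
 * rxrpc_kernel_intercept_rx_messages().  The prototype assumed here -
 * (struct sock *, unsigned long user_call_ID, struct sk_buff *) - is the
 * rxrpc_interceptor_t typedef from <net/af_rxrpc.h>; my_rx_queue, my_wq and
 * my_rx_work are hypothetical.  Note the constraints documented above: this
 * runs with the receive queue lock held and softirqs disabled, so it must
 * not sleep and must consume (or queue for later handling) the skb itself.
 *
 *	static void my_rx_interceptor(struct sock *sk, unsigned long call_id,
 *				      struct sk_buff *skb)
 *	{
 *		skb_queue_tail(&my_rx_queue, skb);	// defer the real work
 *		queue_work(my_wq, &my_rx_work);
 *	}
 */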
/*
 * connect an RxRPC socket
 * - this just targets it at a specific destination; no actual connection
 *   negotiation takes place
 */
static int rxrpc_connect(struct socket *sock, struct sockaddr *addr,
			 int addr_len, int flags)
{
	struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) addr;
	struct sock *sk = sock->sk;
	struct rxrpc_transport *trans;
	struct rxrpc_local *local;
	struct rxrpc_sock *rx = rxrpc_sk(sk);
	int ret;

	_enter("%p,%p,%d,%d", rx, addr, addr_len, flags);

	ret = rxrpc_validate_address(rx, srx, addr_len);
	if (ret < 0) {
		_leave(" = %d [bad addr]", ret);
		return ret;
	}

	lock_sock(&rx->sk);

	switch (rx->sk.sk_state) {
	case RXRPC_UNCONNECTED:
		/* find a local transport endpoint if we don't have one
		 * already */
		ASSERTCMP(rx->local, ==, NULL);
		rx->srx.srx_family = AF_RXRPC;
		rx->srx.srx_service = 0;
		rx->srx.transport_type = srx->transport_type;
		rx->srx.transport_len = sizeof(sa_family_t);
		rx->srx.transport.family = srx->transport.family;
		local = rxrpc_lookup_local(&rx->srx);
		if (IS_ERR(local)) {
			release_sock(&rx->sk);
			return PTR_ERR(local);
		}
		rx->local = local;
		rx->sk.sk_state = RXRPC_CLIENT_BOUND;
	case RXRPC_CLIENT_BOUND:
		break;
	case RXRPC_CLIENT_CONNECTED:
		release_sock(&rx->sk);
		return -EISCONN;
	default:
		release_sock(&rx->sk);
		return -EBUSY; /* server sockets can't connect as well */
	}

	trans = rxrpc_name_to_transport(sock, addr, addr_len, flags,
					GFP_KERNEL);
	if (IS_ERR(trans)) {
		release_sock(&rx->sk);
		_leave(" = %ld", PTR_ERR(trans));
		return PTR_ERR(trans);
	}

	rx->trans = trans;
	rx->service_id = htons(srx->srx_service);
	rx->sk.sk_state = RXRPC_CLIENT_CONNECTED;

	release_sock(&rx->sk);
	return 0;
}

/*
 * send a message through an RxRPC socket
 * - in a client this does a number of things:
 *   - finds/sets up a connection for the security specified (if any)
 *   - initiates a call (ID in control data)
 *   - ends the request phase of a call (if MSG_MORE is not set)
 *   - sends a call data packet
 *   - may send an abort (abort code in control data)
 */
static int rxrpc_sendmsg(struct kiocb *iocb, struct socket *sock,
			 struct msghdr *m, size_t len)
{
	struct rxrpc_transport *trans;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	int ret;

	_enter(",{%d},,%zu", rx->sk.sk_state, len);

	if (m->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (m->msg_name) {
		ret = rxrpc_validate_address(rx, m->msg_name, m->msg_namelen);
		if (ret < 0) {
			_leave(" = %d [bad addr]", ret);
			return ret;
		}
	}

	trans = NULL;
	lock_sock(&rx->sk);

	if (m->msg_name) {
		ret = -EISCONN;
		trans = rxrpc_name_to_transport(sock, m->msg_name,
						m->msg_namelen, 0, GFP_KERNEL);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			goto out;
		}
	} else {
		trans = rx->trans;
		if (trans)
			atomic_inc(&trans->usage);
	}

	switch (rx->sk.sk_state) {
	case RXRPC_SERVER_LISTENING:
		if (!m->msg_name) {
			ret = rxrpc_server_sendmsg(iocb, rx, m, len);
			break;
		}
	case RXRPC_SERVER_BOUND:
	case RXRPC_CLIENT_BOUND:
		if (!m->msg_name) {
			ret = -ENOTCONN;
			break;
		}
	case RXRPC_CLIENT_CONNECTED:
		ret = rxrpc_client_sendmsg(iocb, rx, trans, m, len);
		break;
	default:
		ret = -ENOTCONN;
		break;
	}

out:
	release_sock(&rx->sk);
	if (trans)
		rxrpc_put_transport(trans);
	_leave(" = %d", ret);
	return ret;
}
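
/*
 * Editor's sketch: the shape of a client sendmsg() call that the function
 * above handles.  The RXRPC_USER_CALL_ID control message carries the
 * caller-chosen call ID; the payload and buffer sizes are illustrative only,
 * and the CMSG layout is assumed to follow Documentation/networking/rxrpc.txt.
 *
 *	unsigned long call_id = 1;
 *	char ctrl[CMSG_SPACE(sizeof(call_id))];
 *	struct iovec iov = { .iov_base = "ping", .iov_len = 4 };
 *	struct msghdr msg = {
 *		.msg_name	= &srx,		// peer address (see bind sketch)
 *		.msg_namelen	= sizeof(srx),
 *		.msg_iov	= &iov,
 *		.msg_iovlen	= 1,
 *		.msg_control	= ctrl,
 *		.msg_controllen	= sizeof(ctrl),
 *	};
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *	cmsg->cmsg_level = SOL_RXRPC;
 *	cmsg->cmsg_type  = RXRPC_USER_CALL_ID;
 *	cmsg->cmsg_len   = CMSG_LEN(sizeof(call_id));
 *	memcpy(CMSG_DATA(cmsg), &call_id, sizeof(call_id));
 *
 *	sendmsg(client, &msg, 0);	// no MSG_MORE: ends the request phase
 */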
/*
 * set RxRPC socket options
 */
static int rxrpc_setsockopt(struct socket *sock, int level, int optname,
			    char __user *optval, int optlen)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	unsigned min_sec_level;
	int ret;

	_enter(",%d,%d,,%d", level, optname, optlen);

	lock_sock(&rx->sk);
	ret = -EOPNOTSUPP;

	if (level == SOL_RXRPC) {
		switch (optname) {
		case RXRPC_EXCLUSIVE_CONNECTION:
			ret = -EINVAL;
			if (optlen != 0)
				goto error;
			ret = -EISCONN;
			if (rx->sk.sk_state != RXRPC_UNCONNECTED)
				goto error;
			set_bit(RXRPC_SOCK_EXCLUSIVE_CONN, &rx->flags);
			goto success;

		case RXRPC_SECURITY_KEY:
			ret = -EINVAL;
			if (rx->key)
				goto error;
			ret = -EISCONN;
			if (rx->sk.sk_state != RXRPC_UNCONNECTED)
				goto error;
			ret = rxrpc_request_key(rx, optval, optlen);
			goto error;

		case RXRPC_SECURITY_KEYRING:
			ret = -EINVAL;
			if (rx->key)
				goto error;
			ret = -EISCONN;
			if (rx->sk.sk_state != RXRPC_UNCONNECTED)
				goto error;
			ret = rxrpc_server_keyring(rx, optval, optlen);
			goto error;

		case RXRPC_MIN_SECURITY_LEVEL:
			ret = -EINVAL;
			if (optlen != sizeof(unsigned))
				goto error;
			ret = -EISCONN;
			if (rx->sk.sk_state != RXRPC_UNCONNECTED)
				goto error;
			ret = get_user(min_sec_level,
				       (unsigned __user *) optval);
			if (ret < 0)
				goto error;
			ret = -EINVAL;
			if (min_sec_level > RXRPC_SECURITY_MAX)
				goto error;
			rx->min_sec_level = min_sec_level;
			goto success;

		default:
			break;
		}
	}

success:
	ret = 0;
error:
	release_sock(&rx->sk);
	return ret;
}
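
/*
 * Editor's sketch: setting the options handled above from userspace, before
 * the socket is bound or connected.  The key description string is a
 * placeholder; RXRPC_SECURITY_KEY hands optval/optlen to rxrpc_request_key(),
 * and RXRPC_MIN_SECURITY_LEVEL takes a single unsigned int as checked above.
 * RXRPC_SECURITY_ENCRYPT is an assumed constant name for the desired level.
 *
 *	unsigned int level = RXRPC_SECURITY_ENCRYPT;
 *
 *	setsockopt(client, SOL_RXRPC, RXRPC_SECURITY_KEY,
 *		   "afs@EXAMPLE.COM", 15);	// placeholder key description
 *	setsockopt(client, SOL_RXRPC, RXRPC_MIN_SECURITY_LEVEL,
 *		   &level, sizeof(level));
 */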
/*
 * permit an RxRPC socket to be polled
 */
static unsigned int rxrpc_poll(struct file *file, struct socket *sock,
			       poll_table *wait)
{
	unsigned int mask;
	struct sock *sk = sock->sk;

	poll_wait(file, sk->sk_sleep, wait);
	mask = 0;

	/* the socket is readable if there are any messages waiting on the Rx
	 * queue */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	/* the socket is writable if there is space to add new data to the
	 * socket; there is no guarantee that any particular call in progress
	 * on the socket may have space in the Tx ACK window */
	if (rxrpc_writable(sk))
		mask |= POLLOUT | POLLWRNORM;

	return mask;
}

/*
 * create an RxRPC socket
 */
static int rxrpc_create(struct net *net, struct socket *sock, int protocol)
{
	struct rxrpc_sock *rx;
	struct sock *sk;

	_enter("%p,%d", sock, protocol);

	if (net != &init_net)
		return -EAFNOSUPPORT;

	/* we support transport protocol UDP only */
	if (protocol != PF_INET)
		return -EPROTONOSUPPORT;

	if (sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	sock->ops = &rxrpc_rpc_ops;
	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_RXRPC, GFP_KERNEL, &rxrpc_proto, 1);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);
	sk->sk_state = RXRPC_UNCONNECTED;
	sk->sk_write_space = rxrpc_write_space;
	sk->sk_max_ack_backlog = sysctl_rxrpc_max_qlen;
	sk->sk_destruct = rxrpc_sock_destructor;

	rx = rxrpc_sk(sk);
	rx->proto = protocol;
	rx->calls = RB_ROOT;

	INIT_LIST_HEAD(&rx->listen_link);
	INIT_LIST_HEAD(&rx->secureq);
	INIT_LIST_HEAD(&rx->acceptq);
	rwlock_init(&rx->call_lock);
	memset(&rx->srx, 0, sizeof(rx->srx));

	_leave(" = 0 [%p]", rx);
	return 0;
}

/*
 * RxRPC socket destructor
 */
static void rxrpc_sock_destructor(struct sock *sk)
{
	_enter("%p", sk);

	rxrpc_purge_queue(&sk->sk_receive_queue);

	BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
	BUG_TRAP(sk_unhashed(sk));
	BUG_TRAP(!sk->sk_socket);

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk("Attempt to release alive rxrpc socket: %p\n", sk);
		return;
	}
}

/*
 * release an RxRPC socket
 */
static int rxrpc_release_sock(struct sock *sk)
{
	struct rxrpc_sock *rx = rxrpc_sk(sk);

	_enter("%p{%d,%d}", sk, sk->sk_state, atomic_read(&sk->sk_refcnt));

	/* declare the socket closed for business */
	sock_orphan(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;

	spin_lock_bh(&sk->sk_receive_queue.lock);
	sk->sk_state = RXRPC_CLOSE;
	spin_unlock_bh(&sk->sk_receive_queue.lock);

	ASSERTCMP(rx->listen_link.next, !=, LIST_POISON1);

	if (!list_empty(&rx->listen_link)) {
		write_lock_bh(&rx->local->services_lock);
		list_del(&rx->listen_link);
		write_unlock_bh(&rx->local->services_lock);
	}

	/* try to flush out this socket */
	rxrpc_release_calls_on_socket(rx);
	flush_workqueue(rxrpc_workqueue);
	rxrpc_purge_queue(&sk->sk_receive_queue);

	if (rx->conn) {
		rxrpc_put_connection(rx->conn);
		rx->conn = NULL;
	}

	if (rx->bundle) {
		rxrpc_put_bundle(rx->trans, rx->bundle);
		rx->bundle = NULL;
	}
	if (rx->trans) {
		rxrpc_put_transport(rx->trans);
		rx->trans = NULL;
	}
	if (rx->local) {
		rxrpc_put_local(rx->local);
		rx->local = NULL;
	}

	key_put(rx->key);
	rx->key = NULL;
	key_put(rx->securities);
	rx->securities = NULL;
	sock_put(sk);

	_leave(" = 0");
	return 0;
}

/*
 * release an RxRPC BSD socket on close() or equivalent
 */
static int rxrpc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	_enter("%p{%p}", sock, sk);

	if (!sk)
		return 0;

	sock->sk = NULL;

	return rxrpc_release_sock(sk);
}

/*
 * RxRPC network protocol
 */
static const struct proto_ops rxrpc_rpc_ops = {
	.family		= PF_UNIX,
	.owner		= THIS_MODULE,
	.release	= rxrpc_release,
	.bind		= rxrpc_bind,
	.connect	= rxrpc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= rxrpc_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= rxrpc_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= rxrpc_setsockopt,
	.getsockopt	= sock_no_getsockopt,
	.sendmsg	= rxrpc_sendmsg,
	.recvmsg	= rxrpc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};

static struct proto rxrpc_proto = {
	.name		= "RXRPC",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct rxrpc_sock),
	.max_header	= sizeof(struct rxrpc_header),
};

static struct net_proto_family rxrpc_family_ops = {
	.family	= PF_RXRPC,
	.create	= rxrpc_create,
	.owner	= THIS_MODULE,
};

/*
 * initialise and register the RxRPC protocol
 */
static int __init af_rxrpc_init(void)
{
	struct sk_buff *dummy_skb;
	int ret = -1;

	BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > sizeof(dummy_skb->cb));

	rxrpc_epoch = htonl(get_seconds());

	ret = -ENOMEM;
	rxrpc_call_jar = kmem_cache_create(
		"rxrpc_call_jar", sizeof(struct rxrpc_call), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!rxrpc_call_jar) {
		printk(KERN_NOTICE "RxRPC: Failed to allocate call jar\n");
		goto error_call_jar;
	}

	rxrpc_workqueue = create_workqueue("krxrpcd");
	if (!rxrpc_workqueue) {
		printk(KERN_NOTICE "RxRPC: Failed to allocate work queue\n");
		goto error_work_queue;
	}

	ret = proto_register(&rxrpc_proto, 1);
	if (ret < 0) {
		printk(KERN_CRIT "RxRPC: Cannot register protocol\n");
		goto error_proto;
	}

	ret = sock_register(&rxrpc_family_ops);
	if (ret < 0) {
		printk(KERN_CRIT "RxRPC: Cannot register socket family\n");
		goto error_sock;
	}

	ret = register_key_type(&key_type_rxrpc);
	if (ret < 0) {
		printk(KERN_CRIT "RxRPC: Cannot register client key type\n");
		goto error_key_type;
	}

	ret = register_key_type(&key_type_rxrpc_s);
	if (ret < 0) {
		printk(KERN_CRIT "RxRPC: Cannot register server key type\n");
		goto error_key_type_s;
	}

#ifdef CONFIG_PROC_FS
	proc_net_fops_create(&init_net, "rxrpc_calls", 0, &rxrpc_call_seq_fops);
	proc_net_fops_create(&init_net, "rxrpc_conns", 0,
			     &rxrpc_connection_seq_fops);
#endif
	return 0;

error_key_type_s:
	unregister_key_type(&key_type_rxrpc);
error_key_type:
	sock_unregister(PF_RXRPC);
error_sock:
	proto_unregister(&rxrpc_proto);
error_proto:
	destroy_workqueue(rxrpc_workqueue);
error_work_queue:
	kmem_cache_destroy(rxrpc_call_jar);
error_call_jar:
	return ret;
}

/*
 * unregister the RxRPC protocol
 */
static void __exit af_rxrpc_exit(void)
{
	_enter("");
	unregister_key_type(&key_type_rxrpc_s);
	unregister_key_type(&key_type_rxrpc);
	sock_unregister(PF_RXRPC);
	proto_unregister(&rxrpc_proto);
	rxrpc_destroy_all_calls();
	rxrpc_destroy_all_connections();
	rxrpc_destroy_all_transports();
	rxrpc_destroy_all_peers();
	rxrpc_destroy_all_locals();

	ASSERTCMP(atomic_read(&rxrpc_n_skbs), ==, 0);

	_debug("flush scheduled work");
	flush_workqueue(rxrpc_workqueue);
	proc_net_remove(&init_net, "rxrpc_conns");
	proc_net_remove(&init_net, "rxrpc_calls");
	destroy_workqueue(rxrpc_workqueue);
	kmem_cache_destroy(rxrpc_call_jar);
	_leave("");
}

module_init(af_rxrpc_init);
module_exit(af_rxrpc_exit);