ar-local.c

/* AF_RXRPC local endpoint management
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

static LIST_HEAD(rxrpc_locals);
DEFINE_RWLOCK(rxrpc_local_lock);
static DECLARE_RWSEM(rxrpc_local_sem);
static DECLARE_WAIT_QUEUE_HEAD(rxrpc_local_wq);

static void rxrpc_destroy_local(struct work_struct *work);
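
/*
 * Local endpoints are kept on the rxrpc_locals list under rxrpc_local_lock.
 * rxrpc_local_sem serialises endpoint creation against destruction, and
 * rxrpc_local_wq is used at module unload to wait for the list to drain.
 * Each endpoint carries a usage count; when it falls to zero, destruction is
 * deferred to the endpoint's destroyer work item.
 */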

/*
 * allocate a new local
 */
static
struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
{
	struct rxrpc_local *local;

	local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
	if (local) {
		INIT_WORK(&local->destroyer, &rxrpc_destroy_local);
		INIT_WORK(&local->acceptor, &rxrpc_accept_incoming_calls);
		INIT_WORK(&local->rejecter, &rxrpc_reject_packets);
		INIT_LIST_HEAD(&local->services);
		INIT_LIST_HEAD(&local->link);
		init_rwsem(&local->defrag_sem);
		skb_queue_head_init(&local->accept_queue);
		skb_queue_head_init(&local->reject_queue);
		spin_lock_init(&local->lock);
		rwlock_init(&local->services_lock);
		atomic_set(&local->usage, 1);
		local->debug_id = atomic_inc_return(&rxrpc_debug_id);
		memcpy(&local->srx, srx, sizeof(*srx));
	}

	_leave(" = %p", local);
	return local;
}
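
/*
 * A freshly allocated endpoint has a usage count of one, but no UDP socket
 * and no presence on rxrpc_locals yet; rxrpc_create_local() supplies both.
 */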

/*
 * create the local socket
 * - must be called with rxrpc_local_sem writelocked
 */
static int rxrpc_create_local(struct rxrpc_local *local)
{
	struct sock *sock;
	int ret, opt;

	_enter("%p{%d}", local, local->srx.transport_type);

	/* create a socket to represent the local endpoint */
	ret = sock_create_kern(PF_INET, local->srx.transport_type, IPPROTO_UDP,
			       &local->socket);
	if (ret < 0) {
		_leave(" = %d [socket]", ret);
		return ret;
	}

	/* if a local address was supplied then bind it */
	if (local->srx.transport_len > sizeof(sa_family_t)) {
		_debug("bind");
		ret = kernel_bind(local->socket,
				  (struct sockaddr *) &local->srx.transport,
				  local->srx.transport_len);
		if (ret < 0) {
			_debug("bind failed");
			goto error;
		}
	}

	/* we want to receive ICMP errors */
	opt = 1;
	ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
				(char *) &opt, sizeof(opt));
	if (ret < 0) {
		_debug("setsockopt failed");
		goto error;
	}

	/* we want to set the don't fragment bit */
	opt = IP_PMTUDISC_DO;
	ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
				(char *) &opt, sizeof(opt));
	if (ret < 0) {
		_debug("setsockopt failed");
		goto error;
	}

	write_lock_bh(&rxrpc_local_lock);
	list_add(&local->link, &rxrpc_locals);
	write_unlock_bh(&rxrpc_local_lock);

	/* set the socket up */
	sock = local->socket->sk;
	sock->sk_user_data = local;
	sock->sk_data_ready = rxrpc_data_ready;
	sock->sk_error_report = rxrpc_UDP_error_report;
	_leave(" = 0");
	return 0;

error:
	kernel_sock_shutdown(local->socket, SHUT_RDWR);
	local->socket->sk->sk_user_data = NULL;
	sock_release(local->socket);
	local->socket = NULL;

	_leave(" = %d", ret);
	return ret;
}
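
/*
 * Note that every failure exit in rxrpc_create_local() occurs before the
 * endpoint is added to rxrpc_locals, so the error path only has to shut down
 * and release the socket rather than unlink the endpoint again.
 */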

/*
 * look up or create a local endpoint using the specified UDP address
 */
struct rxrpc_local *rxrpc_lookup_local(struct sockaddr_rxrpc *srx)
{
	struct rxrpc_local *local;
	int ret;

	_enter("{%d,%u,%pI4+%hu}",
	       srx->transport_type,
	       srx->transport.family,
	       &srx->transport.sin.sin_addr,
	       ntohs(srx->transport.sin.sin_port));

	down_write(&rxrpc_local_sem);

	/* see if we have a suitable local endpoint already */
	read_lock_bh(&rxrpc_local_lock);

	list_for_each_entry(local, &rxrpc_locals, link) {
		_debug("CMP {%d,%u,%pI4+%hu}",
		       local->srx.transport_type,
		       local->srx.transport.family,
		       &local->srx.transport.sin.sin_addr,
		       ntohs(local->srx.transport.sin.sin_port));

		if (local->srx.transport_type != srx->transport_type ||
		    local->srx.transport.family != srx->transport.family)
			continue;

		switch (srx->transport.family) {
		case AF_INET:
			if (local->srx.transport.sin.sin_port !=
			    srx->transport.sin.sin_port)
				continue;
			if (memcmp(&local->srx.transport.sin.sin_addr,
				   &srx->transport.sin.sin_addr,
				   sizeof(struct in_addr)) != 0)
				continue;
			goto found_local;

		default:
			BUG();
		}
	}

	read_unlock_bh(&rxrpc_local_lock);

	/* we didn't find one, so we need to create one */
	local = rxrpc_alloc_local(srx);
	if (!local) {
		up_write(&rxrpc_local_sem);
		return ERR_PTR(-ENOMEM);
	}

	ret = rxrpc_create_local(local);
	if (ret < 0) {
		up_write(&rxrpc_local_sem);
		kfree(local);
		_leave(" = %d", ret);
		return ERR_PTR(ret);
	}

	up_write(&rxrpc_local_sem);

	_net("LOCAL new %d {%d,%u,%pI4+%hu}",
	     local->debug_id,
	     local->srx.transport_type,
	     local->srx.transport.family,
	     &local->srx.transport.sin.sin_addr,
	     ntohs(local->srx.transport.sin.sin_port));

	_leave(" = %p [new]", local);
	return local;

found_local:
	rxrpc_get_local(local);
	read_unlock_bh(&rxrpc_local_lock);
	up_write(&rxrpc_local_sem);

	_net("LOCAL old %d {%d,%u,%pI4+%hu}",
	     local->debug_id,
	     local->srx.transport_type,
	     local->srx.transport.family,
	     &local->srx.transport.sin.sin_addr,
	     ntohs(local->srx.transport.sin.sin_port));

	_leave(" = %p [reuse]", local);
	return local;
}
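
/*
 * Illustrative usage only (this caller is a sketch, not code from this file):
 * a binding operation might obtain and later drop an endpoint like so:
 *
 *	struct rxrpc_local *local;
 *
 *	local = rxrpc_lookup_local(&srx);
 *	if (IS_ERR(local))
 *		return PTR_ERR(local);
 *	...
 *	rxrpc_put_local(local);
 *
 * rxrpc_lookup_local() returns the endpoint with a usage reference held
 * (newly allocated endpoints start at one, reused ones are bumped with
 * rxrpc_get_local()), so each successful lookup must be balanced by a call
 * to rxrpc_put_local().
 */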

/*
 * release a local endpoint
 */
void rxrpc_put_local(struct rxrpc_local *local)
{
	_enter("%p{u=%d}", local, atomic_read(&local->usage));

	ASSERTCMP(atomic_read(&local->usage), >, 0);

	/* to prevent a race, the decrement and the dequeue must be effectively
	 * atomic */
	write_lock_bh(&rxrpc_local_lock);
	if (unlikely(atomic_dec_and_test(&local->usage))) {
		_debug("destroy local");
		rxrpc_queue_work(&local->destroyer);
	}
	write_unlock_bh(&rxrpc_local_lock);
	_leave("");
}
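
/*
 * Taking rxrpc_local_lock around the decrement above means the final put
 * cannot race with rxrpc_lookup_local() taking a new reference: the lookup
 * bumps the count under the same lock, and rxrpc_destroy_local() rechecks
 * the count under the lock before tearing the endpoint down.
 */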

/*
 * destroy a local endpoint
 */
static void rxrpc_destroy_local(struct work_struct *work)
{
	struct rxrpc_local *local =
		container_of(work, struct rxrpc_local, destroyer);

	_enter("%p{%d}", local, atomic_read(&local->usage));

	down_write(&rxrpc_local_sem);

	write_lock_bh(&rxrpc_local_lock);
	if (atomic_read(&local->usage) > 0) {
		write_unlock_bh(&rxrpc_local_lock);
		/* the semaphore was taken for writing, so it must be released
		 * the same way */
		up_write(&rxrpc_local_sem);
		_leave(" [resurrected]");
		return;
	}

	list_del(&local->link);
	local->socket->sk->sk_user_data = NULL;
	write_unlock_bh(&rxrpc_local_lock);

	downgrade_write(&rxrpc_local_sem);

	ASSERT(list_empty(&local->services));
	ASSERT(!work_pending(&local->acceptor));
	ASSERT(!work_pending(&local->rejecter));

	/* finish cleaning up the local descriptor */
	rxrpc_purge_queue(&local->accept_queue);
	rxrpc_purge_queue(&local->reject_queue);
	kernel_sock_shutdown(local->socket, SHUT_RDWR);
	sock_release(local->socket);

	up_read(&rxrpc_local_sem);

	_net("DESTROY LOCAL %d", local->debug_id);
	kfree(local);

	if (list_empty(&rxrpc_locals))
		wake_up_all(&rxrpc_local_wq);

	_leave("");
}
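
/*
 * The wake_up_all() above pairs with rxrpc_destroy_all_locals() below:
 * module unload simply sleeps on rxrpc_local_wq until the last endpoint has
 * gone and rxrpc_locals is empty.
 */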

/*
 * wait for all outstanding local endpoints to be destroyed before the
 * module is removed
 */
void __exit rxrpc_destroy_all_locals(void)
{
	DECLARE_WAITQUEUE(myself, current);

	_enter("");

	/* we simply have to wait for them to go away */
	if (!list_empty(&rxrpc_locals)) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&rxrpc_local_wq, &myself);

		while (!list_empty(&rxrpc_locals)) {
			schedule();
			set_current_state(TASK_UNINTERRUPTIBLE);
		}

		remove_wait_queue(&rxrpc_local_wq, &myself);
		set_current_state(TASK_RUNNING);
	}

	_leave("");
}