/* RxRPC remote transport endpoint management
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include <net/route.h>
#include "ar-internal.h"
static LIST_HEAD(rxrpc_peers);
static DEFINE_RWLOCK(rxrpc_peer_lock);
static DECLARE_WAIT_QUEUE_HEAD(rxrpc_peer_wq);

static void rxrpc_destroy_peer(struct work_struct *work);

/*
 * assess the MTU size for the network interface through which this peer is
 * reached
 */
static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
{
	struct rtable *rt;

	peer->if_mtu = 1500;
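
	/* Do a dummy route lookup purely to discover the MTU on the path to
	 * this peer (the UDP ports only serve to complete the flow
	 * description); the Ethernet-sized default above is kept on failure.
	 */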
	rt = ip_route_output_ports(&init_net, NULL,
				   peer->srx.transport.sin.sin_addr.s_addr, 0,
				   htons(7000), htons(7001),
				   IPPROTO_UDP, 0, 0);
	if (IS_ERR(rt)) {
		_leave(" [route err %ld]", PTR_ERR(rt));
		return;
	}

	peer->if_mtu = dst_mtu(&rt->dst);
	dst_release(&rt->dst);

	_leave(" [if_mtu %u]", peer->if_mtu);
}

/*
 * allocate a new peer
 */
static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
					   gfp_t gfp)
{
	struct rxrpc_peer *peer;

	_enter("");

	peer = kzalloc(sizeof(struct rxrpc_peer), gfp);
	if (peer) {
		INIT_WORK(&peer->destroyer, &rxrpc_destroy_peer);
		INIT_LIST_HEAD(&peer->link);
		INIT_LIST_HEAD(&peer->error_targets);
		spin_lock_init(&peer->lock);
		atomic_set(&peer->usage, 1);
		peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
		memcpy(&peer->srx, srx, sizeof(*srx));

		rxrpc_assess_MTU_size(peer);
		peer->mtu = peer->if_mtu;
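
		/* Work out how much usable payload fits in each packet once
		 * the IP, UDP and RxRPC headers have been accounted for; only
		 * UDP-over-IPv4 transports are supported here.
		 */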
		if (srx->transport.family == AF_INET) {
			peer->hdrsize = sizeof(struct iphdr);
			switch (srx->transport_type) {
			case SOCK_DGRAM:
				peer->hdrsize += sizeof(struct udphdr);
				break;
			default:
				BUG();
				break;
			}
		} else {
			BUG();
		}

		peer->hdrsize += sizeof(struct rxrpc_header);
		peer->maxdata = peer->mtu - peer->hdrsize;
	}

	_leave(" = %p", peer);
	return peer;
}

/*
 * obtain a remote transport endpoint for the specified address
 */
struct rxrpc_peer *rxrpc_get_peer(struct sockaddr_rxrpc *srx, gfp_t gfp)
{
	struct rxrpc_peer *peer, *candidate;
	const char *new = "old";
	int usage;

	_enter("{%d,%d,%pI4+%hu}",
	       srx->transport_type,
	       srx->transport_len,
	       &srx->transport.sin.sin_addr,
	       ntohs(srx->transport.sin.sin_port));

	/* search the peer list first */
	read_lock_bh(&rxrpc_peer_lock);
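	/* A usage count of zero means a peer is already being destroyed, so
	 * such records must be skipped rather than reused.
	 */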
	list_for_each_entry(peer, &rxrpc_peers, link) {
		_debug("check PEER %d { u=%d t=%d l=%d }",
		       peer->debug_id,
		       atomic_read(&peer->usage),
		       peer->srx.transport_type,
		       peer->srx.transport_len);

		if (atomic_read(&peer->usage) > 0 &&
		    peer->srx.transport_type == srx->transport_type &&
		    peer->srx.transport_len == srx->transport_len &&
		    memcmp(&peer->srx.transport,
			   &srx->transport,
			   srx->transport_len) == 0)
			goto found_extant_peer;
	}
	read_unlock_bh(&rxrpc_peer_lock);

	/* not yet present - create a candidate for a new record, then redo
	 * the search under the write lock in case another task added a
	 * matching record whilst the lock was dropped
	 */
	candidate = rxrpc_alloc_peer(srx, gfp);
	if (!candidate) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	write_lock_bh(&rxrpc_peer_lock);

	list_for_each_entry(peer, &rxrpc_peers, link) {
		if (atomic_read(&peer->usage) > 0 &&
		    peer->srx.transport_type == srx->transport_type &&
		    peer->srx.transport_len == srx->transport_len &&
		    memcmp(&peer->srx.transport,
			   &srx->transport,
			   srx->transport_len) == 0)
			goto found_extant_second;
	}

	/* we can now add the new candidate to the list */
	peer = candidate;
	candidate = NULL;
	list_add_tail(&peer->link, &rxrpc_peers);
	write_unlock_bh(&rxrpc_peer_lock);
	new = "new";

success:
	_net("PEER %s %d {%d,%u,%pI4+%hu}",
	     new,
	     peer->debug_id,
	     peer->srx.transport_type,
	     peer->srx.transport.family,
	     &peer->srx.transport.sin.sin_addr,
	     ntohs(peer->srx.transport.sin.sin_port));

	_leave(" = %p {u=%d}", peer, atomic_read(&peer->usage));
	return peer;

	/* we found the peer in the list immediately */
found_extant_peer:
	usage = atomic_inc_return(&peer->usage);
	read_unlock_bh(&rxrpc_peer_lock);
	goto success;

	/* we found the peer on the second time through the list, so the
	 * unneeded candidate is discarded */
found_extant_second:
	usage = atomic_inc_return(&peer->usage);
	write_unlock_bh(&rxrpc_peer_lock);
	kfree(candidate);
	goto success;
}
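
/* Example caller pattern (an illustrative sketch, not code from this file):
 *
 *	peer = rxrpc_get_peer(&srx, GFP_KERNEL);
 *	if (IS_ERR(peer))
 *		return PTR_ERR(peer);
 *	... use the peer record ...
 *	rxrpc_put_peer(peer);
 */
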
/*
 * find the peer associated with a packet
 */
struct rxrpc_peer *rxrpc_find_peer(struct rxrpc_local *local,
				   __be32 addr, __be16 port)
{
	struct rxrpc_peer *peer;

	_enter("");

	/* search the peer list */
	read_lock_bh(&rxrpc_peer_lock);

	if (local->srx.transport.family == AF_INET &&
	    local->srx.transport_type == SOCK_DGRAM
	    ) {
		list_for_each_entry(peer, &rxrpc_peers, link) {
			if (atomic_read(&peer->usage) > 0 &&
			    peer->srx.transport_type == SOCK_DGRAM &&
			    peer->srx.transport.family == AF_INET &&
			    peer->srx.transport.sin.sin_port == port &&
			    peer->srx.transport.sin.sin_addr.s_addr == addr)
				goto found_UDP_peer;
		}

		goto new_UDP_peer;
	}

	read_unlock_bh(&rxrpc_peer_lock);
	_leave(" = -EAFNOSUPPORT");
	return ERR_PTR(-EAFNOSUPPORT);

found_UDP_peer:
	_net("Rx UDP DGRAM from peer %d", peer->debug_id);
	atomic_inc(&peer->usage);
	read_unlock_bh(&rxrpc_peer_lock);
	_leave(" = %p", peer);
	return peer;

new_UDP_peer:
	/* "peer" is just the loop cursor run off the end of the list here,
	 * not a valid record, so its debug_id must not be dereferenced.
	 */
	_net("Rx UDP DGRAM from NEW peer");
	read_unlock_bh(&rxrpc_peer_lock);
	_leave(" = -EBUSY [new]");
	return ERR_PTR(-EBUSY);
}

/*
 * release a remote transport endpoint
 */
void rxrpc_put_peer(struct rxrpc_peer *peer)
{
	_enter("%p{u=%d}", peer, atomic_read(&peer->usage));

	ASSERTCMP(atomic_read(&peer->usage), >, 0);

	if (likely(!atomic_dec_and_test(&peer->usage))) {
		_leave(" [in use]");
		return;
	}
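
	/* The last reference has gone, so hand destruction off to the work
	 * item set up in rxrpc_alloc_peer().
	 */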
	rxrpc_queue_work(&peer->destroyer);
	_leave("");
}

/*
 * destroy a remote transport endpoint
 */
static void rxrpc_destroy_peer(struct work_struct *work)
{
	struct rxrpc_peer *peer =
		container_of(work, struct rxrpc_peer, destroyer);

	_enter("%p{%d}", peer, atomic_read(&peer->usage));

	write_lock_bh(&rxrpc_peer_lock);
	list_del(&peer->link);
	write_unlock_bh(&rxrpc_peer_lock);

	_net("DESTROY PEER %d", peer->debug_id);
	kfree(peer);
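
	/* Kick anyone waiting in rxrpc_destroy_all_peers() once the last
	 * record has been removed from the list.
	 */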
	if (list_empty(&rxrpc_peers))
		wake_up_all(&rxrpc_peer_wq);

	_leave("");
}

/*
 * preemptively destroy all the peer records from a transport endpoint rather
 * than waiting for them to time out
 */
void __exit rxrpc_destroy_all_peers(void)
{
	DECLARE_WAITQUEUE(myself, current);

	_enter("");

	/* we simply have to wait for them to go away */
	if (!list_empty(&rxrpc_peers)) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&rxrpc_peer_wq, &myself);
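
		/* Each wakeup leaves this task runnable, so the state must be
		 * reset to TASK_UNINTERRUPTIBLE before rechecking the list.
		 */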
		while (!list_empty(&rxrpc_peers)) {
			schedule();
			set_current_state(TASK_UNINTERRUPTIBLE);
		}

		remove_wait_queue(&rxrpc_peer_wq, &myself);
		set_current_state(TASK_RUNNING);
	}

	_leave("");
}