/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 * Authors:	Lotsa people, from code originally in tcp
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#ifndef _INET_HASHTABLES_H
#define _INET_HASHTABLES_H

#include <linux/interrupt.h>
#include <linux/ipv6.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/inet_sock.h>
#include <net/sock.h>
#include <net/tcp_states.h>

#include <asm/atomic.h>
#include <asm/byteorder.h>

/* This is for all connections with a full identity, no wildcards.
 * One chain is dedicated to TIME_WAIT sockets.
 * I'll experiment with dynamic table growth later.
 */
struct inet_ehash_bucket {
	struct hlist_head chain;
	struct hlist_head twchain;
};

/* There are a few simple rules, which allow for local port reuse by
 * an application.  In essence:
 *
 *	1) Sockets bound to different interfaces may share a local port.
 *	   Failing that, goto test 2.
 *	2) If all sockets have sk->sk_reuse set, and none of them are in
 *	   TCP_LISTEN state, the port may be shared.
 *	   Failing that, goto test 3.
 *	3) If all sockets are bound to a specific inet_sk(sk)->rcv_saddr local
 *	   address, and none of them are the same, the port may be
 *	   shared.
 *	   Failing this, the port cannot be shared.
 *
 * The interesting point is test #2.  This is what an FTP server does
 * all day.  To optimize this case we use a specific flag bit defined
 * below.  As we add sockets to a bind bucket list, we perform a
 * check of: (newsk->sk_reuse && (newsk->sk_state != TCP_LISTEN))
 * As long as all sockets added to a bind bucket pass this test,
 * the flag bit will be set.
 * The resulting situation is that tcp_v[46]_verify_bind() can just check
 * for this flag bit: if it is set and the socket trying to bind has
 * sk->sk_reuse set, we don't even have to walk the owners list at all,
 * we return that it is ok to bind this socket to the requested local port.
 *
 * Sounds like a lot of work, but it is worth it.  In a more naive
 * implementation (ie. current FreeBSD etc.) the entire list of ports
 * must be walked for each data port opened by an ftp server.  Needless
 * to say, this does not scale at all.  With a couple thousand FTP
 * users logged onto your box, isn't it nice to know that new data
 * ports are created in O(1) time?  I thought so. ;-)	-DaveM
 */
struct inet_bind_bucket {
	unsigned short		port;
	signed short		fastreuse;
	struct hlist_node	node;
	struct hlist_head	owners;
};
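
/*
 * Illustrative sketch, not part of this header: this is roughly how the
 * bind path (inet_csk_get_port() in net/ipv4/inet_connection_sock.c)
 * keeps tb->fastreuse in sync with the rules above; the locking and the
 * port-search loop are omitted and the exact code may differ:
 *
 *	if (hlist_empty(&tb->owners)) {
 *		if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
 *			tb->fastreuse = 1;
 *		else
 *			tb->fastreuse = 0;
 *	} else if (tb->fastreuse &&
 *		   (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
 *		tb->fastreuse = 0;
 *
 * A later bind request with sk->sk_reuse set can then be granted the
 * port without walking tb->owners whenever tb->fastreuse is still set.
 */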

#define inet_bind_bucket_for_each(tb, node, head) \
	hlist_for_each_entry(tb, node, head, node)

struct inet_bind_hashbucket {
	spinlock_t		lock;
	struct hlist_head	chain;
};

/* This is for listening sockets, thus all sockets which possess wildcards. */
#define INET_LHTABLE_SIZE	32	/* Yes, really, this is all you need. */

struct inet_hashinfo {
	/* This is for sockets with full identity only.  Sockets here will
	 * always be without wildcards and will have the following invariant:
	 *
	 *          TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
	 *
	 * TIME_WAIT sockets use a separate chain (twchain).
	 */
	struct inet_ehash_bucket	*ehash;
	rwlock_t			*ehash_locks;
	unsigned int			ehash_size;
	unsigned int			ehash_locks_mask;

	/* Ok, let's try this, I give up, we do need a local binding
	 * TCP hash as well as the others for fast bind/connect.
	 */
	struct inet_bind_hashbucket	*bhash;

	unsigned int			bhash_size;
	/* Note : 4 bytes padding on 64 bit arches */

	/* All sockets in TCP_LISTEN state will be in here.  This is the only
	 * table where wildcard'd TCP sockets can exist.  Hash function here
	 * is just local port number.
	 */
	struct hlist_head		listening_hash[INET_LHTABLE_SIZE];

	/* All the above members are written once at bootup and
	 * never written again _or_ are predominantly read-access.
	 *
	 * Now align to a new cache line as all the following members
	 * are often dirty.
	 */
	rwlock_t			lhash_lock ____cacheline_aligned;
	atomic_t			lhash_users;
	wait_queue_head_t		lhash_wait;
	struct kmem_cache		*bind_bucket_cachep;
};
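
/*
 * Note (illustrative): the best-known instance of this structure is TCP's
 * global tcp_hashinfo (defined in net/ipv4/tcp_ipv4.c, declared in
 * net/tcp.h); DCCP keeps its own dccp_hashinfo.  Each instance is passed
 * explicitly to the helpers below.
 */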

static inline struct inet_ehash_bucket *inet_ehash_bucket(
	struct inet_hashinfo *hashinfo,
	unsigned int hash)
{
	return &hashinfo->ehash[hash & (hashinfo->ehash_size - 1)];
}

static inline rwlock_t *inet_ehash_lockp(
	struct inet_hashinfo *hashinfo,
	unsigned int hash)
{
	return &hashinfo->ehash_locks[hash & hashinfo->ehash_locks_mask];
}
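
/*
 * Worked example (illustrative, with assumed sizes): if ehash_size is
 * 65536 buckets but only 256 locks were allocated (ehash_locks_mask ==
 * 255), then hashes 0x1234 and 0x5634 select different buckets yet share
 * the same lock (index 0x34).  Each lock therefore guards a whole stripe
 * of buckets, keeping the lock array small while still spreading
 * contention.
 */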

static inline int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
{
	unsigned int i, size = 256;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif
	if (nr_pcpus >= 4)
		size = 512;
	if (nr_pcpus >= 8)
		size = 1024;
	if (nr_pcpus >= 16)
		size = 2048;
	if (nr_pcpus >= 32)
		size = 4096;
	if (sizeof(rwlock_t) != 0) {
#ifdef CONFIG_NUMA
		if (size * sizeof(rwlock_t) > PAGE_SIZE)
			hashinfo->ehash_locks = vmalloc(size * sizeof(rwlock_t));
		else
#endif
		hashinfo->ehash_locks = kmalloc(size * sizeof(rwlock_t),
						GFP_KERNEL);
		if (!hashinfo->ehash_locks)
			return ENOMEM;
		for (i = 0; i < size; i++)
			rwlock_init(&hashinfo->ehash_locks[i]);
	}
	hashinfo->ehash_locks_mask = size - 1;
	return 0;
}
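
/*
 * Sizing example (illustrative, assuming a non-debug 8-byte rwlock_t):
 * a 32-CPU machine gets 4096 locks, i.e. 32 KB of rwlocks; on NUMA that
 * exceeds a 4 KB PAGE_SIZE, so the array comes from vmalloc(), while the
 * 256-lock default (2 KB) fits in a kmalloc() allocation.  Under
 * CONFIG_PROVE_LOCKING the CPU count is pinned to 2 so the table stays
 * small, presumably to keep lockdep's per-lock tracking overhead down.
 */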

static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
{
	if (hashinfo->ehash_locks) {
#ifdef CONFIG_NUMA
		unsigned int size = (hashinfo->ehash_locks_mask + 1) *
					sizeof(rwlock_t);
		if (size > PAGE_SIZE)
			vfree(hashinfo->ehash_locks);
		else
#endif
		kfree(hashinfo->ehash_locks);
		hashinfo->ehash_locks = NULL;
	}
}

extern struct inet_bind_bucket *
	inet_bind_bucket_create(struct kmem_cache *cachep,
				struct inet_bind_hashbucket *head,
				const unsigned short snum);
extern void inet_bind_bucket_destroy(struct kmem_cache *cachep,
				     struct inet_bind_bucket *tb);

static inline int inet_bhashfn(const __u16 lport, const int bhash_size)
{
	return lport & (bhash_size - 1);
}

extern void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
			   const unsigned short snum);

/* These can have wildcards, don't try too hard. */
static inline int inet_lhashfn(const unsigned short num)
{
	return num & (INET_LHTABLE_SIZE - 1);
}

static inline int inet_sk_listen_hashfn(const struct sock *sk)
{
	return inet_lhashfn(inet_sk(sk)->num);
}

/* Caller must disable local BH processing. */
static inline void __inet_inherit_port(struct inet_hashinfo *table,
				       struct sock *sk, struct sock *child)
{
	const int bhash = inet_bhashfn(inet_sk(child)->num, table->bhash_size);
	struct inet_bind_hashbucket *head = &table->bhash[bhash];
	struct inet_bind_bucket *tb;

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	sk_add_bind_node(child, &tb->owners);
	inet_csk(child)->icsk_bind_hash = tb;
	spin_unlock(&head->lock);
}

static inline void inet_inherit_port(struct inet_hashinfo *table,
				     struct sock *sk, struct sock *child)
{
	local_bh_disable();
	__inet_inherit_port(table, sk, child);
	local_bh_enable();
}
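
/*
 * Usage sketch (illustrative): when a listening socket accepts a
 * connection, the protocol's syn_recv_sock path typically makes the child
 * share the parent's bind bucket so the local port stays reserved,
 * roughly:
 *
 *	__inet_hash(&tcp_hashinfo, newsk, 0);
 *	__inet_inherit_port(&tcp_hashinfo, sk, newsk);
 *
 * Here sk is the listener and newsk the child, as in the IPv4 TCP code;
 * other callers may differ.
 */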

extern void inet_put_port(struct inet_hashinfo *table, struct sock *sk);

extern void inet_listen_wlock(struct inet_hashinfo *hashinfo);

/*
 * - We may sleep inside this lock.
 * - If sleeping is not required (or called from BH),
 *   use plain read_(un)lock(&inet_hashinfo.lhash_lock).
 */
static inline void inet_listen_lock(struct inet_hashinfo *hashinfo)
{
	/* read_lock synchronizes with candidate writers */
	read_lock(&hashinfo->lhash_lock);
	atomic_inc(&hashinfo->lhash_users);
	read_unlock(&hashinfo->lhash_lock);
}

static inline void inet_listen_unlock(struct inet_hashinfo *hashinfo)
{
	if (atomic_dec_and_test(&hashinfo->lhash_users))
		wake_up(&hashinfo->lhash_wait);
}
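
/*
 * Usage sketch (illustrative, names here are not taken from this header):
 * a reader that may sleep while walking the listening hash brackets the
 * walk with the pair above, roughly what the TCP /proc iterator does:
 *
 *	inet_listen_lock(&tcp_hashinfo);
 *	sk_for_each(sk, node, &tcp_hashinfo.listening_hash[bucket]) {
 *		...
 *	}
 *	inet_listen_unlock(&tcp_hashinfo);
 *
 * Writers go through inet_listen_wlock(), which takes lhash_lock for
 * writing and waits (on lhash_wait) for lhash_users to drain to zero.
 */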

static inline void __inet_hash(struct inet_hashinfo *hashinfo,
			       struct sock *sk, const int listen_possible)
{
	struct hlist_head *list;
	rwlock_t *lock;

	BUG_TRAP(sk_unhashed(sk));
	if (listen_possible && sk->sk_state == TCP_LISTEN) {
		list = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
		lock = &hashinfo->lhash_lock;
		inet_listen_wlock(hashinfo);
	} else {
		struct inet_ehash_bucket *head;
		sk->sk_hash = inet_sk_ehashfn(sk);
		head = inet_ehash_bucket(hashinfo, sk->sk_hash);
		list = &head->chain;
		lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
		write_lock(lock);
	}
	__sk_add_node(sk, list);
	sock_prot_inc_use(sk->sk_prot);
	write_unlock(lock);
	if (listen_possible && sk->sk_state == TCP_LISTEN)
		wake_up(&hashinfo->lhash_wait);
}

static inline void inet_hash(struct inet_hashinfo *hashinfo, struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		local_bh_disable();
		__inet_hash(hashinfo, sk, 1);
		local_bh_enable();
	}
}

static inline void inet_unhash(struct inet_hashinfo *hashinfo, struct sock *sk)
{
	rwlock_t *lock;

	if (sk_unhashed(sk))
		goto out;

	if (sk->sk_state == TCP_LISTEN) {
		local_bh_disable();
		inet_listen_wlock(hashinfo);
		lock = &hashinfo->lhash_lock;
	} else {
		lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
		write_lock_bh(lock);
	}

	if (__sk_del_node_init(sk))
		sock_prot_dec_use(sk->sk_prot);
	write_unlock_bh(lock);
out:
	if (sk->sk_state == TCP_LISTEN)
		wake_up(&hashinfo->lhash_wait);
}

extern struct sock *__inet_lookup_listener(struct inet_hashinfo *hashinfo,
					   const __be32 daddr,
					   const unsigned short hnum,
					   const int dif);

static inline struct sock *inet_lookup_listener(struct inet_hashinfo *hashinfo,
						__be32 daddr, __be16 dport, int dif)
{
	return __inet_lookup_listener(hashinfo, daddr, ntohs(dport), dif);
}

/* Socket demux engine toys. */
/* What happens here is ugly; there's a pair of adjacent fields in
   struct inet_sock; __be16 dport followed by __u16 num.  We want to
   search by pair, so we combine the keys into a single 32bit value
   and compare with 32bit value read from &...->dport.  Let's at least
   make sure that it's not mixed with anything else...
   On 64bit targets we combine comparisons with pair of adjacent __be32
   fields in the same way.
*/
typedef __u32 __bitwise __portpair;
#ifdef __BIG_ENDIAN
#define INET_COMBINED_PORTS(__sport, __dport) \
	((__force __portpair)(((__force __u32)(__be16)(__sport) << 16) | (__u32)(__dport)))
#else /* __LITTLE_ENDIAN */
#define INET_COMBINED_PORTS(__sport, __dport) \
	((__force __portpair)(((__u32)(__dport) << 16) | (__force __u32)(__be16)(__sport)))
#endif
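
/*
 * Worked example (illustrative): in struct inet_sock the __be16 dport
 * field is immediately followed by the __u16 num field.  On a
 * little-endian CPU a 32-bit load from &inet_sk(sk)->dport therefore
 * yields (num << 16) | dport, which is exactly the value that
 * INET_COMBINED_PORTS(sport, hnum) builds from the lookup keys (sport in
 * network byte order, hnum the host-order local port); on big-endian the
 * two halves are simply swapped.  One 32-bit compare thus checks both
 * ports at once.
 */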

#if (BITS_PER_LONG == 64)
typedef __u64 __bitwise __addrpair;
#ifdef __BIG_ENDIAN
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const __addrpair __name = (__force __addrpair) ( \
				   (((__force __u64)(__be32)(__saddr)) << 32) | \
				   ((__force __u64)(__be32)(__daddr)));
#else /* __LITTLE_ENDIAN */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const __addrpair __name = (__force __addrpair) ( \
				   (((__force __u64)(__be32)(__daddr)) << 32) | \
				   ((__force __u64)(__be32)(__saddr)));
#endif /* __BIG_ENDIAN */
#define INET_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif) \
	(((__sk)->sk_hash == (__hash)) && \
	 ((*((__addrpair *)&(inet_sk(__sk)->daddr))) == (__cookie)) && \
	 ((*((__portpair *)&(inet_sk(__sk)->dport))) == (__ports)) && \
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#define INET_TW_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif) \
	(((__sk)->sk_hash == (__hash)) && \
	 ((*((__addrpair *)&(inet_twsk(__sk)->tw_daddr))) == (__cookie)) && \
	 ((*((__portpair *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) && \
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#else /* 32-bit arch */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr)
#define INET_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif) \
	(((__sk)->sk_hash == (__hash)) && \
	 (inet_sk(__sk)->daddr == (__saddr)) && \
	 (inet_sk(__sk)->rcv_saddr == (__daddr)) && \
	 ((*((__portpair *)&(inet_sk(__sk)->dport))) == (__ports)) && \
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#define INET_TW_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif) \
	(((__sk)->sk_hash == (__hash)) && \
	 (inet_twsk(__sk)->tw_daddr == (__saddr)) && \
	 (inet_twsk(__sk)->tw_rcv_saddr == (__daddr)) && \
	 ((*((__portpair *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) && \
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#endif /* 64-bit arch */

/*
 * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need
 * not check it for lookups anymore, thanks Alexey. -DaveM
 *
 * Local BH must be disabled here.
 */
static inline struct sock *
	__inet_lookup_established(struct inet_hashinfo *hashinfo,
				  const __be32 saddr, const __be16 sport,
				  const __be32 daddr, const u16 hnum,
				  const int dif)
{
	INET_ADDR_COOKIE(acookie, saddr, daddr)
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
	struct sock *sk;
	const struct hlist_node *node;
	/* Optimize here for direct hit, only listening connections can
	 * have wildcards anyways.
	 */
	unsigned int hash = inet_ehashfn(daddr, hnum, saddr, sport);
	struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash);
	rwlock_t *lock = inet_ehash_lockp(hashinfo, hash);

	prefetch(head->chain.first);
	read_lock(lock);
	sk_for_each(sk, node, &head->chain) {
		if (INET_MATCH(sk, hash, acookie, saddr, daddr, ports, dif))
			goto hit; /* You sunk my battleship! */
	}

	/* Must check for a TIME_WAIT'er before going to listener hash. */
	sk_for_each(sk, node, &head->twchain) {
		if (INET_TW_MATCH(sk, hash, acookie, saddr, daddr, ports, dif))
			goto hit;
	}
	sk = NULL;
out:
	read_unlock(lock);
	return sk;
hit:
	sock_hold(sk);
	goto out;
}

static inline struct sock *
	inet_lookup_established(struct inet_hashinfo *hashinfo,
				const __be32 saddr, const __be16 sport,
				const __be32 daddr, const __be16 dport,
				const int dif)
{
	return __inet_lookup_established(hashinfo, saddr, sport, daddr,
					 ntohs(dport), dif);
}

static inline struct sock *__inet_lookup(struct inet_hashinfo *hashinfo,
					 const __be32 saddr, const __be16 sport,
					 const __be32 daddr, const __be16 dport,
					 const int dif)
{
	u16 hnum = ntohs(dport);
	struct sock *sk = __inet_lookup_established(hashinfo, saddr, sport,
						    daddr, hnum, dif);

	return sk ? : __inet_lookup_listener(hashinfo, daddr, hnum, dif);
}

static inline struct sock *inet_lookup(struct inet_hashinfo *hashinfo,
				       const __be32 saddr, const __be16 sport,
				       const __be32 daddr, const __be16 dport,
				       const int dif)
{
	struct sock *sk;

	local_bh_disable();
	sk = __inet_lookup(hashinfo, saddr, sport, daddr, dport, dif);
	local_bh_enable();

	return sk;
}
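
/*
 * Usage sketch (illustrative): the TCP receive path demultiplexes an
 * incoming segment roughly like this (field and helper names as used by
 * the IPv4 code):
 *
 *	sk = __inet_lookup(&tcp_hashinfo, ip_hdr(skb)->saddr, th->source,
 *			   ip_hdr(skb)->daddr, th->dest, inet_iif(skb));
 *
 * i.e. established (and TIME_WAIT) sockets are tried first and the
 * listening hash is only consulted on a miss.  The BH-disabling
 * inet_lookup() wrapper above is for callers outside softirq context.
 */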

extern int inet_hash_connect(struct inet_timewait_death_row *death_row,
			     struct sock *sk);
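
/*
 * Usage note (illustrative): connect()-side callers such as the IPv4 TCP
 * code use this to pick an ephemeral local port (reusing ports held only
 * by TIME_WAIT sockets when that is safe) and to hash the socket into the
 * established table, roughly:
 *
 *	err = inet_hash_connect(&tcp_death_row, sk);
 */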

#endif /* _INET_HASHTABLES_H */