inet_hashtables.c

/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic INET transport hashtables
 *
 * Authors:	Lotsa people, from code originally in tcp
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/secure_seq.h>
#include <net/ip.h>

static unsigned int inet_ehashfn(struct net *net, const __be32 laddr,
                                 const __u16 lport, const __be32 faddr,
                                 const __be16 fport)
{
        static u32 inet_ehash_secret __read_mostly;

        net_get_random_once(&inet_ehash_secret, sizeof(inet_ehash_secret));

        return __inet_ehashfn(laddr, lport, faddr, fport,
                              inet_ehash_secret + net_hash_mix(net));
}

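/*
 * The established-table hash keys the full 4-tuple, so chain placement is
 * roughly:
 *
 *	slot = inet_ehashfn(net, laddr, lport, faddr, fport) & ehash_mask;
 *
 * The per-boot secret (lazily initialized via net_get_random_once()) plus
 * net_hash_mix(net) keeps slot selection unpredictable to remote peers and
 * distinct per network namespace.  __inet_ehashfn() is assumed here to be
 * the shared jhash-based helper of this kernel generation.
 */
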
static unsigned int inet_sk_ehashfn(const struct sock *sk)
{
        const struct inet_sock *inet = inet_sk(sk);
        const __be32 laddr = inet->inet_rcv_saddr;
        const __u16 lport = inet->inet_num;
        const __be32 faddr = inet->inet_daddr;
        const __be16 fport = inet->inet_dport;
        struct net *net = sock_net(sk);

        return inet_ehashfn(net, laddr, lport, faddr, fport);
}

/*
 * Allocate and initialize a new local port bind bucket.
 * The bindhash mutex for snum's hash chain must be held here.
 */
struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
                                                 struct net *net,
                                                 struct inet_bind_hashbucket *head,
                                                 const unsigned short snum)
{
        struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);

        if (tb != NULL) {
                write_pnet(&tb->ib_net, hold_net(net));
                tb->port = snum;
                tb->fastreuse = 0;
                tb->fastreuseport = 0;
                tb->num_owners = 0;
                INIT_HLIST_HEAD(&tb->owners);
                hlist_add_head(&tb->node, &head->chain);
        }
        return tb;
}

/*
 * Caller must hold hashbucket lock for this tb with local BH disabled
 */
void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb)
{
        if (hlist_empty(&tb->owners)) {
                __hlist_del(&tb->node);
                release_net(ib_net(tb));
                kmem_cache_free(cachep, tb);
        }
}

void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
                    const unsigned short snum)
{
        struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;

        atomic_inc(&hashinfo->bsockets);

        inet_sk(sk)->inet_num = snum;
        sk_add_bind_node(sk, &tb->owners);
        tb->num_owners++;
        inet_csk(sk)->icsk_bind_hash = tb;
}

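/*
 * Bind buckets hash on the local port only (via inet_bhashfn() below): every
 * socket bound to that port in a given namespace hangs off tb->owners, and
 * icsk_bind_hash points back at the bucket so __inet_put_port() can release
 * the port without recomputing anything beyond the bhash slot.
 */
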
/*
 * Get rid of any references to a local port held by the given sock.
 */
static void __inet_put_port(struct sock *sk)
{
        struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
        const int bhash = inet_bhashfn(sock_net(sk), inet_sk(sk)->inet_num,
                        hashinfo->bhash_size);
        struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
        struct inet_bind_bucket *tb;

        atomic_dec(&hashinfo->bsockets);

        spin_lock(&head->lock);
        tb = inet_csk(sk)->icsk_bind_hash;
        __sk_del_bind_node(sk);
        tb->num_owners--;
        inet_csk(sk)->icsk_bind_hash = NULL;
        inet_sk(sk)->inet_num = 0;
        inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
        spin_unlock(&head->lock);
}

void inet_put_port(struct sock *sk)
{
        local_bh_disable();
        __inet_put_port(sk);
        local_bh_enable();
}
EXPORT_SYMBOL(inet_put_port);

int __inet_inherit_port(struct sock *sk, struct sock *child)
{
        struct inet_hashinfo *table = sk->sk_prot->h.hashinfo;
        unsigned short port = inet_sk(child)->inet_num;
        const int bhash = inet_bhashfn(sock_net(sk), port,
                        table->bhash_size);
        struct inet_bind_hashbucket *head = &table->bhash[bhash];
        struct inet_bind_bucket *tb;

        spin_lock(&head->lock);
        tb = inet_csk(sk)->icsk_bind_hash;
        if (tb->port != port) {
                /* NOTE: using tproxy and redirecting skbs to a proxy
                 * on a different listener port breaks the assumption
                 * that the listener socket's icsk_bind_hash is the same
                 * as that of the child socket. We have to look up or
                 * create a new bind bucket for the child here. */
                inet_bind_bucket_for_each(tb, &head->chain) {
                        if (net_eq(ib_net(tb), sock_net(sk)) &&
                            tb->port == port)
                                break;
                }
                if (!tb) {
                        tb = inet_bind_bucket_create(table->bind_bucket_cachep,
                                                     sock_net(sk), head, port);
                        if (!tb) {
                                spin_unlock(&head->lock);
                                return -ENOMEM;
                        }
                }
        }
        inet_bind_hash(child, tb, port);
        spin_unlock(&head->lock);

        return 0;
}
EXPORT_SYMBOL_GPL(__inet_inherit_port);

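/*
 * __inet_inherit_port() is what lets a freshly created child socket keep the
 * listener's local port without going through another full port allocation;
 * it is normally reached from the protocol's syn_recv_sock path (for TCP,
 * tcp_v4_syn_recv_sock), though that caller lives outside this file.
 */
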
static inline int compute_score(struct sock *sk, struct net *net,
                                const unsigned short hnum, const __be32 daddr,
                                const int dif)
{
        int score = -1;
        struct inet_sock *inet = inet_sk(sk);

        if (net_eq(sock_net(sk), net) && inet->inet_num == hnum &&
            !ipv6_only_sock(sk)) {
                __be32 rcv_saddr = inet->inet_rcv_saddr;
                score = sk->sk_family == PF_INET ? 2 : 1;
                if (rcv_saddr) {
                        if (rcv_saddr != daddr)
                                return -1;
                        score += 4;
                }
                if (sk->sk_bound_dev_if) {
                        if (sk->sk_bound_dev_if != dif)
                                return -1;
                        score += 4;
                }
        }
        return score;
}

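/*
 * Scoring, worked through for a v4 packet to 192.0.2.1:80 arriving on
 * ifindex 3 (illustrative values, not taken from the original source):
 *
 *	listener bound to 0.0.0.0:80, any device	-> score 2
 *	listener bound to 192.0.2.1:80, any device	-> score 6
 *	listener bound to 192.0.2.1:80, dev_if == 3	-> score 10
 *	listener bound to 198.51.100.9:80		-> -1 (never matches)
 *
 * AF_INET sockets start at 2 and v4-mapped AF_INET6 listeners at 1, so the
 * exact-family listener wins a tie; __inet_lookup_listener() keeps the
 * highest score seen on the chain.
 */
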
/*
 * Don't inline this cruft. Here are some nice properties to exploit here. The
 * BSD API does not allow a listening sock to specify the remote port nor the
 * remote address for the connection. So always assume those are both
 * wildcarded during the search since they can never be otherwise.
 */
struct sock *__inet_lookup_listener(struct net *net,
                                    struct inet_hashinfo *hashinfo,
                                    const __be32 saddr, __be16 sport,
                                    const __be32 daddr, const unsigned short hnum,
                                    const int dif)
{
        struct sock *sk, *result;
        struct hlist_nulls_node *node;
        unsigned int hash = inet_lhashfn(net, hnum);
        struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash];
        int score, hiscore, matches = 0, reuseport = 0;
        u32 phash = 0;

        rcu_read_lock();
begin:
        result = NULL;
        hiscore = 0;
        sk_nulls_for_each_rcu(sk, node, &ilb->head) {
                score = compute_score(sk, net, hnum, daddr, dif);
                if (score > hiscore) {
                        result = sk;
                        hiscore = score;
                        reuseport = sk->sk_reuseport;
                        if (reuseport) {
                                phash = inet_ehashfn(net, daddr, hnum,
                                                     saddr, sport);
                                matches = 1;
                        }
                } else if (score == hiscore && reuseport) {
                        matches++;
                        if (((u64)phash * matches) >> 32 == 0)
                                result = sk;
                        phash = next_pseudo_random32(phash);
                }
        }
        /*
         * if the nulls value we got at the end of this lookup is
         * not the expected one, we must restart lookup.
         * We probably met an item that was moved to another chain.
         */
        if (get_nulls_value(node) != hash + LISTENING_NULLS_BASE)
                goto begin;
        if (result) {
                if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
                        result = NULL;
                else if (unlikely(compute_score(result, net, hnum, daddr,
                                  dif) < hiscore)) {
                        sock_put(result);
                        goto begin;
                }
        }
        rcu_read_unlock();
        return result;
}
EXPORT_SYMBOL_GPL(__inet_lookup_listener);

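/*
 * The SO_REUSEPORT selection above is a streaming "pick one of N uniformly"
 * trick: phash is a per-flow hash, and ((u64)phash * matches) >> 32 is
 * (phash / 2^32) * matches truncated to an integer, which is zero with
 * probability roughly 1/matches.  Replacing the current result on that event,
 * while stepping phash through next_pseudo_random32(), leaves each equally
 * scored listener selected with ~1/N probability, and the same flow keeps
 * picking the same listener as long as the group and chain order are stable.
 */
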
/* All sockets share common refcount, but have different destructors */
void sock_gen_put(struct sock *sk)
{
        if (!atomic_dec_and_test(&sk->sk_refcnt))
                return;

        if (sk->sk_state == TCP_TIME_WAIT)
                inet_twsk_free(inet_twsk(sk));
        else
                sk_free(sk);
}
EXPORT_SYMBOL_GPL(sock_gen_put);

struct sock *__inet_lookup_established(struct net *net,
                                       struct inet_hashinfo *hashinfo,
                                       const __be32 saddr, const __be16 sport,
                                       const __be32 daddr, const u16 hnum,
                                       const int dif)
{
        INET_ADDR_COOKIE(acookie, saddr, daddr)
        const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
        struct sock *sk;
        const struct hlist_nulls_node *node;
        /* Optimize here for direct hit, only listening connections can
         * have wildcards anyways.
         */
        unsigned int hash = inet_ehashfn(net, daddr, hnum, saddr, sport);
        unsigned int slot = hash & hashinfo->ehash_mask;
        struct inet_ehash_bucket *head = &hashinfo->ehash[slot];

        rcu_read_lock();
begin:
        sk_nulls_for_each_rcu(sk, node, &head->chain) {
                if (sk->sk_hash != hash)
                        continue;
                if (likely(INET_MATCH(sk, net, acookie,
                                      saddr, daddr, ports, dif))) {
                        if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
                                goto out;
                        if (unlikely(!INET_MATCH(sk, net, acookie,
                                                 saddr, daddr, ports, dif))) {
                                sock_gen_put(sk);
                                goto begin;
                        }
                        goto found;
                }
        }
        /*
         * if the nulls value we got at the end of this lookup is
         * not the expected one, we must restart lookup.
         * We probably met an item that was moved to another chain.
         */
        if (get_nulls_value(node) != slot)
                goto begin;
out:
        sk = NULL;
found:
        rcu_read_unlock();
        return sk;
}
EXPORT_SYMBOL_GPL(__inet_lookup_established);

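/*
 * The lookup above is entirely lockless: a cheap sk_hash filter first, then
 * the full INET_MATCH on the 4-tuple, then atomic_inc_not_zero() to take a
 * reference, and INET_MATCH again because the socket may have been recycled
 * between the first match and the refcount grab.  The nulls marker check at
 * the chain end catches an RCU walk that migrated onto another chain
 * mid-traversal.  Receive-path callers typically reach this through the
 * __inet_lookup()/inet_lookup() wrappers in <net/inet_hashtables.h>.
 */
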
/* called with local bh disabled */
static int __inet_check_established(struct inet_timewait_death_row *death_row,
                                    struct sock *sk, __u16 lport,
                                    struct inet_timewait_sock **twp)
{
        struct inet_hashinfo *hinfo = death_row->hashinfo;
        struct inet_sock *inet = inet_sk(sk);
        __be32 daddr = inet->inet_rcv_saddr;
        __be32 saddr = inet->inet_daddr;
        int dif = sk->sk_bound_dev_if;
        INET_ADDR_COOKIE(acookie, saddr, daddr)
        const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport);
        struct net *net = sock_net(sk);
        unsigned int hash = inet_ehashfn(net, daddr, lport,
                                         saddr, inet->inet_dport);
        struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
        spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
        struct sock *sk2;
        const struct hlist_nulls_node *node;
        struct inet_timewait_sock *tw = NULL;
        int twrefcnt = 0;

        spin_lock(lock);

        sk_nulls_for_each(sk2, node, &head->chain) {
                if (sk2->sk_hash != hash)
                        continue;

                if (likely(INET_MATCH(sk2, net, acookie,
                                      saddr, daddr, ports, dif))) {
                        if (sk2->sk_state == TCP_TIME_WAIT) {
                                tw = inet_twsk(sk2);
                                if (twsk_unique(sk, sk2, twp))
                                        break;
                        }
                        goto not_unique;
                }
        }

        /* Must record num and sport now. Otherwise we will see
         * in hash table socket with a funny identity.
         */
        inet->inet_num = lport;
        inet->inet_sport = htons(lport);
        sk->sk_hash = hash;
        WARN_ON(!sk_unhashed(sk));
        __sk_nulls_add_node_rcu(sk, &head->chain);
        if (tw) {
                twrefcnt = inet_twsk_unhash(tw);
                NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
        }
        spin_unlock(lock);
        if (twrefcnt)
                inet_twsk_put(tw);
        sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

        if (twp) {
                *twp = tw;
        } else if (tw) {
                /* Silly. Should hash-dance instead... */
                inet_twsk_deschedule(tw, death_row);

                inet_twsk_put(tw);
        }
        return 0;

not_unique:
        spin_unlock(lock);
        return -EADDRNOTAVAIL;
}

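/*
 * In short: with the bucket lock held, the chosen 4-tuple is unique unless an
 * established socket already owns it (-EADDRNOTAVAIL), or a TIME_WAIT socket
 * owns it and twsk_unique() says it is safe to recycle (for TCP, roughly that
 * recent timestamps let new sequence numbers stay above the old ones), in
 * which case the new socket is hashed in and the stale timewait entry is
 * unhashed and released.
 */
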
static inline u32 inet_sk_port_offset(const struct sock *sk)
{
        const struct inet_sock *inet = inet_sk(sk);

        return secure_ipv4_port_ephemeral(inet->inet_rcv_saddr,
                                          inet->inet_daddr,
                                          inet->inet_dport);
}

int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw)
{
        struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
        struct hlist_nulls_head *list;
        spinlock_t *lock;
        struct inet_ehash_bucket *head;
        int twrefcnt = 0;

        WARN_ON(!sk_unhashed(sk));

        sk->sk_hash = inet_sk_ehashfn(sk);
        head = inet_ehash_bucket(hashinfo, sk->sk_hash);
        list = &head->chain;
        lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

        spin_lock(lock);
        __sk_nulls_add_node_rcu(sk, list);
        if (tw) {
                WARN_ON(sk->sk_hash != tw->tw_hash);
                twrefcnt = inet_twsk_unhash(tw);
        }
        spin_unlock(lock);
        sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
        return twrefcnt;
}
EXPORT_SYMBOL_GPL(__inet_hash_nolisten);

static void __inet_hash(struct sock *sk)
{
        struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
        struct inet_listen_hashbucket *ilb;

        if (sk->sk_state != TCP_LISTEN) {
                __inet_hash_nolisten(sk, NULL);
                return;
        }

        WARN_ON(!sk_unhashed(sk));
        ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];

        spin_lock(&ilb->lock);
        __sk_nulls_add_node_rcu(sk, &ilb->head);
        sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
        spin_unlock(&ilb->lock);
}

void inet_hash(struct sock *sk)
{
        if (sk->sk_state != TCP_CLOSE) {
                local_bh_disable();
                __inet_hash(sk);
                local_bh_enable();
        }
}
EXPORT_SYMBOL_GPL(inet_hash);

void inet_unhash(struct sock *sk)
{
        struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
        spinlock_t *lock;
        int done;

        if (sk_unhashed(sk))
                return;

        if (sk->sk_state == TCP_LISTEN)
                lock = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)].lock;
        else
                lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

        spin_lock_bh(lock);
        done = __sk_nulls_del_node_init_rcu(sk);
        if (done)
                sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
        spin_unlock_bh(lock);
}
EXPORT_SYMBOL_GPL(inet_unhash);

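/*
 * Hashing is state-driven: TCP_LISTEN sockets go into listening_hash under the
 * per-bucket spinlock, everything else goes into ehash via
 * __inet_hash_nolisten(), and inet_unhash() derives the matching lock from the
 * socket's current state before deleting it from its nulls chain.
 */
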
int __inet_hash_connect(struct inet_timewait_death_row *death_row,
                struct sock *sk, u32 port_offset,
                int (*check_established)(struct inet_timewait_death_row *,
                        struct sock *, __u16, struct inet_timewait_sock **),
                int (*hash)(struct sock *sk, struct inet_timewait_sock *twp))
{
        struct inet_hashinfo *hinfo = death_row->hashinfo;
        const unsigned short snum = inet_sk(sk)->inet_num;
        struct inet_bind_hashbucket *head;
        struct inet_bind_bucket *tb;
        int ret;
        struct net *net = sock_net(sk);
        int twrefcnt = 1;

        if (!snum) {
                int i, remaining, low, high, port;
                static u32 hint;
                u32 offset = hint + port_offset;
                struct inet_timewait_sock *tw = NULL;

                inet_get_local_port_range(net, &low, &high);
                remaining = (high - low) + 1;

                local_bh_disable();
                for (i = 1; i <= remaining; i++) {
                        port = low + (i + offset) % remaining;
                        if (inet_is_reserved_local_port(port))
                                continue;
                        head = &hinfo->bhash[inet_bhashfn(net, port,
                                        hinfo->bhash_size)];
                        spin_lock(&head->lock);

                        /* Does not bother with rcv_saddr checks,
                         * because the established check is already
                         * unique enough.
                         */
                        inet_bind_bucket_for_each(tb, &head->chain) {
                                if (net_eq(ib_net(tb), net) &&
                                    tb->port == port) {
                                        if (tb->fastreuse >= 0 ||
                                            tb->fastreuseport >= 0)
                                                goto next_port;
                                        WARN_ON(hlist_empty(&tb->owners));
                                        if (!check_established(death_row, sk,
                                                               port, &tw))
                                                goto ok;
                                        goto next_port;
                                }
                        }

                        tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
                                        net, head, port);
                        if (!tb) {
                                spin_unlock(&head->lock);
                                break;
                        }
                        tb->fastreuse = -1;
                        tb->fastreuseport = -1;
                        goto ok;

                next_port:
                        spin_unlock(&head->lock);
                }
                local_bh_enable();

                return -EADDRNOTAVAIL;

ok:
                hint += i;

                /* Head lock still held and bh's disabled */
                inet_bind_hash(sk, tb, port);
                if (sk_unhashed(sk)) {
                        inet_sk(sk)->inet_sport = htons(port);
                        twrefcnt += hash(sk, tw);
                }
                if (tw)
                        twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
                spin_unlock(&head->lock);

                if (tw) {
                        inet_twsk_deschedule(tw, death_row);
                        while (twrefcnt) {
                                twrefcnt--;
                                inet_twsk_put(tw);
                        }
                }

                ret = 0;
                goto out;
        }

        head = &hinfo->bhash[inet_bhashfn(net, snum, hinfo->bhash_size)];
        tb  = inet_csk(sk)->icsk_bind_hash;
        spin_lock_bh(&head->lock);
        if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
                hash(sk, NULL);
                spin_unlock_bh(&head->lock);
                return 0;
        } else {
                spin_unlock(&head->lock);
                /* No definite answer... Walk to established hash table */
                ret = check_established(death_row, sk, snum, NULL);
out:
                local_bh_enable();
                return ret;
        }
}

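/*
 * Ephemeral port selection, with illustrative numbers (not taken from the
 * original source): with ip_local_port_range 32768..61000, remaining is
 * 28233.  For a flow whose secure port_offset is 12345 and a current hint of
 * 100, the first candidate is
 *
 *	port = 32768 + (1 + 100 + 12345) % 28233 = 45214
 *
 * and the search walks successive ports from there, skipping reserved ports
 * and any bind bucket whose fastreuse/fastreuseport state shows it belongs to
 * ordinary bind() users, until check_established() confirms a unique 4-tuple.
 * On success, hint advances by the number of probes so that unrelated
 * connects keep spreading across the range.
 */
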
/*
 * Bind a port for a connect operation and hash it.
 */
int inet_hash_connect(struct inet_timewait_death_row *death_row,
                      struct sock *sk)
{
        return __inet_hash_connect(death_row, sk, inet_sk_port_offset(sk),
                        __inet_check_established, __inet_hash_nolisten);
}
EXPORT_SYMBOL_GPL(inet_hash_connect);

void inet_hashinfo_init(struct inet_hashinfo *h)
{
        int i;

        atomic_set(&h->bsockets, 0);
        for (i = 0; i < INET_LHTABLE_SIZE; i++) {
                spin_lock_init(&h->listening_hash[i].lock);
                INIT_HLIST_NULLS_HEAD(&h->listening_hash[i].head,
                                      i + LISTENING_NULLS_BASE);
        }
}
EXPORT_SYMBOL_GPL(inet_hashinfo_init);

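/*
 * Only the fixed-size listening hash is set up here; a transport is expected
 * to size and attach its own ehash/bhash arrays at boot.  TCP, for instance,
 * calls inet_hashinfo_init(&tcp_hashinfo) from tcp_init() and then allocates
 * the established and bind tables itself (those allocation details live in
 * net/ipv4/tcp.c, not in this file).
 */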