@@ -30,6 +30,7 @@
 #include <linux/netdevice.h>
 #include <linux/socket.h>
 #include <linux/mm.h>
+#include <linux/nsproxy.h>
 #include <linux/rculist_nulls.h>

 #include <net/netfilter/nf_conntrack.h>
@@ -84,9 +85,10 @@ static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
 	return ((u64)h * size) >> 32;
 }

-static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple)
+static inline u_int32_t hash_conntrack(const struct net *net,
+				       const struct nf_conntrack_tuple *tuple)
 {
-	return __hash_conntrack(tuple, nf_conntrack_htable_size,
+	return __hash_conntrack(tuple, net->ct.htable_size,
 				nf_conntrack_hash_rnd);
 }

@@ -294,7 +296,7 @@ __nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple)
 {
 	struct nf_conntrack_tuple_hash *h;
 	struct hlist_nulls_node *n;
-	unsigned int hash = hash_conntrack(tuple);
+	unsigned int hash = hash_conntrack(net, tuple);

 	/* Disable BHs the entire time since we normally need to disable them
 	 * at least once for the stats anyway.
@@ -364,10 +366,11 @@ static void __nf_conntrack_hash_insert(struct nf_conn *ct,

 void nf_conntrack_hash_insert(struct nf_conn *ct)
 {
+	struct net *net = nf_ct_net(ct);
 	unsigned int hash, repl_hash;

-	hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-	repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+	hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+	repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

 	__nf_conntrack_hash_insert(ct, hash, repl_hash);
 }
@@ -395,8 +398,8 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 	if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
 		return NF_ACCEPT;

-	hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-	repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+	hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+	repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

 	/* We're not in hash table, and we refuse to set up related
 	   connections for unconfirmed conns.  But packet copies and
@@ -466,7 +469,7 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
 	struct net *net = nf_ct_net(ignored_conntrack);
 	struct nf_conntrack_tuple_hash *h;
 	struct hlist_nulls_node *n;
-	unsigned int hash = hash_conntrack(tuple);
+	unsigned int hash = hash_conntrack(net, tuple);

 	/* Disable BHs the entire time since we need to disable them at
 	 * least once for the stats anyway.
@@ -501,7 +504,7 @@ static noinline int early_drop(struct net *net, unsigned int hash)
 	int dropped = 0;

 	rcu_read_lock();
-	for (i = 0; i < nf_conntrack_htable_size; i++) {
+	for (i = 0; i < net->ct.htable_size; i++) {
 		hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash],
 					       hnnode) {
 			tmp = nf_ct_tuplehash_to_ctrack(h);
@@ -521,7 +524,7 @@ static noinline int early_drop(struct net *net, unsigned int hash)
 		if (cnt >= NF_CT_EVICTION_RANGE)
 			break;

-		hash = (hash + 1) % nf_conntrack_htable_size;
+		hash = (hash + 1) % net->ct.htable_size;
 	}
 	rcu_read_unlock();

@@ -555,7 +558,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,

 	if (nf_conntrack_max &&
 	    unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
-		unsigned int hash = hash_conntrack(orig);
+		unsigned int hash = hash_conntrack(net, orig);
 		if (!early_drop(net, hash)) {
 			atomic_dec(&net->ct.count);
 			if (net_ratelimit())
@@ -1012,7 +1015,7 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
 	struct hlist_nulls_node *n;

 	spin_lock_bh(&nf_conntrack_lock);
-	for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
+	for (; *bucket < net->ct.htable_size; (*bucket)++) {
 		hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
 			ct = nf_ct_tuplehash_to_ctrack(h);
 			if (iter(ct, data))
@@ -1130,7 +1133,7 @@ static void nf_conntrack_cleanup_net(struct net *net)
 	}

 	nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
-			     nf_conntrack_htable_size);
+			     net->ct.htable_size);
 	nf_conntrack_ecache_fini(net);
 	nf_conntrack_acct_fini(net);
 	nf_conntrack_expect_fini(net);
@@ -1190,10 +1193,12 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
 {
 	int i, bucket, vmalloced, old_vmalloced;
 	unsigned int hashsize, old_size;
-	int rnd;
 	struct hlist_nulls_head *hash, *old_hash;
 	struct nf_conntrack_tuple_hash *h;

+	if (current->nsproxy->net_ns != &init_net)
+		return -EOPNOTSUPP;
+
 	/* On boot, we can set this without any fancy locking. */
 	if (!nf_conntrack_htable_size)
 		return param_set_uint(val, kp);
@@ -1206,33 +1211,29 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
 	if (!hash)
 		return -ENOMEM;

-	/* We have to rehahs for the new table anyway, so we also can
-	 * use a newrandom seed */
-	get_random_bytes(&rnd, sizeof(rnd));
-
 	/* Lookups in the old hash might happen in parallel, which means we
 	 * might get false negatives during connection lookup. New connections
 	 * created because of a false negative won't make it into the hash
 	 * though since that required taking the lock.
 	 */
 	spin_lock_bh(&nf_conntrack_lock);
-	for (i = 0; i < nf_conntrack_htable_size; i++) {
+	for (i = 0; i < init_net.ct.htable_size; i++) {
 		while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
 			h = hlist_nulls_entry(init_net.ct.hash[i].first,
 					struct nf_conntrack_tuple_hash, hnnode);
 			hlist_nulls_del_rcu(&h->hnnode);
-			bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
+			bucket = __hash_conntrack(&h->tuple, hashsize,
+						  nf_conntrack_hash_rnd);
 			hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
 		}
 	}
-	old_size = nf_conntrack_htable_size;
+	old_size = init_net.ct.htable_size;
 	old_vmalloced = init_net.ct.hash_vmalloc;
 	old_hash = init_net.ct.hash;

-	nf_conntrack_htable_size = hashsize;
+	init_net.ct.htable_size = nf_conntrack_htable_size = hashsize;
 	init_net.ct.hash_vmalloc = vmalloced;
 	init_net.ct.hash = hash;
-	nf_conntrack_hash_rnd = rnd;
 	spin_unlock_bh(&nf_conntrack_lock);

 	nf_ct_free_hashtable(old_hash, old_vmalloced, old_size);
@@ -1328,7 +1329,9 @@ static int nf_conntrack_init_net(struct net *net)
 		ret = -ENOMEM;
 		goto err_cache;
 	}
-	net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size,
+
+	net->ct.htable_size = nf_conntrack_htable_size;
+	net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size,
 					     &net->ct.hash_vmalloc, 1);
 	if (!net->ct.hash) {
 		ret = -ENOMEM;
@@ -1353,7 +1356,7 @@ err_acct:
 	nf_conntrack_expect_fini(net);
 err_expect:
 	nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
-			     nf_conntrack_htable_size);
+			     net->ct.htable_size);
 err_hash:
 	kmem_cache_destroy(net->ct.nf_conntrack_cachep);
 err_cache: