@@ -464,7 +464,8 @@ static noinline int early_drop(unsigned int hash)
 	return dropped;
 }
 
-struct nf_conn *nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
+struct nf_conn *nf_conntrack_alloc(struct net *net,
+				   const struct nf_conntrack_tuple *orig,
 				   const struct nf_conntrack_tuple *repl,
 				   gfp_t gfp)
 {
@@ -503,6 +504,9 @@ struct nf_conn *nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
 	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
 	/* Don't set timer yet: wait for confirmation */
 	setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
+#ifdef CONFIG_NET_NS
+	ct->ct_net = net;
+#endif
 	INIT_RCU_HEAD(&ct->rcu);
 
 	return ct;
@@ -528,7 +532,8 @@ EXPORT_SYMBOL_GPL(nf_conntrack_free);
 /* Allocate a new conntrack: we return -ENOMEM if classification
    failed due to stress. Otherwise it really is unclassifiable. */
 static struct nf_conntrack_tuple_hash *
-init_conntrack(const struct nf_conntrack_tuple *tuple,
+init_conntrack(struct net *net,
+	       const struct nf_conntrack_tuple *tuple,
 	       struct nf_conntrack_l3proto *l3proto,
 	       struct nf_conntrack_l4proto *l4proto,
 	       struct sk_buff *skb,
@@ -544,7 +549,7 @@ init_conntrack(const struct nf_conntrack_tuple *tuple,
 		return NULL;
 	}
 
-	ct = nf_conntrack_alloc(tuple, &repl_tuple, GFP_ATOMIC);
+	ct = nf_conntrack_alloc(net, tuple, &repl_tuple, GFP_ATOMIC);
 	if (ct == NULL || IS_ERR(ct)) {
 		pr_debug("Can't allocate conntrack.\n");
 		return (struct nf_conntrack_tuple_hash *)ct;
@@ -631,7 +636,8 @@ resolve_normal_ct(struct sk_buff *skb,
 	/* look for tuple match */
 	h = nf_conntrack_find_get(&tuple);
 	if (!h) {
-		h = init_conntrack(&tuple, l3proto, l4proto, skb, dataoff);
+		h = init_conntrack(&init_net, &tuple, l3proto, l4proto, skb,
+				   dataoff);
 		if (!h)
 			return NULL;
 		if (IS_ERR(h))
@@ -1185,6 +1191,9 @@ int nf_conntrack_init(struct net *net)
 
 	/* Set up fake conntrack:
 	    - to never be deleted, not in any hashes */
+#ifdef CONFIG_NET_NS
+	nf_conntrack_untracked.ct_net = &init_net;
+#endif
 	atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
 	/*  - and look it like as a confirmed connection */
 	set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);