@@ -335,7 +335,8 @@ begin:
 	h = __nf_conntrack_find(net, tuple);
 	if (h) {
 		ct = nf_ct_tuplehash_to_ctrack(h);
-		if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
+		if (unlikely(nf_ct_is_dying(ct) ||
+			     !atomic_inc_not_zero(&ct->ct_general.use)))
 			h = NULL;
 		else {
 			if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple))) {
@@ -425,7 +426,6 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 	/* Remove from unconfirmed list */
 	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
 
-	__nf_conntrack_hash_insert(ct, hash, repl_hash);
 	/* Timer relative to confirmation time, not original
 	   setting time, otherwise we'd get timer wrap in
 	   weird delay cases. */
@@ -433,8 +433,16 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 	add_timer(&ct->timeout);
 	atomic_inc(&ct->ct_general.use);
 	set_bit(IPS_CONFIRMED_BIT, &ct->status);
+
+	/* Since the lookup is lockless, hash insertion must be done after
+	 * starting the timer and setting the CONFIRMED bit. The RCU barriers
+	 * guarantee that no other CPU can find the conntrack before the above
+	 * stores are visible.
+	 */
+	__nf_conntrack_hash_insert(ct, hash, repl_hash);
 	NF_CT_STAT_INC(net, insert);
 	spin_unlock_bh(&nf_conntrack_lock);
+
 	help = nfct_help(ct);
 	if (help && help->helper)
 		nf_conntrack_event_cache(IPCT_HELPER, ct);
@@ -503,7 +511,8 @@ static noinline int early_drop(struct net *net, unsigned int hash)
 			cnt++;
 		}
 
-		if (ct && unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
+		if (ct && unlikely(nf_ct_is_dying(ct) ||
+				   !atomic_inc_not_zero(&ct->ct_general.use)))
 			ct = NULL;
 		if (ct || cnt >= NF_CT_EVICTION_RANGE)
 			break;
@@ -1267,13 +1276,19 @@ err_cache:
 	return ret;
 }
 
+/*
+ * We need to use special "null" values, not used in hash table
+ */
+#define UNCONFIRMED_NULLS_VAL	((1<<30)+0)
+#define DYING_NULLS_VAL		((1<<30)+1)
+
 static int nf_conntrack_init_net(struct net *net)
 {
 	int ret;
 
 	atomic_set(&net->ct.count, 0);
-	INIT_HLIST_NULLS_HEAD(&net->ct.unconfirmed, 0);
-	INIT_HLIST_NULLS_HEAD(&net->ct.dying, 0);
+	INIT_HLIST_NULLS_HEAD(&net->ct.unconfirmed, UNCONFIRMED_NULLS_VAL);
+	INIT_HLIST_NULLS_HEAD(&net->ct.dying, DYING_NULLS_VAL);
 	net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
 	if (!net->ct.stat) {
 		ret = -ENOMEM;