@@ -39,6 +39,7 @@
 #include <net/netfilter/nf_conntrack_core.h>
 #include <net/netfilter/nf_conntrack_extend.h>
 #include <net/netfilter/nf_conntrack_acct.h>
+#include <net/netfilter/nf_conntrack_ecache.h>
 #include <net/netfilter/nf_nat.h>
 #include <net/netfilter/nf_nat_core.h>
 
@@ -182,10 +183,6 @@ destroy_conntrack(struct nf_conntrack *nfct)
 	NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
 	NF_CT_ASSERT(!timer_pending(&ct->timeout));
 
-	if (!test_bit(IPS_DYING_BIT, &ct->status))
-		nf_conntrack_event(IPCT_DESTROY, ct);
-	set_bit(IPS_DYING_BIT, &ct->status);
-
 	/* To make sure we don't get any weird locking issues here:
 	 * destroy_conntrack() MUST NOT be called with a write lock
 	 * to nf_conntrack_lock!!! -HW */
@@ -219,27 +216,70 @@ destroy_conntrack(struct nf_conntrack *nfct)
 	nf_conntrack_free(ct);
 }
 
-static void death_by_timeout(unsigned long ul_conntrack)
+void nf_ct_delete_from_lists(struct nf_conn *ct)
 {
-	struct nf_conn *ct = (void *)ul_conntrack;
 	struct net *net = nf_ct_net(ct);
-	struct nf_conn_help *help = nfct_help(ct);
-	struct nf_conntrack_helper *helper;
-
-	if (help) {
-		rcu_read_lock();
-		helper = rcu_dereference(help->helper);
-		if (helper && helper->destroy)
-			helper->destroy(ct);
-		rcu_read_unlock();
-	}
 
+	nf_ct_helper_destroy(ct);
 	spin_lock_bh(&nf_conntrack_lock);
 	/* Inside lock so preempt is disabled on module removal path.
 	 * Otherwise we can get spurious warnings. */
 	NF_CT_STAT_INC(net, delete_list);
 	clean_from_lists(ct);
 	spin_unlock_bh(&nf_conntrack_lock);
+}
+EXPORT_SYMBOL_GPL(nf_ct_delete_from_lists);
+
+static void death_by_event(unsigned long ul_conntrack)
+{
+	struct nf_conn *ct = (void *)ul_conntrack;
+	struct net *net = nf_ct_net(ct);
+
+	if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) {
+		/* bad luck, let's retry again */
+		ct->timeout.expires = jiffies +
+			(random32() % net->ct.sysctl_events_retry_timeout);
+		add_timer(&ct->timeout);
+		return;
+	}
+	/* we've got the event delivered, now it's dying */
+	set_bit(IPS_DYING_BIT, &ct->status);
+	spin_lock(&nf_conntrack_lock);
+	hlist_nulls_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
+	spin_unlock(&nf_conntrack_lock);
+	nf_ct_put(ct);
+}
+
+void nf_ct_insert_dying_list(struct nf_conn *ct)
+{
+	struct net *net = nf_ct_net(ct);
+
+	/* add this conntrack to the dying list */
+	spin_lock_bh(&nf_conntrack_lock);
+	hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
+			     &net->ct.dying);
+	spin_unlock_bh(&nf_conntrack_lock);
+	/* set a new timer to retry event delivery */
+	setup_timer(&ct->timeout, death_by_event, (unsigned long)ct);
+	ct->timeout.expires = jiffies +
+		(random32() % net->ct.sysctl_events_retry_timeout);
+	add_timer(&ct->timeout);
+}
+EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list);
+
+static void death_by_timeout(unsigned long ul_conntrack)
+{
+	struct nf_conn *ct = (void *)ul_conntrack;
+
+	if (!test_bit(IPS_DYING_BIT, &ct->status) &&
+	    unlikely(nf_conntrack_event(IPCT_DESTROY, ct) < 0)) {
+		/* destroy event was not delivered */
+		nf_ct_delete_from_lists(ct);
+		nf_ct_insert_dying_list(ct);
+		return;
+	}
+	set_bit(IPS_DYING_BIT, &ct->status);
+	nf_ct_delete_from_lists(ct);
 	nf_ct_put(ct);
 }
 
@@ -577,6 +617,7 @@ init_conntrack(struct net *net,
 	}
 
 	nf_ct_acct_ext_add(ct, GFP_ATOMIC);
+	nf_ct_ecache_ext_add(ct, GFP_ATOMIC);
 
 	spin_lock_bh(&nf_conntrack_lock);
 	exp = nf_ct_find_expectation(net, tuple);
@@ -807,8 +848,6 @@ void __nf_ct_refresh_acct(struct nf_conn *ct,
 	NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
 	NF_CT_ASSERT(skb);
 
-	spin_lock_bh(&nf_conntrack_lock);
-
 	/* Only update if this is not a fixed timeout */
 	if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
 		goto acct;
@@ -822,11 +861,8 @@ void __nf_ct_refresh_acct(struct nf_conn *ct,
 		/* Only update the timeout if the new timeout is at least
 		   HZ jiffies from the old timeout. Need del_timer for race
 		   avoidance (may already be dying). */
-		if (newtime - ct->timeout.expires >= HZ
-		    && del_timer(&ct->timeout)) {
-			ct->timeout.expires = newtime;
-			add_timer(&ct->timeout);
-		}
+		if (newtime - ct->timeout.expires >= HZ)
+			mod_timer_pending(&ct->timeout, newtime);
 	}
 
 acct:
@@ -835,13 +871,13 @@ acct:
 
 		acct = nf_conn_acct_find(ct);
 		if (acct) {
+			spin_lock_bh(&ct->lock);
 			acct[CTINFO2DIR(ctinfo)].packets++;
 			acct[CTINFO2DIR(ctinfo)].bytes +=
 				skb->len - skb_network_offset(skb);
+			spin_unlock_bh(&ct->lock);
 		}
 	}
-
-	spin_unlock_bh(&nf_conntrack_lock);
 }
 EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
 
@@ -853,14 +889,14 @@ bool __nf_ct_kill_acct(struct nf_conn *ct,
 	if (do_acct) {
 		struct nf_conn_counter *acct;
 
-		spin_lock_bh(&nf_conntrack_lock);
 		acct = nf_conn_acct_find(ct);
 		if (acct) {
+			spin_lock_bh(&ct->lock);
 			acct[CTINFO2DIR(ctinfo)].packets++;
 			acct[CTINFO2DIR(ctinfo)].bytes +=
 				skb->len - skb_network_offset(skb);
+			spin_unlock_bh(&ct->lock);
 		}
-		spin_unlock_bh(&nf_conntrack_lock);
 	}
 
 	if (del_timer(&ct->timeout)) {
@@ -994,11 +1030,13 @@ static int kill_report(struct nf_conn *i, void *data)
 {
 	struct __nf_ct_flush_report *fr = (struct __nf_ct_flush_report *)data;
 
-	/* get_next_corpse sets the dying bit for us */
-	nf_conntrack_event_report(IPCT_DESTROY,
-				  i,
-				  fr->pid,
-				  fr->report);
+	/* If we fail to deliver the event, death_by_timeout() will retry */
+	if (nf_conntrack_event_report(IPCT_DESTROY, i,
+				      fr->pid, fr->report) < 0)
+		return 1;
+
+	/* Avoid the delivery of the destroy event in death_by_timeout(). */
+	set_bit(IPS_DYING_BIT, &i->status);
 	return 1;
 }
 
@@ -1027,6 +1065,21 @@ void nf_conntrack_flush_report(struct net *net, u32 pid, int report)
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_flush_report);
 
+static void nf_ct_release_dying_list(void)
+{
+	struct nf_conntrack_tuple_hash *h;
+	struct nf_conn *ct;
+	struct hlist_nulls_node *n;
+
+	spin_lock_bh(&nf_conntrack_lock);
+	hlist_nulls_for_each_entry(h, n, &init_net.ct.dying, hnnode) {
+		ct = nf_ct_tuplehash_to_ctrack(h);
+		/* never fails to remove them, no listeners at this point */
+		nf_ct_kill(ct);
+	}
+	spin_unlock_bh(&nf_conntrack_lock);
+}
+
 static void nf_conntrack_cleanup_init_net(void)
 {
 	nf_conntrack_helper_fini();
@@ -1036,10 +1089,9 @@ static void nf_conntrack_cleanup_init_net(void)
 
 static void nf_conntrack_cleanup_net(struct net *net)
 {
-	nf_ct_event_cache_flush(net);
-	nf_conntrack_ecache_fini(net);
 i_see_dead_people:
 	nf_ct_iterate_cleanup(net, kill_all, NULL);
+	nf_ct_release_dying_list();
 	if (atomic_read(&net->ct.count) != 0) {
 		schedule();
 		goto i_see_dead_people;
@@ -1050,6 +1102,7 @@ static void nf_conntrack_cleanup_net(struct net *net)
 
 	nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
 			     nf_conntrack_htable_size);
+	nf_conntrack_ecache_fini(net);
 	nf_conntrack_acct_fini(net);
 	nf_conntrack_expect_fini(net);
 	free_percpu(net->ct.stat);
@@ -1220,14 +1273,12 @@ static int nf_conntrack_init_net(struct net *net)
 
 	atomic_set(&net->ct.count, 0);
 	INIT_HLIST_NULLS_HEAD(&net->ct.unconfirmed, 0);
+	INIT_HLIST_NULLS_HEAD(&net->ct.dying, 0);
 	net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
 	if (!net->ct.stat) {
 		ret = -ENOMEM;
 		goto err_stat;
 	}
-	ret = nf_conntrack_ecache_init(net);
-	if (ret < 0)
-		goto err_ecache;
 	net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size,
 					     &net->ct.hash_vmalloc, 1);
 	if (!net->ct.hash) {
@@ -1241,6 +1292,9 @@ static int nf_conntrack_init_net(struct net *net)
 	ret = nf_conntrack_acct_init(net);
 	if (ret < 0)
 		goto err_acct;
+	ret = nf_conntrack_ecache_init(net);
+	if (ret < 0)
+		goto err_ecache;
 
 	/* Set up fake conntrack:
 	    - to never be deleted, not in any hashes */
@@ -1253,14 +1307,14 @@ static int nf_conntrack_init_net(struct net *net)
 
 	return 0;
 
+err_ecache:
+	nf_conntrack_acct_fini(net);
 err_acct:
 	nf_conntrack_expect_fini(net);
 err_expect:
 	nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
 			     nf_conntrack_htable_size);
 err_hash:
-	nf_conntrack_ecache_fini(net);
-err_ecache:
 	free_percpu(net->ct.stat);
 err_stat:
 	return ret;