@@ -183,10 +183,6 @@ destroy_conntrack(struct nf_conntrack *nfct)
 	NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
 	NF_CT_ASSERT(!timer_pending(&ct->timeout));
 
-	if (!test_bit(IPS_DYING_BIT, &ct->status))
-		nf_conntrack_event(IPCT_DESTROY, ct);
-	set_bit(IPS_DYING_BIT, &ct->status);
-
 	/* To make sure we don't get any weird locking issues here:
 	 * destroy_conntrack() MUST NOT be called with a write lock
 	 * to nf_conntrack_lock!!! -HW */
@@ -220,9 +216,8 @@ destroy_conntrack(struct nf_conntrack *nfct)
 	nf_conntrack_free(ct);
 }
 
-static void death_by_timeout(unsigned long ul_conntrack)
+void nf_ct_delete_from_lists(struct nf_conn *ct)
 {
-	struct nf_conn *ct = (void *)ul_conntrack;
 	struct net *net = nf_ct_net(ct);
 
 	nf_ct_helper_destroy(ct);
@@ -232,6 +227,59 @@ static void death_by_timeout(unsigned long ul_conntrack)
 	NF_CT_STAT_INC(net, delete_list);
 	clean_from_lists(ct);
 	spin_unlock_bh(&nf_conntrack_lock);
+}
+EXPORT_SYMBOL_GPL(nf_ct_delete_from_lists);
+
+static void death_by_event(unsigned long ul_conntrack)
+{
+	struct nf_conn *ct = (void *)ul_conntrack;
+	struct net *net = nf_ct_net(ct);
+
+	if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) {
+		/* bad luck, let's retry again */
+		ct->timeout.expires = jiffies +
+			(random32() % net->ct.sysctl_events_retry_timeout);
+		add_timer(&ct->timeout);
+		return;
+	}
+	/* we've got the event delivered, now it's dying */
+	set_bit(IPS_DYING_BIT, &ct->status);
+	spin_lock(&nf_conntrack_lock);
+	hlist_nulls_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
+	spin_unlock(&nf_conntrack_lock);
+	nf_ct_put(ct);
+}
+
+void nf_ct_insert_dying_list(struct nf_conn *ct)
+{
+	struct net *net = nf_ct_net(ct);
+
+	/* add this conntrack to the dying list */
+	spin_lock_bh(&nf_conntrack_lock);
+	hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
+			     &net->ct.dying);
+	spin_unlock_bh(&nf_conntrack_lock);
+	/* set a new timer to retry event delivery */
+	setup_timer(&ct->timeout, death_by_event, (unsigned long)ct);
+	ct->timeout.expires = jiffies +
+		(random32() % net->ct.sysctl_events_retry_timeout);
+	add_timer(&ct->timeout);
+}
+EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list);
+
+static void death_by_timeout(unsigned long ul_conntrack)
+{
+	struct nf_conn *ct = (void *)ul_conntrack;
+
+	if (!test_bit(IPS_DYING_BIT, &ct->status) &&
+	    unlikely(nf_conntrack_event(IPCT_DESTROY, ct) < 0)) {
+		/* destroy event was not delivered */
+		nf_ct_delete_from_lists(ct);
+		nf_ct_insert_dying_list(ct);
+		return;
+	}
+	set_bit(IPS_DYING_BIT, &ct->status);
+	nf_ct_delete_from_lists(ct);
 	nf_ct_put(ct);
 }
 
@@ -982,11 +1030,13 @@ static int kill_report(struct nf_conn *i, void *data)
 {
 	struct __nf_ct_flush_report *fr = (struct __nf_ct_flush_report *)data;
 
-	/* get_next_corpse sets the dying bit for us */
-	nf_conntrack_event_report(IPCT_DESTROY,
-				  i,
-				  fr->pid,
-				  fr->report);
+	/* If we fail to deliver the event, death_by_timeout() will retry */
+	if (nf_conntrack_event_report(IPCT_DESTROY, i,
+				      fr->pid, fr->report) < 0)
+		return 1;
+
+	/* Avoid the delivery of the destroy event in death_by_timeout(). */
+	set_bit(IPS_DYING_BIT, &i->status);
 	return 1;
 }
 
@@ -1015,6 +1065,21 @@ void nf_conntrack_flush_report(struct net *net, u32 pid, int report)
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_flush_report);
 
+static void nf_ct_release_dying_list(void)
+{
+	struct nf_conntrack_tuple_hash *h;
+	struct nf_conn *ct;
+	struct hlist_nulls_node *n;
+
+	spin_lock_bh(&nf_conntrack_lock);
+	hlist_nulls_for_each_entry(h, n, &init_net.ct.dying, hnnode) {
+		ct = nf_ct_tuplehash_to_ctrack(h);
+		/* never fails to remove them, no listeners at this point */
+		nf_ct_kill(ct);
+	}
+	spin_unlock_bh(&nf_conntrack_lock);
+}
+
 static void nf_conntrack_cleanup_init_net(void)
 {
 	nf_conntrack_helper_fini();
@@ -1026,6 +1091,7 @@ static void nf_conntrack_cleanup_net(struct net *net)
 {
  i_see_dead_people:
 	nf_ct_iterate_cleanup(net, kill_all, NULL);
+	nf_ct_release_dying_list();
 	if (atomic_read(&net->ct.count) != 0) {
 		schedule();
 		goto i_see_dead_people;
@@ -1207,6 +1273,7 @@ static int nf_conntrack_init_net(struct net *net)
 
 	atomic_set(&net->ct.count, 0);
 	INIT_HLIST_NULLS_HEAD(&net->ct.unconfirmed, 0);
+	INIT_HLIST_NULLS_HEAD(&net->ct.dying, 0);
 	net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
 	if (!net->ct.stat) {
 		ret = -ENOMEM;
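
Illustration (not part of the patch): the heart of the change is the retry
scheme in death_by_event() and nf_ct_insert_dying_list() above — try to
deliver the IPCT_DESTROY event, and if no listener acknowledges it, park the
conntrack on the dying list and retry after a random backoff bounded by
net->ct.sysctl_events_retry_timeout. The userspace C sketch below models only
that control flow; deliver_event(), RETRY_TIMEOUT and the retry loop are
hypothetical stand-ins for nf_conntrack_event(), the sysctl and the re-armed
ct->timeout timer, not kernel API.

/* build: cc -o retry retry.c && ./retry */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define RETRY_TIMEOUT 15	/* stand-in for sysctl_events_retry_timeout */

/* Stand-in for nf_conntrack_event(): fails twice, then succeeds. */
static int deliver_event(int attempt)
{
	return attempt < 3 ? -1 : 0;
}

int main(void)
{
	int attempt = 0;

	srand((unsigned)time(NULL));
	for (;;) {
		if (deliver_event(++attempt) == 0) {
			/* Delivered: only now may the entry die, which is
			 * what set_bit(IPS_DYING_BIT, ...) followed by
			 * hlist_nulls_del() expresses in death_by_event(). */
			printf("destroy event delivered on attempt %d\n",
			       attempt);
			break;
		}
		/* Delivery failed: retry after a random delay, as
		 * death_by_event() does by re-arming ct->timeout to
		 * jiffies + (random32() % retry timeout). */
		printf("delivery failed, retrying in %d ticks\n",
		       rand() % RETRY_TIMEOUT);
	}
	return 0;
}

The random backoff matters: after a flush of a large table, thousands of
entries may be waiting to re-deliver their destroy event, and spreading the
retries avoids having every timer fire at once.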