@@ -377,21 +377,30 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
 
+#define NF_CT_EVICTION_RANGE 8
+
 /* There's a small race here where we may free a just-assured
    connection. Too bad: we're in trouble anyway. */
-static int early_drop(struct hlist_head *chain)
+static int early_drop(unsigned int hash)
 {
         /* Use oldest entry, which is roughly LRU */
         struct nf_conntrack_tuple_hash *h;
         struct nf_conn *ct = NULL, *tmp;
         struct hlist_node *n;
+        unsigned int i, cnt = 0;
         int dropped = 0;
 
         read_lock_bh(&nf_conntrack_lock);
-        hlist_for_each_entry(h, n, chain, hnode) {
-                tmp = nf_ct_tuplehash_to_ctrack(h);
-                if (!test_bit(IPS_ASSURED_BIT, &tmp->status))
-                        ct = tmp;
+        for (i = 0; i < nf_conntrack_htable_size; i++) {
+                hlist_for_each_entry(h, n, &nf_conntrack_hash[hash], hnode) {
+                        tmp = nf_ct_tuplehash_to_ctrack(h);
+                        if (!test_bit(IPS_ASSURED_BIT, &tmp->status))
+                                ct = tmp;
+                        cnt++;
+                }
+                if (ct || cnt >= NF_CT_EVICTION_RANGE)
+                        break;
+                hash = (hash + 1) % nf_conntrack_htable_size;
         }
         if (ct)
                 atomic_inc(&ct->ct_general.use);
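
For readers who want to poke at the scan logic outside the kernel, below is a minimal user-space sketch of what the new early_drop() does, under the assumption of a toy chained hash table. The names (struct entry, table, pick_victim, EVICTION_RANGE, TABLE_SIZE) are hypothetical stand-ins, not kernel code, and the kernel's hlist helpers, locking, and refcounting are deliberately left out.

/*
 * Toy model of the eviction scan: walk consecutive buckets starting
 * at `hash`, remember the last unassured entry seen in each chain,
 * and stop once a candidate is found or at least EVICTION_RANGE
 * entries have been examined.
 */
#include <stdbool.h>
#include <stdio.h>

#define EVICTION_RANGE 8
#define TABLE_SIZE     16

struct entry {
        struct entry *next;
        bool assured;              /* models IPS_ASSURED_BIT */
};

static struct entry *table[TABLE_SIZE];

static struct entry *pick_victim(unsigned int hash)
{
        struct entry *victim = NULL;
        unsigned int i, cnt = 0;

        for (i = 0; i < TABLE_SIZE; i++) {
                struct entry *e;

                for (e = table[hash]; e != NULL; e = e->next) {
                        if (!e->assured)
                                victim = e;
                        cnt++;
                }
                if (victim || cnt >= EVICTION_RANGE)
                        break;
                hash = (hash + 1) % TABLE_SIZE;
        }
        return victim;
}

int main(void)
{
        struct entry old = { NULL, false };    /* oldest, unassured */
        struct entry young = { &old, true };   /* newest, assured */

        table[3] = &young;                     /* chain: young -> old */
        printf("victim in bucket 3: %s\n",
               pick_victim(3) == &old ? "oldest unassured" : "none");
        return 0;
}

The sketch prefers the last unassured entry in a chain because conntrack entries are added at the head of each chain, so the tail holds the oldest entries; that is what the "roughly LRU" comment in the kernel source refers to.
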
@@ -425,8 +434,7 @@ struct nf_conn *nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
         if (nf_conntrack_max
             && atomic_read(&nf_conntrack_count) > nf_conntrack_max) {
                 unsigned int hash = hash_conntrack(orig);
-                /* Try dropping from this hash chain. */
-                if (!early_drop(&nf_conntrack_hash[hash])) {
+                if (!early_drop(hash)) {
                         atomic_dec(&nf_conntrack_count);
                         if (net_ratelimit())
                                 printk(KERN_WARNING
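
The call-site change in nf_conntrack_alloc() is easier to see in isolation. Here is a similarly hedged sketch of that guard, with hypothetical stand-ins (alloc_entry, try_early_drop, hash_of, MAX_ENTRIES, entry_count) for the kernel's functions and counters: the count is bumped first, and if that pushes it over the limit and nothing can be evicted, the count is backed out and the allocation refused.

/*
 * Toy model of the nf_conntrack_alloc() guard: over the limit, try to
 * evict starting from the bucket the new entry hashes to; on failure,
 * undo the count bump and refuse the allocation.
 */
#include <stdio.h>

#define MAX_ENTRIES 4

static int entry_count = MAX_ENTRIES;      /* pretend the table is full */

/* Hypothetical stand-ins for hash_conntrack() and early_drop(). */
static unsigned int hash_of(int tuple) { return (unsigned int)tuple % 16; }
static int try_early_drop(unsigned int hash) { (void)hash; return 0; }

static int alloc_entry(int tuple)
{
        entry_count++;
        if (entry_count > MAX_ENTRIES) {
                if (!try_early_drop(hash_of(tuple))) {
                        entry_count--;
                        fprintf(stderr, "table full, dropping packet\n");
                        return -1;
                }
        }
        /* ... would allocate and hash in the new entry here ... */
        return 0;
}

int main(void)
{
        return alloc_entry(42) == 0 ? 0 : 1;
}

Passing the bucket index instead of a chain pointer is what lets early_drop() continue into neighbouring buckets; with the old signature the eviction attempt was confined to the single chain the new entry hashed into.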