@@ -22,6 +22,9 @@
 #include <linux/netdevice.h>
 #include <linux/netfilter.h>
 #include <linux/module.h>
+#include <linux/bootmem.h>
+#include <linux/vmalloc.h>
+#include <linux/cache.h>
 #include <net/xfrm.h>
 #include <net/ip.h>
 
@@ -30,26 +33,8 @@ EXPORT_SYMBOL(xfrm_cfg_mutex);
 
 static DEFINE_RWLOCK(xfrm_policy_lock);
 
-struct xfrm_policy *xfrm_policy_list[XFRM_POLICY_MAX*2];
-EXPORT_SYMBOL(xfrm_policy_list);
-#ifdef CONFIG_XFRM_SUB_POLICY
-struct xfrm_policy *xfrm_policy_list_sub[XFRM_POLICY_MAX*2];
-EXPORT_SYMBOL(xfrm_policy_list_sub);
-
-#define XFRM_POLICY_LISTS(type) \
-	((type == XFRM_POLICY_TYPE_SUB) ? xfrm_policy_list_sub : \
-	 xfrm_policy_list)
-#define XFRM_POLICY_LISTHEAD(type, dir) \
-	((type == XFRM_POLICY_TYPE_SUB) ? xfrm_policy_list_sub[dir] : \
-	 xfrm_policy_list[dir])
-#define XFRM_POLICY_LISTHEADP(type, dir) \
-	((type == XFRM_POLICY_TYPE_SUB) ? &xfrm_policy_list_sub[dir] : \
-	 &xfrm_policy_list[dir])
-#else
-#define XFRM_POLICY_LISTS(type) xfrm_policy_list
-#define XFRM_POLICY_LISTHEAD(type, dif) xfrm_policy_list[dir]
-#define XFRM_POLICY_LISTHEADP(type, dif) &xfrm_policy_list[dir]
-#endif
+unsigned int xfrm_policy_count[XFRM_POLICY_MAX*2];
+EXPORT_SYMBOL(xfrm_policy_count);
 
 static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
 static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];
@@ -57,8 +42,7 @@ static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];
 static kmem_cache_t *xfrm_dst_cache __read_mostly;
 
 static struct work_struct xfrm_policy_gc_work;
-static struct list_head xfrm_policy_gc_list =
-	LIST_HEAD_INIT(xfrm_policy_gc_list);
+static HLIST_HEAD(xfrm_policy_gc_list);
 static DEFINE_SPINLOCK(xfrm_policy_gc_lock);
 
 static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
@@ -328,8 +312,10 @@ struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp)
 	policy = kzalloc(sizeof(struct xfrm_policy), gfp);
 
 	if (policy) {
-		atomic_set(&policy->refcnt, 1);
+		INIT_HLIST_NODE(&policy->bydst);
+		INIT_HLIST_NODE(&policy->byidx);
 		rwlock_init(&policy->lock);
+		atomic_set(&policy->refcnt, 1);
 		init_timer(&policy->timer);
 		policy->timer.data = (unsigned long)policy;
 		policy->timer.function = xfrm_policy_timer;
@@ -375,17 +361,16 @@ static void xfrm_policy_gc_kill(struct xfrm_policy *policy)
 static void xfrm_policy_gc_task(void *data)
 {
 	struct xfrm_policy *policy;
-	struct list_head *entry, *tmp;
-	struct list_head gc_list = LIST_HEAD_INIT(gc_list);
+	struct hlist_node *entry, *tmp;
+	struct hlist_head gc_list;
 
 	spin_lock_bh(&xfrm_policy_gc_lock);
-	list_splice_init(&xfrm_policy_gc_list, &gc_list);
+	gc_list.first = xfrm_policy_gc_list.first;
+	INIT_HLIST_HEAD(&xfrm_policy_gc_list);
 	spin_unlock_bh(&xfrm_policy_gc_lock);
 
-	list_for_each_safe(entry, tmp, &gc_list) {
-		policy = list_entry(entry, struct xfrm_policy, list);
+	hlist_for_each_entry_safe(policy, entry, tmp, &gc_list, bydst)
 		xfrm_policy_gc_kill(policy);
-	}
 }
 
 /* Rule must be locked. Release descentant resources, announce
@@ -407,70 +392,354 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
 	}
 
 	spin_lock(&xfrm_policy_gc_lock);
-	list_add(&policy->list, &xfrm_policy_gc_list);
+	hlist_add_head(&policy->bydst, &xfrm_policy_gc_list);
 	spin_unlock(&xfrm_policy_gc_lock);
 
 	schedule_work(&xfrm_policy_gc_work);
 }
 
+struct xfrm_policy_hash {
+	struct hlist_head	*table;
+	unsigned int		hmask;
+};
+
+static struct hlist_head xfrm_policy_inexact[XFRM_POLICY_MAX*2];
+static struct xfrm_policy_hash xfrm_policy_bydst[XFRM_POLICY_MAX*2] __read_mostly;
+static struct hlist_head *xfrm_policy_byidx __read_mostly;
+static unsigned int xfrm_idx_hmask __read_mostly;
+static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
+
+static inline unsigned int __idx_hash(u32 index, unsigned int hmask)
+{
+	return (index ^ (index >> 8)) & hmask;
+}
+
+static inline unsigned int idx_hash(u32 index)
+{
+	return __idx_hash(index, xfrm_idx_hmask);
+}
+
+static inline unsigned int __sel_hash(struct xfrm_selector *sel, unsigned short family, unsigned int hmask)
+{
+	xfrm_address_t *daddr = &sel->daddr;
+	xfrm_address_t *saddr = &sel->saddr;
+	unsigned int h = 0;
+
+	switch (family) {
+	case AF_INET:
+		if (sel->prefixlen_d != 32 ||
+		    sel->prefixlen_s != 32)
+			return hmask + 1;
+
+		h = ntohl(daddr->a4 ^ saddr->a4);
+		break;
+
+	case AF_INET6:
+		if (sel->prefixlen_d != 128 ||
+		    sel->prefixlen_s != 128)
+			return hmask + 1;
+
+		h = ntohl(daddr->a6[2] ^ daddr->a6[3] ^
+			  saddr->a6[2] ^ saddr->a6[3]);
+		break;
+	};
+	h ^= (h >> 16);
+	return h & hmask;
+}
+
+static inline unsigned int __addr_hash(xfrm_address_t *daddr, xfrm_address_t *saddr, unsigned short family, unsigned int hmask)
+{
+	unsigned int h = 0;
+
+	switch (family) {
+	case AF_INET:
+		h = ntohl(daddr->a4 ^ saddr->a4);
+		break;
+
+	case AF_INET6:
+		h = ntohl(daddr->a6[2] ^ daddr->a6[3] ^
+			  saddr->a6[2] ^ saddr->a6[3]);
+		break;
+	};
+	h ^= (h >> 16);
+	return h & hmask;
+}
+
+static struct hlist_head *policy_hash_bysel(struct xfrm_selector *sel, unsigned short family, int dir)
+{
+	unsigned int hmask = xfrm_policy_bydst[dir].hmask;
+	unsigned int hash = __sel_hash(sel, family, hmask);
+
+	return (hash == hmask + 1 ?
+		&xfrm_policy_inexact[dir] :
+		xfrm_policy_bydst[dir].table + hash);
+}
+
+static struct hlist_head *policy_hash_direct(xfrm_address_t *daddr, xfrm_address_t *saddr, unsigned short family, int dir)
+{
+	unsigned int hmask = xfrm_policy_bydst[dir].hmask;
+	unsigned int hash = __addr_hash(daddr, saddr, family, hmask);
+
+	return xfrm_policy_bydst[dir].table + hash;
+}
+
+static struct hlist_head *xfrm_policy_hash_alloc(unsigned int sz)
+{
+	struct hlist_head *n;
+
+	if (sz <= PAGE_SIZE)
+		n = kmalloc(sz, GFP_KERNEL);
+	else if (hashdist)
+		n = __vmalloc(sz, GFP_KERNEL, PAGE_KERNEL);
+	else
+		n = (struct hlist_head *)
+			__get_free_pages(GFP_KERNEL, get_order(sz));
+
+	if (n)
+		memset(n, 0, sz);
+
+	return n;
+}
+
+static void xfrm_policy_hash_free(struct hlist_head *n, unsigned int sz)
+{
+	if (sz <= PAGE_SIZE)
+		kfree(n);
+	else if (hashdist)
+		vfree(n);
+	else
+		free_pages((unsigned long)n, get_order(sz));
+}
+
+static void xfrm_dst_hash_transfer(struct hlist_head *list,
+				   struct hlist_head *ndsttable,
+				   unsigned int nhashmask)
+{
+	struct hlist_node *entry, *tmp;
+	struct xfrm_policy *pol;
+
+	hlist_for_each_entry_safe(pol, entry, tmp, list, bydst) {
+		unsigned int h;
+
+		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
+				pol->family, nhashmask);
+		hlist_add_head(&pol->bydst, ndsttable+h);
+	}
+}
+
+static void xfrm_idx_hash_transfer(struct hlist_head *list,
+				   struct hlist_head *nidxtable,
+				   unsigned int nhashmask)
+{
+	struct hlist_node *entry, *tmp;
+	struct xfrm_policy *pol;
+
+	hlist_for_each_entry_safe(pol, entry, tmp, list, byidx) {
+		unsigned int h;
+
+		h = __idx_hash(pol->index, nhashmask);
+		hlist_add_head(&pol->byidx, nidxtable+h);
+	}
+}
+
+static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
+{
+	return ((old_hmask + 1) << 1) - 1;
+}
+
+static void xfrm_bydst_resize(int dir)
+{
+	unsigned int hmask = xfrm_policy_bydst[dir].hmask;
+	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
+	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
+	struct hlist_head *odst = xfrm_policy_bydst[dir].table;
+	struct hlist_head *ndst = xfrm_policy_hash_alloc(nsize);
+	int i;
+
+	if (!ndst)
+		return;
+
+	write_lock_bh(&xfrm_policy_lock);
+
+	for (i = hmask; i >= 0; i--)
+		xfrm_dst_hash_transfer(odst + i, ndst, nhashmask);
+
+	xfrm_policy_bydst[dir].table = ndst;
+	xfrm_policy_bydst[dir].hmask = nhashmask;
+
+	write_unlock_bh(&xfrm_policy_lock);
+
+	xfrm_policy_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
+}
+
+static void xfrm_byidx_resize(int total)
+{
+	unsigned int hmask = xfrm_idx_hmask;
+	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
+	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
+	struct hlist_head *oidx = xfrm_policy_byidx;
+	struct hlist_head *nidx = xfrm_policy_hash_alloc(nsize);
+	int i;
+
+	if (!nidx)
+		return;
+
+	write_lock_bh(&xfrm_policy_lock);
+
+	for (i = hmask; i >= 0; i--)
+		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);
+
+	xfrm_policy_byidx = nidx;
+	xfrm_idx_hmask = nhashmask;
+
+	write_unlock_bh(&xfrm_policy_lock);
+
+	xfrm_policy_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
+}
+
+static inline int xfrm_bydst_should_resize(int dir, int *total)
+{
+	unsigned int cnt = xfrm_policy_count[dir];
+	unsigned int hmask = xfrm_policy_bydst[dir].hmask;
+
+	if (total)
+		*total += cnt;
+
+	if ((hmask + 1) < xfrm_policy_hashmax &&
+	    cnt > hmask)
+		return 1;
+
+	return 0;
+}
+
+static inline int xfrm_byidx_should_resize(int total)
+{
+	unsigned int hmask = xfrm_idx_hmask;
+
+	if ((hmask + 1) < xfrm_policy_hashmax &&
+	    total > hmask)
+		return 1;
+
+	return 0;
+}
+
+static DEFINE_MUTEX(hash_resize_mutex);
+
+static void xfrm_hash_resize(void *__unused)
+{
+	int dir, total;
+
+	mutex_lock(&hash_resize_mutex);
+
+	total = 0;
+	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
+		if (xfrm_bydst_should_resize(dir, &total))
+			xfrm_bydst_resize(dir);
+	}
+	if (xfrm_byidx_should_resize(total))
+		xfrm_byidx_resize(total);
+
+	mutex_unlock(&hash_resize_mutex);
+}
+
+static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize, NULL);
+
 /* Generate new index... KAME seems to generate them ordered by cost
  * of an absolute inpredictability of ordering of rules. This will not pass. */
 static u32 xfrm_gen_index(u8 type, int dir)
 {
-	u32 idx;
-	struct xfrm_policy *p;
 	static u32 idx_generator;
 
 	for (;;) {
+		struct hlist_node *entry;
+		struct hlist_head *list;
+		struct xfrm_policy *p;
+		u32 idx;
+		int found;
+
 		idx = (idx_generator | dir);
 		idx_generator += 8;
 		if (idx == 0)
 			idx = 8;
-		for (p = XFRM_POLICY_LISTHEAD(type, dir); p; p = p->next) {
-			if (p->index == idx)
+		list = xfrm_policy_byidx + idx_hash(idx);
+		found = 0;
+		hlist_for_each_entry(p, entry, list, byidx) {
+			if (p->index == idx) {
+				found = 1;
 				break;
+			}
 		}
-		if (!p)
+		if (!found)
 			return idx;
 	}
 }
 
+static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
+{
+	u32 *p1 = (u32 *) s1;
+	u32 *p2 = (u32 *) s2;
+	int len = sizeof(struct xfrm_selector) / sizeof(u32);
+	int i;
+
+	for (i = 0; i < len; i++) {
+		if (p1[i] != p2[i])
+			return 1;
+	}
+
+	return 0;
+}
+
 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
 {
-	struct xfrm_policy *pol, **p;
-	struct xfrm_policy *delpol = NULL;
-	struct xfrm_policy **newpos = NULL;
+	struct xfrm_policy *pol;
+	struct xfrm_policy *delpol;
+	struct hlist_head *chain;
+	struct hlist_node *entry, *newpos, *last;
 	struct dst_entry *gc_list;
 
 	write_lock_bh(&xfrm_policy_lock);
-	for (p = XFRM_POLICY_LISTHEADP(policy->type, dir); (pol=*p)!=NULL;) {
-		if (!delpol && memcmp(&policy->selector, &pol->selector, sizeof(pol->selector)) == 0 &&
+	chain = policy_hash_bysel(&policy->selector, policy->family, dir);
+	delpol = NULL;
+	newpos = NULL;
+	last = NULL;
+	hlist_for_each_entry(pol, entry, chain, bydst) {
+		if (!delpol &&
+		    pol->type == policy->type &&
+		    !selector_cmp(&pol->selector, &policy->selector) &&
 		    xfrm_sec_ctx_match(pol->security, policy->security)) {
 			if (excl) {
 				write_unlock_bh(&xfrm_policy_lock);
 				return -EEXIST;
 			}
-			*p = pol->next;
 			delpol = pol;
 			if (policy->priority > pol->priority)
 				continue;
 		} else if (policy->priority >= pol->priority) {
-			p = &pol->next;
+			last = &pol->bydst;
 			continue;
 		}
 		if (!newpos)
-			newpos = p;
+			newpos = &pol->bydst;
 		if (delpol)
 			break;
-		p = &pol->next;
+		last = &pol->bydst;
 	}
+	if (!newpos)
+		newpos = last;
 	if (newpos)
-		p = newpos;
+		hlist_add_after(newpos, &policy->bydst);
+	else
+		hlist_add_head(&policy->bydst, chain);
 	xfrm_pol_hold(policy);
-	policy->next = *p;
-	*p = policy;
+	xfrm_policy_count[dir]++;
 	atomic_inc(&flow_cache_genid);
+	if (delpol) {
+		hlist_del(&delpol->bydst);
+		hlist_del(&delpol->byidx);
+		xfrm_policy_count[dir]--;
+	}
 	policy->index = delpol ? delpol->index : xfrm_gen_index(policy->type, dir);
+	hlist_add_head(&policy->byidx, xfrm_policy_byidx+idx_hash(policy->index));
 	policy->curlft.add_time = (unsigned long)xtime.tv_sec;
 	policy->curlft.use_time = 0;
 	if (!mod_timer(&policy->timer, jiffies + HZ))
@@ -479,10 +748,13 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
 
 	if (delpol)
 		xfrm_policy_kill(delpol);
+	else if (xfrm_bydst_should_resize(dir, NULL))
+		schedule_work(&xfrm_hash_work);
 
 	read_lock_bh(&xfrm_policy_lock);
 	gc_list = NULL;
-	for (policy = policy->next; policy; policy = policy->next) {
+	entry = &policy->bydst;
+	hlist_for_each_entry_continue(policy, entry, bydst) {
 		struct dst_entry *dst;
 
 		write_lock(&policy->lock);
@@ -515,67 +787,112 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(u8 type, int dir,
 					  struct xfrm_selector *sel,
 					  struct xfrm_sec_ctx *ctx, int delete)
 {
-	struct xfrm_policy *pol, **p;
+	struct xfrm_policy *pol, *ret;
+	struct hlist_head *chain;
+	struct hlist_node *entry;
 
 	write_lock_bh(&xfrm_policy_lock);
-	for (p = XFRM_POLICY_LISTHEADP(type, dir); (pol=*p)!=NULL; p = &pol->next) {
-		if ((memcmp(sel, &pol->selector, sizeof(*sel)) == 0) &&
-		    (xfrm_sec_ctx_match(ctx, pol->security))) {
+	chain = policy_hash_bysel(sel, sel->family, dir);
+	ret = NULL;
+	hlist_for_each_entry(pol, entry, chain, bydst) {
+		if (pol->type == type &&
+		    !selector_cmp(sel, &pol->selector) &&
+		    xfrm_sec_ctx_match(ctx, pol->security)) {
 			xfrm_pol_hold(pol);
-			if (delete)
-				*p = pol->next;
+			if (delete) {
+				hlist_del(&pol->bydst);
+				hlist_del(&pol->byidx);
+				xfrm_policy_count[dir]--;
+			}
+			ret = pol;
 			break;
 		}
 	}
 	write_unlock_bh(&xfrm_policy_lock);
 
-	if (pol && delete) {
+	if (ret && delete) {
 		atomic_inc(&flow_cache_genid);
-		xfrm_policy_kill(pol);
+		xfrm_policy_kill(ret);
 	}
-	return pol;
+	return ret;
 }
 EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
 
 struct xfrm_policy *xfrm_policy_byid(u8 type, int dir, u32 id, int delete)
 {
-	struct xfrm_policy *pol, **p;
+	struct xfrm_policy *pol, *ret;
+	struct hlist_head *chain;
+	struct hlist_node *entry;
 
 	write_lock_bh(&xfrm_policy_lock);
-	for (p = XFRM_POLICY_LISTHEADP(type, dir); (pol=*p)!=NULL; p = &pol->next) {
-		if (pol->index == id) {
+	chain = xfrm_policy_byidx + idx_hash(id);
+	ret = NULL;
+	hlist_for_each_entry(pol, entry, chain, byidx) {
+		if (pol->type == type && pol->index == id) {
 			xfrm_pol_hold(pol);
-			if (delete)
-				*p = pol->next;
+			if (delete) {
+				hlist_del(&pol->bydst);
+				hlist_del(&pol->byidx);
+				xfrm_policy_count[dir]--;
+			}
+			ret = pol;
 			break;
 		}
 	}
 	write_unlock_bh(&xfrm_policy_lock);
 
-	if (pol && delete) {
+	if (ret && delete) {
 		atomic_inc(&flow_cache_genid);
-		xfrm_policy_kill(pol);
+		xfrm_policy_kill(ret);
 	}
-	return pol;
+	return ret;
 }
 EXPORT_SYMBOL(xfrm_policy_byid);
 
 void xfrm_policy_flush(u8 type)
 {
-	struct xfrm_policy *xp;
-	struct xfrm_policy **p_list = XFRM_POLICY_LISTS(type);
 	int dir;
 
 	write_lock_bh(&xfrm_policy_lock);
 	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
-		while ((xp = p_list[dir]) != NULL) {
-			p_list[dir] = xp->next;
+		struct xfrm_policy *pol;
+		struct hlist_node *entry;
+		int i;
+
+	again1:
+		hlist_for_each_entry(pol, entry,
+				     &xfrm_policy_inexact[dir], bydst) {
+			if (pol->type != type)
+				continue;
+			hlist_del(&pol->bydst);
+			hlist_del(&pol->byidx);
 			write_unlock_bh(&xfrm_policy_lock);
 
-			xfrm_policy_kill(xp);
+			xfrm_policy_kill(pol);
 
 			write_lock_bh(&xfrm_policy_lock);
+			goto again1;
+		}
+
+		for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
+	again2:
+			hlist_for_each_entry(pol, entry,
+					     xfrm_policy_bydst[dir].table + i,
+					     bydst) {
+				if (pol->type != type)
+					continue;
+				hlist_del(&pol->bydst);
+				hlist_del(&pol->byidx);
+				write_unlock_bh(&xfrm_policy_lock);
+
+				xfrm_policy_kill(pol);
+
+				write_lock_bh(&xfrm_policy_lock);
+				goto again2;
+			}
 		}
+
+		xfrm_policy_count[dir] = 0;
 	}
 	atomic_inc(&flow_cache_genid);
 	write_unlock_bh(&xfrm_policy_lock);
@@ -585,15 +902,27 @@ EXPORT_SYMBOL(xfrm_policy_flush);
 int xfrm_policy_walk(u8 type, int (*func)(struct xfrm_policy *, int, int, void*),
 		     void *data)
 {
-	struct xfrm_policy *xp;
-	int dir;
-	int count = 0;
-	int error = 0;
+	struct xfrm_policy *pol;
+	struct hlist_node *entry;
+	int dir, count, error;
 
 	read_lock_bh(&xfrm_policy_lock);
+	count = 0;
 	for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) {
-		for (xp = XFRM_POLICY_LISTHEAD(type, dir); xp; xp = xp->next)
-			count++;
+		struct hlist_head *table = xfrm_policy_bydst[dir].table;
+		int i;
+
+		hlist_for_each_entry(pol, entry,
+				     &xfrm_policy_inexact[dir], bydst) {
+			if (pol->type == type)
+				count++;
+		}
+		for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
+			hlist_for_each_entry(pol, entry, table + i, bydst) {
+				if (pol->type == type)
+					count++;
+			}
+		}
 	}
 
 	if (count == 0) {
@@ -602,13 +931,28 @@ int xfrm_policy_walk(u8 type, int (*func)(struct xfrm_policy *, int, int, void*)
 	}
 
 	for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) {
-		for (xp = XFRM_POLICY_LISTHEAD(type, dir); xp; xp = xp->next) {
-			error = func(xp, dir%XFRM_POLICY_MAX, --count, data);
+		struct hlist_head *table = xfrm_policy_bydst[dir].table;
+		int i;
+
+		hlist_for_each_entry(pol, entry,
+				     &xfrm_policy_inexact[dir], bydst) {
+			if (pol->type != type)
+				continue;
+			error = func(pol, dir % XFRM_POLICY_MAX, --count, data);
 			if (error)
 				goto out;
 		}
+		for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
+			hlist_for_each_entry(pol, entry, table + i, bydst) {
+				if (pol->type != type)
+					continue;
+				error = func(pol, dir % XFRM_POLICY_MAX, --count, data);
+				if (error)
+					goto out;
+			}
+		}
 	}
-
+	error = 0;
 out:
 	read_unlock_bh(&xfrm_policy_lock);
 	return error;
@@ -617,31 +961,61 @@ EXPORT_SYMBOL(xfrm_policy_walk);
 
 /* Find policy to apply to this flow. */
 
-static struct xfrm_policy *xfrm_policy_lookup_bytype(u8 type, struct flowi *fl,
-						     u16 family, u8 dir)
+static int xfrm_policy_match(struct xfrm_policy *pol, struct flowi *fl,
+			     u8 type, u16 family, int dir)
 {
-	struct xfrm_policy *pol;
+	struct xfrm_selector *sel = &pol->selector;
+	int match;
 
-	read_lock_bh(&xfrm_policy_lock);
-	for (pol = XFRM_POLICY_LISTHEAD(type, dir); pol; pol = pol->next) {
-		struct xfrm_selector *sel = &pol->selector;
-		int match;
+	if (pol->family != family ||
+	    pol->type != type)
+		return 0;
 
-		if (pol->family != family)
-			continue;
+	match = xfrm_selector_match(sel, fl, family);
+	if (match) {
+		if (!security_xfrm_policy_lookup(pol, fl->secid, dir))
+			return 1;
+	}
+
+	return 0;
+}
 
-		match = xfrm_selector_match(sel, fl, family);
+static struct xfrm_policy *xfrm_policy_lookup_bytype(u8 type, struct flowi *fl,
+						     u16 family, u8 dir)
+{
+	struct xfrm_policy *pol, *ret;
+	xfrm_address_t *daddr, *saddr;
+	struct hlist_node *entry;
+	struct hlist_head *chain;
 
-		if (match) {
-			if (!security_xfrm_policy_lookup(pol, fl->secid, dir)) {
+	daddr = xfrm_flowi_daddr(fl, family);
+	saddr = xfrm_flowi_saddr(fl, family);
+	if (unlikely(!daddr || !saddr))
+		return NULL;
+
+	read_lock_bh(&xfrm_policy_lock);
+	chain = policy_hash_direct(daddr, saddr, family, dir);
+	ret = NULL;
+	hlist_for_each_entry(pol, entry, chain, bydst) {
+		if (xfrm_policy_match(pol, fl, type, family, dir)) {
+			xfrm_pol_hold(pol);
+			ret = pol;
+			break;
+		}
+	}
+	if (!ret) {
+		chain = &xfrm_policy_inexact[dir];
+		hlist_for_each_entry(pol, entry, chain, bydst) {
+			if (xfrm_policy_match(pol, fl, type, family, dir)) {
 				xfrm_pol_hold(pol);
+				ret = pol;
 				break;
 			}
 		}
 	}
 	read_unlock_bh(&xfrm_policy_lock);
 
-	return pol;
+	return ret;
 }
 
 static void xfrm_policy_lookup(struct flowi *fl, u16 family, u8 dir,
@@ -657,7 +1031,7 @@ static void xfrm_policy_lookup(struct flowi *fl, u16 family, u8 dir,
 	pol = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN, fl, family, dir);
 
 #ifdef CONFIG_XFRM_SUB_POLICY
- end:
+end:
 #endif
 	if ((*objp = (void *) pol) != NULL)
 		*obj_refp = &pol->refcnt;
@@ -704,26 +1078,29 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struc
 
 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
 {
-	struct xfrm_policy **p_list = XFRM_POLICY_LISTS(pol->type);
+	struct hlist_head *chain = policy_hash_bysel(&pol->selector,
+						     pol->family, dir);
 
-	pol->next = p_list[dir];
-	p_list[dir] = pol;
+	hlist_add_head(&pol->bydst, chain);
+	hlist_add_head(&pol->byidx, xfrm_policy_byidx+idx_hash(pol->index));
+	xfrm_policy_count[dir]++;
 	xfrm_pol_hold(pol);
+
+	if (xfrm_bydst_should_resize(dir, NULL))
+		schedule_work(&xfrm_hash_work);
 }
 
 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
 						int dir)
 {
-	struct xfrm_policy **polp;
+	if (hlist_unhashed(&pol->bydst))
+		return NULL;
 
-	for (polp = XFRM_POLICY_LISTHEADP(pol->type, dir);
-	     *polp != NULL; polp = &(*polp)->next) {
-		if (*polp == pol) {
-			*polp = pol->next;
-			return pol;
-		}
-	}
-	return NULL;
+	hlist_del(&pol->bydst);
+	hlist_del(&pol->byidx);
+	xfrm_policy_count[dir]--;
+
+	return pol;
 }
 
 int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
@@ -968,7 +1345,8 @@ restart:
 
 	if (!policy) {
 		/* To accelerate a bit... */
-		if ((dst_orig->flags & DST_NOXFRM) || xfrm_policy_lists_empty(XFRM_POLICY_OUT))
+		if ((dst_orig->flags & DST_NOXFRM) ||
+		    !xfrm_policy_count[XFRM_POLICY_OUT])
 			return 0;
 
 		policy = flow_cache_lookup(fl, dst_orig->ops->family,
@@ -1413,50 +1791,50 @@ static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
 	return dst;
 }
 
+static void prune_one_bundle(struct xfrm_policy *pol, int (*func)(struct dst_entry *), struct dst_entry **gc_list_p)
+{
+	struct dst_entry *dst, **dstp;
+
+	write_lock(&pol->lock);
+	dstp = &pol->bundles;
+	while ((dst=*dstp) != NULL) {
+		if (func(dst)) {
+			*dstp = dst->next;
+			dst->next = *gc_list_p;
+			*gc_list_p = dst;
+		} else {
+			dstp = &dst->next;
+		}
+	}
+	write_unlock(&pol->lock);
+}
+
 static void xfrm_prune_bundles(int (*func)(struct dst_entry *))
 {
-	int i;
-	struct xfrm_policy *pol;
-	struct dst_entry *dst, **dstp, *gc_list = NULL;
+	struct dst_entry *gc_list = NULL;
+	int dir;
 
 	read_lock_bh(&xfrm_policy_lock);
-	for (i=0; i<2*XFRM_POLICY_MAX; i++) {
-#ifdef CONFIG_XFRM_SUB_POLICY
-		for (pol = xfrm_policy_list_sub[i]; pol; pol = pol->next) {
-			write_lock(&pol->lock);
-			dstp = &pol->bundles;
-			while ((dst=*dstp) != NULL) {
-				if (func(dst)) {
-					*dstp = dst->next;
-					dst->next = gc_list;
-					gc_list = dst;
-				} else {
-					dstp = &dst->next;
-				}
-			}
-			write_unlock(&pol->lock);
-		}
+	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
+		struct xfrm_policy *pol;
+		struct hlist_node *entry;
+		struct hlist_head *table;
+		int i;
 
-#endif
-		for (pol = xfrm_policy_list[i]; pol; pol = pol->next) {
-			write_lock(&pol->lock);
-			dstp = &pol->bundles;
-			while ((dst=*dstp) != NULL) {
-				if (func(dst)) {
-					*dstp = dst->next;
-					dst->next = gc_list;
-					gc_list = dst;
-				} else {
-					dstp = &dst->next;
-				}
-			}
-			write_unlock(&pol->lock);
+		hlist_for_each_entry(pol, entry,
+				     &xfrm_policy_inexact[dir], bydst)
+			prune_one_bundle(pol, func, &gc_list);
+
+		table = xfrm_policy_bydst[dir].table;
+		for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
+			hlist_for_each_entry(pol, entry, table + i, bydst)
+				prune_one_bundle(pol, func, &gc_list);
 		}
 	}
 	read_unlock_bh(&xfrm_policy_lock);
 
 	while (gc_list) {
-		dst = gc_list;
+		struct dst_entry *dst = gc_list;
 		gc_list = dst->next;
 		dst_free(dst);
 	}
@@ -1680,6 +2058,9 @@ static struct notifier_block xfrm_dev_notifier = {
 
 static void __init xfrm_policy_init(void)
 {
+	unsigned int hmask, sz;
+	int dir;
+
 	xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
 					   sizeof(struct xfrm_dst),
 					   0, SLAB_HWCACHE_ALIGN,
@@ -1687,6 +2068,26 @@ static void __init xfrm_policy_init(void)
 	if (!xfrm_dst_cache)
 		panic("XFRM: failed to allocate xfrm_dst_cache\n");
 
+	hmask = 8 - 1;
+	sz = (hmask+1) * sizeof(struct hlist_head);
+
+	xfrm_policy_byidx = xfrm_policy_hash_alloc(sz);
+	xfrm_idx_hmask = hmask;
+	if (!xfrm_policy_byidx)
+		panic("XFRM: failed to allocate byidx hash\n");
+
+	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
+		struct xfrm_policy_hash *htab;
+
+		INIT_HLIST_HEAD(&xfrm_policy_inexact[dir]);
+
+		htab = &xfrm_policy_bydst[dir];
+		htab->table = xfrm_policy_hash_alloc(sz);
+		htab->hmask = hmask;
+		if (!htab->table)
+			panic("XFRM: failed to allocate bydst hash\n");
+	}
+
 	INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task, NULL);
 	register_netdevice_notifier(&xfrm_dev_notifier);
 }