@@ -52,20 +52,27 @@ static void inet_frag_secret_rebuild(unsigned long dummy)
 	unsigned long now = jiffies;
 	int i;
 
+	/* Per bucket lock NOT needed here, due to write lock protection */
 	write_lock(&f->lock);
+
 	get_random_bytes(&f->rnd, sizeof(u32));
 	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
+		struct inet_frag_bucket *hb;
 		struct inet_frag_queue *q;
 		struct hlist_node *n;
 
-		hlist_for_each_entry_safe(q, n, &f->hash[i], list) {
+		hb = &f->hash[i];
+		hlist_for_each_entry_safe(q, n, &hb->chain, list) {
 			unsigned int hval = f->hashfn(q);
 
 			if (hval != i) {
+				struct inet_frag_bucket *hb_dest;
+
 				hlist_del(&q->list);
 
 				/* Relink to new hash chain. */
-				hlist_add_head(&q->list, &f->hash[hval]);
+				hb_dest = &f->hash[hval];
+				hlist_add_head(&q->list, &hb_dest->chain);
 			}
 		}
 	}
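The hunks above dereference hb->chain and hb->chain_lock, so the patch presumes a bucket type along these lines; the real definition lives in the companion include/net/inet_frag.h hunk, which is not part of this excerpt, and the sketch below is inferred purely from that usage:

	struct inet_frag_bucket {
		struct hlist_head	chain;		/* hash chain of frag queues */
		spinlock_t		chain_lock;	/* guards this chain only */
	};

Each f->hash[i] is then a bucket rather than a bare hlist_head. The rebuild runs under write_lock(&f->lock), which excludes everyone who takes f->lock shared, so no chain_lock is needed while queues are relinked across buckets; the comment added at the top of the hunk records exactly that.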
@@ -78,9 +85,12 @@ void inet_frags_init(struct inet_frags *f)
 {
 	int i;
 
-	for (i = 0; i < INETFRAGS_HASHSZ; i++)
-		INIT_HLIST_HEAD(&f->hash[i]);
+	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
+		struct inet_frag_bucket *hb = &f->hash[i];
+		spin_lock_init(&hb->chain_lock);
+		INIT_HLIST_HEAD(&hb->chain);
+	}
 	rwlock_init(&f->lock);
 
 	f->rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^
 			   (jiffies ^ (jiffies >> 6)));
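Each bucket's spinlock is initialized before the table can be used. Note that rwlock_init(&f->lock) survives: the per-bucket locks do not replace f->lock, they nest under it; the secret rebuild above still needs it exclusively, and every other path still takes it shared to keep the seed f->rnd stable.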
@@ -122,9 +132,18 @@ EXPORT_SYMBOL(inet_frags_exit_net);
 
 static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
 {
-	write_lock(&f->lock);
+	struct inet_frag_bucket *hb;
+	unsigned int hash;
+
+	read_lock(&f->lock);
+	hash = f->hashfn(fq);
+	hb = &f->hash[hash];
+
+	spin_lock(&hb->chain_lock);
 	hlist_del(&fq->list);
-	write_unlock(&f->lock);
+	spin_unlock(&hb->chain_lock);
+
+	read_unlock(&f->lock);
 	inet_frag_lru_del(fq);
 }
 
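fq_unlink() now shows the lock order that every path below repeats: take f->lock shared first, so that f->rnd, and therefore the bucket picked by f->hashfn(), cannot change underfoot; then take that one bucket's chain_lock for the actual list operation; release in reverse order. Unlinks on different buckets thus proceed in parallel, serialized only against a concurrent secret rebuild.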
@@ -226,27 +245,32 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
 		struct inet_frag_queue *qp_in, struct inet_frags *f,
 		void *arg)
 {
+	struct inet_frag_bucket *hb;
 	struct inet_frag_queue *qp;
 #ifdef CONFIG_SMP
 #endif
 	unsigned int hash;
 
-	write_lock(&f->lock);
+	read_lock(&f->lock); /* Protects against hash rebuild */
 	/*
 	 * While we stayed w/o the lock other CPU could update
 	 * the rnd seed, so we need to re-calculate the hash
 	 * chain. Fortunatelly the qp_in can be used to get one.
 	 */
 	hash = f->hashfn(qp_in);
+	hb = &f->hash[hash];
+	spin_lock(&hb->chain_lock);
+
 #ifdef CONFIG_SMP
 	/* With SMP race we have to recheck hash table, because
 	 * such entry could be created on other cpu, while we
-	 * promoted read lock to write lock.
+	 * released the hash bucket lock.
 	 */
-	hlist_for_each_entry(qp, &f->hash[hash], list) {
+	hlist_for_each_entry(qp, &hb->chain, list) {
 		if (qp->net == nf && f->match(qp, arg)) {
 			atomic_inc(&qp->refcnt);
-			write_unlock(&f->lock);
+			spin_unlock(&hb->chain_lock);
+			read_unlock(&f->lock);
 			qp_in->last_in |= INET_FRAG_COMPLETE;
 			inet_frag_put(qp_in, f);
 			return qp;
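The reworded comment tracks the new shape of an old race; the recheck itself predates this patch. Two CPUs can both miss in inet_frag_find() for the same flow and both allocate a queue: the window used to sit between dropping the read lock in the lookup and taking the write lock here, and now sits between releasing the bucket's chain_lock in the lookup and re-taking it here. A hypothetical interleaving, not part of the patch:

	CPU A					CPU B
	find() misses, allocates qp_in		find() misses, allocates qp_in
	intern(): locks bucket, rechecks,
	misses, links its qp_in, unlocks
						intern(): locks bucket, rechecks,
						finds A's entry, takes a reference,
						marks its own qp_in COMPLETE and
						drops it via inet_frag_put()

The loser returns the winner's queue, which is the early-return path in this hunk.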
@@ -258,8 +282,9 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
 		atomic_inc(&qp->refcnt);
 
 	atomic_inc(&qp->refcnt);
-	hlist_add_head(&qp->list, &f->hash[hash]);
-	write_unlock(&f->lock);
+	hlist_add_head(&qp->list, &hb->chain);
+	spin_unlock(&hb->chain_lock);
+	read_unlock(&f->lock);
 	inet_frag_lru_add(nf, qp);
 	return qp;
 }
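As in fq_unlink(), the unlocks mirror the acquire order in reverse: the bucket's chain_lock is dropped before the shared f->lock, here on the insert path just as on the early-return path above.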
@@ -300,17 +325,23 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
 		struct inet_frags *f, void *key, unsigned int hash)
 	__releases(&f->lock)
 {
+	struct inet_frag_bucket *hb;
 	struct inet_frag_queue *q;
 	int depth = 0;
 
-	hlist_for_each_entry(q, &f->hash[hash], list) {
+	hb = &f->hash[hash];
+
+	spin_lock(&hb->chain_lock);
+	hlist_for_each_entry(q, &hb->chain, list) {
 		if (q->net == nf && f->match(q, key)) {
 			atomic_inc(&q->refcnt);
+			spin_unlock(&hb->chain_lock);
 			read_unlock(&f->lock);
 			return q;
 		}
 		depth++;
 	}
+	spin_unlock(&hb->chain_lock);
 	read_unlock(&f->lock);
 
 	if (depth <= INETFRAGS_MAXDEPTH)
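To play with the pattern outside the kernel, the sketch below is a minimal userspace analogue in C11 with pthreads. Everything in it is hypothetical and unrelated to the kernel APIs: a process-wide rwlock (seed_lock) stands in for f->lock, a per-bucket mutex stands in for chain_lock, and rebuild() takes the rwlock exclusively so that, like inet_frag_secret_rebuild(), it can relink every chain without touching a single bucket lock. A real stress test would hammer insert() from several threads while another loops on rebuild().

/* bucket_lock_demo.c - userspace illustration of per-bucket locking
 * beneath a global seed rwlock; hypothetical code, not kernel API.
 * Build: cc -std=c11 -pthread bucket_lock_demo.c
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define HASHSZ 64

struct node {
	uint32_t key;
	struct node *next;
};

struct bucket {
	struct node *chain;
	pthread_mutex_t chain_lock;	/* guards this chain only */
};

static struct bucket htab[HASHSZ];
static pthread_rwlock_t seed_lock = PTHREAD_RWLOCK_INITIALIZER;
static uint32_t seed;

/* Caller must hold seed_lock (shared or exclusive): the seed must not
 * move between picking a bucket and touching its chain. */
static unsigned int hashfn(uint32_t key)
{
	return (key ^ seed) % HASHSZ;
}

static void insert(uint32_t key)
{
	struct node *n = malloc(sizeof(*n));
	struct bucket *b;

	n->key = key;
	pthread_rwlock_rdlock(&seed_lock);	/* seed stable -> bucket valid */
	b = &htab[hashfn(key)];
	pthread_mutex_lock(&b->chain_lock);	/* serialize writers per chain */
	n->next = b->chain;
	b->chain = n;
	pthread_mutex_unlock(&b->chain_lock);
	pthread_rwlock_unlock(&seed_lock);
}

/* Analogue of inet_frag_secret_rebuild(): holding seed_lock exclusively
 * excludes all readers, so no chain_lock is needed while relinking. */
static void rebuild(void)
{
	struct node *tmp[HASHSZ] = { NULL };
	int i;

	pthread_rwlock_wrlock(&seed_lock);
	seed = (uint32_t)rand();
	for (i = 0; i < HASHSZ; i++) {
		struct node *n = htab[i].chain;

		htab[i].chain = NULL;
		while (n) {
			struct node *next = n->next;
			unsigned int h = hashfn(n->key);

			n->next = tmp[h];
			tmp[h] = n;
			n = next;
		}
	}
	for (i = 0; i < HASHSZ; i++)
		htab[i].chain = tmp[i];
	pthread_rwlock_unlock(&seed_lock);
}

int main(void)
{
	uint32_t k;
	int i, count = 0;

	for (i = 0; i < HASHSZ; i++)
		pthread_mutex_init(&htab[i].chain_lock, NULL);
	for (k = 0; k < 1000; k++)
		insert(k);
	rebuild();

	/* Sanity check: all 1000 nodes survive the rehash. */
	for (i = 0; i < HASHSZ; i++)
		for (struct node *n = htab[i].chain; n; n = n->next)
			count++;
	printf("%d nodes after rebuild\n", count);
	return 0;
}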