@@ -154,11 +154,9 @@ void __init inet_initpeers(void)
 /* Called with or without local BH being disabled. */
 static void unlink_from_unused(struct inet_peer *p)
 {
-	if (!list_empty(&p->unused)) {
-		spin_lock_bh(&unused_peers.lock);
-		list_del_init(&p->unused);
-		spin_unlock_bh(&unused_peers.lock);
-	}
+	spin_lock_bh(&unused_peers.lock);
+	list_del_init(&p->unused);
+	spin_unlock_bh(&unused_peers.lock);
 }
 
 static int addr_compare(const struct inetpeer_addr *a,
@@ -205,6 +203,20 @@ static int addr_compare(const struct inetpeer_addr *a,
 		u;							\
 })
 
+static bool atomic_add_unless_return(atomic_t *ptr, int a, int u, int *newv)
+{
+	int cur, old = atomic_read(ptr);
+
+	while (old != u) {
+		*newv = old + a;
+		cur = atomic_cmpxchg(ptr, old, *newv);
+		if (cur == old)
+			return true;
+		old = cur;
+	}
+	return false;
+}
+
 /*
  * Called with rcu_read_lock()
  * Because we hold no lock against a writer, its quite possible we fall
@@ -213,7 +225,8 @@ static int addr_compare(const struct inetpeer_addr *a,
  * We exit from this function if number of links exceeds PEER_MAXDEPTH
  */
 static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
-				    struct inet_peer_base *base)
+				    struct inet_peer_base *base,
+				    int *newrefcnt)
 {
 	struct inet_peer *u = rcu_dereference(base->root);
 	int count = 0;
@@ -226,7 +239,7 @@ static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
 			 * distinction between an unused entry (refcnt=0) and
 			 * a freed one.
 			 */
-			if (unlikely(!atomic_add_unless(&u->refcnt, 1, -1)))
+			if (!atomic_add_unless_return(&u->refcnt, 1, -1, newrefcnt))
 				u = NULL;
 			return u;
 		}
@@ -465,22 +478,23 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
 	struct inet_peer_base *base = family_to_base(daddr->family);
 	struct inet_peer *p;
 	unsigned int sequence;
-	int invalidated;
+	int invalidated, newrefcnt = 0;
 
 	/* Look up for the address quickly, lockless.
 	 * Because of a concurrent writer, we might not find an existing entry.
 	 */
 	rcu_read_lock();
 	sequence = read_seqbegin(&base->lock);
-	p = lookup_rcu(daddr, base);
+	p = lookup_rcu(daddr, base, &newrefcnt);
 	invalidated = read_seqretry(&base->lock, sequence);
 	rcu_read_unlock();
 
 	if (p) {
-		/* The existing node has been found.
+found:		/* The existing node has been found.
 		 * Remove the entry from unused list if it was there.
 		 */
-		unlink_from_unused(p);
+		if (newrefcnt == 1)
+			unlink_from_unused(p);
 		return p;
 	}
 
@@ -494,11 +508,9 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
 	write_seqlock_bh(&base->lock);
 	p = lookup(daddr, stack, base);
 	if (p != peer_avl_empty) {
-		atomic_inc(&p->refcnt);
+		newrefcnt = atomic_inc_return(&p->refcnt);
 		write_sequnlock_bh(&base->lock);
-		/* Remove the entry from unused list if it was there. */
-		unlink_from_unused(p);
-		return p;
+		goto found;
 	}
 	p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
 	if (p) {
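
For illustration only, a minimal userspace sketch of the same cmpxchg loop that atomic_add_unless_return() implements above, showing how a caller can detect the 0 -> 1 transition that gates unlink_from_unused(). It assumes C11 <stdatomic.h>; the names add_unless_return, refcnt and newref are made up for this sketch and are not part of the patch:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* Add @a to *ptr unless it currently equals @u; report the resulting
	 * value through @newv so the caller can see a 0 -> 1 transition.
	 */
	static bool add_unless_return(atomic_int *ptr, int a, int u, int *newv)
	{
		int old = atomic_load(ptr);

		while (old != u) {
			*newv = old + a;
			/* On failure, 'old' is reloaded with the current value. */
			if (atomic_compare_exchange_weak(ptr, &old, *newv))
				return true;
		}
		return false;
	}

	int main(void)
	{
		atomic_int refcnt = 0;	/* an unused entry, by analogy with the peer cache */
		int newref = 0;

		if (add_unless_return(&refcnt, 1, -1, &newref) && newref == 1)
			printf("took the first reference; would unlink from the unused list\n");
		return 0;
	}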