@@ -69,7 +69,6 @@ static int mroute_do_pim;
 #endif
 
 static struct mfc6_cache *mfc_unres_queue;		/* Queue of unresolved entries */
-static atomic_t cache_resolve_queue_len;		/* Size of unresolved */
 
 /* Special spinlock for queue of unresolved entries */
 static DEFINE_SPINLOCK(mfc_unres_lock);
@@ -519,7 +518,7 @@ static void ip6mr_destroy_unres(struct mfc6_cache *c)
 {
 	struct sk_buff *skb;
 
-	atomic_dec(&cache_resolve_queue_len);
+	atomic_dec(&init_net.ipv6.cache_resolve_queue_len);
 
 	while((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
 		if (ipv6_hdr(skb)->version == 0) {
@@ -561,7 +560,7 @@ static void ipmr_do_expire_process(unsigned long dummy)
 			ip6mr_destroy_unres(c);
 	}
 
-	if (atomic_read(&cache_resolve_queue_len))
+	if (mfc_unres_queue != NULL)
 		mod_timer(&ipmr_expire_timer, jiffies + expires);
 }
 
@@ -572,7 +571,7 @@ static void ipmr_expire_process(unsigned long dummy)
 		return;
 	}
 
-	if (atomic_read(&cache_resolve_queue_len))
+	if (mfc_unres_queue != NULL)
 		ipmr_do_expire_process(dummy);
 
 	spin_unlock(&mfc_unres_lock);
@@ -852,7 +851,8 @@ ip6mr_cache_unresolved(mifi_t mifi, struct sk_buff *skb)
 
 	spin_lock_bh(&mfc_unres_lock);
 	for (c = mfc_unres_queue; c; c = c->next) {
-		if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
+		if (net_eq(mfc6_net(c), &init_net) &&
+		    ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
 		    ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr))
 			break;
 	}
@@ -862,7 +862,7 @@ ip6mr_cache_unresolved(mifi_t mifi, struct sk_buff *skb)
 		 *	Create a new entry if allowable
 		 */
 
-		if (atomic_read(&cache_resolve_queue_len) >= 10 ||
+		if (atomic_read(&init_net.ipv6.cache_resolve_queue_len) >= 10 ||
 		    (c = ip6mr_cache_alloc_unres(&init_net)) == NULL) {
 			spin_unlock_bh(&mfc_unres_lock);
 
@@ -891,7 +891,7 @@ ip6mr_cache_unresolved(mifi_t mifi, struct sk_buff *skb)
 			return err;
 		}
 
-		atomic_inc(&cache_resolve_queue_len);
+		atomic_inc(&init_net.ipv6.cache_resolve_queue_len);
 		c->next = mfc_unres_queue;
 		mfc_unres_queue = c;
 
@@ -1119,14 +1119,16 @@ static int ip6mr_mfc_add(struct mf6cctl *mfc, int mrtsock)
 	spin_lock_bh(&mfc_unres_lock);
 	for (cp = &mfc_unres_queue; (uc = *cp) != NULL;
 	     cp = &uc->next) {
-		if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
+		if (net_eq(mfc6_net(uc), &init_net) &&
+		    ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
 		    ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
 			*cp = uc->next;
-			if (atomic_dec_and_test(&cache_resolve_queue_len))
-				del_timer(&ipmr_expire_timer);
+			atomic_dec(&init_net.ipv6.cache_resolve_queue_len);
 			break;
 		}
 	}
+	if (mfc_unres_queue == NULL)
+		del_timer(&ipmr_expire_timer);
 	spin_unlock_bh(&mfc_unres_lock);
 
 	if (uc) {
@@ -1172,18 +1174,18 @@ static void mroute_clean_tables(struct sock *sk)
 		}
 	}
 
-	if (atomic_read(&cache_resolve_queue_len) != 0) {
-		struct mfc6_cache *c;
+	if (atomic_read(&init_net.ipv6.cache_resolve_queue_len) != 0) {
+		struct mfc6_cache *c, **cp;
 
 		spin_lock_bh(&mfc_unres_lock);
-		while (mfc_unres_queue != NULL) {
-			c = mfc_unres_queue;
-			mfc_unres_queue = c->next;
-			spin_unlock_bh(&mfc_unres_lock);
-
+		cp = &mfc_unres_queue;
+		while ((c = *cp) != NULL) {
+			if (!net_eq(mfc6_net(c), &init_net)) {
+				cp = &c->next;
+				continue;
+			}
+			*cp = c->next;
 			ip6mr_destroy_unres(c);
-
-			spin_lock_bh(&mfc_unres_lock);
 		}
 		spin_unlock_bh(&mfc_unres_lock);
 	}
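---
Note: the hunks above reference init_net.ipv6.cache_resolve_queue_len, so they
assume a companion change declaring the counter in struct netns_ipv6
(include/net/netns/ipv6.h). The expire-timer paths also switch from testing the
counter to testing mfc_unres_queue directly, since the per-netns counter no
longer tells you whether the shared global queue is empty. A minimal sketch of
the assumed declaration, under the CONFIG_IPV6_MROUTE option that gates ip6mr;
the exact placement within the struct is illustrative, not the upstream hunk:

	/* include/net/netns/ipv6.h (sketch) */
	struct netns_ipv6 {
		...
	#ifdef CONFIG_IPV6_MROUTE
		/* Number of unresolved ip6mr cache entries owned by this
		 * namespace; replaces the former file-scope counter
		 * cache_resolve_queue_len in net/ipv6/ip6mr.c. */
		atomic_t		cache_resolve_queue_len;
	#endif
	};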