@@ -62,7 +62,7 @@
  *	Device mc lists are changed by bh at least if IPv6 is enabled,
  *	so that it must be bh protected.
  *
- *	We block accesses to device mc filters with dev->xmit_lock.
+ *	We block accesses to device mc filters with netif_tx_lock.
  */
 
 /*
@@ -93,9 +93,9 @@ static void __dev_mc_upload(struct net_device *dev)
 
 void dev_mc_upload(struct net_device *dev)
 {
-	spin_lock_bh(&dev->xmit_lock);
+	netif_tx_lock_bh(dev);
 	__dev_mc_upload(dev);
-	spin_unlock_bh(&dev->xmit_lock);
+	netif_tx_unlock_bh(dev);
 }
 
 /*
@@ -107,7 +107,7 @@ int dev_mc_delete(struct net_device *dev, void *addr, int alen, int glbl)
 	int err = 0;
 	struct dev_mc_list *dmi, **dmip;
 
-	spin_lock_bh(&dev->xmit_lock);
+	netif_tx_lock_bh(dev);
 
 	for (dmip = &dev->mc_list; (dmi = *dmip) != NULL; dmip = &dmi->next) {
 		/*
@@ -139,13 +139,13 @@ int dev_mc_delete(struct net_device *dev, void *addr, int alen, int glbl)
 			 */
 			__dev_mc_upload(dev);
 
-			spin_unlock_bh(&dev->xmit_lock);
+			netif_tx_unlock_bh(dev);
 			return 0;
 		}
 	}
 	err = -ENOENT;
 done:
-	spin_unlock_bh(&dev->xmit_lock);
+	netif_tx_unlock_bh(dev);
 	return err;
 }
 
@@ -160,7 +160,7 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
 
 	dmi1 = kmalloc(sizeof(*dmi), GFP_ATOMIC);
 
-	spin_lock_bh(&dev->xmit_lock);
+	netif_tx_lock_bh(dev);
 	for (dmi = dev->mc_list; dmi != NULL; dmi = dmi->next) {
 		if (memcmp(dmi->dmi_addr, addr, dmi->dmi_addrlen) == 0 &&
 		    dmi->dmi_addrlen == alen) {
@@ -176,7 +176,7 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
 	}
 
 	if ((dmi = dmi1) == NULL) {
-		spin_unlock_bh(&dev->xmit_lock);
+		netif_tx_unlock_bh(dev);
 		return -ENOMEM;
 	}
 	memcpy(dmi->dmi_addr, addr, alen);
@@ -189,11 +189,11 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
 
 	__dev_mc_upload(dev);
 
-	spin_unlock_bh(&dev->xmit_lock);
+	netif_tx_unlock_bh(dev);
 	return 0;
 
 done:
-	spin_unlock_bh(&dev->xmit_lock);
+	netif_tx_unlock_bh(dev);
 	kfree(dmi1);
 	return err;
 }
@@ -204,7 +204,7 @@ done:
 
 void dev_mc_discard(struct net_device *dev)
 {
-	spin_lock_bh(&dev->xmit_lock);
+	netif_tx_lock_bh(dev);
 
 	while (dev->mc_list != NULL) {
 		struct dev_mc_list *tmp = dev->mc_list;
@@ -215,7 +215,7 @@ void dev_mc_discard(struct net_device *dev)
 	}
 	dev->mc_count = 0;
 
-	spin_unlock_bh(&dev->xmit_lock);
+	netif_tx_unlock_bh(dev);
 }
 
 #ifdef CONFIG_PROC_FS
@@ -250,7 +250,7 @@ static int dev_mc_seq_show(struct seq_file *seq, void *v)
 	struct dev_mc_list *m;
 	struct net_device *dev = v;
 
-	spin_lock_bh(&dev->xmit_lock);
+	netif_tx_lock_bh(dev);
 	for (m = dev->mc_list; m; m = m->next) {
 		int i;
 
@@ -262,7 +262,7 @@ static int dev_mc_seq_show(struct seq_file *seq, void *v)
 
 		seq_putc(seq, '\n');
 	}
-	spin_unlock_bh(&dev->xmit_lock);
+	netif_tx_unlock_bh(dev);
 	return 0;
 }
 
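Note: every hunk above is the same mechanical conversion. The multicast list is still
serialized by the device's transmit lock with bottom halves disabled; the code just
goes through the netif_tx_lock_bh()/netif_tx_unlock_bh() helpers instead of touching
the spinlock field directly. As a rough sketch of what those helpers do, assuming the
include/linux/netdevice.h definitions of this period (where the field is renamed to
_xmit_lock and the owning CPU is tracked in xmit_lock_owner):

static inline void netif_tx_lock_bh(struct net_device *dev)
{
	/* take the per-device transmit lock with bottom halves disabled */
	spin_lock_bh(&dev->_xmit_lock);
	/* record which CPU holds the lock, used for recursion detection */
	dev->xmit_lock_owner = smp_processor_id();
}

static inline void netif_tx_unlock_bh(struct net_device *dev)
{
	/* clear the owner before dropping the lock */
	dev->xmit_lock_owner = -1;
	spin_unlock_bh(&dev->_xmit_lock);
}

With the helpers in place, callers such as dev_mc_upload() no longer need to know how
the transmit lock is implemented, so later changes to the TX locking scheme should not
have to touch this file again.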