@@ -206,17 +206,17 @@ static void netpoll_poll_dev(struct net_device *dev)
 	 * the dev_open/close paths use this to block netpoll activity
 	 * while changing device state
 	 */
-	if (!mutex_trylock(&ni->dev_lock))
+	if (down_trylock(&ni->dev_lock))
 		return;
 
 	if (!netif_running(dev)) {
-		mutex_unlock(&ni->dev_lock);
+		up(&ni->dev_lock);
 		return;
 	}
 
 	ops = dev->netdev_ops;
 	if (!ops->ndo_poll_controller) {
-		mutex_unlock(&ni->dev_lock);
+		up(&ni->dev_lock);
 		return;
 	}
 
@@ -225,7 +225,7 @@ static void netpoll_poll_dev(struct net_device *dev)
 
 	poll_napi(dev);
 
-	mutex_unlock(&ni->dev_lock);
+	up(&ni->dev_lock);
 
 	if (dev->flags & IFF_SLAVE) {
 		if (ni) {
@@ -255,7 +255,7 @@ int netpoll_rx_disable(struct net_device *dev)
 	idx = srcu_read_lock(&netpoll_srcu);
 	ni = srcu_dereference(dev->npinfo, &netpoll_srcu);
 	if (ni)
-		mutex_lock(&ni->dev_lock);
+		down(&ni->dev_lock);
 	srcu_read_unlock(&netpoll_srcu, idx);
 	return 0;
 }
@@ -267,7 +267,7 @@ void netpoll_rx_enable(struct net_device *dev)
 	rcu_read_lock();
 	ni = rcu_dereference(dev->npinfo);
 	if (ni)
-		mutex_unlock(&ni->dev_lock);
+		up(&ni->dev_lock);
 	rcu_read_unlock();
 }
 EXPORT_SYMBOL(netpoll_rx_enable);
@@ -1047,7 +1047,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
 	INIT_LIST_HEAD(&npinfo->rx_np);
 
 	spin_lock_init(&npinfo->rx_lock);
-	mutex_init(&npinfo->dev_lock);
+	sema_init(&npinfo->dev_lock, 1);
 	skb_queue_head_init(&npinfo->neigh_tx);
 	skb_queue_head_init(&npinfo->txq);
 	INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
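
Note on the first hunk: the trylock guard loses its negation when the mutex becomes a semaphore, because mutex_trylock() returns 1 when the lock was acquired, while down_trylock() returns 0 on success. A minimal sketch of the two conventions follows (illustrative only, not part of this patch; example_mutex, example_sem and the function names are hypothetical):

/*
 * Sketch of the return-value conventions, assuming kernel context.
 * mutex_trylock(): 1 == acquired; down_trylock(): 0 == acquired.
 */
#include <linux/mutex.h>
#include <linux/semaphore.h>

static DEFINE_MUTEX(example_mutex);
static struct semaphore example_sem;

static void example_init(void)
{
	sema_init(&example_sem, 1);	/* count of 1 keeps the semaphore binary */
}

static void mutex_style(void)
{
	if (!mutex_trylock(&example_mutex))	/* 0 => lock NOT acquired */
		return;
	/* ... critical section ... */
	mutex_unlock(&example_mutex);
}

static void semaphore_style(void)
{
	if (down_trylock(&example_sem))		/* non-zero => semaphore NOT acquired */
		return;
	/* ... critical section ... */
	up(&example_sem);
}

In the same spirit, the last hunk initializes dev_lock with sema_init(&npinfo->dev_lock, 1), so the semaphore stays binary and preserves the mutual-exclusion behaviour the mutex previously provided.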