@@ -1222,52 +1222,90 @@ int dev_open(struct net_device *dev)
 }
 EXPORT_SYMBOL(dev_open);
 
-static int __dev_close(struct net_device *dev)
+static int __dev_close_many(struct list_head *head)
 {
-	const struct net_device_ops *ops = dev->netdev_ops;
+	struct net_device *dev;
 
 	ASSERT_RTNL();
 	might_sleep();
 
-	/*
-	 * Tell people we are going down, so that they can
-	 * prepare to death, when device is still operating.
-	 */
-	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
+	list_for_each_entry(dev, head, unreg_list) {
+		/*
+		 * Tell people we are going down, so that they can
+		 * prepare to death, when device is still operating.
+		 */
+		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
 
-	clear_bit(__LINK_STATE_START, &dev->state);
+		clear_bit(__LINK_STATE_START, &dev->state);
 
-	/* Synchronize to scheduled poll. We cannot touch poll list,
-	 * it can be even on different cpu. So just clear netif_running().
-	 *
-	 * dev->stop() will invoke napi_disable() on all of it's
-	 * napi_struct instances on this device.
-	 */
-	smp_mb__after_clear_bit(); /* Commit netif_running(). */
+		/* Synchronize to scheduled poll. We cannot touch poll list, it
+		 * can be even on different cpu. So just clear netif_running().
+		 *
+		 * dev->stop() will invoke napi_disable() on all of it's
+		 * napi_struct instances on this device.
+		 */
+		smp_mb__after_clear_bit(); /* Commit netif_running(). */
+	}
 
-	dev_deactivate(dev);
+	dev_deactivate_many(head);
 
-	/*
-	 * Call the device specific close. This cannot fail.
-	 * Only if device is UP
-	 *
-	 * We allow it to be called even after a DETACH hot-plug
-	 * event.
-	 */
-	if (ops->ndo_stop)
-		ops->ndo_stop(dev);
+	list_for_each_entry(dev, head, unreg_list) {
+		const struct net_device_ops *ops = dev->netdev_ops;
 
-	/*
-	 * Device is now down.
-	 */
+		/*
+		 * Call the device specific close. This cannot fail.
+		 * Only if device is UP
+		 *
+		 * We allow it to be called even after a DETACH hot-plug
+		 * event.
+		 */
+		if (ops->ndo_stop)
+			ops->ndo_stop(dev);
+
+		/*
+		 * Device is now down.
+		 */
+
+		dev->flags &= ~IFF_UP;
+
+		/*
+		 * Shutdown NET_DMA
+		 */
+		net_dmaengine_put();
+	}
 
-	dev->flags &= ~IFF_UP;
+	return 0;
+}
+
+static int __dev_close(struct net_device *dev)
+{
+	LIST_HEAD(single);
+
+	list_add(&dev->unreg_list, &single);
+	return __dev_close_many(&single);
+}
+
+int dev_close_many(struct list_head *head)
+{
+	struct net_device *dev, *tmp;
+	LIST_HEAD(tmp_list);
+
+	list_for_each_entry_safe(dev, tmp, head, unreg_list)
+		if (!(dev->flags & IFF_UP))
+			list_move(&dev->unreg_list, &tmp_list);
+
+	__dev_close_many(head);
 
 	/*
-	 * Shutdown NET_DMA
+	 * Tell people we are down
 	 */
-	net_dmaengine_put();
+	list_for_each_entry(dev, head, unreg_list) {
+		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
+		call_netdevice_notifiers(NETDEV_DOWN, dev);
+	}
 
+	/* rollback_registered_many needs the complete original list */
+	list_splice(&tmp_list, head);
 	return 0;
 }
 
@@ -1282,16 +1320,10 @@ static int __dev_close(struct net_device *dev)
  */
 int dev_close(struct net_device *dev)
 {
-	if (!(dev->flags & IFF_UP))
-		return 0;
-
-	__dev_close(dev);
+	LIST_HEAD(single);
 
-	/*
-	 * Tell people we are down
-	 */
-	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
-	call_netdevice_notifiers(NETDEV_DOWN, dev);
+	list_add(&dev->unreg_list, &single);
+	dev_close_many(&single);
 
 	return 0;
 }
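
The batched path keeps dev_close()'s external behavior: a single device is wrapped in a one-entry list and fed through dev_close_many(), which quietly skips devices that are already down. Both entry points must run under RTNL (note the ASSERT_RTNL() in __dev_close_many()) and may sleep. A hypothetical in-tree caller, an illustrative sketch only and not part of this patch, with dev1 and dev2 assumed to be held net_device references, would chain devices through their unreg_list member:

	LIST_HEAD(close_list);

	rtnl_lock();
	list_add(&dev1->unreg_list, &close_list);
	list_add(&dev2->unreg_list, &close_list);
	dev_close_many(&close_list);	/* already-down devices are filtered out */
	rtnl_unlock();

This is also the apparent point of the factoring: dev_deactivate_many() can wait for RCU readers and busy qdiscs once for the whole batch instead of once per device.
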
@@ -4963,10 +4995,12 @@ static void rollback_registered_many(struct list_head *head)
 		}
 
 		BUG_ON(dev->reg_state != NETREG_REGISTERED);
+	}
 
-		/* If device is running, close it first. */
-		dev_close(dev);
+	/* If device is running, close it first. */
+	dev_close_many(head);
 
+	list_for_each_entry(dev, head, unreg_list) {
 		/* And unlink it from device chain. */
 		unlist_netdevice(dev);
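
dev_close_many() itself is built on a small filter/process/splice-back list idiom: devices that are already down are parked on tmp_list, only running devices reach __dev_close_many(), and the parked entries are spliced back afterwards because rollback_registered_many() needs the complete original list. Below is a minimal userspace sketch of that idiom, an illustrative mock rather than kernel code: the tiny intrusive list stands in for <linux/list.h>, struct mock_dev for struct net_device, and the up field for IFF_UP.

	#include <stdio.h>
	#include <stddef.h>

	struct list_head { struct list_head *next, *prev; };

	#define LIST_HEAD(name) struct list_head name = { &(name), &(name) }

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	/* Insert n right after h, like the kernel's list_add(). */
	static void list_add(struct list_head *n, struct list_head *h)
	{
		n->next = h->next;
		n->prev = h;
		h->next->prev = n;
		h->next = n;
	}

	/* Unlink n, then re-insert it at the head of another list. */
	static void list_move(struct list_head *n, struct list_head *h)
	{
		n->prev->next = n->next;
		n->next->prev = n->prev;
		list_add(n, h);
	}

	/* Prepend all of from onto to, like the kernel's list_splice(). */
	static void list_splice(struct list_head *from, struct list_head *to)
	{
		struct list_head *first = from->next, *last = from->prev;

		if (first == from)	/* empty source list */
			return;
		first->prev = to;
		last->next = to->next;
		to->next->prev = last;
		to->next = first;
	}

	struct mock_dev {
		const char *name;
		int up;			/* stands in for dev->flags & IFF_UP */
		struct list_head unreg_list;
	};

	int main(void)
	{
		struct mock_dev d[] = { { "eth0", 1 }, { "eth1", 0 }, { "eth2", 1 } };
		LIST_HEAD(head);
		LIST_HEAD(tmp_list);
		struct list_head *p, *n;

		for (int i = 2; i >= 0; i--)	/* list_add() prepends */
			list_add(&d[i].unreg_list, &head);

		/* Filter: park already-down devices on tmp_list (mirrors the
		 * list_for_each_entry_safe()/list_move() pass in the patch). */
		for (p = head.next; p != &head; p = n) {
			n = p->next;	/* save next: p may move lists */
			if (!container_of(p, struct mock_dev, unreg_list)->up)
				list_move(p, &tmp_list);
		}

		/* Process: only running devices are left on head. */
		for (p = head.next; p != &head; p = p->next)
			printf("closing %s\n",
			       container_of(p, struct mock_dev, unreg_list)->name);

		/* Splice back: head again holds all three devices. */
		list_splice(&tmp_list, &head);

		for (p = head.next; p != &head; p = p->next)
			printf("on final list: %s\n",
			       container_of(p, struct mock_dev, unreg_list)->name);
		return 0;
	}

The saved-next iteration mirrors list_for_each_entry_safe(): the successor is read before the current node may be moved to another list, which is exactly why the patch uses the _safe variant for the filtering pass.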