@@ -1406,7 +1406,6 @@ rollback:
 				nb->notifier_call(nb, NETDEV_DOWN, dev);
 			}
 			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
-			nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
 		}
 	}
 
@@ -1448,7 +1447,6 @@ int unregister_netdevice_notifier(struct notifier_block *nb)
 				nb->notifier_call(nb, NETDEV_DOWN, dev);
 			}
 			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
-			nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
 		}
 	}
 unlock:
@@ -1468,7 +1466,8 @@ EXPORT_SYMBOL(unregister_netdevice_notifier);
 
 int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
 {
-	ASSERT_RTNL();
+	if (val != NETDEV_UNREGISTER_FINAL)
+		ASSERT_RTNL();
 	return raw_notifier_call_chain(&netdev_chain, val, dev);
 }
 EXPORT_SYMBOL(call_netdevice_notifiers);
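With this hunk, NETDEV_UNREGISTER_FINAL becomes the one event a subscriber may
receive without the RTNL held. Below is a minimal sketch of a subscriber
module, assuming only what the patch shows: the event names, and that in this
era of the kernel the notifier's void * payload is the struct net_device
itself. The demo_* names are illustrative, not from the patch.

	#include <linux/module.h>
	#include <linux/netdevice.h>

	/* Hypothetical subscriber; names are illustrative only. */
	static int demo_netdev_event(struct notifier_block *nb,
				     unsigned long event, void *ptr)
	{
		/* Pre-netdev_notifier_info kernels pass the device directly. */
		struct net_device *dev = ptr;

		switch (event) {
		case NETDEV_UNREGISTER:
			/* RTNL is held: unpublish dev from our fast paths. */
			pr_info("demo: %s is unregistering\n", dev->name);
			break;
		case NETDEV_UNREGISTER_FINAL:
			/* Per this patch, RTNL may NOT be held here; an
			 * rcu_barrier() has already run, so RCU callbacks
			 * queued by the earlier teardown stages are done.
			 */
			pr_info("demo: %s is gone\n", dev->name);
			break;
		}
		return NOTIFY_DONE;
	}

	static struct notifier_block demo_notifier = {
		.notifier_call = demo_netdev_event,
	};

	static int __init demo_init(void)
	{
		return register_netdevice_notifier(&demo_notifier);
	}

	static void __exit demo_exit(void)
	{
		unregister_netdevice_notifier(&demo_notifier);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");

The practical rule for handlers: treat NETDEV_UNREGISTER as the point to stop
publishing the device (RTNL held), and treat NETDEV_UNREGISTER_FINAL as a
late cleanup point that must not assume RTNL.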
@@ -5331,10 +5330,6 @@ static void rollback_registered_many(struct list_head *head)
 		netdev_unregister_kobject(dev);
 	}
 
-	/* Process any work delayed until the end of the batch */
-	dev = list_first_entry(head, struct net_device, unreg_list);
-	call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
-
 	synchronize_net();
 
 	list_for_each_entry(dev, head, unreg_list)
@@ -5787,9 +5782,8 @@ static void netdev_wait_allrefs(struct net_device *dev)
 
 			/* Rebroadcast unregister notification */
 			call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
-			/* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users
-			 * should have already handle it the first time */
-
+			rcu_barrier();
+			call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
 			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
 				     &dev->state)) {
 				/* We must not have linkwatch events
@@ -5851,9 +5845,8 @@ void netdev_run_todo(void)
 
 	__rtnl_unlock();
 
-	/* Wait for rcu callbacks to finish before attempting to drain
-	 * the device list. This usually avoids a 250ms wait.
-	 */
+
+	/* Wait for rcu callbacks to finish before next phase */
 	if (!list_empty(&list))
 		rcu_barrier();
 
@@ -5862,6 +5855,8 @@ void netdev_run_todo(void)
 			= list_first_entry(&list, struct net_device, todo_list);
 		list_del(&dev->todo_list);
 
+		call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
+
 		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
 			pr_err("network todo '%s' but state %d\n",
 			       dev->name, dev->reg_state);
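Taken together with the rollback_registered_many() hunk above, unregistering
a device now proceeds in the following order. This is a condensed sketch of
the sequencing the patch establishes, not verbatim kernel code; only the
function and event names come from the hunks shown.

	/*
	 * rollback_registered_many():
	 *	NETDEV_UNREGISTER		(RTNL held)
	 *	synchronize_net()
	 *
	 * netdev_run_todo():
	 *	rcu_barrier()			(pending RCU callbacks drained)
	 *	NETDEV_UNREGISTER_FINAL		(RTNL not required)
	 *	netdev_wait_allrefs():		while references remain,
	 *					periodically rebroadcast:
	 *					NETDEV_UNREGISTER,
	 *					rcu_barrier(),
	 *					NETDEV_UNREGISTER_FINAL
	 */

The rcu_barrier() ahead of each NETDEV_UNREGISTER_FINAL is what gives the new
event its meaning: by the time a subscriber sees it, RCU callbacks queued by
the earlier stages of the teardown have completed, so per-batch deferral of
the kind NETDEV_UNREGISTER_BATCH existed for is no longer needed.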
@@ -6256,7 +6251,6 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
 	   the device is just moving and can keep their slaves up.
 	*/
 	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
-	call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
 	rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
 
 	/*