@@ -2918,6 +2918,12 @@ int netdev_set_master(struct net_device *slave, struct net_device *master)
 	return 0;
 }
 
+static void dev_change_rx_flags(struct net_device *dev, int flags)
+{
+	if (dev->flags & IFF_UP && dev->change_rx_flags)
+		dev->change_rx_flags(dev, flags);
+}
+
 static int __dev_set_promiscuity(struct net_device *dev, int inc)
 {
 	unsigned short old_flags = dev->flags;
@@ -2955,8 +2961,7 @@ static int __dev_set_promiscuity(struct net_device *dev, int inc)
 				current->uid, current->gid,
 				audit_get_sessionid(current));
 
-		if (dev->change_rx_flags)
-			dev->change_rx_flags(dev, IFF_PROMISC);
+		dev_change_rx_flags(dev, IFF_PROMISC);
 	}
 	return 0;
 }
@@ -3022,8 +3027,7 @@ int dev_set_allmulti(struct net_device *dev, int inc)
 		}
 	}
 	if (dev->flags ^ old_flags) {
-		if (dev->change_rx_flags)
-			dev->change_rx_flags(dev, IFF_ALLMULTI);
+		dev_change_rx_flags(dev, IFF_ALLMULTI);
 		dev_set_rx_mode(dev);
 	}
 	return 0;
@@ -3347,8 +3351,8 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
 	 *	Load in the correct multicast list now the flags have changed.
 	 */
 
-	if (dev->change_rx_flags && (old_flags ^ flags) & IFF_MULTICAST)
-		dev->change_rx_flags(dev, IFF_MULTICAST);
+	if ((old_flags ^ flags) & IFF_MULTICAST)
+		dev_change_rx_flags(dev, IFF_MULTICAST);
 
 	dev_set_rx_mode(dev);
 
@@ -3808,14 +3812,11 @@ static int dev_new_index(struct net *net)
 }
 
 /* Delayed registration/unregisteration */
-static DEFINE_SPINLOCK(net_todo_list_lock);
 static LIST_HEAD(net_todo_list);
 
 static void net_set_todo(struct net_device *dev)
 {
-	spin_lock(&net_todo_list_lock);
 	list_add_tail(&dev->todo_list, &net_todo_list);
-	spin_unlock(&net_todo_list_lock);
 }
 
 static void rollback_registered(struct net_device *dev)
@@ -4142,33 +4143,24 @@ static void netdev_wait_allrefs(struct net_device *dev)
  *	free_netdev(y1);
  *	free_netdev(y2);
  *
- * We are invoked by rtnl_unlock() after it drops the semaphore.
+ * We are invoked by rtnl_unlock().
  * This allows us to deal with problems:
  * 1) We can delete sysfs objects which invoke hotplug
  *    without deadlocking with linkwatch via keventd.
  * 2) Since we run with the RTNL semaphore not held, we can sleep
  *    safely in order to wait for the netdev refcnt to drop to zero.
+ *
+ * We must not return until all unregister events added during
+ * the interval the lock was held have been completed.
 */
-static DEFINE_MUTEX(net_todo_run_mutex);
 void netdev_run_todo(void)
 {
 	struct list_head list;
 
-	/* Need to guard against multiple cpu's getting out of order. */
-	mutex_lock(&net_todo_run_mutex);
-
-	/* Not safe to do outside the semaphore. We must not return
-	 * until all unregister events invoked by the local processor
-	 * have been completed (either by this todo run, or one on
-	 * another cpu).
-	 */
-	if (list_empty(&net_todo_list))
-		goto out;
-
 	/* Snapshot list, allow later requests */
-	spin_lock(&net_todo_list_lock);
 	list_replace_init(&net_todo_list, &list);
-	spin_unlock(&net_todo_list_lock);
+
+	__rtnl_unlock();
 
 	while (!list_empty(&list)) {
 		struct net_device *dev
@@ -4200,9 +4192,6 @@ void netdev_run_todo(void)
 		/* Free network device */
 		kobject_put(&dev->dev.kobj);
 	}
-
-out:
-	mutex_unlock(&net_todo_run_mutex);
 }
 
 static struct net_device_stats *internal_stats(struct net_device *dev)