@@ -910,6 +910,52 @@ static DEFINE_MUTEX(xps_map_mutex);
 #define xmap_dereference(P)		\
 	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
 
+static void xps_queue_release(struct netdev_queue *queue)
+{
+	struct net_device *dev = queue->dev;
+	struct xps_dev_maps *dev_maps;
+	struct xps_map *map;
+	unsigned long index;
+	int i, pos, nonempty = 0;
+
+	index = get_netdev_queue_index(queue);
+
+	mutex_lock(&xps_map_mutex);
+	dev_maps = xmap_dereference(dev->xps_maps);
+
+	if (dev_maps) {
+		for_each_possible_cpu(i) {
+			map = xmap_dereference(dev_maps->cpu_map[i]);
+			if (!map)
+				continue;
+
+			for (pos = 0; pos < map->len; pos++)
+				if (map->queues[pos] == index)
+					break;
+
+			if (pos < map->len) {
+				if (map->len > 1)
+					map->queues[pos] =
+					    map->queues[--map->len];
+				else {
+					RCU_INIT_POINTER(dev_maps->cpu_map[i],
+					    NULL);
+					kfree_rcu(map, rcu);
+					map = NULL;
+				}
+			}
+			if (map)
+				nonempty = 1;
+		}
+
+		if (!nonempty) {
+			RCU_INIT_POINTER(dev->xps_maps, NULL);
+			kfree_rcu(dev_maps, rcu);
+		}
+	}
+	mutex_unlock(&xps_map_mutex);
+}
+
 static ssize_t store_xps_map(struct netdev_queue *queue,
 			     struct netdev_queue_attribute *attribute,
 			     const char *buf, size_t len)
@@ -1054,49 +1100,8 @@ static struct attribute *netdev_queue_default_attrs[] = {
 static void netdev_queue_release(struct kobject *kobj)
 {
 	struct netdev_queue *queue = to_netdev_queue(kobj);
-	struct net_device *dev = queue->dev;
-	struct xps_dev_maps *dev_maps;
-	struct xps_map *map;
-	unsigned long index;
-	int i, pos, nonempty = 0;
-
-	index = get_netdev_queue_index(queue);
-
-	mutex_lock(&xps_map_mutex);
-	dev_maps = xmap_dereference(dev->xps_maps);
 
-	if (dev_maps) {
-		for_each_possible_cpu(i) {
-			map = xmap_dereference(dev_maps->cpu_map[i]);
-			if (!map)
-				continue;
-
-			for (pos = 0; pos < map->len; pos++)
-				if (map->queues[pos] == index)
-					break;
-
-			if (pos < map->len) {
-				if (map->len > 1)
-					map->queues[pos] =
-					    map->queues[--map->len];
-				else {
-					RCU_INIT_POINTER(dev_maps->cpu_map[i],
-					    NULL);
-					kfree_rcu(map, rcu);
-					map = NULL;
-				}
-			}
-			if (map)
-				nonempty = 1;
-		}
-
-		if (!nonempty) {
-			RCU_INIT_POINTER(dev->xps_maps, NULL);
-			kfree_rcu(dev_maps, rcu);
-		}
-	}
-
-	mutex_unlock(&xps_map_mutex);
+	xps_queue_release(queue);
 
 	memset(kobj, 0, sizeof(*kobj));
 	dev_put(queue->dev);