|
@@ -392,20 +392,22 @@ void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time,
|
|
|
}
|
|
|
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
|
|
|
|
|
|
+/*
|
|
|
+ * should be called under rcu read lock or queue lock to make sure blkg pointer
|
|
|
+ * is valid.
|
|
|
+ */
|
|
|
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
|
|
|
uint64_t bytes, bool direction, bool sync)
|
|
|
{
|
|
|
- struct blkio_group_stats *stats;
|
|
|
- unsigned long flags;
|
|
|
+ struct blkio_group_stats_cpu *stats_cpu;
|
|
|
|
|
|
- spin_lock_irqsave(&blkg->stats_lock, flags);
|
|
|
- stats = &blkg->stats;
|
|
|
- stats->sectors += bytes >> 9;
|
|
|
- blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICED], 1, direction,
|
|
|
- sync);
|
|
|
- blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_BYTES], bytes,
|
|
|
- direction, sync);
|
|
|
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
|
|
|
+ stats_cpu = this_cpu_ptr(blkg->stats_cpu);
|
|
|
+
|
|
|
+ stats_cpu->sectors += bytes >> 9;
|
|
|
+ blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
|
|
|
+ 1, direction, sync);
|
|
|
+ blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
|
|
|
+ bytes, direction, sync);
|
|
|
}
|
|
|
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
|
|
|
|
|
@@ -440,6 +442,20 @@ void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
|
|
|
}
|
|
|
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
|
|
|
|
|
|
+/*
|
|
|
+ * This function allocates the per cpu stats for blkio_group. Should be called
|
|
|
+ * from sleepable context as alloc_percpu() requires that.
|
|
|
+ */
|
|
|
+int blkio_alloc_blkg_stats(struct blkio_group *blkg)
|
|
|
+{
|
|
|
+ /* Allocate memory for per cpu stats */
|
|
|
+ blkg->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
|
|
|
+ if (!blkg->stats_cpu)
|
|
|
+ return -ENOMEM;
|
|
|
+ return 0;
|
|
|
+}
|
|
|
+EXPORT_SYMBOL_GPL(blkio_alloc_blkg_stats);
|
|
|
+
|
|
|
void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
|
|
|
struct blkio_group *blkg, void *key, dev_t dev,
|
|
|
enum blkio_policy_id plid)
|
|
@@ -600,6 +616,53 @@ static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
|
|
|
return val;
|
|
|
}
|
|
|
|
|
|
+
|
|
|
+static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg,
|
|
|
+ enum stat_type_cpu type, enum stat_sub_type sub_type)
|
|
|
+{
|
|
|
+ int cpu;
|
|
|
+ struct blkio_group_stats_cpu *stats_cpu;
|
|
|
+ uint64_t val = 0;
|
|
|
+
|
|
|
+ for_each_possible_cpu(cpu) {
|
|
|
+ stats_cpu = per_cpu_ptr(blkg->stats_cpu, cpu);
|
|
|
+
|
|
|
+ if (type == BLKIO_STAT_CPU_SECTORS)
|
|
|
+ val += stats_cpu->sectors;
|
|
|
+ else
|
|
|
+ val += stats_cpu->stat_arr_cpu[type][sub_type];
|
|
|
+ }
|
|
|
+
|
|
|
+ return val;
|
|
|
+}
|
|
|
+
|
|
|
+static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg,
|
|
|
+ struct cgroup_map_cb *cb, dev_t dev, enum stat_type_cpu type)
|
|
|
+{
|
|
|
+ uint64_t disk_total, val;
|
|
|
+ char key_str[MAX_KEY_LEN];
|
|
|
+ enum stat_sub_type sub_type;
|
|
|
+
|
|
|
+ if (type == BLKIO_STAT_CPU_SECTORS) {
|
|
|
+ val = blkio_read_stat_cpu(blkg, type, 0);
|
|
|
+ return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb, dev);
|
|
|
+ }
|
|
|
+
|
|
|
+ for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
|
|
|
+ sub_type++) {
|
|
|
+ blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
|
|
|
+ val = blkio_read_stat_cpu(blkg, type, sub_type);
|
|
|
+ cb->fill(cb, key_str, val);
|
|
|
+ }
|
|
|
+
|
|
|
+ disk_total = blkio_read_stat_cpu(blkg, type, BLKIO_STAT_READ) +
|
|
|
+ blkio_read_stat_cpu(blkg, type, BLKIO_STAT_WRITE);
|
|
|
+
|
|
|
+ blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
|
|
|
+ cb->fill(cb, key_str, disk_total);
|
|
|
+ return disk_total;
|
|
|
+}
|
|
|
+
|
|
|
/* This should be called with blkg->stats_lock held */
|
|
|
static uint64_t blkio_get_stat(struct blkio_group *blkg,
|
|
|
struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
|
|
@@ -611,9 +674,6 @@ static uint64_t blkio_get_stat(struct blkio_group *blkg,
|
|
|
if (type == BLKIO_STAT_TIME)
|
|
|
return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
|
|
|
blkg->stats.time, cb, dev);
|
|
|
- if (type == BLKIO_STAT_SECTORS)
|
|
|
- return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
|
|
|
- blkg->stats.sectors, cb, dev);
|
|
|
#ifdef CONFIG_DEBUG_BLK_CGROUP
|
|
|
if (type == BLKIO_STAT_UNACCOUNTED_TIME)
|
|
|
return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
|
|
@@ -1077,8 +1137,8 @@ static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
|
|
|
}
|
|
|
|
|
|
static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
|
|
|
- struct cftype *cft, struct cgroup_map_cb *cb, enum stat_type type,
|
|
|
- bool show_total)
|
|
|
+ struct cftype *cft, struct cgroup_map_cb *cb,
|
|
|
+ enum stat_type type, bool show_total, bool pcpu)
|
|
|
{
|
|
|
struct blkio_group *blkg;
|
|
|
struct hlist_node *n;
|
|
@@ -1089,10 +1149,15 @@ static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
|
|
|
if (blkg->dev) {
|
|
|
if (!cftype_blkg_same_policy(cft, blkg))
|
|
|
continue;
|
|
|
- spin_lock_irq(&blkg->stats_lock);
|
|
|
- cgroup_total += blkio_get_stat(blkg, cb, blkg->dev,
|
|
|
- type);
|
|
|
- spin_unlock_irq(&blkg->stats_lock);
|
|
|
+ if (pcpu)
|
|
|
+ cgroup_total += blkio_get_stat_cpu(blkg, cb,
|
|
|
+ blkg->dev, type);
|
|
|
+ else {
|
|
|
+ spin_lock_irq(&blkg->stats_lock);
|
|
|
+ cgroup_total += blkio_get_stat(blkg, cb,
|
|
|
+ blkg->dev, type);
|
|
|
+ spin_unlock_irq(&blkg->stats_lock);
|
|
|
+ }
|
|
|
}
|
|
|
}
|
|
|
if (show_total)
|
|
@@ -1116,47 +1181,47 @@ static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
|
|
|
switch(name) {
|
|
|
case BLKIO_PROP_time:
|
|
|
return blkio_read_blkg_stats(blkcg, cft, cb,
|
|
|
- BLKIO_STAT_TIME, 0);
|
|
|
+ BLKIO_STAT_TIME, 0, 0);
|
|
|
case BLKIO_PROP_sectors:
|
|
|
return blkio_read_blkg_stats(blkcg, cft, cb,
|
|
|
- BLKIO_STAT_SECTORS, 0);
|
|
|
+ BLKIO_STAT_CPU_SECTORS, 0, 1);
|
|
|
case BLKIO_PROP_io_service_bytes:
|
|
|
return blkio_read_blkg_stats(blkcg, cft, cb,
|
|
|
- BLKIO_STAT_SERVICE_BYTES, 1);
|
|
|
+ BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
|
|
|
case BLKIO_PROP_io_serviced:
|
|
|
return blkio_read_blkg_stats(blkcg, cft, cb,
|
|
|
- BLKIO_STAT_SERVICED, 1);
|
|
|
+ BLKIO_STAT_CPU_SERVICED, 1, 1);
|
|
|
case BLKIO_PROP_io_service_time:
|
|
|
return blkio_read_blkg_stats(blkcg, cft, cb,
|
|
|
- BLKIO_STAT_SERVICE_TIME, 1);
|
|
|
+ BLKIO_STAT_SERVICE_TIME, 1, 0);
|
|
|
case BLKIO_PROP_io_wait_time:
|
|
|
return blkio_read_blkg_stats(blkcg, cft, cb,
|
|
|
- BLKIO_STAT_WAIT_TIME, 1);
|
|
|
+ BLKIO_STAT_WAIT_TIME, 1, 0);
|
|
|
case BLKIO_PROP_io_merged:
|
|
|
return blkio_read_blkg_stats(blkcg, cft, cb,
|
|
|
- BLKIO_STAT_MERGED, 1);
|
|
|
+ BLKIO_STAT_MERGED, 1, 0);
|
|
|
case BLKIO_PROP_io_queued:
|
|
|
return blkio_read_blkg_stats(blkcg, cft, cb,
|
|
|
- BLKIO_STAT_QUEUED, 1);
|
|
|
+ BLKIO_STAT_QUEUED, 1, 0);
|
|
|
#ifdef CONFIG_DEBUG_BLK_CGROUP
|
|
|
case BLKIO_PROP_unaccounted_time:
|
|
|
return blkio_read_blkg_stats(blkcg, cft, cb,
|
|
|
- BLKIO_STAT_UNACCOUNTED_TIME, 0);
|
|
|
+ BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
|
|
|
case BLKIO_PROP_dequeue:
|
|
|
return blkio_read_blkg_stats(blkcg, cft, cb,
|
|
|
- BLKIO_STAT_DEQUEUE, 0);
|
|
|
+ BLKIO_STAT_DEQUEUE, 0, 0);
|
|
|
case BLKIO_PROP_avg_queue_size:
|
|
|
return blkio_read_blkg_stats(blkcg, cft, cb,
|
|
|
- BLKIO_STAT_AVG_QUEUE_SIZE, 0);
|
|
|
+ BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
|
|
|
case BLKIO_PROP_group_wait_time:
|
|
|
return blkio_read_blkg_stats(blkcg, cft, cb,
|
|
|
- BLKIO_STAT_GROUP_WAIT_TIME, 0);
|
|
|
+ BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
|
|
|
case BLKIO_PROP_idle_time:
|
|
|
return blkio_read_blkg_stats(blkcg, cft, cb,
|
|
|
- BLKIO_STAT_IDLE_TIME, 0);
|
|
|
+ BLKIO_STAT_IDLE_TIME, 0, 0);
|
|
|
case BLKIO_PROP_empty_time:
|
|
|
return blkio_read_blkg_stats(blkcg, cft, cb,
|
|
|
- BLKIO_STAT_EMPTY_TIME, 0);
|
|
|
+ BLKIO_STAT_EMPTY_TIME, 0, 0);
|
|
|
#endif
|
|
|
default:
|
|
|
BUG();
|
|
@@ -1166,10 +1231,10 @@ static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
|
|
|
switch(name){
|
|
|
case BLKIO_THROTL_io_service_bytes:
|
|
|
return blkio_read_blkg_stats(blkcg, cft, cb,
|
|
|
- BLKIO_STAT_SERVICE_BYTES, 1);
|
|
|
+ BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
|
|
|
case BLKIO_THROTL_io_serviced:
|
|
|
return blkio_read_blkg_stats(blkcg, cft, cb,
|
|
|
- BLKIO_STAT_SERVICED, 1);
|
|
|
+ BLKIO_STAT_CPU_SERVICED, 1, 1);
|
|
|
default:
|
|
|
BUG();
|
|
|
}
|