@@ -248,9 +248,9 @@ struct cfq_group {
 	struct cfq_rb_root service_trees[2][3];
 	struct cfq_rb_root service_tree_idle;
 
-	unsigned long saved_workload_slice;
-	enum wl_type_t saved_workload;
-	enum wl_class_t saved_serving_class;
+	unsigned long saved_wl_slice;
+	enum wl_type_t saved_wl_type;
+	enum wl_class_t saved_wl_class;
 
 	/* number of requests that are on the dispatch list or inside driver */
 	int dispatched;
@@ -280,8 +280,8 @@ struct cfq_data {
 	/*
 	 * The priority currently being served
 	 */
-	enum wl_class_t serving_class;
-	enum wl_type_t serving_type;
+	enum wl_class_t serving_wl_class;
+	enum wl_type_t serving_wl_type;
 	unsigned long workload_expires;
 	struct cfq_group *serving_group;
 
@@ -1241,7 +1241,7 @@ cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
 
 	cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
 	cfq_group_service_tree_del(st, cfqg);
-	cfqg->saved_workload_slice = 0;
+	cfqg->saved_wl_slice = 0;
 	cfqg_stats_update_dequeue(cfqg);
 }
 
@@ -1301,12 +1301,12 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
 
 	/* This group is being expired. Save the context */
 	if (time_after(cfqd->workload_expires, jiffies)) {
-		cfqg->saved_workload_slice = cfqd->workload_expires
+		cfqg->saved_wl_slice = cfqd->workload_expires
 						- jiffies;
-		cfqg->saved_workload = cfqd->serving_type;
-		cfqg->saved_serving_class = cfqd->serving_class;
+		cfqg->saved_wl_type = cfqd->serving_wl_type;
+		cfqg->saved_wl_class = cfqd->serving_wl_class;
 	} else
-		cfqg->saved_workload_slice = 0;
+		cfqg->saved_wl_slice = 0;
 
 	cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
 					st->min_vdisktime);
@@ -2031,7 +2031,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
 {
 	if (cfqq) {
 		cfq_log_cfqq(cfqd, cfqq, "set_active wl_class:%d wl_type:%d",
-				cfqd->serving_class, cfqd->serving_type);
+				cfqd->serving_wl_class, cfqd->serving_wl_type);
 		cfqg_stats_update_avg_queue_size(cfqq->cfqg);
 		cfqq->slice_start = 0;
 		cfqq->dispatch_start = jiffies;
@@ -2118,8 +2118,8 @@ static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
 static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
 {
 	struct cfq_rb_root *service_tree =
-		service_tree_for(cfqd->serving_group, cfqd->serving_class,
-					cfqd->serving_type);
+		service_tree_for(cfqd->serving_group, cfqd->serving_wl_class,
+					cfqd->serving_wl_type);
 
 	if (!cfqd->rq_queued)
 		return NULL;
@@ -2523,20 +2523,20 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
 	unsigned count;
 	struct cfq_rb_root *st;
 	unsigned group_slice;
-	enum wl_class_t original_class = cfqd->serving_class;
+	enum wl_class_t original_class = cfqd->serving_wl_class;
 
 	/* Choose next priority. RT > BE > IDLE */
 	if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
-		cfqd->serving_class = RT_WORKLOAD;
+		cfqd->serving_wl_class = RT_WORKLOAD;
 	else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
-		cfqd->serving_class = BE_WORKLOAD;
+		cfqd->serving_wl_class = BE_WORKLOAD;
 	else {
-		cfqd->serving_class = IDLE_WORKLOAD;
+		cfqd->serving_wl_class = IDLE_WORKLOAD;
 		cfqd->workload_expires = jiffies + 1;
 		return;
 	}
 
-	if (original_class != cfqd->serving_class)
+	if (original_class != cfqd->serving_wl_class)
 		goto new_workload;
 
 	/*
@@ -2544,7 +2544,8 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
 	 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
 	 * expiration time
 	 */
-	st = service_tree_for(cfqg, cfqd->serving_class, cfqd->serving_type);
+	st = service_tree_for(cfqg, cfqd->serving_wl_class,
+					cfqd->serving_wl_type);
 	count = st->count;
 
 	/*
@@ -2555,9 +2556,10 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
 
 new_workload:
 	/* otherwise select new workload type */
-	cfqd->serving_type =
-		cfq_choose_wl(cfqd, cfqg, cfqd->serving_class);
-	st = service_tree_for(cfqg, cfqd->serving_class, cfqd->serving_type);
+	cfqd->serving_wl_type = cfq_choose_wl(cfqd, cfqg,
+				cfqd->serving_wl_class);
+	st = service_tree_for(cfqg, cfqd->serving_wl_class,
+					cfqd->serving_wl_type);
 	count = st->count;
 
 	/*
@@ -2568,11 +2570,11 @@ new_workload:
 	group_slice = cfq_group_slice(cfqd, cfqg);
 
 	slice = group_slice * count /
-		max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_class],
-		      cfq_group_busy_queues_wl(cfqd->serving_class, cfqd,
+		max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_wl_class],
+		      cfq_group_busy_queues_wl(cfqd->serving_wl_class, cfqd,
 					cfqg));
 
-	if (cfqd->serving_type == ASYNC_WORKLOAD) {
+	if (cfqd->serving_wl_type == ASYNC_WORKLOAD) {
 		unsigned int tmp;
 
 		/*
@@ -2618,10 +2620,10 @@ static void cfq_choose_cfqg(struct cfq_data *cfqd)
 	cfqd->serving_group = cfqg;
 
 	/* Restore the workload type data */
-	if (cfqg->saved_workload_slice) {
-		cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
-		cfqd->serving_type = cfqg->saved_workload;
-		cfqd->serving_class = cfqg->saved_serving_class;
+	if (cfqg->saved_wl_slice) {
+		cfqd->workload_expires = jiffies + cfqg->saved_wl_slice;
+		cfqd->serving_wl_type = cfqg->saved_wl_type;
+		cfqd->serving_wl_class = cfqg->saved_wl_class;
 	} else
 		cfqd->workload_expires = jiffies - 1;
 
@@ -3404,7 +3406,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 		return true;
 
 	/* Allow preemption only if we are idling on sync-noidle tree */
-	if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
+	if (cfqd->serving_wl_type == SYNC_NOIDLE_WORKLOAD &&
 	    cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
 	    new_cfqq->service_tree->count == 2 &&
 	    RB_EMPTY_ROOT(&cfqq->sort_list))
@@ -3456,7 +3458,7 @@ static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	 * doesn't happen
 	 */
 	if (old_type != cfqq_type(cfqq))
-		cfqq->cfqg->saved_workload_slice = 0;
+		cfqq->cfqg->saved_wl_slice = 0;
 
 	/*
 	 * Put the new queue at the front of the of the current list,