@@ -283,7 +283,7 @@ struct cfq_data {
	 */
	struct cfq_queue oom_cfqq;
 
-	unsigned long last_end_sync_rq;
+	unsigned long last_delayed_sync;
 
	/* List of cfq groups being managed on this device*/
	struct hlist_head cfqg_list;
@@ -319,7 +319,6 @@ enum cfqq_state_flags {
	CFQ_CFQQ_FLAG_coop,		/* cfqq is shared */
	CFQ_CFQQ_FLAG_deep,		/* sync cfqq experienced large depth */
	CFQ_CFQQ_FLAG_wait_busy,	/* Waiting for next request */
-	CFQ_CFQQ_FLAG_wait_busy_done,	/* Got new request. Expire the queue */
 };
 
 #define CFQ_CFQQ_FNS(name)						\
@@ -348,7 +347,6 @@ CFQ_CFQQ_FNS(sync);
 CFQ_CFQQ_FNS(coop);
 CFQ_CFQQ_FNS(deep);
 CFQ_CFQQ_FNS(wait_busy);
-CFQ_CFQQ_FNS(wait_busy_done);
 #undef CFQ_CFQQ_FNS
 
 #ifdef CONFIG_DEBUG_CFQ_IOSCHED
@@ -1574,7 +1572,6 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 
	cfq_clear_cfqq_wait_request(cfqq);
	cfq_clear_cfqq_wait_busy(cfqq);
-	cfq_clear_cfqq_wait_busy_done(cfqq);
 
	/*
	 * store what was left of this slice, if the queue idled/timed out
@@ -1749,6 +1746,12 @@ static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
	if (CFQQ_SEEKY(cur_cfqq))
		return NULL;
 
+	/*
+	 * Don't search priority tree if it's the only queue in the group.
+	 */
+	if (cur_cfqq->cfqg->nr_cfqq == 1)
+		return NULL;
+
	/*
	 * We should notice if some of the queues are cooperating, eg
	 * working closely on the same area of the disk. In that case,
@@ -2110,7 +2113,9 @@ static void cfq_choose_cfqg(struct cfq_data *cfqd)
		cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
		cfqd->serving_type = cfqg->saved_workload;
		cfqd->serving_prio = cfqg->saved_serving_prio;
-	}
+	} else
+		cfqd->workload_expires = jiffies - 1;
+
	choose_service_tree(cfqd, cfqg);
 }
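The new else branch backdates workload_expires by one jiffy when the group has
no saved workload slice to restore, so any subsequent jiffies-based expiry
check treats the workload slice as already used up and choose_service_tree()
picks a fresh workload. A minimal userspace sketch of that backdating idiom
(jiffies and time_after() are modeled with plain unsigned longs here; names
are illustrative, not kernel API):

    #include <stdio.h>

    /* wrap-safe comparison in the style of the kernel's time_after() */
    static int time_after(unsigned long a, unsigned long b)
    {
        return (long)(b - a) < 0;
    }

    int main(void)
    {
        unsigned long jiffies = 1000;                 /* pretend tick counter */
        unsigned long workload_expires = jiffies - 1; /* backdated expiry */

        if (time_after(jiffies, workload_expires))
            printf("workload expired, choose a new service tree\n");
        return 0;
    }
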
@@ -2128,13 +2133,34 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 
	if (!cfqd->rq_queued)
		return NULL;
+
	/*
-	 * The active queue has run out of time, expire it and select new.
+	 * We were waiting for the group to get backlogged. Expire the queue.
	 */
-	if ((cfq_slice_used(cfqq) || cfq_cfqq_wait_busy_done(cfqq))
-	    && !cfq_cfqq_must_dispatch(cfqq))
+	if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
		goto expire;
 
+	/*
+	 * The active queue has run out of time, expire it and select new.
+	 */
+	if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
+		/*
+		 * If the slice had not expired at the completion of the last
+		 * request, we might not have turned on the wait_busy flag.
+		 * Don't expire the queue yet; allow the group to get backlogged.
+		 *
+		 * The very fact that we have used up the slice means we have
+		 * been idling all along on this queue, so it is ok to wait
+		 * for this request to complete.
+		 */
+		if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
+		    && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
+			cfqq = NULL;
+			goto keep_queue;
+		} else
+			goto expire;
+	}
+
	/*
	 * The active queue has requests and isn't expired, allow it to
	 * dispatch.
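The hunk above reorders the expiry decision: a queue we armed for wait-busy is
expired as soon as its group is backlogged, while a lone in-group queue with a
request still in flight is held past its slice so the group does not lose its
share. A condensed userspace model of that decision order (all the struct
fields and names below are illustrative stand-ins for the cfqq/cfqg state the
scheduler actually consults; this is a sketch, not kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    struct queue_state {
        bool wait_busy;      /* we armed the wait-busy timer */
        bool has_queued_rqs; /* !RB_EMPTY_ROOT(&sort_list) */
        bool slice_used;     /* cfq_slice_used() */
        bool must_dispatch;
        bool last_in_group;  /* cfqg->nr_cfqq == 1 */
        bool in_flight;      /* cfqq->dispatched > 0 */
        bool should_idle;    /* cfq_should_idle() */
    };

    enum decision { EXPIRE, KEEP_WAITING, DISPATCH };

    static enum decision select_queue(const struct queue_state *q)
    {
        /* group got backlogged while we waited: stop waiting, expire */
        if (q->wait_busy && q->has_queued_rqs)
            return EXPIRE;

        if (q->slice_used && !q->must_dispatch) {
            /*
             * Lone queue in the group with a request still in flight:
             * hold off expiry so the group can get backlogged.
             */
            if (q->last_in_group && !q->has_queued_rqs &&
                q->in_flight && q->should_idle)
                return KEEP_WAITING;
            return EXPIRE;
        }
        return DISPATCH;
    }

    int main(void)
    {
        struct queue_state q = { .slice_used = true, .last_in_group = true,
                                 .in_flight = true, .should_idle = true };
        printf("%d\n", select_queue(&q)); /* prints 1 (KEEP_WAITING) */
        return 0;
    }
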
@@ -2264,7 +2290,7 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
	 * based on the last sync IO we serviced
	 */
	if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
-		unsigned long last_sync = jiffies - cfqd->last_end_sync_rq;
+		unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
		unsigned int depth;
 
		depth = last_sync / cfqd->cfq_slice[1];
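A worked example of the async-depth throttle above: the allowed async queue
depth grows with the time since the last delayed sync completion, in units of
the sync slice. The default sync slice of 100ms (cfq_slice[1] = HZ/10 at
HZ = 1000) is assumed here for illustration:

    #include <stdio.h>

    int main(void)
    {
        const unsigned long HZ = 1000;
        unsigned long cfq_slice_sync = HZ / 10;    /* 100ms, cfq_slice[1] */
        unsigned long jiffies = 5000;
        unsigned long last_delayed_sync = 4700;    /* delayed sync 300ms ago */

        unsigned long last_sync = jiffies - last_delayed_sync;
        unsigned int depth = last_sync / cfq_slice_sync;

        /* 300 / 100 = 3: up to 3 async requests may be dispatched */
        printf("async depth = %u\n", depth);
        return 0;
    }
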
@@ -3165,10 +3191,6 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
	cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
 
	if (cfqq == cfqd->active_queue) {
-		if (cfq_cfqq_wait_busy(cfqq)) {
-			cfq_clear_cfqq_wait_busy(cfqq);
-			cfq_mark_cfqq_wait_busy_done(cfqq);
-		}
		/*
		 * Remember that we saw a request from this process, but
		 * don't start queuing just yet. Otherwise we risk seeing lots
@@ -3183,6 +3205,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
		    cfqd->busy_queues > 1) {
			del_timer(&cfqd->idle_slice_timer);
+			cfq_clear_cfqq_wait_request(cfqq);
			__blk_run_queue(cfqd->queue);
		} else
			cfq_mark_cfqq_must_dispatch(cfqq);
@@ -3251,6 +3274,35 @@ static void cfq_update_hw_tag(struct cfq_data *cfqd)
		cfqd->hw_tag = 0;
 }
 
+static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+	struct cfq_io_context *cic = cfqd->active_cic;
+
+	/* If there are other queues in the group, don't wait */
+	if (cfqq->cfqg->nr_cfqq > 1)
+		return false;
+
+	if (cfq_slice_used(cfqq))
+		return true;
+
+	/* if slice left is less than think time, wait busy */
+	if (cic && sample_valid(cic->ttime_samples)
+	    && (cfqq->slice_end - jiffies < cic->ttime_mean))
+		return true;
+
+	/*
+	 * If think time is less than a jiffy, then ttime_mean=0 and the
+	 * above check will not be true. It might happen that the slice
+	 * has not expired yet but will expire soon (4-5 ns) during
+	 * select_queue(). To cover that case, mark the queue wait busy
+	 * if only 1 jiffy is left in the slice.
+	 */
+	if (cfqq->slice_end - jiffies == 1)
+		return true;
+
+	return false;
+}
+
 static void cfq_completed_request(struct request_queue *q, struct request *rq)
 {
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
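A userspace model of the wait-busy decision above, showing why the explicit
one-jiffy fallback is needed: with a sub-jiffy think time, ttime_mean rounds
down to 0, so the strict "remaining < ttime_mean" comparison can never fire.
sample_valid() and the cfqq/cic fields are stand-ins here; this sketches the
arithmetic only:

    #include <stdbool.h>
    #include <stdio.h>

    static bool should_wait_busy(unsigned long remaining_jiffies,
                                 unsigned long ttime_mean, bool ttime_valid,
                                 bool slice_used, int queues_in_group)
    {
        if (queues_in_group > 1)    /* others can keep the group busy */
            return false;
        if (slice_used)
            return true;
        if (ttime_valid && remaining_jiffies < ttime_mean)
            return true;            /* next rq likely after slice end */
        if (remaining_jiffies == 1) /* sub-jiffy think time fallback */
            return true;
        return false;
    }

    int main(void)
    {
        /* ttime_mean == 0: only the fallback catches the 1-jiffy case */
        printf("%d\n", should_wait_busy(1, 0, true, false, 1)); /* 1 */
        printf("%d\n", should_wait_busy(5, 0, true, false, 1)); /* 0 */
        return 0;
    }
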
@@ -3273,7 +3325,8 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 
	if (sync) {
		RQ_CIC(rq)->last_end_request = now;
-		cfqd->last_end_sync_rq = now;
+		if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
+			cfqd->last_delayed_sync = now;
	}
 
	/*
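The hunk above makes sync completions refresh last_delayed_sync only when the
request actually sat in the queue for at least cfq_fifo_expire[1] ticks, so
quickly-served syncs no longer throttle async depth. A sketch of that "was
this sync delayed?" test, assuming the default sync fifo expire of 125ms
(HZ/8 at HZ = 1000) for illustration:

    #include <stdio.h>

    static int time_after(unsigned long a, unsigned long b)
    {
        return (long)(b - a) < 0;   /* wrap-safe, as in the kernel */
    }

    int main(void)
    {
        const unsigned long fifo_expire_sync = 125; /* cfq_fifo_expire[1] */
        unsigned long now = 10000;
        unsigned long start_time = 9800;            /* queued 200 ticks ago */

        if (!time_after(start_time + fifo_expire_sync, now))
            printf("sync was delayed, record last_delayed_sync\n");
        return 0;
    }
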
@@ -3289,11 +3342,10 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
	}
 
	/*
-	 * If this queue consumed its slice and this is last queue
-	 * in the group, wait for next request before we expire
-	 * the queue
+	 * Should we wait for the next request to come in before we expire
+	 * the queue?
	 */
-	if (cfq_slice_used(cfqq) && cfqq->cfqg->nr_cfqq == 1) {
+	if (cfq_should_wait_busy(cfqd, cfqq)) {
		cfqq->slice_end = jiffies + cfqd->cfq_slice_idle;
		cfq_mark_cfqq_wait_busy(cfqq);
	}
@@ -3711,7 +3763,11 @@ static void *cfq_init_queue(struct request_queue *q)
	cfqd->cfq_latency = 1;
	cfqd->cfq_group_isolation = 0;
	cfqd->hw_tag = -1;
-	cfqd->last_end_sync_rq = jiffies;
+	/*
+	 * We optimistically start out assuming sync ops weren't delayed in
+	 * the last second, in order to allow larger depth for async ops.
+	 */
+	cfqd->last_delayed_sync = jiffies - HZ;
	INIT_RCU_HEAD(&cfqd->rcu);
	return cfqd;
 }
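The jiffies - HZ initialization feeds directly into the depth formula in
cfq_may_dispatch(): right after init, last_sync evaluates to a full second,
so async I/O starts with a generous depth instead of being throttled. HZ =
1000 and the default 100ms sync slice are assumed here for illustration:

    #include <stdio.h>

    int main(void)
    {
        const unsigned long HZ = 1000;
        unsigned long jiffies = 42424;                  /* arbitrary */
        unsigned long last_delayed_sync = jiffies - HZ; /* as initialized */

        unsigned long last_sync = jiffies - last_delayed_sync; /* == HZ */
        unsigned int depth = last_sync / (HZ / 10);     /* cfq_slice[1] */

        printf("initial async depth = %u\n", depth);    /* prints 10 */
        return 0;
    }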