@@ -129,8 +129,6 @@ struct cfq_queue {
 	unsigned long slice_end;
 	long slice_resid;
 
-	/* pending metadata requests */
-	int meta_pending;
 	/* number of requests that are on the dispatch list or inside driver */
 	int dispatched;
 
@@ -670,9 +668,6 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2,
 	if (rq_is_sync(rq1) != rq_is_sync(rq2))
 		return rq_is_sync(rq1) ? rq1 : rq2;
 
-	if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_META)
-		return rq1->cmd_flags & REQ_META ? rq1 : rq2;
-
 	s1 = blk_rq_pos(rq1);
 	s2 = blk_rq_pos(rq2);
 
@@ -1593,10 +1588,6 @@ static void cfq_remove_request(struct request *rq)
 	cfqq->cfqd->rq_queued--;
 	cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
 					rq_data_dir(rq), rq_is_sync(rq));
-	if (rq->cmd_flags & REQ_META) {
-		WARN_ON(!cfqq->meta_pending);
-		cfqq->meta_pending--;
-	}
 }
 
 static int cfq_merge(struct request_queue *q, struct request **req,
@@ -3334,13 +3325,6 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 	    RB_EMPTY_ROOT(&cfqq->sort_list))
 		return true;
 
-	/*
-	 * So both queues are sync. Let the new request get disk time if
-	 * it's a metadata request and the current queue is doing regular IO.
-	 */
-	if ((rq->cmd_flags & REQ_META) && !cfqq->meta_pending)
-		return true;
-
 	/*
 	 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
 	 */
@@ -3405,8 +3389,6 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	struct cfq_io_context *cic = RQ_CIC(rq);
 
 	cfqd->rq_queued++;
-	if (rq->cmd_flags & REQ_META)
-		cfqq->meta_pending++;
 
 	cfq_update_io_thinktime(cfqd, cic);
 	cfq_update_io_seektime(cfqd, cfqq, rq);