@@ -47,6 +47,7 @@ static const int cfq_hist_divisor = 4;
 #define CFQ_SERVICE_SHIFT 12
 
 #define CFQQ_SEEK_THR (sector_t)(8 * 100)
+#define CFQQ_CLOSE_THR (sector_t)(8 * 1024)
 #define CFQQ_SECT_THR_NONROT (sector_t)(2 * 32)
 #define CFQQ_SEEKY(cfqq) (hweight32(cfqq->seek_history) > 32/8)
 
@@ -1660,9 +1661,9 @@ static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
 }
 
 static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-			       struct request *rq, bool for_preempt)
+			       struct request *rq)
 {
-	return cfq_dist_from_last(cfqd, rq) <= CFQQ_SEEK_THR;
+	return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
 }
 
 static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
@@ -1689,7 +1690,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
	 * will contain the closest sector.
	 */
 	__cfqq = rb_entry(parent, struct cfq_queue, p_node);
-	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq, false))
+	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
 		return __cfqq;
 
 	if (blk_rq_pos(__cfqq->next_rq) < sector)
@@ -1700,7 +1701,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
 		return NULL;
 
 	__cfqq = rb_entry(node, struct cfq_queue, p_node);
-	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq, false))
+	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
 		return __cfqq;
 
 	return NULL;
@@ -3103,7 +3104,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
	 * if this request is as-good as one we would expect from the
	 * current cfqq, let it preempt
	 */
-	if (cfq_rq_close(cfqd, cfqq, rq, true))
+	if (cfq_rq_close(cfqd, cfqq, rq))
 		return true;
 
 	return false;