@@ -191,8 +191,14 @@ struct cfq_data {
 	 */
 	int rq_queued;
 	int hw_tag;
-	int hw_tag_samples;
-	int rq_in_driver_peak;
+	/*
+	 * hw_tag can be
+	 * -1 => indeterminate, (cfq will behave as if NCQ is present, to allow better detection)
+	 *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
+	 *  0 => no NCQ
+	 */
+	int hw_tag_est_depth;
+	unsigned int hw_tag_samples;
 
 	/*
 	 * idle window management
@@ -2518,8 +2524,11 @@ static void cfq_update_hw_tag(struct cfq_data *cfqd)
 {
 	struct cfq_queue *cfqq = cfqd->active_queue;
 
-	if (rq_in_driver(cfqd) > cfqd->rq_in_driver_peak)
-		cfqd->rq_in_driver_peak = rq_in_driver(cfqd);
+	if (rq_in_driver(cfqd) > cfqd->hw_tag_est_depth)
+		cfqd->hw_tag_est_depth = rq_in_driver(cfqd);
+
+	if (cfqd->hw_tag == 1)
+		return;
 
 	if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
 	    rq_in_driver(cfqd) <= CFQ_HW_QUEUE_MIN)
@@ -2538,13 +2547,10 @@ static void cfq_update_hw_tag(struct cfq_data *cfqd)
 	if (cfqd->hw_tag_samples++ < 50)
 		return;
 
-	if (cfqd->rq_in_driver_peak >= CFQ_HW_QUEUE_MIN)
+	if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
 		cfqd->hw_tag = 1;
 	else
 		cfqd->hw_tag = 0;
-
-	cfqd->hw_tag_samples = 0;
-	cfqd->rq_in_driver_peak = 0;
 }
 
 static void cfq_completed_request(struct request_queue *q, struct request *rq)
@@ -2951,7 +2957,7 @@ static void *cfq_init_queue(struct request_queue *q)
 	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
 	cfqd->cfq_slice_idle = cfq_slice_idle;
 	cfqd->cfq_latency = 1;
-	cfqd->hw_tag = 1;
+	cfqd->hw_tag = -1;
 	cfqd->last_end_sync_rq = jiffies;
 	return cfqd;
 }
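
For reference, here is a minimal userspace sketch of how the detection heuristic behaves after this patch: the peak in-driver depth keeps being tracked, a positive NCQ verdict is never revisited, low-load samples are skipped, and the verdict is made after 50 qualifying samples. The struct, the update_hw_tag() wrapper, the invented depth feed in main(), and the value 5 for CFQ_HW_QUEUE_MIN are assumptions for illustration only, and the active-queue idle-window check that sits between the second and third hunks is omitted.

/*
 * Standalone sketch of the patched NCQ detection heuristic.  Not kernel
 * code: the struct, the sample feed and CFQ_HW_QUEUE_MIN == 5 are assumed
 * here purely to illustrate the control flow of the patched function.
 */
#include <stdio.h>

#define CFQ_HW_QUEUE_MIN	5	/* assumed value of the kernel constant */

struct hw_tag_state {
	int hw_tag;			/* -1 indeterminate, 1 NCQ present, 0 no NCQ */
	int hw_tag_est_depth;		/* highest in-driver depth seen so far */
	unsigned int hw_tag_samples;	/* samples taken under sufficient load */
};

/* Mirrors the patched cfq_update_hw_tag() flow, fed with the number of
 * requests currently in the driver and queued in the scheduler. */
static void update_hw_tag(struct hw_tag_state *s, int rq_in_driver, int rq_queued)
{
	/* Always keep tracking the estimated maximum depth. */
	if (rq_in_driver > s->hw_tag_est_depth)
		s->hw_tag_est_depth = rq_in_driver;

	/* Once NCQ has been positively detected, never re-decide. */
	if (s->hw_tag == 1)
		return;

	/* Samples taken under low load cannot prove NCQ absence; skip them. */
	if (rq_queued <= CFQ_HW_QUEUE_MIN && rq_in_driver <= CFQ_HW_QUEUE_MIN)
		return;

	/* Collect 50 qualifying samples before deciding. */
	if (s->hw_tag_samples++ < 50)
		return;

	/* Decide based on the peak depth observed so far. */
	s->hw_tag = (s->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN) ? 1 : 0;
}

int main(void)
{
	struct hw_tag_state s = { .hw_tag = -1 };	/* start indeterminate, as the patch does */
	int depth[] = { 2, 3, 9, 4, 7, 1 };		/* invented in-driver depths */

	/* Feed 60 completions with enough queued work for samples to count. */
	for (int i = 0; i < 60; i++)
		update_hw_tag(&s, depth[i % 6], 8);

	/* Prints hw_tag=1 est_depth=9: depth above 5 was seen, so NCQ sticks. */
	printf("hw_tag=%d est_depth=%d\n", s.hw_tag, s.hw_tag_est_depth);
	return 0;
}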