@@ -92,6 +92,8 @@ struct cfq_data {
 	struct cfq_queue *active_queue;
 	struct cfq_io_context *active_cic;
 
+	struct cfq_queue *async_cfqq[IOPRIO_BE_NR];
+
 	struct timer_list idle_class_timer;
 
 	sector_t last_position;
@@ -1351,8 +1353,8 @@ static void cfq_ioc_set_ioprio(struct io_context *ioc)
 }
 
 static struct cfq_queue *
-cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
-	      gfp_t gfp_mask)
+cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
+		     struct task_struct *tsk, gfp_t gfp_mask)
 {
 	struct cfq_queue *cfqq, *new_cfqq = NULL;
 	struct cfq_io_context *cic;
@@ -1405,12 +1407,35 @@ retry:
 	if (new_cfqq)
 		kmem_cache_free(cfq_pool, new_cfqq);
 
-	atomic_inc(&cfqq->ref);
 out:
 	WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
 	return cfqq;
 }
 
+static struct cfq_queue *
+cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
+	      gfp_t gfp_mask)
+{
+	const int ioprio = task_ioprio(tsk);
+	struct cfq_queue *cfqq = NULL;
+
+	if (!is_sync)
+		cfqq = cfqd->async_cfqq[ioprio];
+	if (!cfqq)
+		cfqq = cfq_find_alloc_queue(cfqd, is_sync, tsk, gfp_mask);
+
+	/*
+	 * pin the queue now that it's allocated, scheduler exit will prune it
+	 */
+	if (!is_sync && !cfqd->async_cfqq[ioprio]) {
+		atomic_inc(&cfqq->ref);
+		cfqd->async_cfqq[ioprio] = cfqq;
+	}
+
+	atomic_inc(&cfqq->ref);
+	return cfqq;
+}
+
 /*
  * We drop cfq io contexts lazily, so we may find a dead one.
  */
@@ -2019,6 +2044,7 @@ static void cfq_exit_queue(elevator_t *e)
 {
 	struct cfq_data *cfqd = e->elevator_data;
 	request_queue_t *q = cfqd->queue;
+	int i;
 
 	cfq_shutdown_timer_wq(cfqd);
 
@@ -2035,6 +2061,13 @@ static void cfq_exit_queue(elevator_t *e)
 		__cfq_exit_single_io_context(cfqd, cic);
 	}
 
+	/*
+	 * Put the async queues
+	 */
+	for (i = 0; i < IOPRIO_BE_NR; i++)
+		if (cfqd->async_cfqq[i])
+			cfq_put_queue(cfqd->async_cfqq[i]);
+
 	spin_unlock_irq(q->queue_lock);
 
 	cfq_shutdown_timer_wq(cfqd);