@@ -2935,117 +2935,6 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
 	return cfqq;
 }
 
-/**
- * ioc_create_icq - create and link io_cq
- * @q: request_queue of interest
- * @gfp_mask: allocation mask
- *
- * Make sure io_cq linking %current->io_context and @q exists. If either
- * io_context and/or icq don't exist, they will be created using @gfp_mask.
- *
- * The caller is responsible for ensuring @ioc won't go away and @q is
- * alive and will stay alive until this function returns.
- */
-static struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
-{
-	struct elevator_type *et = q->elevator->type;
-	struct io_context *ioc;
-	struct io_cq *icq;
-
-	/* allocate stuff */
-	ioc = create_io_context(current, gfp_mask, q->node);
-	if (!ioc)
-		return NULL;
-
-	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
-				    q->node);
-	if (!icq)
-		return NULL;
-
-	if (radix_tree_preload(gfp_mask) < 0) {
-		kmem_cache_free(et->icq_cache, icq);
-		return NULL;
-	}
-
-	icq->ioc = ioc;
-	icq->q = q;
-	INIT_LIST_HEAD(&icq->q_node);
-	INIT_HLIST_NODE(&icq->ioc_node);
-
-	/* lock both q and ioc and try to link @icq */
-	spin_lock_irq(q->queue_lock);
-	spin_lock(&ioc->lock);
-
-	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
-		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
-		list_add(&icq->q_node, &q->icq_list);
-		if (et->ops.elevator_init_icq_fn)
-			et->ops.elevator_init_icq_fn(icq);
-	} else {
-		kmem_cache_free(et->icq_cache, icq);
-		icq = ioc_lookup_icq(ioc, q);
-		if (!icq)
-			printk(KERN_ERR "cfq: icq link failed!\n");
-	}
-
-	spin_unlock(&ioc->lock);
-	spin_unlock_irq(q->queue_lock);
-	radix_tree_preload_end();
-	return icq;
-}
-
-/**
- * cfq_get_cic - acquire cfq_io_cq and bump refcnt on io_context
- * @cfqd: cfqd to setup cic for
- * @gfp_mask: allocation mask
- *
- * Return cfq_io_cq associating @cfqd and %current->io_context and
- * bump refcnt on io_context. If ioc or cic doesn't exist, they're created
- * using @gfp_mask.
- *
- * Must be called under queue_lock which may be released and re-acquired.
- * This function also may sleep depending on @gfp_mask.
- */
-static struct cfq_io_cq *cfq_get_cic(struct cfq_data *cfqd, gfp_t gfp_mask)
-{
-	struct request_queue *q = cfqd->queue;
-	struct cfq_io_cq *cic = NULL;
-	struct io_context *ioc;
-
-	lockdep_assert_held(q->queue_lock);
-
-	while (true) {
-		/* fast path */
-		ioc = current->io_context;
-		if (likely(ioc)) {
-			cic = cfq_cic_lookup(cfqd, ioc);
-			if (likely(cic))
-				break;
-		}
-
-		/* slow path - unlock, create missing ones and retry */
-		spin_unlock_irq(q->queue_lock);
-		cic = icq_to_cic(ioc_create_icq(q, gfp_mask));
-		spin_lock_irq(q->queue_lock);
-		if (!cic)
-			return NULL;
-	}
-
-	/* bump @ioc's refcnt and handle changed notifications */
-	get_io_context(ioc);
-
-	if (unlikely(cic->icq.changed)) {
-		if (test_and_clear_bit(ICQ_IOPRIO_CHANGED, &cic->icq.changed))
-			changed_ioprio(cic);
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
-		if (test_and_clear_bit(ICQ_CGROUP_CHANGED, &cic->icq.changed))
-			changed_cgroup(cic);
-#endif
-	}
-
-	return cic;
-}
-
 static void
 __cfq_update_io_thinktime(struct cfq_ttime *ttime, unsigned long slice_idle)
 {
@@ -3524,8 +3413,6 @@ static void cfq_put_request(struct request *rq)
 	BUG_ON(!cfqq->allocated[rw]);
 	cfqq->allocated[rw]--;
 
-	put_io_context(RQ_CIC(rq)->icq.ioc, cfqq->cfqd->queue);
-
 	/* Put down rq reference on cfqg */
 	cfq_put_cfqg(RQ_CFQG(rq));
 	rq->elv.priv[0] = NULL;
@@ -3574,7 +3461,7 @@ static int
 cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
-	struct cfq_io_cq *cic;
+	struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
 	const int rw = rq_data_dir(rq);
 	const bool is_sync = rq_is_sync(rq);
 	struct cfq_queue *cfqq;
@@ -3582,9 +3469,16 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
 	might_sleep_if(gfp_mask & __GFP_WAIT);
 
 	spin_lock_irq(q->queue_lock);
-	cic = cfq_get_cic(cfqd, gfp_mask);
-	if (!cic)
-		goto queue_fail;
+
+	/* handle changed notifications */
+	if (unlikely(cic->icq.changed)) {
+		if (test_and_clear_bit(ICQ_IOPRIO_CHANGED, &cic->icq.changed))
+			changed_ioprio(cic);
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+		if (test_and_clear_bit(ICQ_CGROUP_CHANGED, &cic->icq.changed))
+			changed_cgroup(cic);
+#endif
+	}
 
 new_queue:
 	cfqq = cic_to_cfqq(cic, is_sync);
@@ -3615,17 +3509,10 @@ new_queue:
 	cfqq->allocated[rw]++;
 
 	cfqq->ref++;
-	rq->elv.icq = &cic->icq;
 	rq->elv.priv[0] = cfqq;
 	rq->elv.priv[1] = cfq_ref_get_cfqg(cfqq->cfqg);
 	spin_unlock_irq(q->queue_lock);
 	return 0;
-
-queue_fail:
-	cfq_schedule_dispatch(cfqd);
-	spin_unlock_irq(q->queue_lock);
-	cfq_log(cfqd, "set_request fail");
-	return 1;
 }
 
 static void cfq_kick_queue(struct work_struct *work)
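
Note: after this change cfq no longer creates or links the icq itself; rq->elv.icq is expected to be populated before the elevator's set_request hook runs (ioc_create_icq() and the cfq_get_cic() retry loop are presumably taken over by the block core earlier in this series), so cfq_set_request() only converts and consumes it and can no longer fail, which is why the queue_fail path goes away. For reference, the icq_to_cic() conversion used above is a plain container_of() wrapper; a sketch of it as defined elsewhere in cfq-iosched.c (quoted from memory, verify against the tree):

static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
{
	/* cic->icq is the first member, %NULL will convert to %NULL */
	return container_of(icq, struct cfq_io_cq, icq);
}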