|
@@ -192,6 +192,7 @@ struct cfq_group {
|
|
|
struct blkio_group blkg;
|
|
|
#ifdef CONFIG_CFQ_GROUP_IOSCHED
|
|
|
struct hlist_node cfqd_node;
|
|
|
+ atomic_t ref;
|
|
|
#endif
|
|
|
};
|
|
|
|
|
@@ -924,6 +925,14 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
|
|
|
*st = CFQ_RB_ROOT;
|
|
|
RB_CLEAR_NODE(&cfqg->rb_node);
|
|
|
|
|
|
+ /*
|
|
|
+ * Take the initial reference that will be released on destroy
|
|
|
+ * This can be thought of as a joint reference by cgroup and
|
|
|
+ * elevator which will be dropped by either elevator exit
|
|
|
+ * or cgroup deletion path depending on who is exiting first.
|
|
|
+ */
|
|
|
+ atomic_set(&cfqg->ref, 1);
|
|
|
+
|
|
|
/* Add group onto cgroup list */
|
|
|
blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd);
|
|
|
|
|
@@ -960,7 +969,77 @@ static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
|
|
|
cfqg = &cfqq->cfqd->root_group;
|
|
|
|
|
|
cfqq->cfqg = cfqg;
|
|
|
+ /* cfqq reference on cfqg */
|
|
|
+ atomic_inc(&cfqq->cfqg->ref);
|
|
|
+}
|
|
|
+
|
|
|
+/* Drop one reference on @cfqg; free the group when the last reference goes. */
+static void cfq_put_cfqg(struct cfq_group *cfqg)
|
|
|
+{
|
|
|
+ struct cfq_rb_root *st;
|
|
|
+ int i, j;
|
|
|
+
|
|
|
+ BUG_ON(atomic_read(&cfqg->ref) <= 0);
|
|
|
+ if (!atomic_dec_and_test(&cfqg->ref))
|
|
|
+ return;
|
|
|
+ /* Last reference gone: every service tree must already be empty. */
+ for_each_cfqg_st(cfqg, i, j, st)
|
|
|
+ BUG_ON(!RB_EMPTY_ROOT(&st->rb) || st->active != NULL);
|
|
|
+ kfree(cfqg);
|
|
|
+}
|
|
|
+
|
|
|
+/*
+ * Unhash @cfqg from the cfqd group list and drop the reference taken at
+ * creation time.  Runs once per group, from either elevator exit
+ * (cfq_release_cfq_groups) or the cgroup deletion path, whichever wins.
+ * NOTE(review): @cfqd is currently unused in the body; kept for symmetry.
+ */
+static void cfq_destroy_cfqg(struct cfq_data *cfqd, struct cfq_group *cfqg)
|
|
|
+{
|
|
|
+ /* Something wrong if we are trying to remove same group twice */
|
|
|
+ BUG_ON(hlist_unhashed(&cfqg->cfqd_node));
|
|
|
+
|
|
|
+ hlist_del_init(&cfqg->cfqd_node);
|
|
|
+
|
|
|
+ /*
|
|
|
+ * Put the reference taken at the time of creation so that when all
|
|
|
+ * queues are gone, group can be destroyed.
|
|
|
+ */
|
|
|
+ cfq_put_cfqg(cfqg);
|
|
|
+}
|
|
|
+
|
|
|
+/*
+ * Elevator-exit path: tear down every group still hanging off @cfqd,
+ * skipping any group the cgroup removal path has already claimed.
+ */
+static void cfq_release_cfq_groups(struct cfq_data *cfqd)
|
|
|
+{
|
|
|
+ struct hlist_node *pos, *n;
|
|
|
+ struct cfq_group *cfqg;
|
|
|
+
|
|
|
+ hlist_for_each_entry_safe(cfqg, pos, n, &cfqd->cfqg_list, cfqd_node) {
|
|
|
+ /*
|
|
|
+ * If cgroup removal path got to blk_group first and removed
|
|
|
+ * it from cgroup list, then it will take care of destroying
|
|
|
+ * cfqg also.
|
|
|
+ */
|
|
|
+ if (!blkiocg_del_blkio_group(&cfqg->blkg))
|
|
|
+ cfq_destroy_cfqg(cfqd, cfqg);
|
|
|
+ }
|
|
|
}
|
|
|
+
|
|
|
+/*
|
|
|
+ * Blk cgroup controller notification saying that blkio_group object is being
|
|
|
+ * delinked as associated cgroup object is going away. That also means that
|
|
|
+ * no new IO will come in this group. So get rid of this group as soon as
|
|
|
+ * any pending IO in the group is finished.
|
|
|
+ *
|
|
|
+ * This function is called under rcu_read_lock(). key is the rcu protected
|
|
|
+ * pointer. That means "key" is a valid cfq_data pointer as long as we hold the rcu
|
|
|
+ * read lock.
|
|
|
+ *
|
|
|
+ * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
|
|
|
+ * it should not be NULL as even if elevator was exiting, cgroup deletion
|
|
|
+ * path got to it first.
|
|
|
+ */
|
|
|
+void cfq_unlink_blkio_group(void *key, struct blkio_group *blkg)
|
|
|
+{
|
|
|
+ unsigned long flags;
|
|
|
+ struct cfq_data *cfqd = key;
|
|
|
+
|
|
|
+ /* queue_lock serializes us against the elevator-exit teardown path. */
+ spin_lock_irqsave(cfqd->queue->queue_lock, flags);
|
|
|
+ cfq_destroy_cfqg(cfqd, cfqg_of_blkg(blkg));
|
|
|
+ spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
|
|
|
+}
|
|
|
+
|
|
|
#else /* GROUP_IOSCHED */
|
|
|
static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
|
|
|
{
|
|
@@ -971,6 +1050,9 @@ cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
|
|
|
cfqq->cfqg = cfqg;
|
|
|
}
|
|
|
|
|
|
+/* Stubs: group bookkeeping is a no-op without CONFIG_CFQ_GROUP_IOSCHED. */
+static void cfq_release_cfq_groups(struct cfq_data *cfqd) {}
|
|
|
+static inline void cfq_put_cfqg(struct cfq_group *cfqg) {}
|
|
|
+
|
|
|
#endif /* GROUP_IOSCHED */
|
|
|
|
|
|
/*
|
|
@@ -2172,11 +2254,13 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
|
|
|
* task holds one reference to the queue, dropped when task exits. each rq
|
|
|
* in-flight on this queue also holds a reference, dropped when rq is freed.
|
|
|
*
|
|
|
+ * Each cfq queue took a reference on the parent group. Drop it now.
|
|
|
* queue lock must be held here.
|
|
|
*/
|
|
|
static void cfq_put_queue(struct cfq_queue *cfqq)
|
|
|
{
|
|
|
struct cfq_data *cfqd = cfqq->cfqd;
|
|
|
+ struct cfq_group *cfqg;
|
|
|
|
|
|
BUG_ON(atomic_read(&cfqq->ref) <= 0);
|
|
|
|
|
@@ -2186,6 +2270,7 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
|
|
|
cfq_log_cfqq(cfqd, cfqq, "put_queue");
|
|
|
BUG_ON(rb_first(&cfqq->sort_list));
|
|
|
BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
|
|
|
+ cfqg = cfqq->cfqg;
|
|
|
|
|
|
if (unlikely(cfqd->active_queue == cfqq)) {
|
|
|
__cfq_slice_expired(cfqd, cfqq, 0);
|
|
@@ -2194,6 +2279,7 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
|
|
|
|
|
|
BUG_ON(cfq_cfqq_on_rr(cfqq));
|
|
|
kmem_cache_free(cfq_pool, cfqq);
|
|
|
+ cfq_put_cfqg(cfqg);
|
|
|
}
|
|
|
|
|
|
/*
|
|
@@ -3369,11 +3455,15 @@ static void cfq_exit_queue(struct elevator_queue *e)
|
|
|
}
|
|
|
|
|
|
cfq_put_async_queues(cfqd);
|
|
|
+ cfq_release_cfq_groups(cfqd);
|
|
|
+ blkiocg_del_blkio_group(&cfqd->root_group.blkg);
|
|
|
|
|
|
spin_unlock_irq(q->queue_lock);
|
|
|
|
|
|
cfq_shutdown_timer_wq(cfqd);
|
|
|
|
|
|
+ /* Wait for cfqg->blkg->key accessors to exit their grace periods. */
|
|
|
+ synchronize_rcu();
|
|
|
kfree(cfqd);
|
|
|
}
|
|
|
|
|
@@ -3401,6 +3491,11 @@ static void *cfq_init_queue(struct request_queue *q)
|
|
|
cfqg->weight = 2*BLKIO_WEIGHT_DEFAULT;
|
|
|
|
|
|
#ifdef CONFIG_CFQ_GROUP_IOSCHED
|
|
|
+ /*
|
|
|
+ * Take a reference to root group which we never drop. This is just
|
|
|
+ * to make sure that cfq_put_cfqg() does not try to kfree root group
|
|
|
+ */
|
|
|
+ atomic_set(&cfqg->ref, 1);
|
|
|
blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg, (void *)cfqd);
|
|
|
#endif
|
|
|
/*
|