@@ -85,9 +85,6 @@ struct throtl_grp {
 	unsigned long slice_start[2];
 	unsigned long slice_end[2];

-	/* Some throttle limits got updated for the group */
-	int limits_changed;
-
 	/* Per cpu stats pointer */
 	struct tg_stats_cpu __percpu *stats_cpu;

@@ -112,8 +109,6 @@ struct throtl_data

 	/* Work for dispatching throttled bios */
 	struct delayed_work throtl_work;
-
-	int limits_changed;
 };

 /* list and work item to allocate percpu group stats */
@@ -223,7 +218,6 @@ static void throtl_pd_init(struct blkcg_gq *blkg)
 	RB_CLEAR_NODE(&tg->rb_node);
 	bio_list_init(&tg->bio_lists[0]);
 	bio_list_init(&tg->bio_lists[1]);
-	tg->limits_changed = false;

 	tg->bps[READ] = -1;
 	tg->bps[WRITE] = -1;
@@ -826,45 +820,6 @@ static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
 	return nr_disp;
 }

-static void throtl_process_limit_change(struct throtl_data *td)
-{
-	struct request_queue *q = td->queue;
-	struct blkcg_gq *blkg, *n;
-
-	if (!td->limits_changed)
-		return;
-
-	xchg(&td->limits_changed, false);
-
-	throtl_log(td, "limits changed");
-
-	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
-		struct throtl_grp *tg = blkg_to_tg(blkg);
-
-		if (!tg->limits_changed)
-			continue;
-
-		if (!xchg(&tg->limits_changed, false))
-			continue;
-
-		throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu"
-			" riops=%u wiops=%u", tg->bps[READ], tg->bps[WRITE],
-			tg->iops[READ], tg->iops[WRITE]);
-
-		/*
-		 * Restart the slices for both READ and WRITES. It
-		 * might happen that a group's limit are dropped
-		 * suddenly and we don't want to account recently
-		 * dispatched IO with new low rate
-		 */
-		throtl_start_new_slice(td, tg, 0);
-		throtl_start_new_slice(td, tg, 1);
-
-		if (throtl_tg_on_rr(tg))
-			tg_update_disptime(td, tg);
-	}
-}
-
 /* Dispatch throttled bios. Should be called without queue lock held. */
 static int throtl_dispatch(struct request_queue *q)
 {
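
The function removed above implemented a test-and-clear handshake: tg_set_conf() set the limits_changed flags with xchg(), and this reader swapped them back to false atomically, so an update racing with the dispatch work could not be lost between the test and the clear. Below is a minimal userspace sketch of that pattern; it uses the GCC/Clang builtin __atomic_exchange_n() in place of the kernel's xchg(), and the names are illustrative, not the kernel's.

#include <stdbool.h>
#include <stdio.h>

static bool limits_changed;

/* Writer side: flag that a limit was updated. */
static void mark_changed(bool *flag)
{
	__atomic_exchange_n(flag, true, __ATOMIC_SEQ_CST);
}

/*
 * Reader side: observe and clear in one atomic step, as the removed
 * code did with xchg(&td->limits_changed, false).
 */
static bool test_and_clear(bool *flag)
{
	return __atomic_exchange_n(flag, false, __ATOMIC_SEQ_CST);
}

int main(void)
{
	mark_changed(&limits_changed);
	printf("first pass sees change:  %d\n", test_and_clear(&limits_changed)); /* 1 */
	printf("second pass sees change: %d\n", test_and_clear(&limits_changed)); /* 0 */
	return 0;
}
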
@@ -876,8 +831,6 @@ static int throtl_dispatch(struct request_queue *q)

 	spin_lock_irq(q->queue_lock);

-	throtl_process_limit_change(td);
-
 	if (!total_nr_queued(td))
 		goto out;

@@ -925,8 +878,7 @@ throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)

 	struct delayed_work *dwork = &td->throtl_work;

-	/* schedule work if limits changed even if no bio is queued */
-	if (total_nr_queued(td) || td->limits_changed) {
+	if (total_nr_queued(td)) {
 		mod_delayed_work(kthrotld_workqueue, dwork, delay);
 		throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
 				delay, jiffies);
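
With the deferred mechanism gone, the only remaining reason to arm the delayed work is that bios are actually queued; the dropped "|| td->limits_changed" leg of the condition no longer had any writers. A toy model of the simplified decision, assuming total_nr_queued() sums the READ and WRITE queues as it does elsewhere in this file (the struct and enum here are illustrative):

#include <stdbool.h>

enum { R = 0, W = 1 };	/* stand-ins for the kernel's READ/WRITE indices */

struct td_model {
	unsigned int nr_queued[2];
};

static unsigned int total_nr_queued(const struct td_model *td)
{
	return td->nr_queued[R] + td->nr_queued[W];
}

/* After this patch, work is scheduled only when bios are throttled. */
static bool should_schedule_work(const struct td_model *td)
{
	return total_nr_queued(td) > 0;
}
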
@@ -1023,10 +975,25 @@ static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
 	else
 		*(unsigned int *)((void *)tg + cft->private) = ctx.v;

-	/* XXX: we don't need the following deferred processing */
-	xchg(&tg->limits_changed, true);
-	xchg(&td->limits_changed, true);
-	throtl_schedule_delayed_work(td, 0);
+	throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
+		      tg->bps[READ], tg->bps[WRITE],
+		      tg->iops[READ], tg->iops[WRITE]);
+
+	/*
+	 * We're already holding queue_lock and know @tg is valid. Let's
+	 * apply the new config directly.
+	 *
+	 * Restart the slices for both READ and WRITE. It might happen
+	 * that a group's limits are dropped suddenly and we don't want to
+	 * account recently dispatched IO with the new low rate.
+	 */
+	throtl_start_new_slice(td, tg, 0);
+	throtl_start_new_slice(td, tg, 1);
+
+	if (throtl_tg_on_rr(tg)) {
+		tg_update_disptime(td, tg);
+		throtl_schedule_next_dispatch(td);
+	}

 	blkg_conf_finish(&ctx);
 	return 0;
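
The reasoning in the new comment is easiest to see with numbers. In the sketch below (a userspace model; values and helpers are illustrative, not the kernel's), a group that dispatched 10 MB during a 1 s slice at a 10 MB/s limit was exactly on budget, but keeping that slice after the limit drops to 1 MB/s makes it look ~9 MB over budget, which would throttle it for roughly nine extra seconds; restarting the slice discards that stale accounting.

#include <stdio.h>

struct slice {
	unsigned long long bytes_dispatched;	/* bytes sent in this slice */
	unsigned long long elapsed_ms;		/* how long it has run */
};

/* Bytes the slice allows so far at the given rate (bytes/sec). */
static unsigned long long allowed(const struct slice *s, unsigned long long bps)
{
	return bps * s->elapsed_ms / 1000;
}

static const char *over_budget(const struct slice *s, unsigned long long bps)
{
	return s->bytes_dispatched > allowed(s, bps) ? "yes" : "no";
}

int main(void)
{
	struct slice s = { .bytes_dispatched = 10ULL << 20, .elapsed_ms = 1000 };

	printf("over budget at 10 MB/s? %s\n", over_budget(&s, 10ULL << 20)); /* no */

	/* Limit drops; the old slice still charges the 10 MB already sent. */
	printf("over budget at  1 MB/s? %s\n", over_budget(&s, 1ULL << 20));  /* yes */

	/* What throtl_start_new_slice() does, modeled here: zero the accounting. */
	s.bytes_dispatched = 0;
	s.elapsed_ms = 0;
	printf("after slice restart?    %s\n", over_budget(&s, 1ULL << 20));  /* no */
	return 0;
}
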
@@ -1239,7 +1206,6 @@ int blk_throtl_init(struct request_queue *q)
 		return -ENOMEM;

 	td->tg_service_tree = THROTL_RB_ROOT;
-	td->limits_changed = false;
 	INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);

 	q->td = td;