View source

sched: rt-bandwidth group disable fixes

More extensive disabling of bandwidth control: allow sysctl_sched_rt_runtime
(when set to -1) to disable group bandwidth control entirely.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Peter Zijlstra · 17 years ago
parent
commit
0b148fa048

+ 8 - 1
kernel/sched.c

@@ -204,11 +204,13 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
 	rt_b->rt_period_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;
 }
 
+static inline int rt_bandwidth_enabled(void);
+
 static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
 {
 	ktime_t now;
 
-	if (rt_b->rt_runtime == RUNTIME_INF)
+	if (rt_bandwidth_enabled() && rt_b->rt_runtime == RUNTIME_INF)
 		return;
 
 	if (hrtimer_active(&rt_b->rt_period_timer))
@@ -839,6 +841,11 @@ static inline u64 global_rt_runtime(void)
 	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
 }
 
+static inline int rt_bandwidth_enabled(void)
+{
+	return sysctl_sched_rt_runtime >= 0;
+}
+
 #ifndef prepare_arch_switch
 # define prepare_arch_switch(next)	do { } while (0)
 #endif

+ 4 - 1
kernel/sched_rt.c

@@ -386,7 +386,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 	int i, idle = 1;
 	cpumask_t span;
 
-	if (rt_b->rt_runtime == RUNTIME_INF)
+	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
 		return 1;
 
 	span = sched_rt_period_mask();
@@ -484,6 +484,9 @@ static void update_curr_rt(struct rq *rq)
 	curr->se.exec_start = rq->clock;
 	cpuacct_charge(curr, delta_exec);
 
+	if (!rt_bandwidth_enabled())
+		return;
+
 	for_each_sched_rt_entity(rt_se) {
 		rt_rq = rt_rq_of_se(rt_se);