@@ -183,6 +183,14 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq)
 	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
 }
 
+typedef struct task_group *rt_rq_iter_t;
+
+#define for_each_rt_rq(rt_rq, iter, rq) \
+	for (iter = list_entry_rcu(task_groups.next, typeof(*iter), list); \
+	     (&iter->list != &task_groups) && \
+	     (rt_rq = iter->rt_rq[cpu_of(rq)]); \
+	     iter = list_entry_rcu(iter->list.next, typeof(*iter), list))
+
 static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
 {
 	list_add_rcu(&rt_rq->leaf_rt_rq_list,
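
With CONFIG_RT_GROUP_SCHED, each task group owns one rt_rq per CPU, so this flavour of for_each_rt_rq() walks the global task_groups list under RCU and yields each group's rt_rq for the runqueue's CPU; the rt_rq assignment doubles as the second loop condition. A minimal sketch of a caller (count_rt_rqs() is hypothetical, not part of this patch):

	/* Hypothetical helper: count every rt_rq attached to a runqueue.
	 * The group-scheduling iterator walks task_groups via list_entry_rcu(),
	 * so the loop is bracketed with rcu_read_lock()/rcu_read_unlock(). */
	static int count_rt_rqs(struct rq *rq)
	{
		rt_rq_iter_t iter;
		struct rt_rq *rt_rq;
		int n = 0;

		rcu_read_lock();
		for_each_rt_rq(rt_rq, iter, rq)
			n++;
		rcu_read_unlock();
		return n;
	}
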
@@ -288,6 +296,11 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq)
 	return ktime_to_ns(def_rt_bandwidth.rt_period);
 }
 
+typedef struct rt_rq *rt_rq_iter_t;
+
+#define for_each_rt_rq(rt_rq, iter, rq) \
+	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
+
 static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
 {
 }
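
Without group scheduling there is exactly one rt_rq per runqueue, embedded as rq->rt, so the stub degenerates to a single pass. Expanded with the same local variables as above, it behaves like this sketch:

	/* The body runs exactly once, with rt_rq == &rq->rt. The "(void) iter"
	 * in the comma expression only keeps the otherwise-unused iterator
	 * variable from triggering a compiler warning in this configuration. */
	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL) {
		/* ... per-rt_rq work ... */
	}
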
@@ -402,12 +415,13 @@ next:
 static void __disable_runtime(struct rq *rq)
 {
 	struct root_domain *rd = rq->rd;
+	rt_rq_iter_t iter;
 	struct rt_rq *rt_rq;
 
 	if (unlikely(!scheduler_running))
 		return;
 
-	for_each_leaf_rt_rq(rt_rq, rq) {
+	for_each_rt_rq(rt_rq, iter, rq) {
 		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
 		s64 want;
 		int i;
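
For comparison, the iterator being replaced walks only rq->leaf_rt_rq_list (definition reproduced from the pre-patch file for reference):

	#define for_each_leaf_rt_rq(rt_rq, rq) \
		list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)

Since list_add_leaf_rt_rq() only links rt_rqs that currently have entities queued, __disable_runtime() could miss runtime held by empty or throttled rt_rqs; switching to for_each_rt_rq() makes the bandwidth accounting visit every rt_rq on the runqueue.
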
@@ -487,6 +501,7 @@ static void disable_runtime(struct rq *rq)
 
 static void __enable_runtime(struct rq *rq)
 {
+	rt_rq_iter_t iter;
 	struct rt_rq *rt_rq;
 
 	if (unlikely(!scheduler_running))
@@ -495,7 +510,7 @@ static void __enable_runtime(struct rq *rq)
 	/*
 	 * Reset each runqueue's bandwidth settings
 	 */
-	for_each_leaf_rt_rq(rt_rq, rq) {
+	for_each_rt_rq(rt_rq, iter, rq) {
 		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
 
 		raw_spin_lock(&rt_b->rt_runtime_lock);
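
The enable path gets the same treatment for the same reason: __enable_runtime() restores each rt_rq's bandwidth to its defaults, and an rt_rq skipped here would keep whatever runtime __disable_runtime() left it with.
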
@@ -1796,10 +1811,11 @@ extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
 
 static void print_rt_stats(struct seq_file *m, int cpu)
 {
+	rt_rq_iter_t iter;
 	struct rt_rq *rt_rq;
 
 	rcu_read_lock();
-	for_each_leaf_rt_rq(rt_rq, cpu_rq(cpu))
+	for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
 		print_rt_rq(m, cpu, rt_rq);
 	rcu_read_unlock();
 }
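
print_rt_stats() already brackets the walk with rcu_read_lock()/rcu_read_unlock(), which is exactly what the group-scheduling flavour of for_each_rt_rq() needs for its RCU traversal of task_groups; the single-rt_rq flavour touches no RCU-protected list, so the locking is simply harmless there.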