@@ -486,7 +486,7 @@ struct rq {
 	 */
 	unsigned long nr_uninterruptible;
 
-	struct task_struct *curr, *idle;
+	struct task_struct *curr, *idle, *stop;
 	unsigned long next_balance;
 	struct mm_struct *prev_mm;
 
@@ -1837,7 +1837,7 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
 
 static const struct sched_class rt_sched_class;
 
-#define sched_class_highest	(&rt_sched_class)
+#define sched_class_highest	(&stop_sched_class)
 #define for_each_class(class) \
   for (class = sched_class_highest; class; class = class->next)
 
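[note] sched_stoptask.c itself is not part of this excerpt. For the new
sched_class_highest to be safe, the stop class has to chain on to the old
head. A minimal sketch of the linkage that file needs to provide -- the
hook bodies and the exact runnable-flag field are assumptions about this
kernel era, not quoted from the diff:

	/* sched_stoptask.c (sketch): new head of the class list */
	static struct task_struct *pick_next_task_stop(struct rq *rq)
	{
		struct task_struct *stop = rq->stop;

		/* return the installed stop task only if it is runnable;
		 * NULL lets pick_next_task() fall through to the RT class */
		if (stop && stop->se.on_rq)
			return stop;

		return NULL;
	}

	static const struct sched_class stop_sched_class = {
		.next		= &rt_sched_class,	/* chain to old head */
		.pick_next_task	= pick_next_task_stop,
		/* enqueue/dequeue and the other hooks omitted here */
	};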
@@ -1917,10 +1917,41 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
 #include "sched_idletask.c"
 #include "sched_fair.c"
 #include "sched_rt.c"
+#include "sched_stoptask.c"
 #ifdef CONFIG_SCHED_DEBUG
 # include "sched_debug.c"
 #endif
 
+void sched_set_stop_task(int cpu, struct task_struct *stop)
+{
+	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
+	struct task_struct *old_stop = cpu_rq(cpu)->stop;
+
+	if (stop) {
+		/*
+		 * Make it appear like a SCHED_FIFO task, it's something
+		 * userspace knows about and won't get confused about.
+		 *
+		 * Also, it will make PI more or less work without too
+		 * much confusion -- but then, stop work should not
+		 * rely on PI working anyway.
+		 */
+		sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
+
+		stop->sched_class = &stop_sched_class;
+	}
+
+	cpu_rq(cpu)->stop = stop;
+
+	if (old_stop) {
+		/*
+		 * Reset it back to a normal scheduling class so that
+		 * it can die in pieces.
+		 */
+		old_stop->sched_class = &rt_sched_class;
+	}
+}
+
 /*
  * __normal_prio - return the priority that is based on the static prio
  */
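[note] A CPU's stop task is owned by the cpu_stop machinery, which hands
its per-CPU kthread over on hotplug. A hedged sketch of the calling side;
the cpu_stopper structure and its ->thread member are assumptions about
kernel/stop_machine.c, which this excerpt does not show:

	/* sketch: promote the stopper kthread when the CPU comes up ... */
	sched_set_stop_task(cpu, stopper->thread);
	wake_up_process(stopper->thread);

	/* ... and demote it (back to plain SCHED_FIFO) before it is
	 * stopped on CPU teardown, so it "can die in pieces" */
	sched_set_stop_task(cpu, NULL);
	kthread_stop(stopper->thread);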
@@ -3720,17 +3751,13 @@ pick_next_task(struct rq *rq)
 		return p;
 	}
 
-	class = sched_class_highest;
-	for ( ; ; ) {
+	for_each_class(class) {
 		p = class->pick_next_task(rq);
 		if (p)
 			return p;
-		/*
-		 * Will never be NULL as the idle class always
-		 * returns a non-NULL p:
-		 */
-		class = class->next;
 	}
+
+	BUG(); /* the idle class will always have a runnable task */
 }
 
 /*
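[note] With the hunk applied, the whole of pick_next_task() reads as
below. The CFS fast path at the top is surrounding context the hunk does
not show; it is reproduced from memory of this kernel era and should be
treated as an assumption:

	static inline struct task_struct *
	pick_next_task(struct rq *rq)
	{
		const struct sched_class *class;
		struct task_struct *p;

		/*
		 * Optimization: if all runnable tasks are CFS tasks,
		 * ask the fair class directly:
		 */
		if (likely(rq->nr_running == rq->cfs.nr_running)) {
			p = fair_sched_class.pick_next_task(rq);
			if (likely(p))
				return p;
		}

		for_each_class(class) {
			p = class->pick_next_task(rq);
			if (p)
				return p;
		}

		BUG(); /* the idle class will always have a runnable task */
	}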
@@ -4659,6 +4686,15 @@ recheck:
 	 */
 	rq = __task_rq_lock(p);
 
+	/*
+	 * Changing the policy of the stop threads is a very bad idea:
+	 */
+	if (p == rq->stop) {
+		__task_rq_unlock(rq);
+		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+		return -EINVAL;
+	}
+
 #ifdef CONFIG_RT_GROUP_SCHED
 	if (user) {
 		/*
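[note] The user-visible effect of this last hunk: sched_setscheduler(2)
on a stop thread such as migration/N now fails with EINVAL instead of
pulling the thread out of the stop class. A small hypothetical userspace
check (the PID of migration/N must be supplied by the caller):

	#include <errno.h>
	#include <sched.h>
	#include <stdio.h>
	#include <stdlib.h>

	int main(int argc, char **argv)
	{
		struct sched_param sp = { .sched_priority = 50 };

		if (argc != 2) {
			fprintf(stderr, "usage: %s <pid of migration/N>\n",
				argv[0]);
			return 1;
		}

		/* with this patch the kernel rejects the request outright */
		if (sched_setscheduler(atoi(argv[1]), SCHED_FIFO, &sp) == -1)
			perror("sched_setscheduler"); /* Invalid argument */
		return 0;
	}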