@@ -1164,6 +1164,7 @@ struct sched_entity {
 	/* rq "owned" by this entity/group: */
 	struct cfs_rq		*my_q;
 #endif
+
 /*
  * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be
  * removed when useful for applications beyond shares distribution (e.g.
@@ -1191,6 +1192,7 @@ struct sched_rt_entity {
 #endif
 };
 
+
 struct rcu_node;
 
 enum perf_event_task_context {
@@ -1596,37 +1598,6 @@ static inline void set_numabalancing_state(bool enabled)
 }
 #endif
 
-/*
- * Priority of a process goes from 0..MAX_PRIO-1, valid RT
- * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
- * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
- * values are inverted: lower p->prio value means higher priority.
- *
- * The MAX_USER_RT_PRIO value allows the actual maximum
- * RT priority to be separate from the value exported to
- * user-space. This allows kernel threads to set their
- * priority to a value higher than any user task. Note:
- * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
- */
-
-#define MAX_USER_RT_PRIO	100
-#define MAX_RT_PRIO		MAX_USER_RT_PRIO
-
-#define MAX_PRIO		(MAX_RT_PRIO + 40)
-#define DEFAULT_PRIO		(MAX_RT_PRIO + 20)
-
-static inline int rt_prio(int prio)
-{
-	if (unlikely(prio < MAX_RT_PRIO))
-		return 1;
-	return 0;
-}
-
-static inline int rt_task(struct task_struct *p)
-{
-	return rt_prio(p->prio);
-}
-
 static inline struct pid *task_pid(struct task_struct *task)
 {
 	return task->pids[PIDTYPE_PID].pid;
@@ -2054,26 +2025,6 @@ static inline void sched_autogroup_fork(struct signal_struct *sig) { }
 static inline void sched_autogroup_exit(struct signal_struct *sig) { }
 #endif
 
-#ifdef CONFIG_RT_MUTEXES
-extern int rt_mutex_getprio(struct task_struct *p);
-extern void rt_mutex_setprio(struct task_struct *p, int prio);
-extern void rt_mutex_adjust_pi(struct task_struct *p);
-static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
-{
-	return tsk->pi_blocked_on != NULL;
-}
-#else
-static inline int rt_mutex_getprio(struct task_struct *p)
-{
-	return p->normal_prio;
-}
-# define rt_mutex_adjust_pi(p)		do { } while (0)
-static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
-{
-	return false;
-}
-#endif
-
 extern bool yield_to(struct task_struct *p, bool preempt);
 extern void set_user_nice(struct task_struct *p, long nice);
 extern int task_prio(const struct task_struct *p);
@@ -2703,8 +2654,6 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 
-extern void normalize_rt_tasks(void);
-
 #ifdef CONFIG_CGROUP_SCHED
 
 extern struct task_group root_task_group;
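
For reference, the deleted comment block encodes this arithmetic: RT priorities occupy 0..MAX_RT_PRIO-1, nice levels -20..19 map onto MAX_RT_PRIO..MAX_PRIO-1, and lower prio values mean higher priority. Below is a minimal standalone sketch that replays those relationships as a userspace program; the macro values are copied verbatim from the removed block, while main() and the simplified rt_prio() (without the kernel's unlikely() branch hint) are illustrative additions, not kernel code, and the hunks above show only the removal side of the change.

/* Standalone illustration only -- not kernel code. */
#include <stdio.h>

/* Values copied verbatim from the block removed above. */
#define MAX_USER_RT_PRIO	100
#define MAX_RT_PRIO		MAX_USER_RT_PRIO
#define MAX_PRIO		(MAX_RT_PRIO + 40)	/* 140 */
#define DEFAULT_PRIO		(MAX_RT_PRIO + 20)	/* 120, i.e. nice 0 */

/* Same test as the removed rt_prio(), minus the unlikely() hint. */
static int rt_prio(int prio)
{
	return prio < MAX_RT_PRIO;
}

int main(void)
{
	/* Priority values are inverted: lower prio == higher priority. */
	printf("RT tasks:     prio 0..%d\n", MAX_RT_PRIO - 1);
	printf("normal tasks: prio %d..%d\n", MAX_RT_PRIO, MAX_PRIO - 1);

	/* nice n lands at DEFAULT_PRIO + n, so -20..19 -> 100..139. */
	printf("nice -20 -> prio %d, nice 0 -> prio %d, nice 19 -> prio %d\n",
	       DEFAULT_PRIO - 20, DEFAULT_PRIO, DEFAULT_PRIO + 19);

	/* Anything below MAX_RT_PRIO is a realtime priority. */
	printf("rt_prio(50) = %d, rt_prio(DEFAULT_PRIO) = %d\n",
	       rt_prio(50), rt_prio(DEFAULT_PRIO));
	return 0;
}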