@@ -79,11 +79,6 @@ static const struct sched_class fair_sched_class;
  * CFS operations on generic schedulable entities:
  */
 
-static inline struct task_struct *task_of(struct sched_entity *se)
-{
-	return container_of(se, struct task_struct, se);
-}
-
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
 /* cpu runqueue to which this cfs_rq is attached */
@@ -95,6 +90,14 @@ static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
 /* An entity is a task if it doesn't "own" a runqueue */
 #define entity_is_task(se)	(!se->my_q)
 
+static inline struct task_struct *task_of(struct sched_entity *se)
+{
+#ifdef CONFIG_SCHED_DEBUG
+	WARN_ON_ONCE(!entity_is_task(se));
+#endif
+	return container_of(se, struct task_struct, se);
+}
+
 /* Walk up scheduling entities hierarchy */
 #define for_each_sched_entity(se) \
 		for (; se; se = se->parent)
@@ -186,7 +189,12 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse)
 	}
 }
 
-#else	/* CONFIG_FAIR_GROUP_SCHED */
+#else	/* !CONFIG_FAIR_GROUP_SCHED */
+
+static inline struct task_struct *task_of(struct sched_entity *se)
+{
+	return container_of(se, struct task_struct, se);
+}
 
 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
 {
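
For readers outside the kernel tree, here is a minimal user-space sketch of the pattern the hunk relies on: container_of() recovers the enclosing task_struct from a pointer to its embedded sched_entity, and the CONFIG_SCHED_DEBUG-only check warns if a group entity (one that owns a runqueue) is passed by mistake. The names my_container_of, struct entity and struct task below are illustrative stand-ins, not kernel APIs, and assert() stands in for WARN_ON_ONCE().

/* Illustrative user-space sketch; not kernel code. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Same trick as the kernel's container_of(): subtract the member offset. */
#define my_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct entity { void *my_q; };			/* NULL when the entity is a plain task */
struct task   { int pid; struct entity se; };	/* entity embedded in the task */

static struct task *task_of(struct entity *se)
{
	assert(se->my_q == NULL);		/* stand-in for WARN_ON_ONCE(!entity_is_task(se)) */
	return my_container_of(se, struct task, se);
}

int main(void)
{
	struct task t = { .pid = 42, .se = { .my_q = NULL } };

	/* Recover the task from the pointer to its embedded entity. */
	printf("pid=%d\n", task_of(&t.se)->pid);
	return 0;
}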