@@ -763,15 +763,23 @@ enum cpu_idle_type {
 #define SD_SERIALIZE		1024	/* Only a single load balancing instance */
 #define SD_WAKE_IDLE_FAR	2048	/* Gain latency sacrificing cache hit */
 
-#define BALANCE_FOR_MC_POWER	\
-	(sched_smt_power_savings ? SD_POWERSAVINGS_BALANCE : 0)
+extern int sched_mc_power_savings, sched_smt_power_savings;
+
+static inline int sd_balance_for_mc_power(void)
+{
+	if (sched_smt_power_savings)
+		return SD_POWERSAVINGS_BALANCE;
 
-#define BALANCE_FOR_PKG_POWER	\
-	((sched_mc_power_savings || sched_smt_power_savings) ? \
-	 SD_POWERSAVINGS_BALANCE : 0)
+	return 0;
+}
 
-#define test_sd_parent(sd, flag)	((sd->parent &&		\
-					 (sd->parent->flags & flag)) ? 1 : 0)
+static inline int sd_balance_for_package_power(void)
+{
+	if (sched_mc_power_savings | sched_smt_power_savings)
+		return SD_POWERSAVINGS_BALANCE;
+
+	return 0;
+}
 
 
 struct sched_group {
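
For illustration, a minimal caller sketch (not part of the patch; the function name and flag mix are hypothetical) showing how the new helpers slot in where the old macros were OR'd into a domain's flags:

#include <linux/sched.h>

/*
 * Hypothetical example: assemble flags for a multi-core sched domain.
 * Where the old code OR'd in BALANCE_FOR_MC_POWER, the inline helper
 * is called instead; it still evaluates to either
 * SD_POWERSAVINGS_BALANCE or 0 depending on sched_smt_power_savings.
 */
static int example_mc_domain_flags(void)
{
	int flags = SD_LOAD_BALANCE | SD_BALANCE_NEWIDLE;

	flags |= sd_balance_for_mc_power();	/* 0 or SD_POWERSAVINGS_BALANCE */

	return flags;
}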
@@ -1399,6 +1407,15 @@ struct task_struct {
 #endif
 };
 
+/* Test a flag in parent sched domain */
+static inline int test_sd_parent(struct sched_domain *sd, int flag)
+{
+	if (sd->parent && (sd->parent->flags & flag))
+		return 1;
+
+	return 0;
+}
+
 /*
  * Priority of a process goes from 0..MAX_PRIO-1, valid RT
  * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
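
A hypothetical caller of the new test_sd_parent() reads the same as the old macro did, but is now type-checked against struct sched_domain *:

#include <linux/sched.h>

/*
 * Hypothetical example: ask whether the parent domain requested
 * power-savings balancing.  Returns 1 if sd has a parent whose flags
 * include SD_POWERSAVINGS_BALANCE, 0 otherwise.
 */
static int example_parent_wants_powersave(struct sched_domain *sd)
{
	return test_sd_parent(sd, SD_POWERSAVINGS_BALANCE);
}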
@@ -2256,8 +2273,6 @@ __trace_special(void *__tr, void *__data,
 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 
-extern int sched_mc_power_savings, sched_smt_power_savings;
-
 extern void normalize_rt_tasks(void);
 
 #ifdef CONFIG_GROUP_SCHED