
Merge branch 'sched-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'sched-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (46 commits)
  sched: Add comments to find_busiest_group() function
  sched: Refactor the power savings balance code
  sched: Optimize the !power_savings_balance during fbg()
  sched: Create a helper function to calculate imbalance
  sched: Create helper to calculate small_imbalance in fbg()
  sched: Create a helper function to calculate sched_domain stats for fbg()
  sched: Define structure to store the sched_domain statistics for fbg()
  sched: Create a helper function to calculate sched_group stats for fbg()
  sched: Define structure to store the sched_group statistics for fbg()
  sched: Fix indentations in find_busiest_group() using gotos
  sched: Simple helper functions for find_busiest_group()
  sched: remove unused fields from struct rq
  sched: jiffies not printed per CPU
  sched: small optimisation of can_migrate_task()
  sched: fix typos in documentation
  sched: add avg_overlap decay
  x86, sched_clock(): mark variables read-mostly
  sched: optimize ttwu vs group scheduling
  sched: TIF_NEED_RESCHED -> need_resched() cleanup
  sched: don't rebalance if attached on NULL domain
  ...
Linus Torvalds, 16 years ago · commit 831576fe40

+ 0 - 2
Documentation/scheduler/00-INDEX

@@ -2,8 +2,6 @@
 	- this file.
 sched-arch.txt
 	- CPU Scheduler implementation hints for architecture specific code.
-sched-coding.txt
-	- reference for various scheduler-related methods in the O(1) scheduler.
 sched-design-CFS.txt
 	- goals, design and implementation of the Complete Fair Scheduler.
 sched-domains.txt

+ 0 - 126
Documentation/scheduler/sched-coding.txt

@@ -1,126 +0,0 @@
-     Reference for various scheduler-related methods in the O(1) scheduler
-		Robert Love <rml@tech9.net>, MontaVista Software
-
-
-Note most of these methods are local to kernel/sched.c - this is by design.
-The scheduler is meant to be self-contained and abstracted away.  This document
-is primarily for understanding the scheduler, not interfacing to it.  Some of
-the discussed interfaces, however, are general process/scheduling methods.
-They are typically defined in include/linux/sched.h.
-
-
-Main Scheduling Methods
------------------------
-
-void load_balance(runqueue_t *this_rq, int idle)
-	Attempts to pull tasks from one cpu to another to balance cpu usage,
-	if needed.  This method is called explicitly if the runqueues are
-	imbalanced or periodically by the timer tick.  Prior to calling,
-	the current runqueue must be locked and interrupts disabled.
-
-void schedule()
-	The main scheduling function.  Upon return, the highest priority
-	process will be active.
-
-
-Locking
--------
-
-Each runqueue has its own lock, rq->lock.  When multiple runqueues need
-to be locked, lock acquires must be ordered by ascending &runqueue value.
-
-A specific runqueue is locked via
-
-	task_rq_lock(task_t pid, unsigned long *flags)
-
-which disables preemption, disables interrupts, and locks the runqueue pid is
-running on.  Likewise,
-
-	task_rq_unlock(task_t pid, unsigned long *flags)
-
-unlocks the runqueue pid is running on, restores interrupts to their previous
-state, and reenables preemption.
-
-The routines
-
-	double_rq_lock(runqueue_t *rq1, runqueue_t *rq2)
-
-and
-
-	double_rq_unlock(runqueue_t *rq1, runqueue_t *rq2)
-
-safely lock and unlock, respectively, the two specified runqueues.  They do
-not, however, disable and restore interrupts.  Users are required to do so
-manually before and after calls.
-
-
-Values
-------
-
-MAX_PRIO
-	The maximum priority of the system, stored in the task as task->prio.
-	Lower priorities are higher.  Normal (non-RT) priorities range from
-	MAX_RT_PRIO to (MAX_PRIO - 1).
-MAX_RT_PRIO
-	The maximum real-time priority of the system.  Valid RT priorities
-	range from 0 to (MAX_RT_PRIO - 1).
-MAX_USER_RT_PRIO
-	The maximum real-time priority that is exported to user-space.  Should
-	always be equal to or less than MAX_RT_PRIO.  Setting it less allows
-	kernel threads to have higher priorities than any user-space task.
-MIN_TIMESLICE
-MAX_TIMESLICE
-	Respectively, the minimum and maximum timeslices (quanta) of a process.
-
-Data
-----
-
-struct runqueue
-	The main per-CPU runqueue data structure.
-struct task_struct
-	The main per-process data structure.
-
-
-General Methods
----------------
-
-cpu_rq(cpu)
-	Returns the runqueue of the specified cpu.
-this_rq()
-	Returns the runqueue of the current cpu.
-task_rq(pid)
-	Returns the runqueue which holds the specified pid.
-cpu_curr(cpu)
-	Returns the task currently running on the given cpu.
-rt_task(pid)
-	Returns true if pid is real-time, false if not.
-
-
-Process Control Methods
------------------------
-
-void set_user_nice(task_t *p, long nice)
-	Sets the "nice" value of task p to the given value.
-int setscheduler(pid_t pid, int policy, struct sched_param *param)
-	Sets the scheduling policy and parameters for the given pid.
-int set_cpus_allowed(task_t *p, unsigned long new_mask)
-	Sets a given task's CPU affinity and migrates it to a proper cpu.
-	Callers must have a valid reference to the task and assure the
-	task not exit prematurely.  No locks can be held during the call.
-set_task_state(tsk, state_value)
-	Sets the given task's state to the given value.
-set_current_state(state_value)
-	Sets the current task's state to the given value.
-void set_tsk_need_resched(struct task_struct *tsk)
-	Sets need_resched in the given task.
-void clear_tsk_need_resched(struct task_struct *tsk)
-	Clears need_resched in the given task.
-void set_need_resched()
-	Sets need_resched in the current task.
-void clear_need_resched()
-	Clears need_resched in the current task.
-int need_resched()
-	Returns true if need_resched is set in the current task, false
-	otherwise.
-yield()
-	Place the current process at the end of the runqueue and call schedule.

+ 7 - 1
arch/x86/kernel/cpu/intel.c

@@ -4,6 +4,7 @@
 #include <linux/string.h>
 #include <linux/bitops.h>
 #include <linux/smp.h>
+#include <linux/sched.h>
 #include <linux/thread_info.h>
 #include <linux/module.h>

@@ -56,11 +57,16 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)

 	/*
 	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
-	 * with P/T states and does not stop in deep C-states
+	 * with P/T states and does not stop in deep C-states.
+	 *
+	 * It is also reliable across cores and sockets. (but not across
+	 * cabinets - we turn it off in that case explicitly.)
 	 */
 	if (c->x86_power & (1 << 8)) {
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
+		set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE);
+		sched_clock_stable = 1;
 	}

 }
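The bit tested above comes from CPUID leaf 0x80000007 ("Advanced Power Management"), where EDX bit 8 is the "Invariant TSC" flag. A small userspace sketch of the same check, using GCC's <cpuid.h>:

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* __get_cpuid() returns 0 if the leaf is not supported. */
	if (!__get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 0x80000007 not supported");
		return 1;
	}
	/* EDX bit 8: TSC runs at constant rate across P-/T-/C-states. */
	printf("Invariant TSC: %s\n", (edx & (1u << 8)) ? "yes" : "no");
	return 0;
}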

+ 5 - 4
arch/x86/kernel/tsc.c

@@ -17,20 +17,21 @@
 #include <asm/delay.h>
 #include <asm/hypervisor.h>

-unsigned int cpu_khz;           /* TSC clocks / usec, not used here */
+unsigned int __read_mostly cpu_khz;	/* TSC clocks / usec, not used here */
 EXPORT_SYMBOL(cpu_khz);
-unsigned int tsc_khz;
+
+unsigned int __read_mostly tsc_khz;
 EXPORT_SYMBOL(tsc_khz);

 /*
  * TSC can be unstable due to cpufreq or due to unsynced TSCs
  */
-static int tsc_unstable;
+static int __read_mostly tsc_unstable;

 /* native_sched_clock() is called before tsc_init(), so
    we must start with the TSC soft disabled to prevent
    erroneous rdtsc usage on !cpu_has_tsc processors */
-static int tsc_disabled = -1;
+static int __read_mostly tsc_disabled = -1;

 static int tsc_clocksource_reliable;
 /*
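__read_mostly groups rarely-written globals into their own data section so they do not share cache lines with frequently-written data. A simplified sketch of what the annotation boils down to (the real macro lives in the per-arch cache headers, and the section name here is an assumption for illustration):

/* Illustrative only - see the arch cache headers for the real macro. */
#define __read_mostly __attribute__((__section__(".data.read_mostly")))

/* Written once at boot during TSC calibration, then only read. */
unsigned int __read_mostly cpu_khz;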

+ 1 - 0
include/linux/init_task.h

@@ -147,6 +147,7 @@ extern struct cred init_cred;
 		.nr_cpus_allowed = NR_CPUS,				\
 	},								\
 	.tasks		= LIST_HEAD_INIT(tsk.tasks),			\
+	.pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO), \
 	.ptraced	= LIST_HEAD_INIT(tsk.ptraced),			\
 	.ptrace_entry	= LIST_HEAD_INIT(tsk.ptrace_entry),		\
 	.real_parent	= &tsk,						\

+ 9 - 1
include/linux/latencytop.h

@@ -9,6 +9,7 @@
 #ifndef _INCLUDE_GUARD_LATENCYTOP_H_
 #define _INCLUDE_GUARD_LATENCYTOP_H_

+#include <linux/compiler.h>
 #ifdef CONFIG_LATENCYTOP

 #define LT_SAVECOUNT		32
@@ -24,7 +25,14 @@ struct latency_record {

 struct task_struct;

-void account_scheduler_latency(struct task_struct *task, int usecs, int inter);
+extern int latencytop_enabled;
+void __account_scheduler_latency(struct task_struct *task, int usecs, int inter);
+static inline void
+account_scheduler_latency(struct task_struct *task, int usecs, int inter)
+{
+	if (unlikely(latencytop_enabled))
+		__account_scheduler_latency(task, usecs, inter);
+}

 void clear_all_latency_tracing(struct task_struct *p);


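The header change above is a common fast-path pattern: the exported function becomes a static inline that tests a global enable flag and only calls the out-of-line __-prefixed slow path when tracing is actually on. A userspace analogue of the pattern (all names here are illustrative, not kernel API):

#include <stdio.h>

int tracing_enabled;	/* analogue of latencytop_enabled */

/* Out-of-line slow path; only reached when tracing is enabled. */
void __trace_event(int value)
{
	printf("event: %d\n", value);
}

/* Inline fast path: disabled callers pay only one predictable branch. */
static inline void trace_event(int value)
{
	if (__builtin_expect(tracing_enabled, 0))	/* unlikely() analogue */
		__trace_event(value);
}

int main(void)
{
	trace_event(1);		/* no output: tracing disabled */
	tracing_enabled = 1;
	trace_event(2);		/* prints "event: 2" */
	return 0;
}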
+ 6 - 3
include/linux/plist.h

@@ -96,6 +96,10 @@ struct plist_node {
 # define PLIST_HEAD_LOCK_INIT(_lock)
 #endif

+#define _PLIST_HEAD_INIT(head)				\
+	.prio_list = LIST_HEAD_INIT((head).prio_list),	\
+	.node_list = LIST_HEAD_INIT((head).node_list)
+
 /**
  * PLIST_HEAD_INIT - static struct plist_head initializer
  * @head:	struct plist_head variable name
@@ -103,8 +107,7 @@ struct plist_node {
  */
 #define PLIST_HEAD_INIT(head, _lock)			\
 {							\
-	.prio_list = LIST_HEAD_INIT((head).prio_list),	\
-	.node_list = LIST_HEAD_INIT((head).node_list),	\
+        _PLIST_HEAD_INIT(head),                         \
 	PLIST_HEAD_LOCK_INIT(&(_lock))			\
 }

@@ -116,7 +119,7 @@ struct plist_node {
 #define PLIST_NODE_INIT(node, __prio)			\
 {							\
 	.prio  = (__prio),				\
-	.plist = PLIST_HEAD_INIT((node).plist, NULL),	\
+	.plist = { _PLIST_HEAD_INIT((node).plist) }, 	\
 }

 /**

+ 16 - 1
include/linux/sched.h

@@ -998,6 +998,7 @@ struct sched_class {
 			      struct rq *busiest, struct sched_domain *sd,
 			      enum cpu_idle_type idle);
 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
+	int (*needs_post_schedule) (struct rq *this_rq);
 	void (*post_schedule) (struct rq *this_rq);
 	void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);

@@ -1052,6 +1053,10 @@ struct sched_entity {
 	u64			last_wakeup;
 	u64			avg_overlap;

+	u64			start_runtime;
+	u64			avg_wakeup;
+	u64			nr_migrations;
+
 #ifdef CONFIG_SCHEDSTATS
 	u64			wait_start;
 	u64			wait_max;
@@ -1067,7 +1072,6 @@ struct sched_entity {
 	u64			exec_max;
 	u64			slice_max;

-	u64			nr_migrations;
 	u64			nr_migrations_cold;
 	u64			nr_failed_migrations_affine;
 	u64			nr_failed_migrations_running;
@@ -1164,6 +1168,7 @@ struct task_struct {
 #endif

 	struct list_head tasks;
+	struct plist_node pushable_tasks;

 	struct mm_struct *mm, *active_mm;

@@ -1675,6 +1680,16 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 	return set_cpus_allowed_ptr(p, &new_mask);
 }

+/*
+ * Architectures can set this to 1 if they have specified
+ * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
+ * but then during bootup it turns out that sched_clock()
+ * is reliable after all:
+ */
+#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+extern int sched_clock_stable;
+#endif
+
 extern unsigned long long sched_clock(void);

 extern void sched_clock_init(void);

+ 0 - 1
init/Kconfig

@@ -966,7 +966,6 @@ config SLABINFO

 config RT_MUTEXES
 	boolean
-	select PLIST

 config BASE_SMALL
 	int

+ 71 - 12
kernel/latencytop.c

@@ -9,6 +9,44 @@
  * as published by the Free Software Foundation; version 2
  * of the License.
  */
+
+/*
+ * CONFIG_LATENCYTOP enables a kernel latency tracking infrastructure that is
+ * used by the "latencytop" userspace tool. The latency that is tracked is not
+ * the 'traditional' interrupt latency (which is primarily caused by something
+ * else consuming CPU), but instead, it is the latency an application encounters
+ * because the kernel sleeps on its behalf for various reasons.
+ *
+ * This code tracks 2 levels of statistics:
+ * 1) System level latency
+ * 2) Per process latency
+ *
+ * The latency is stored in fixed sized data structures in an accumulated form;
+ * if the "same" latency cause is hit twice, this will be tracked as one entry
+ * in the data structure. Both the count, total accumulated latency and maximum
+ * latency are tracked in this data structure. When the fixed size structure is
+ * full, no new causes are tracked until the buffer is flushed by writing to
+ * the /proc file; the userspace tool does this on a regular basis.
+ *
+ * A latency cause is identified by a stringified backtrace at the point that
+ * the scheduler gets invoked. The userland tool will use this string to
+ * identify the cause of the latency in human readable form.
+ *
+ * The information is exported via /proc/latency_stats and /proc/<pid>/latency.
+ * These files look like this:
+ *
+ * Latency Top version : v0.1
+ * 70 59433 4897 i915_irq_wait drm_ioctl vfs_ioctl do_vfs_ioctl sys_ioctl
+ * |    |    |    |
+ * |    |    |    +----> the stringified backtrace
+ * |    |    +---------> The maximum latency for this entry in microseconds
+ * |    +--------------> The accumulated latency for this entry (microseconds)
+ * +-------------------> The number of times this entry is hit
+ *
+ * (note: the average latency is the accumulated latency divided by the number
+ * of times)
+ */
+
 #include <linux/latencytop.h>
 #include <linux/kallsyms.h>
 #include <linux/seq_file.h>
@@ -72,7 +110,7 @@ account_global_scheduler_latency(struct task_struct *tsk, struct latency_record
 				firstnonnull = i;
 			continue;
 		}
-		for (q = 0 ; q < LT_BACKTRACEDEPTH ; q++) {
+		for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
 			unsigned long record = lat->backtrace[q];

 			if (latency_record[i].backtrace[q] != record) {
@@ -101,31 +139,52 @@ account_global_scheduler_latency(struct task_struct *tsk, struct latency_record
 	memcpy(&latency_record[i], lat, sizeof(struct latency_record));
 }

-static inline void store_stacktrace(struct task_struct *tsk, struct latency_record *lat)
+/*
+ * Iterator to store a backtrace into a latency record entry
+ */
+static inline void store_stacktrace(struct task_struct *tsk,
+					struct latency_record *lat)
 {
 	struct stack_trace trace;

 	memset(&trace, 0, sizeof(trace));
 	trace.max_entries = LT_BACKTRACEDEPTH;
 	trace.entries = &lat->backtrace[0];
-	trace.skip = 0;
 	save_stack_trace_tsk(tsk, &trace);
 }

+/**
+ * __account_scheduler_latency - record an occured latency
+ * @tsk - the task struct of the task hitting the latency
+ * @usecs - the duration of the latency in microseconds
+ * @inter - 1 if the sleep was interruptible, 0 if uninterruptible
+ *
+ * This function is the main entry point for recording latency entries
+ * as called by the scheduler.
+ *
+ * This function has a few special cases to deal with normal 'non-latency'
+ * sleeps: specifically, interruptible sleep longer than 5 msec is skipped
+ * since this usually is caused by waiting for events via select() and co.
+ *
+ * Negative latencies (caused by time going backwards) are also explicitly
+ * skipped.
+ */
 void __sched
-account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
+__account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
 {
 	unsigned long flags;
 	int i, q;
 	struct latency_record lat;

-	if (!latencytop_enabled)
-		return;
-
 	/* Long interruptible waits are generally user requested... */
 	if (inter && usecs > 5000)
 		return;

+	/* Negative sleeps are time going backwards */
+	/* Zero-time sleeps are non-interesting */
+	if (usecs <= 0)
+		return;
+
 	memset(&lat, 0, sizeof(lat));
 	lat.count = 1;
 	lat.time = usecs;
@@ -143,12 +202,12 @@ account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
 	if (tsk->latency_record_count >= LT_SAVECOUNT)
 		goto out_unlock;

-	for (i = 0; i < LT_SAVECOUNT ; i++) {
+	for (i = 0; i < LT_SAVECOUNT; i++) {
 		struct latency_record *mylat;
 		int same = 1;

 		mylat = &tsk->latency_record[i];
-		for (q = 0 ; q < LT_BACKTRACEDEPTH ; q++) {
+		for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
 			unsigned long record = lat.backtrace[q];

 			if (mylat->backtrace[q] != record) {
@@ -186,7 +245,7 @@ static int lstats_show(struct seq_file *m, void *v)
 	for (i = 0; i < MAXLR; i++) {
 		if (latency_record[i].backtrace[0]) {
 			int q;
-			seq_printf(m, "%i %li %li ",
+			seq_printf(m, "%i %lu %lu ",
 				latency_record[i].count,
 				latency_record[i].time,
 				latency_record[i].max);
@@ -223,7 +282,7 @@ static int lstats_open(struct inode *inode, struct file *filp)
 	return single_open(filp, lstats_show, NULL);
 }

-static struct file_operations lstats_fops = {
+static const struct file_operations lstats_fops = {
 	.open		= lstats_open,
 	.read		= seq_read,
 	.write		= lstats_write,
@@ -236,4 +295,4 @@ static int __init init_lstats_procfs(void)
 	proc_create("latency_stats", 0644, NULL, &lstats_fops);
 	return 0;
 }
-__initcall(init_lstats_procfs);
+device_initcall(init_lstats_procfs);
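Given the record format documented in the new comment block (hit count, accumulated microseconds, maximum microseconds, backtrace string), a minimal sketch of a userspace reader that derives the average latency per entry might look like this:

#include <stdio.h>

int main(void)
{
	char trace[512];
	unsigned long count, total, max;
	FILE *f = fopen("/proc/latency_stats", "r");

	if (!f)
		return 1;
	/* Skip the "Latency Top version : v0.1" header line. */
	if (!fgets(trace, sizeof(trace), f))
		return 1;
	/* count, accumulated usecs, max usecs, then the backtrace string */
	while (fscanf(f, "%lu %lu %lu %511[^\n]",
		      &count, &total, &max, trace) == 4) {
		printf("avg %lu usec (max %lu) %s\n",
		       count ? total / count : 0, max, trace);
	}
	fclose(f);
	return 0;
}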

File diff suppressed because it is too large
+ 552 - 215
kernel/sched.c


+ 17 - 13
kernel/sched_clock.c

@@ -24,11 +24,11 @@
  * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat
  * consistent between cpus (never more than 2 jiffies difference).
  */
-#include <linux/sched.h>
-#include <linux/percpu.h>
 #include <linux/spinlock.h>
-#include <linux/ktime.h>
 #include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/ktime.h>
+#include <linux/sched.h>

 /*
  * Scheduler clock - returns current time in nanosec units.
@@ -43,6 +43,7 @@ unsigned long long __attribute__((weak)) sched_clock(void)
 static __read_mostly int sched_clock_running;

 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+__read_mostly int sched_clock_stable;

 struct sched_clock_data {
 	/*
@@ -87,7 +88,7 @@ void sched_clock_init(void)
 }

 /*
- * min,max except they take wrapping into account
+ * min, max except they take wrapping into account
  */

 static inline u64 wrap_min(u64 x, u64 y)
@@ -111,15 +112,13 @@ static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
 	s64 delta = now - scd->tick_raw;
 	u64 clock, min_clock, max_clock;

-	WARN_ON_ONCE(!irqs_disabled());
-
 	if (unlikely(delta < 0))
 		delta = 0;

 	/*
 	 * scd->clock = clamp(scd->tick_gtod + delta,
-	 * 		      max(scd->tick_gtod, scd->clock),
-	 * 		      scd->tick_gtod + TICK_NSEC);
+	 *		      max(scd->tick_gtod, scd->clock),
+	 *		      scd->tick_gtod + TICK_NSEC);
 	 */

 	clock = scd->tick_gtod + delta;
@@ -148,12 +147,13 @@ static void lock_double_clock(struct sched_clock_data *data1,

 u64 sched_clock_cpu(int cpu)
 {
-	struct sched_clock_data *scd = cpu_sdc(cpu);
 	u64 now, clock, this_clock, remote_clock;
+	struct sched_clock_data *scd;

-	if (unlikely(!sched_clock_running))
-		return 0ull;
+	if (sched_clock_stable)
+		return sched_clock();

+	scd = cpu_sdc(cpu);
 	WARN_ON_ONCE(!irqs_disabled());
 	now = sched_clock();

@@ -195,14 +195,18 @@ u64 sched_clock_cpu(int cpu)

 void sched_clock_tick(void)
 {
-	struct sched_clock_data *scd = this_scd();
+	struct sched_clock_data *scd;
 	u64 now, now_gtod;

+	if (sched_clock_stable)
+		return;
+
 	if (unlikely(!sched_clock_running))
 		return;

 	WARN_ON_ONCE(!irqs_disabled());

+	scd = this_scd();
 	now_gtod = ktime_to_ns(ktime_get());
 	now = sched_clock();

@@ -250,7 +254,7 @@ u64 sched_clock_cpu(int cpu)
 	return sched_clock();
 }

-#endif
+#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

 unsigned long long cpu_clock(int cpu)
 {
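The new sched_clock_stable fast path is why the invariant-TSC detection above matters: when the TSC is trustworthy, the clock can be a raw counter read plus fixed scaling, with no per-cpu clamping or cross-cpu filtering. A userspace sketch of that idea, assuming a made-up tsc_khz (the kernel calibrates it at boot, and scales with a multiply/shift rather than this naive division, which overflows after a few hours' worth of cycles):

#include <stdint.h>
#include <stdio.h>
#include <x86intrin.h>

static uint64_t tsc_khz = 2400000;	/* assumed 2.4 GHz part */

static uint64_t stable_clock_ns(void)
{
	/* cycles * 10^6 / (cycles per millisecond) = nanoseconds */
	return __rdtsc() * 1000000ULL / tsc_khz;
}

int main(void)
{
	printf("%llu ns since reset\n",
	       (unsigned long long)stable_clock_ns());
	return 0;
}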

+ 3 - 5
kernel/sched_debug.c

@@ -272,7 +272,6 @@ static void print_cpu(struct seq_file *m, int cpu)
 	P(nr_switches);
 	P(nr_load_updates);
 	P(nr_uninterruptible);
-	SEQ_printf(m, "  .%-30s: %lu\n", "jiffies", jiffies);
 	PN(next_balance);
 	P(curr->pid);
 	PN(clock);
@@ -287,9 +286,6 @@ static void print_cpu(struct seq_file *m, int cpu)
 #ifdef CONFIG_SCHEDSTATS
 #define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, rq->n);

-	P(yld_exp_empty);
-	P(yld_act_empty);
-	P(yld_both_empty);
 	P(yld_count);

 	P(sched_switch);
@@ -314,7 +310,7 @@ static int sched_debug_show(struct seq_file *m, void *v)
 	u64 now = ktime_to_ns(ktime_get());
 	int cpu;

-	SEQ_printf(m, "Sched Debug Version: v0.08, %s %.*s\n",
+	SEQ_printf(m, "Sched Debug Version: v0.09, %s %.*s\n",
 		init_utsname()->release,
 		(int)strcspn(init_utsname()->version, " "),
 		init_utsname()->version);
@@ -325,6 +321,7 @@ static int sched_debug_show(struct seq_file *m, void *v)
 	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
 #define PN(x) \
 	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
+	P(jiffies);
 	PN(sysctl_sched_latency);
 	PN(sysctl_sched_min_granularity);
 	PN(sysctl_sched_wakeup_granularity);
@@ -397,6 +394,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 	PN(se.vruntime);
 	PN(se.sum_exec_runtime);
 	PN(se.avg_overlap);
+	PN(se.avg_wakeup);

 	nr_switches = p->nvcsw + p->nivcsw;


+ 53 - 6
kernel/sched_fair.c

@@ -1314,16 +1314,63 @@ out:
 }
 #endif /* CONFIG_SMP */

-static unsigned long wakeup_gran(struct sched_entity *se)
+/*
+ * Adaptive granularity
+ *
+ * se->avg_wakeup gives the average time a task runs until it does a wakeup,
+ * with the limit of wakeup_gran -- when it never does a wakeup.
+ *
+ * So the smaller avg_wakeup is the faster we want this task to preempt,
+ * but we don't want to treat the preemptee unfairly and therefore allow it
+ * to run for at least the amount of time we'd like to run.
+ *
+ * NOTE: we use 2*avg_wakeup to increase the probability of actually doing one
+ *
+ * NOTE: we use *nr_running to scale with load, this nicely matches the
+ *       degrading latency on load.
+ */
+static unsigned long
+adaptive_gran(struct sched_entity *curr, struct sched_entity *se)
+{
+	u64 this_run = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
+	u64 expected_wakeup = 2*se->avg_wakeup * cfs_rq_of(se)->nr_running;
+	u64 gran = 0;
+
+	if (this_run < expected_wakeup)
+		gran = expected_wakeup - this_run;
+
+	return min_t(s64, gran, sysctl_sched_wakeup_granularity);
+}
+
+static unsigned long
+wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
 {
 	unsigned long gran = sysctl_sched_wakeup_granularity;

+	if (cfs_rq_of(curr)->curr && sched_feat(ADAPTIVE_GRAN))
+		gran = adaptive_gran(curr, se);
+
 	/*
-	 * More easily preempt - nice tasks, while not making it harder for
-	 * + nice tasks.
+	 * Since its curr running now, convert the gran from real-time
+	 * to virtual-time in his units.
 	 */
-	if (!sched_feat(ASYM_GRAN) || se->load.weight > NICE_0_LOAD)
-		gran = calc_delta_fair(sysctl_sched_wakeup_granularity, se);
+	if (sched_feat(ASYM_GRAN)) {
+		/*
+		 * By using 'se' instead of 'curr' we penalize light tasks, so
+		 * they get preempted easier. That is, if 'se' < 'curr' then
+		 * the resulting gran will be larger, therefore penalizing the
+		 * lighter, if otoh 'se' > 'curr' then the resulting gran will
+		 * be smaller, again penalizing the lighter task.
+		 *
+		 * This is especially important for buddies when the leftmost
+		 * task is higher priority than the buddy.
+		 */
+		if (unlikely(se->load.weight != NICE_0_LOAD))
+			gran = calc_delta_fair(gran, se);
+	} else {
+		if (unlikely(curr->load.weight != NICE_0_LOAD))
+			gran = calc_delta_fair(gran, curr);
+	}

 	return gran;
 }
@@ -1350,7 +1397,7 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
 	if (vdiff <= 0)
 		return -1;

-	gran = wakeup_gran(curr);
+	gran = wakeup_gran(curr, se);
 	if (vdiff > gran)
 		return 1;


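The arithmetic in adaptive_gran() is easy to check with concrete numbers. A standalone sketch with made-up values (all in nanoseconds, as in the kernel):

#include <stdio.h>
#include <stdint.h>

/* Mirror of adaptive_gran() above, taking its inputs as parameters. */
static uint64_t adaptive_gran(uint64_t this_run, uint64_t avg_wakeup,
			      uint64_t nr_running, uint64_t wakeup_gran)
{
	uint64_t expected_wakeup = 2 * avg_wakeup * nr_running;
	uint64_t gran = 0;

	if (this_run < expected_wakeup)
		gran = expected_wakeup - this_run;

	return gran < wakeup_gran ? gran : wakeup_gran;	/* the min_t() */
}

int main(void)
{
	/* Ran 1 ms so far, wakes up every 2 ms on average, 2 runnable
	 * tasks, 5 ms default granularity: expected = 2 * 2 * 2 = 8 ms,
	 * gran = min(8 ms - 1 ms, 5 ms) = 5 ms. */
	printf("%llu\n", (unsigned long long)
	       adaptive_gran(1000000, 2000000, 2, 5000000));
	return 0;
}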
+ 2 - 1
kernel/sched_features.h

@@ -1,5 +1,6 @@
 SCHED_FEAT(NEW_FAIR_SLEEPERS, 1)
-SCHED_FEAT(NORMALIZED_SLEEPER, 1)
+SCHED_FEAT(NORMALIZED_SLEEPER, 0)
+SCHED_FEAT(ADAPTIVE_GRAN, 1)
 SCHED_FEAT(WAKEUP_PREEMPT, 1)
 SCHED_FEAT(START_DEBIT, 1)
 SCHED_FEAT(AFFINE_WAKEUPS, 1)

+ 374 - 163
kernel/sched_rt.c

@@ -3,6 +3,40 @@
  * policies)
  */

+static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
+{
+	return container_of(rt_se, struct task_struct, rt);
+}
+
+#ifdef CONFIG_RT_GROUP_SCHED
+
+static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
+{
+	return rt_rq->rq;
+}
+
+static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
+{
+	return rt_se->rt_rq;
+}
+
+#else /* CONFIG_RT_GROUP_SCHED */
+
+static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
+{
+	return container_of(rt_rq, struct rq, rt);
+}
+
+static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
+{
+	struct task_struct *p = rt_task_of(rt_se);
+	struct rq *rq = task_rq(p);
+
+	return &rq->rt;
+}
+
+#endif /* CONFIG_RT_GROUP_SCHED */
+
 #ifdef CONFIG_SMP

 static inline int rt_overloaded(struct rq *rq)
@@ -37,25 +71,69 @@ static inline void rt_clear_overload(struct rq *rq)
 	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
 }

-static void update_rt_migration(struct rq *rq)
+static void update_rt_migration(struct rt_rq *rt_rq)
 {
-	if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
-		if (!rq->rt.overloaded) {
-			rt_set_overload(rq);
-			rq->rt.overloaded = 1;
+	if (rt_rq->rt_nr_migratory && (rt_rq->rt_nr_running > 1)) {
+		if (!rt_rq->overloaded) {
+			rt_set_overload(rq_of_rt_rq(rt_rq));
+			rt_rq->overloaded = 1;
 		}
-	} else if (rq->rt.overloaded) {
-		rt_clear_overload(rq);
-		rq->rt.overloaded = 0;
+	} else if (rt_rq->overloaded) {
+		rt_clear_overload(rq_of_rt_rq(rt_rq));
+		rt_rq->overloaded = 0;
 	}
 }
-#endif /* CONFIG_SMP */

-static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
+static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+	if (rt_se->nr_cpus_allowed > 1)
+		rt_rq->rt_nr_migratory++;
+
+	update_rt_migration(rt_rq);
+}
+
+static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+	if (rt_se->nr_cpus_allowed > 1)
+		rt_rq->rt_nr_migratory--;
+
+	update_rt_migration(rt_rq);
+}
+
+static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
+{
+	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
+	plist_node_init(&p->pushable_tasks, p->prio);
+	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
+}
+
+static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
+{
+	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
+}
+
+#else
+
+static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
 {
-	return container_of(rt_se, struct task_struct, rt);
 }

+static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
+{
+}
+
+static inline
+void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+}
+
+static inline
+void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+}
+
+#endif /* CONFIG_SMP */
+
 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
 {
 	return !list_empty(&rt_se->run_list);
@@ -79,16 +157,6 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq)
 #define for_each_leaf_rt_rq(rt_rq, rq) \
 	list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)

-static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
-{
-	return rt_rq->rq;
-}
-
-static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
-{
-	return rt_se->rt_rq;
-}
-
 #define for_each_sched_rt_entity(rt_se) \
 	for (; rt_se; rt_se = rt_se->parent)

@@ -108,7 +176,7 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 	if (rt_rq->rt_nr_running) {
 		if (rt_se && !on_rt_rq(rt_se))
 			enqueue_rt_entity(rt_se);
-		if (rt_rq->highest_prio < curr->prio)
+		if (rt_rq->highest_prio.curr < curr->prio)
 			resched_task(curr);
 	}
 }
@@ -176,19 +244,6 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq)
 #define for_each_leaf_rt_rq(rt_rq, rq) \
 	for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

-static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
-{
-	return container_of(rt_rq, struct rq, rt);
-}
-
-static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
-{
-	struct task_struct *p = rt_task_of(rt_se);
-	struct rq *rq = task_rq(p);
-
-	return &rq->rt;
-}
-
 #define for_each_sched_rt_entity(rt_se) \
 	for (; rt_se; rt_se = NULL)

@@ -473,7 +528,7 @@ static inline int rt_se_prio(struct sched_rt_entity *rt_se)
 	struct rt_rq *rt_rq = group_rt_rq(rt_se);

 	if (rt_rq)
-		return rt_rq->highest_prio;
+		return rt_rq->highest_prio.curr;
 #endif

 	return rt_task_of(rt_se)->prio;
@@ -547,91 +602,174 @@ static void update_curr_rt(struct rq *rq)
 	}
 }

-static inline
-void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+#if defined CONFIG_SMP
+
+static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu);
+
+static inline int next_prio(struct rq *rq)
 {
-	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
-	rt_rq->rt_nr_running++;
-#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
-	if (rt_se_prio(rt_se) < rt_rq->highest_prio) {
-#ifdef CONFIG_SMP
-		struct rq *rq = rq_of_rt_rq(rt_rq);
-#endif
+	struct task_struct *next = pick_next_highest_task_rt(rq, rq->cpu);
+
+	if (next && rt_prio(next->prio))
+		return next->prio;
+	else
+		return MAX_RT_PRIO;
+}
+
+static void
+inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
+{
+	struct rq *rq = rq_of_rt_rq(rt_rq);
+
+	if (prio < prev_prio) {
+
+		/*
+		 * If the new task is higher in priority than anything on the
+		 * run-queue, we know that the previous high becomes our
+		 * next-highest.
+		 */
+		rt_rq->highest_prio.next = prev_prio;

-		rt_rq->highest_prio = rt_se_prio(rt_se);
-#ifdef CONFIG_SMP
 		if (rq->online)
-			cpupri_set(&rq->rd->cpupri, rq->cpu,
-				   rt_se_prio(rt_se));
-#endif
-	}
-#endif
-#ifdef CONFIG_SMP
-	if (rt_se->nr_cpus_allowed > 1) {
-		struct rq *rq = rq_of_rt_rq(rt_rq);
+			cpupri_set(&rq->rd->cpupri, rq->cpu, prio);

-		rq->rt.rt_nr_migratory++;
-	}
+	} else if (prio == rt_rq->highest_prio.curr)
+		/*
+		 * If the next task is equal in priority to the highest on
+		 * the run-queue, then we implicitly know that the next highest
+		 * task cannot be any lower than current
+		 */
+		rt_rq->highest_prio.next = prio;
+	else if (prio < rt_rq->highest_prio.next)
+		/*
+		 * Otherwise, we need to recompute next-highest
+		 */
+		rt_rq->highest_prio.next = next_prio(rq);
+}

-	update_rt_migration(rq_of_rt_rq(rt_rq));
-#endif
-#ifdef CONFIG_RT_GROUP_SCHED
-	if (rt_se_boosted(rt_se))
-		rt_rq->rt_nr_boosted++;
+static void
+dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
+{
+	struct rq *rq = rq_of_rt_rq(rt_rq);

-	if (rt_rq->tg)
-		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
-#else
-	start_rt_bandwidth(&def_rt_bandwidth);
-#endif
+	if (rt_rq->rt_nr_running && (prio <= rt_rq->highest_prio.next))
+		rt_rq->highest_prio.next = next_prio(rq);
+
+	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
+		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
 }

+#else /* CONFIG_SMP */
+
 static inline
-void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
-{
-#ifdef CONFIG_SMP
-	int highest_prio = rt_rq->highest_prio;
-#endif
+void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
+static inline
+void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
+
+#endif /* CONFIG_SMP */

-	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
-	WARN_ON(!rt_rq->rt_nr_running);
-	rt_rq->rt_nr_running--;
 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
+static void
+inc_rt_prio(struct rt_rq *rt_rq, int prio)
+{
+	int prev_prio = rt_rq->highest_prio.curr;
+
+	if (prio < prev_prio)
+		rt_rq->highest_prio.curr = prio;
+
+	inc_rt_prio_smp(rt_rq, prio, prev_prio);
+}
+
+static void
+dec_rt_prio(struct rt_rq *rt_rq, int prio)
+{
+	int prev_prio = rt_rq->highest_prio.curr;
+
 	if (rt_rq->rt_nr_running) {
-		struct rt_prio_array *array;

-		WARN_ON(rt_se_prio(rt_se) < rt_rq->highest_prio);
-		if (rt_se_prio(rt_se) == rt_rq->highest_prio) {
-			/* recalculate */
-			array = &rt_rq->active;
-			rt_rq->highest_prio =
+		WARN_ON(prio < prev_prio);
+
+		/*
+		 * This may have been our highest task, and therefore
+		 * we may have some recomputation to do
+		 */
+		if (prio == prev_prio) {
+			struct rt_prio_array *array = &rt_rq->active;
+
+			rt_rq->highest_prio.curr =
 				sched_find_first_bit(array->bitmap);
-		} /* otherwise leave rq->highest prio alone */
+		}
+
 	} else
-		rt_rq->highest_prio = MAX_RT_PRIO;
-#endif
-#ifdef CONFIG_SMP
-	if (rt_se->nr_cpus_allowed > 1) {
-		struct rq *rq = rq_of_rt_rq(rt_rq);
-		rq->rt.rt_nr_migratory--;
-	}
+		rt_rq->highest_prio.curr = MAX_RT_PRIO;

-	if (rt_rq->highest_prio != highest_prio) {
-		struct rq *rq = rq_of_rt_rq(rt_rq);
+	dec_rt_prio_smp(rt_rq, prio, prev_prio);
+}

-		if (rq->online)
-			cpupri_set(&rq->rd->cpupri, rq->cpu,
-				   rt_rq->highest_prio);
-	}
+#else
+
+static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
+static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
+
+#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */

-	update_rt_migration(rq_of_rt_rq(rt_rq));
-#endif /* CONFIG_SMP */
 #ifdef CONFIG_RT_GROUP_SCHED
+
+static void
+inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+	if (rt_se_boosted(rt_se))
+		rt_rq->rt_nr_boosted++;
+
+	if (rt_rq->tg)
+		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
+}
+
+static void
+dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
 	if (rt_se_boosted(rt_se))
 		rt_rq->rt_nr_boosted--;

 	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
-#endif
+}
+
+#else /* CONFIG_RT_GROUP_SCHED */
+
+static void
+inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+	start_rt_bandwidth(&def_rt_bandwidth);
+}
+
+static inline
+void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
+
+#endif /* CONFIG_RT_GROUP_SCHED */
+
+static inline
+void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+	int prio = rt_se_prio(rt_se);
+
+	WARN_ON(!rt_prio(prio));
+	rt_rq->rt_nr_running++;
+
+	inc_rt_prio(rt_rq, prio);
+	inc_rt_migration(rt_se, rt_rq);
+	inc_rt_group(rt_se, rt_rq);
+}
+
+static inline
+void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
+	WARN_ON(!rt_rq->rt_nr_running);
+	rt_rq->rt_nr_running--;
+
+	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
+	dec_rt_migration(rt_se, rt_rq);
+	dec_rt_group(rt_se, rt_rq);
 }

 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
@@ -718,6 +856,9 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)

 	enqueue_rt_entity(rt_se);

+	if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
+		enqueue_pushable_task(rq, p);
+
 	inc_cpu_load(rq, p->se.load.weight);
 }

@@ -728,6 +869,8 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
 	update_curr_rt(rq);
 	dequeue_rt_entity(rt_se);

+	dequeue_pushable_task(rq, p);
+
 	dec_cpu_load(rq, p->se.load.weight);
 }

@@ -878,7 +1021,7 @@ static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
 	return next;
 }

-static struct task_struct *pick_next_task_rt(struct rq *rq)
+static struct task_struct *_pick_next_task_rt(struct rq *rq)
 {
 	struct sched_rt_entity *rt_se;
 	struct task_struct *p;
@@ -900,6 +1043,18 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)

 	p = rt_task_of(rt_se);
 	p->se.exec_start = rq->clock;
+
+	return p;
+}
+
+static struct task_struct *pick_next_task_rt(struct rq *rq)
+{
+	struct task_struct *p = _pick_next_task_rt(rq);
+
+	/* The running task is never eligible for pushing */
+	if (p)
+		dequeue_pushable_task(rq, p);
+
 	return p;
 }

@@ -907,6 +1062,13 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 {
 	update_curr_rt(rq);
 	p->se.exec_start = 0;
+
+	/*
+	 * The previous task needs to be made eligible for pushing
+	 * if it is still active
+	 */
+	if (p->se.on_rq && p->rt.nr_cpus_allowed > 1)
+		enqueue_pushable_task(rq, p);
 }

 #ifdef CONFIG_SMP
@@ -1072,7 +1234,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 		}

 		/* If this rq is still suitable use it. */
-		if (lowest_rq->rt.highest_prio > task->prio)
+		if (lowest_rq->rt.highest_prio.curr > task->prio)
 			break;

 		/* try again */
@@ -1083,6 +1245,31 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 	return lowest_rq;
 }

+static inline int has_pushable_tasks(struct rq *rq)
+{
+	return !plist_head_empty(&rq->rt.pushable_tasks);
+}
+
+static struct task_struct *pick_next_pushable_task(struct rq *rq)
+{
+	struct task_struct *p;
+
+	if (!has_pushable_tasks(rq))
+		return NULL;
+
+	p = plist_first_entry(&rq->rt.pushable_tasks,
+			      struct task_struct, pushable_tasks);
+
+	BUG_ON(rq->cpu != task_cpu(p));
+	BUG_ON(task_current(rq, p));
+	BUG_ON(p->rt.nr_cpus_allowed <= 1);
+
+	BUG_ON(!p->se.on_rq);
+	BUG_ON(!rt_task(p));
+
+	return p;
+}
+
 /*
  * If the current CPU has more than one RT task, see if the non
  * running task can migrate over to a CPU that is running a task
@@ -1092,13 +1279,11 @@ static int push_rt_task(struct rq *rq)
 {
 	struct task_struct *next_task;
 	struct rq *lowest_rq;
-	int ret = 0;
-	int paranoid = RT_MAX_TRIES;

 	if (!rq->rt.overloaded)
 		return 0;

-	next_task = pick_next_highest_task_rt(rq, -1);
+	next_task = pick_next_pushable_task(rq);
 	if (!next_task)
 		return 0;

@@ -1127,16 +1312,34 @@ static int push_rt_task(struct rq *rq)
 		struct task_struct *task;
 		/*
 		 * find lock_lowest_rq releases rq->lock
-		 * so it is possible that next_task has changed.
-		 * If it has, then try again.
+		 * so it is possible that next_task has migrated.
+		 *
+		 * We need to make sure that the task is still on the same
+		 * run-queue and is also still the next task eligible for
+		 * pushing.
 		 */
-		task = pick_next_highest_task_rt(rq, -1);
-		if (unlikely(task != next_task) && task && paranoid--) {
-			put_task_struct(next_task);
-			next_task = task;
-			goto retry;
+		task = pick_next_pushable_task(rq);
+		if (task_cpu(next_task) == rq->cpu && task == next_task) {
+			/*
+			 * If we get here, the task hasnt moved at all, but
+			 * it has failed to push.  We will not try again,
+			 * since the other cpus will pull from us when they
+			 * are ready.
+			 */
+			dequeue_pushable_task(rq, next_task);
+			goto out;
 		}
-		goto out;
+
+		if (!task)
+			/* No more tasks, just exit */
+			goto out;
+
+		/*
+		 * Something has shifted, try again.
+		 */
+		put_task_struct(next_task);
+		next_task = task;
+		goto retry;
 	}

 	deactivate_task(rq, next_task, 0);
@@ -1147,23 +1350,12 @@ static int push_rt_task(struct rq *rq)

 	double_unlock_balance(rq, lowest_rq);

-	ret = 1;
 out:
 	put_task_struct(next_task);

-	return ret;
+	return 1;
 }

-/*
- * TODO: Currently we just use the second highest prio task on
- *       the queue, and stop when it can't migrate (or there's
- *       no more RT tasks).  There may be a case where a lower
- *       priority RT task has a different affinity than the
- *       higher RT task. In this case the lower RT task could
- *       possibly be able to migrate where as the higher priority
- *       RT task could not.  We currently ignore this issue.
- *       Enhancements are welcome!
- */
 static void push_rt_tasks(struct rq *rq)
 {
 	/* push_rt_task will return true if it moved an RT */
@@ -1174,33 +1366,35 @@ static void push_rt_tasks(struct rq *rq)
 static int pull_rt_task(struct rq *this_rq)
 {
 	int this_cpu = this_rq->cpu, ret = 0, cpu;
-	struct task_struct *p, *next;
+	struct task_struct *p;
 	struct rq *src_rq;

 	if (likely(!rt_overloaded(this_rq)))
 		return 0;

-	next = pick_next_task_rt(this_rq);
-
 	for_each_cpu(cpu, this_rq->rd->rto_mask) {
 		if (this_cpu == cpu)
 			continue;

 		src_rq = cpu_rq(cpu);
+
+		/*
+		 * Don't bother taking the src_rq->lock if the next highest
+		 * task is known to be lower-priority than our current task.
+		 * This may look racy, but if this value is about to go
+		 * logically higher, the src_rq will push this task away.
+		 * And if its going logically lower, we do not care
+		 */
+		if (src_rq->rt.highest_prio.next >=
+		    this_rq->rt.highest_prio.curr)
+			continue;
+
 		/*
 		 * We can potentially drop this_rq's lock in
 		 * double_lock_balance, and another CPU could
-		 * steal our next task - hence we must cause
-		 * the caller to recalculate the next task
-		 * in that case:
+		 * alter this_rq
 		 */
-		if (double_lock_balance(this_rq, src_rq)) {
-			struct task_struct *old_next = next;
-
-			next = pick_next_task_rt(this_rq);
-			if (next != old_next)
-				ret = 1;
-		}
+		double_lock_balance(this_rq, src_rq);

 		/*
 		 * Are there still pullable RT tasks?
@@ -1214,7 +1408,7 @@ static int pull_rt_task(struct rq *this_rq)
 		 * Do we have an RT task that preempts
 		 * the to-be-scheduled task?
 		 */
-		if (p && (!next || (p->prio < next->prio))) {
+		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
 			WARN_ON(p == src_rq->curr);
 			WARN_ON(!p->se.on_rq);

@@ -1224,12 +1418,9 @@ static int pull_rt_task(struct rq *this_rq)
 			 * This is just that p is wakeing up and hasn't
 			 * had a chance to schedule. We only pull
 			 * p if it is lower in priority than the
-			 * current task on the run queue or
-			 * this_rq next task is lower in prio than
-			 * the current task on that rq.
+			 * current task on the run queue
 			 */
-			if (p->prio < src_rq->curr->prio ||
-			    (next && next->prio < src_rq->curr->prio))
+			if (p->prio < src_rq->curr->prio)
 				goto skip;

 			ret = 1;
@@ -1242,13 +1433,7 @@ static int pull_rt_task(struct rq *this_rq)
 			 * case there's an even higher prio task
 			 * in another runqueue. (low likelyhood
 			 * but possible)
-			 *
-			 * Update next so that we won't pick a task
-			 * on another cpu with a priority lower (or equal)
-			 * than the one we just picked.
 			 */
-			next = p;
-
 		}
  skip:
 		double_unlock_balance(this_rq, src_rq);
@@ -1260,24 +1445,27 @@ static int pull_rt_task(struct rq *this_rq)
 static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
 {
 	/* Try to pull RT tasks here if we lower this rq's prio */
-	if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio)
+	if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio)
 		pull_rt_task(rq);
 }

+/*
+ * assumes rq->lock is held
+ */
+static int needs_post_schedule_rt(struct rq *rq)
+{
+	return has_pushable_tasks(rq);
+}
+
 static void post_schedule_rt(struct rq *rq)
 {
 	/*
-	 * If we have more than one rt_task queued, then
-	 * see if we can push the other rt_tasks off to other CPUS.
-	 * Note we may release the rq lock, and since
-	 * the lock was owned by prev, we need to release it
-	 * first via finish_lock_switch and then reaquire it here.
+	 * This is only called if needs_post_schedule_rt() indicates that
+	 * we need to push tasks away
 	 */
-	if (unlikely(rq->rt.overloaded)) {
-		spin_lock_irq(&rq->lock);
-		push_rt_tasks(rq);
-		spin_unlock_irq(&rq->lock);
-	}
+	spin_lock_irq(&rq->lock);
+	push_rt_tasks(rq);
+	spin_unlock_irq(&rq->lock);
 }

 /*
@@ -1288,7 +1476,8 @@ static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
 {
 	if (!task_running(rq, p) &&
 	    !test_tsk_need_resched(rq->curr) &&
-	    rq->rt.overloaded)
+	    has_pushable_tasks(rq) &&
+	    p->rt.nr_cpus_allowed > 1)
 		push_rt_tasks(rq);
 }

@@ -1324,6 +1513,24 @@ static void set_cpus_allowed_rt(struct task_struct *p,
 	if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
 		struct rq *rq = task_rq(p);

+		if (!task_current(rq, p)) {
+			/*
+			 * Make sure we dequeue this task from the pushable list
+			 * before going further.  It will either remain off of
+			 * the list because we are no longer pushable, or it
+			 * will be requeued.
+			 */
+			if (p->rt.nr_cpus_allowed > 1)
+				dequeue_pushable_task(rq, p);
+
+			/*
+			 * Requeue if our weight is changing and still > 1
+			 */
+			if (weight > 1)
+				enqueue_pushable_task(rq, p);
+
+		}
+
 		if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
 			rq->rt.rt_nr_migratory++;
 		} else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
@@ -1331,7 +1538,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
 			rq->rt.rt_nr_migratory--;
 		}

-		update_rt_migration(rq);
+		update_rt_migration(&rq->rt);
 	}

 	cpumask_copy(&p->cpus_allowed, new_mask);
@@ -1346,7 +1553,7 @@ static void rq_online_rt(struct rq *rq)

 	__enable_runtime(rq);

-	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio);
+	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
 }

 /* Assumes rq->lock is held */
@@ -1438,7 +1645,7 @@ static void prio_changed_rt(struct rq *rq, struct task_struct *p,
 		 * can release the rq lock and p could migrate.
 		 * Only reschedule if p is still on the same runqueue.
 		 */
-		if (p->prio > rq->rt.highest_prio && rq->curr == p)
+		if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
 			resched_task(p);
 #else
 		/* For UP simply resched on drop of prio */
@@ -1509,6 +1716,9 @@ static void set_curr_task_rt(struct rq *rq)
 	struct task_struct *p = rq->curr;

 	p->se.exec_start = rq->clock;
+
+	/* The running task is never eligible for pushing */
+	dequeue_pushable_task(rq, p);
}

 static const struct sched_class rt_sched_class = {
@@ -1531,6 +1741,7 @@ static const struct sched_class rt_sched_class = {
 	.rq_online              = rq_online_rt,
 	.rq_offline             = rq_offline_rt,
 	.pre_schedule		= pre_schedule_rt,
+	.needs_post_schedule	= needs_post_schedule_rt,
 	.post_schedule		= post_schedule_rt,
 	.task_wake_up		= task_wake_up_rt,
 	.switched_from		= switched_from_rt,
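The pushable_tasks plist that threads through this file keeps, per runqueue, the migratable RT tasks sorted by priority, so push_rt_task() can take the best candidate from the head instead of rescanning the whole queue. A userspace sketch of that bookkeeping, with a simple sorted singly-linked list standing in for the kernel's plist (all names here are illustrative):

#include <stdio.h>

struct ptask {
	int prio;			/* lower value = higher RT priority */
	struct ptask *next;
};

static struct ptask *pushable;		/* per-runqueue in the kernel */

/* Analogue of enqueue_pushable_task(): keep the list sorted by prio. */
static void enqueue_pushable(struct ptask *p)
{
	struct ptask **pp = &pushable;

	while (*pp && (*pp)->prio <= p->prio)
		pp = &(*pp)->next;
	p->next = *pp;
	*pp = p;
}

/* Analogue of pick_next_pushable_task(): best candidate is the head. */
static struct ptask *pick_next_pushable(void)
{
	return pushable;
}

int main(void)
{
	struct ptask a = { .prio = 40 }, b = { .prio = 10 }, c = { .prio = 25 };

	enqueue_pushable(&a);
	enqueue_pushable(&b);
	enqueue_pushable(&c);
	printf("next pushable prio: %d\n", pick_next_pushable()->prio); /* 10 */
	return 0;
}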

+ 3 - 4
kernel/sched_stats.h

@@ -4,7 +4,7 @@
  * bump this up when changing the output format or the meaning of an existing
  * format, so that tools can adapt (or abort)
  */
-#define SCHEDSTAT_VERSION 14
+#define SCHEDSTAT_VERSION 15

 static int show_schedstat(struct seq_file *seq, void *v)
 {
@@ -26,9 +26,8 @@ static int show_schedstat(struct seq_file *seq, void *v)

 		/* runqueue-specific stats */
 		seq_printf(seq,
-		    "cpu%d %u %u %u %u %u %u %u %u %u %llu %llu %lu",
-		    cpu, rq->yld_both_empty,
-		    rq->yld_act_empty, rq->yld_exp_empty, rq->yld_count,
+		    "cpu%d %u %u %u %u %u %u %llu %llu %lu",
+		    cpu, rq->yld_count,
 		    rq->sched_switch, rq->sched_count, rq->sched_goidle,
 		    rq->ttwu_count, rq->ttwu_local,
 		    rq->rq_cpu_time,

+ 0 - 6
lib/Kconfig

@@ -136,12 +136,6 @@ config TEXTSEARCH_BM
 config TEXTSEARCH_FSM
 	tristate

-#
-# plist support is select#ed if needed
-#
-config PLIST
-	boolean
-
 config HAS_IOMEM
 	boolean
 	depends on !NO_IOMEM

+ 2 - 2
lib/Makefile

@@ -11,7 +11,8 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 rbtree.o radix-tree.o dump_stack.o \
 	 idr.o int_sqrt.o extable.o prio_tree.o \
 	 sha1.o irq_regs.o reciprocal_div.o argv_split.o \
-	 proportions.o prio_heap.o ratelimit.o show_mem.o is_single_threaded.o
+	 proportions.o prio_heap.o ratelimit.o show_mem.o \
+	 is_single_threaded.o plist.o

 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
@@ -40,7 +41,6 @@ lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
 lib-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o
 obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
 obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
-obj-$(CONFIG_PLIST) += plist.o
 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
 obj-$(CONFIG_DEBUG_LIST) += list_debug.o
 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o

+ 1 - 1
lib/kernel_lock.c

@@ -39,7 +39,7 @@ static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
 int __lockfunc __reacquire_kernel_lock(void)
 {
 	while (!_raw_spin_trylock(&kernel_flag)) {
-		if (test_thread_flag(TIF_NEED_RESCHED))
+		if (need_resched())
 			return -EAGAIN;
 		cpu_relax();
 	}

Some files were not shown because too many files changed in this diff