|
@@ -1592,6 +1592,10 @@ static void __sched_fork(struct task_struct *p)
|
|
|
INIT_LIST_HEAD(&p->run_list);
|
|
|
p->se.on_rq = 0;
|
|
|
|
|
|
+#ifdef CONFIG_PREEMPT_NOTIFIERS
|
|
|
+ INIT_HLIST_HEAD(&p->preempt_notifiers);
|
|
|
+#endif
|
|
|
+
|
|
|
/*
|
|
|
* We mark the process as running here, but have not actually
|
|
|
* inserted it onto the runqueue yet. This guarantees that
|
|
@@ -1673,6 +1677,63 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
|
|
|
task_rq_unlock(rq, &flags);
|
|
|
}
|
|
|
|
|
|
+#ifdef CONFIG_PREEMPT_NOTIFIERS
|
|
|
+
|
|
|
+/**
|
|
|
+ * preempt_notifier_register - tell me when current is being preempted
|
|
|
+ * and rescheduled
|
|
|
+ */
|
|
|
+void preempt_notifier_register(struct preempt_notifier *notifier)
|
|
|
+{
|
|
|
+	hlist_add_head(&notifier->link, &current->preempt_notifiers);
|
|
|
+}
|
|
|
+EXPORT_SYMBOL_GPL(preempt_notifier_register);
|
|
|
+
|
|
|
+/**
|
|
|
+ * preempt_notifier_unregister - no longer interested in preemption notifications
|
|
|
+ *
|
|
|
+ * This is safe to call from within a preemption notifier.
|
|
|
+ */
|
|
|
+void preempt_notifier_unregister(struct preempt_notifier *notifier)
|
|
|
+{
|
|
|
+	hlist_del(&notifier->link);
|
|
|
+}
|
|
|
+EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
|
|
|
+
|
|
|
+static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
|
|
|
+{
|
|
|
+ struct preempt_notifier *notifier;
|
|
|
+ struct hlist_node *node;
|
|
|
+
|
|
|
+ hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
|
|
|
+ notifier->ops->sched_in(notifier, raw_smp_processor_id());
|
|
|
+}
|
|
|
+
|
|
|
+static void
|
|
|
+fire_sched_out_preempt_notifiers(struct task_struct *curr,
|
|
|
+ struct task_struct *next)
|
|
|
+{
|
|
|
+ struct preempt_notifier *notifier;
|
|
|
+ struct hlist_node *node;
|
|
|
+
|
|
|
+ hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
|
|
|
+ notifier->ops->sched_out(notifier, next);
|
|
|
+}
|
|
|
+
|
|
|
+#else
|
|
|
+
|
|
|
+static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
|
|
|
+{
|
|
|
+}
|
|
|
+
|
|
|
+static void
|
|
|
+fire_sched_out_preempt_notifiers(struct task_struct *curr,
|
|
|
+ struct task_struct *next)
|
|
|
+{
|
|
|
+}
|
|
|
+
|
|
|
+#endif
|
|
|
+
|
|
|
/**
|
|
|
* prepare_task_switch - prepare to switch tasks
|
|
|
* @rq: the runqueue preparing to switch
|
|
@@ -1685,8 +1746,11 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
|
|
|
* prepare_task_switch sets up locking and calls architecture specific
|
|
|
* hooks.
|
|
|
*/
|
|
|
-static inline void prepare_task_switch(struct rq *rq, struct task_struct *next)
|
|
|
+static inline void
|
|
|
+prepare_task_switch(struct rq *rq, struct task_struct *prev,
|
|
|
+ struct task_struct *next)
|
|
|
{
|
|
|
+ fire_sched_out_preempt_notifiers(prev, next);
|
|
|
prepare_lock_switch(rq, next);
|
|
|
prepare_arch_switch(next);
|
|
|
}
|
|
@@ -1728,6 +1792,7 @@ static inline void finish_task_switch(struct rq *rq, struct task_struct *prev)
|
|
|
prev_state = prev->state;
|
|
|
finish_arch_switch(prev);
|
|
|
finish_lock_switch(rq, prev);
|
|
|
+ fire_sched_in_preempt_notifiers(current);
|
|
|
if (mm)
|
|
|
mmdrop(mm);
|
|
|
if (unlikely(prev_state == TASK_DEAD)) {
|
|
@@ -1768,7 +1833,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
|
|
|
{
|
|
|
struct mm_struct *mm, *oldmm;
|
|
|
|
|
|
- prepare_task_switch(rq, next);
|
|
|
+ prepare_task_switch(rq, prev, next);
|
|
|
mm = next->mm;
|
|
|
oldmm = prev->active_mm;
|
|
|
/*
|
|
@@ -6335,6 +6400,10 @@ void __init sched_init(void)
|
|
|
|
|
|
set_load_weight(&init_task);
|
|
|
|
|
|
+#ifdef CONFIG_PREEMPT_NOTIFIERS
|
|
|
+ INIT_HLIST_HEAD(&init_task.preempt_notifiers);
|
|
|
+#endif
|
|
|
+
|
|
|
#ifdef CONFIG_SMP
|
|
|
nr_cpu_ids = highest_cpu + 1;
|
|
|
open_softirq(SCHED_SOFTIRQ, run_rebalance_domains, NULL);
|