ftrace: remove ad-hoc code

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Ingo Molnar, 17 years ago
Parent commit: 4d9493c90f
3 changed files with 2 additions and 58 deletions
  1. kernel/sched.c (+0, -47)
  2. kernel/sched_fair.c (+0, -3)
  3. kernel/trace/trace_sched_switch.c (+2, -8)

+ 0 - 47
kernel/sched.c

@@ -2412,53 +2412,6 @@ static int sched_balance_self(int cpu, int flag)
 
 #endif /* CONFIG_SMP */
 
-#ifdef CONFIG_CONTEXT_SWITCH_TRACER
-
-void ftrace_task(struct task_struct *p, void *__tr, void *__data)
-{
-#if 0
-	/*  
-	 * trace timeline tree
-	 */
-	__trace_special(__tr, __data,
-			p->pid, p->se.vruntime, p->se.sum_exec_runtime);
-#else
-	/*
-	 * trace balance metrics
-	 */
-	__trace_special(__tr, __data,
-			p->pid, p->se.avg_overlap, 0);
-#endif
-}
-
-void ftrace_all_fair_tasks(void *__rq, void *__tr, void *__data)
-{
-	struct task_struct *p;
-	struct sched_entity *se;
-	struct rb_node *curr;
-	struct rq *rq = __rq;
-
-	if (rq->cfs.curr) {
-		p = task_of(rq->cfs.curr);
-		ftrace_task(p, __tr, __data);
-	}
-	if (rq->cfs.next) {
-		p = task_of(rq->cfs.next);
-		ftrace_task(p, __tr, __data);
-	}
-
-	for (curr = first_fair(&rq->cfs); curr; curr = rb_next(curr)) {
-		se = rb_entry(curr, struct sched_entity, run_node);
-		if (!entity_is_task(se))
-			continue;
-
-		p = task_of(se);
-		ftrace_task(p, __tr, __data);
-	}
-}
-
-#endif
-
 /***
  * try_to_wake_up - wake up a thread
  * @p: the to-be-woken-up thread
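
For context: ftrace_all_fair_tasks(), deleted above, walked the CFS runqueue's red-black tree with the standard rbtree iteration idiom from <linux/rbtree.h> (first_fair() is a sched_fair-internal helper that returns the tree's leftmost node). A minimal sketch of the same idiom, with hypothetical names (demo_node, demo_walk) standing in for the scheduler types:

#include <linux/rbtree.h>

struct demo_node {
	struct rb_node link;	/* linkage into the rb-tree */
	int value;
};

/* visit every node in sorted (in-order) sequence */
static void demo_walk(struct rb_root *root)
{
	struct rb_node *curr;

	for (curr = rb_first(root); curr; curr = rb_next(curr)) {
		struct demo_node *n = rb_entry(curr, struct demo_node, link);
		/* act on n here, e.g. record n->value */
	}
}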

+ 0 - 3
kernel/sched_fair.c

@@ -1061,8 +1061,6 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
 	if (!(this_sd->flags & SD_WAKE_AFFINE))
 		return 0;
 
-	ftrace_special(__LINE__, curr->se.avg_overlap, sync);
-	ftrace_special(__LINE__, p->se.avg_overlap, -1);
 	/*
 	 * If the currently running task will sleep within
 	 * a reasonable amount of time then attract this newly
@@ -1240,7 +1238,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
 	if (unlikely(se == pse))
 		return;
 
-	ftrace_special(__LINE__, p->pid, se->last_wakeup);
 	cfs_rq_of(pse)->next = pse;
 
 	/*
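
The lines dropped from this file were ftrace_special() probes, part of the ad-hoc instrumentation this commit removes: each call records three scalar values as a "special" entry in the trace buffer, with __LINE__ serving as a tag to tell the probes apart. A typical probe mirrored from the removed calls (some_metric is a placeholder):

	ftrace_special(__LINE__, some_metric, 0);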

+ 2 - 8
kernel/trace/trace_sched_switch.c

@@ -36,11 +36,8 @@ ctx_switch_func(void *__rq, struct task_struct *prev, struct task_struct *next)
 	data = tr->data[cpu];
 	disabled = atomic_inc_return(&data->disabled);
 
-	if (likely(disabled == 1)) {
+	if (likely(disabled == 1))
 		tracing_sched_switch_trace(tr, data, prev, next, flags);
-		if (trace_flags & TRACE_ITER_SCHED_TREE)
-			ftrace_all_fair_tasks(__rq, tr, data);
-	}
 
 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
@@ -65,11 +62,8 @@ wakeup_func(void *__rq, struct task_struct *wakee, struct task_struct *curr)
 	data = tr->data[cpu];
 	disabled = atomic_inc_return(&data->disabled);
 
-	if (likely(disabled == 1)) {
+	if (likely(disabled == 1))
 		tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);
-		if (trace_flags & TRACE_ITER_SCHED_TREE)
-			ftrace_all_fair_tasks(__rq, tr, data);
-	}
 
 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
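
Both hunks in this file share one structure: atomic_inc_return() on the per-cpu data->disabled counter acts as a re-entrancy guard, so the hook records an event only on the outermost entry for that CPU, and with the ftrace_all_fair_tasks() tree dump gone the guarded body shrinks to a single statement, which is why the braces are dropped. For reference, the post-image of the ctx_switch_func() hunk, reconstructed from the lines visible above:

	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		tracing_sched_switch_trace(tr, data, prev, next, flags);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);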