Merge branch 'core/locking' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/random-tracing into core/locking

Ingo Molnar, 15 years ago
commit 0e417fe1f2
2 changed files with 8 additions and 9 deletions:
  1. kernel/lockdep.c (+6, -1)
  2. kernel/lockdep_internals.h (+2, -8)

kernel/lockdep.c (+6, -1)

@@ -2298,7 +2298,12 @@ void trace_hardirqs_on_caller(unsigned long ip)
 		return;
 
 	if (unlikely(curr->hardirqs_enabled)) {
-		debug_atomic_inc(redundant_hardirqs_on);
+		/*
+		 * Neither irqs nor preemption is disabled here,
+		 * so this is racy by nature, but losing one hit
+		 * in a stat is not a big deal.
+		 */
+		this_cpu_inc(lockdep_stats.redundant_hardirqs_on);
 		return;
 	}
 	/* we'll do an OFF -> ON transition: */
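
For readers unfamiliar with the accessor this hunk adopts, here is a minimal sketch of the this_cpu_inc() pattern, assuming kernel context; only the struct field name and this_cpu_inc() itself come from the diff, and the wrapper function is hypothetical:

#include <linux/percpu.h>

/* Per-cpu stats structure, abridged from kernel/lockdep_internals.h. */
struct lockdep_stats {
	int redundant_hardirqs_on;
	/* ... other counters elided ... */
};

DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);

/* Hypothetical wrapper, for illustration only. */
static void count_redundant_hardirqs_on(void)
{
	/*
	 * this_cpu_inc() folds the per-cpu address calculation and
	 * the increment into one operation that is safe against
	 * preemption, so the caller no longer needs irqs disabled;
	 * at worst a race costs one lost hit in a statistic.
	 */
	this_cpu_inc(lockdep_stats.redundant_hardirqs_on);
}

This is what lets the new comment in the hunk tolerate the race instead of insisting on irqs_disabled(), as the old debug_atomic_inc() did.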

kernel/lockdep_internals.h (+2, -8)

@@ -140,19 +140,13 @@ struct lockdep_stats {
 DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);
 
 #define debug_atomic_inc(ptr)			{		\
-	struct lockdep_stats *__cpu_lockdep_stats;		\
-								\
 	WARN_ON_ONCE(!irqs_disabled());				\
-	__cpu_lockdep_stats = &__get_cpu_var(lockdep_stats);	\
-	__cpu_lockdep_stats->ptr++;				\
+	this_cpu_inc(lockdep_stats.ptr);			\
 }
 
 #define debug_atomic_dec(ptr)			{		\
-	struct lockdep_stats *__cpu_lockdep_stats;		\
-								\
 	WARN_ON_ONCE(!irqs_disabled());				\
-	__cpu_lockdep_stats = &__get_cpu_var(lockdep_stats);	\
-	__cpu_lockdep_stats->ptr--;				\
+	this_cpu_dec(lockdep_stats.ptr);			\
 }
 
 #define debug_atomic_read(ptr)		({				\
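
The hunk is truncated above at debug_atomic_read(); as a hedged sketch of the read side, a helper that totals one of these per-cpu counters could look like the following (the function name is hypothetical; per_cpu() and for_each_possible_cpu() are standard kernel accessors):

#include <linux/cpumask.h>
#include <linux/percpu.h>

DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);

/* Hypothetical read-side helper, for illustration only. */
static unsigned long long total_redundant_hardirqs_on(void)
{
	unsigned long long total = 0;
	int cpu;

	/* Sum every CPU's private copy of the counter. */
	for_each_possible_cpu(cpu)
		total += per_cpu(lockdep_stats, cpu).redundant_hardirqs_on;

	return total;
}

Reads like this are approximate for the same reason the writes are: they sum counters that other CPUs may be updating concurrently, which is acceptable for statistics.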