Browse Source

lockdep: Use cpu_clock() for lockstat

Some tracepoint magic (TRACE_EVENT(lock_acquired)) relies on
the fact that lock hold times are positive and uses div64 on
that. That triggered a build warning on MIPS, and probably
causes bad output in certain circumstances as well.

Make it truly positive.

Reported-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1254818502.21044.112.camel@laptop>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Peter Zijlstra cách đây 15 năm
mục cha
commit
3365e77987
1 tập tin đã thay đổi với 12 bổ sung và 8 xóa
  1. 12 8
      kernel/lockdep.c

+ 12 - 8
kernel/lockdep.c

@@ -142,6 +142,11 @@ static inline struct lock_class *hlock_class(struct held_lock *hlock)
 #ifdef CONFIG_LOCK_STAT
 #ifdef CONFIG_LOCK_STAT
 static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
 static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
 
 
+static inline u64 lockstat_clock(void)
+{
+	return cpu_clock(smp_processor_id());
+}
+
 static int lock_point(unsigned long points[], unsigned long ip)
 static int lock_point(unsigned long points[], unsigned long ip)
 {
 {
 	int i;
 	int i;
@@ -158,7 +163,7 @@ static int lock_point(unsigned long points[], unsigned long ip)
 	return i;
 	return i;
 }
 }
 
 
-static void lock_time_inc(struct lock_time *lt, s64 time)
+static void lock_time_inc(struct lock_time *lt, u64 time)
 {
 {
 	if (time > lt->max)
 	if (time > lt->max)
 		lt->max = time;
 		lt->max = time;
@@ -234,12 +239,12 @@ static void put_lock_stats(struct lock_class_stats *stats)
 static void lock_release_holdtime(struct held_lock *hlock)
 static void lock_release_holdtime(struct held_lock *hlock)
 {
 {
 	struct lock_class_stats *stats;
 	struct lock_class_stats *stats;
-	s64 holdtime;
+	u64 holdtime;
 
 
 	if (!lock_stat)
 	if (!lock_stat)
 		return;
 		return;
 
 
-	holdtime = sched_clock() - hlock->holdtime_stamp;
+	holdtime = lockstat_clock() - hlock->holdtime_stamp;
 
 
 	stats = get_lock_stats(hlock_class(hlock));
 	stats = get_lock_stats(hlock_class(hlock));
 	if (hlock->read)
 	if (hlock->read)
@@ -2792,7 +2797,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	hlock->references = references;
 	hlock->references = references;
 #ifdef CONFIG_LOCK_STAT
 #ifdef CONFIG_LOCK_STAT
 	hlock->waittime_stamp = 0;
 	hlock->waittime_stamp = 0;
-	hlock->holdtime_stamp = sched_clock();
+	hlock->holdtime_stamp = lockstat_clock();
 #endif
 #endif
 
 
 	if (check == 2 && !mark_irqflags(curr, hlock))
 	if (check == 2 && !mark_irqflags(curr, hlock))
@@ -3322,7 +3327,7 @@ found_it:
 	if (hlock->instance != lock)
 	if (hlock->instance != lock)
 		return;
 		return;
 
 
-	hlock->waittime_stamp = sched_clock();
+	hlock->waittime_stamp = lockstat_clock();
 
 
 	contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
 	contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
 	contending_point = lock_point(hlock_class(hlock)->contending_point,
 	contending_point = lock_point(hlock_class(hlock)->contending_point,
@@ -3345,8 +3350,7 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip)
 	struct held_lock *hlock, *prev_hlock;
 	struct held_lock *hlock, *prev_hlock;
 	struct lock_class_stats *stats;
 	struct lock_class_stats *stats;
 	unsigned int depth;
 	unsigned int depth;
-	u64 now;
-	s64 waittime = 0;
+	u64 now, waittime = 0;
 	int i, cpu;
 	int i, cpu;
 
 
 	depth = curr->lockdep_depth;
 	depth = curr->lockdep_depth;
@@ -3374,7 +3378,7 @@ found_it:
 
 
 	cpu = smp_processor_id();
 	cpu = smp_processor_id();
 	if (hlock->waittime_stamp) {
 	if (hlock->waittime_stamp) {
-		now = sched_clock();
+		now = lockstat_clock();
 		waittime = now - hlock->waittime_stamp;
 		waittime = now - hlock->waittime_stamp;
 		hlock->holdtime_stamp = now;
 		hlock->holdtime_stamp = now;
 	}
 	}