@@ -58,7 +58,7 @@
 #include <linux/list.h>
 #include <linux/stacktrace.h>
 
-static DEFINE_SPINLOCK(latency_lock);
+static DEFINE_RAW_SPINLOCK(latency_lock);
 
 #define MAXLR 128
 static struct latency_record latency_record[MAXLR];
@@ -72,19 +72,19 @@ void clear_all_latency_tracing(struct task_struct *p)
 	if (!latencytop_enabled)
 		return;
 
-	spin_lock_irqsave(&latency_lock, flags);
+	raw_spin_lock_irqsave(&latency_lock, flags);
 	memset(&p->latency_record, 0, sizeof(p->latency_record));
 	p->latency_record_count = 0;
-	spin_unlock_irqrestore(&latency_lock, flags);
+	raw_spin_unlock_irqrestore(&latency_lock, flags);
 }
 
 static void clear_global_latency_tracing(void)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&latency_lock, flags);
+	raw_spin_lock_irqsave(&latency_lock, flags);
 	memset(&latency_record, 0, sizeof(latency_record));
-	spin_unlock_irqrestore(&latency_lock, flags);
+	raw_spin_unlock_irqrestore(&latency_lock, flags);
 }
 
 static void __sched
@@ -190,7 +190,7 @@ __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
 	lat.max = usecs;
 	store_stacktrace(tsk, &lat);
 
-	spin_lock_irqsave(&latency_lock, flags);
+	raw_spin_lock_irqsave(&latency_lock, flags);
 
 	account_global_scheduler_latency(tsk, &lat);
 
@@ -231,7 +231,7 @@ __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
 	memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record));
 
 out_unlock:
-	spin_unlock_irqrestore(&latency_lock, flags);
+	raw_spin_unlock_irqrestore(&latency_lock, flags);
 }
 
 static int lstats_show(struct seq_file *m, void *v)
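
Background on why the raw variant is needed: on PREEMPT_RT kernels a plain spinlock_t is substituted by a sleeping lock, while raw_spinlock_t always remains a true, non-sleeping spinlock. latency_lock is taken from __account_scheduler_latency(), which runs inside the scheduler where sleeping is forbidden, so the lock has to be raw. A minimal sketch of the resulting locking pattern, standing alone rather than taken from this file (example_lock and example_account_event() are hypothetical names):

#include <linux/spinlock.h>

/* Hypothetical counter guarded by a raw spinlock, for illustration only. */
static DEFINE_RAW_SPINLOCK(example_lock);
static unsigned long example_events;

/*
 * Usable from any context, including hard-IRQ and scheduler paths:
 * raw_spin_lock_irqsave() disables local interrupts and never sleeps,
 * even on PREEMPT_RT where a spinlock_t critical section may block.
 */
static void example_account_event(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&example_lock, flags);
	example_events++;
	raw_spin_unlock_irqrestore(&example_lock, flags);
}

The conversion in the patch above is purely mechanical: every spin_lock_irqsave()/spin_unlock_irqrestore() pair on latency_lock becomes its raw_ counterpart, with no change to the critical sections themselves.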