@@ -28,6 +28,7 @@
 #include <asm/apic.h>
 #include <asm/stacktrace.h>
 #include <asm/nmi.h>
+#include <asm/compat.h>
 
 static u64 perf_event_mask __read_mostly;
 
@@ -158,7 +159,7 @@ struct x86_pmu {
					 struct perf_event *event);
	struct event_constraint *event_constraints;
 
-	void		(*cpu_prepare)(int cpu);
+	int		(*cpu_prepare)(int cpu);
	void		(*cpu_starting)(int cpu);
	void		(*cpu_dying)(int cpu);
	void		(*cpu_dead)(int cpu);
@@ -1333,11 +1334,12 @@ static int __cpuinit
 x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 {
	unsigned int cpu = (long)hcpu;
+	int ret = NOTIFY_OK;
 
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		if (x86_pmu.cpu_prepare)
-			x86_pmu.cpu_prepare(cpu);
+			ret = x86_pmu.cpu_prepare(cpu);
		break;
 
	case CPU_STARTING:
@@ -1350,6 +1352,7 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
			x86_pmu.cpu_dying(cpu);
		break;
 
+	case CPU_UP_CANCELED:
	case CPU_DEAD:
		if (x86_pmu.cpu_dead)
			x86_pmu.cpu_dead(cpu);
@@ -1359,7 +1362,7 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
		break;
	}
 
-	return NOTIFY_OK;
+	return ret;
 }
 
 static void __init pmu_check_apic(void)
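
With cpu_prepare returning an int, a PMU back-end can veto CPU bring-up from its prepare hook (for example when a per-CPU allocation fails), and x86_pmu_notifier() above now simply forwards that value. A minimal sketch of such a hook under this contract; example_nb, example_pmu_nb and example_pmu_cpu_prepare are hypothetical names for illustration, not part of this patch:

struct example_nb { int refcnt; };				/* hypothetical per-CPU state */
static DEFINE_PER_CPU(struct example_nb *, example_pmu_nb);	/* hypothetical */

static int example_pmu_cpu_prepare(int cpu)
{
	struct example_nb *nb;

	/* CPU_UP_PREPARE runs in process context, so a sleeping allocation is fine. */
	nb = kzalloc_node(sizeof(*nb), GFP_KERNEL, cpu_to_node(cpu));
	if (!nb)
		return NOTIFY_BAD;	/* forwarded by x86_pmu_notifier(); bring-up is cancelled */

	per_cpu(example_pmu_nb, cpu) = nb;
	return NOTIFY_OK;
}

The new CPU_UP_CANCELED case above falls through to CPU_DEAD, so state set up by a successful cpu_prepare is still torn down via cpu_dead when a later bring-up step fails.
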
@@ -1628,14 +1631,42 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
	return len;
 }
 
-static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
+#ifdef CONFIG_COMPAT
+static inline int
+perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
 {
-	unsigned long bytes;
+	/* 32-bit process in 64-bit kernel. */
+	struct stack_frame_ia32 frame;
+	const void __user *fp;
 
-	bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));
+	if (!test_thread_flag(TIF_IA32))
+		return 0;
+
+	fp = compat_ptr(regs->bp);
+	while (entry->nr < PERF_MAX_STACK_DEPTH) {
+		unsigned long bytes;
+		frame.next_frame     = 0;
+		frame.return_address = 0;
+
+		bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
+		if (bytes != sizeof(frame))
+			break;
+
+		if (fp < compat_ptr(regs->sp))
+			break;
 
-	return bytes == sizeof(*frame);
+		callchain_store(entry, frame.return_address);
+		fp = compat_ptr(frame.next_frame);
+	}
+	return 1;
+}
+#else
+static inline int
+perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
+{
+	return 0;
+}
+#endif
 
 static void
 perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
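
perf_callchain_user32() walks the frame-pointer chain of a compat (32-bit) task: each saved frame on the user stack is read as a pair of 32-bit words, and compat_ptr() turns the 32-bit value back into a user pointer in the low 4 GB. The frame layout assumed here, matching how the loop above uses stack_frame_ia32, is:

/* Assumed layout of one saved ia32 stack frame, as read by the loop above. */
struct stack_frame_ia32 {
	u32 next_frame;		/* caller's saved frame pointer (32-bit) */
	u32 return_address;	/* return address pushed by the call instruction */
};

The TIF_IA32 test makes the helper a no-op for native 64-bit tasks, so perf_callchain_user() falls through to its regular walk, and the fp < compat_ptr(regs->sp) check stops the walk if a bogus frame pointer points below the current stack pointer. As with any frame-pointer unwind, this only yields useful chains when the 32-bit binary is built with frame pointers kept.
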
@@ -1651,11 +1682,16 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
	callchain_store(entry, PERF_CONTEXT_USER);
	callchain_store(entry, regs->ip);
 
+	if (perf_callchain_user32(regs, entry))
+		return;
+
	while (entry->nr < PERF_MAX_STACK_DEPTH) {
+		unsigned long bytes;
		frame.next_frame = NULL;
		frame.return_address = 0;
 
-		if (!copy_stack_frame(fp, &frame))
+		bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
+		if (bytes != sizeof(frame))
			break;
 
		if ((unsigned long)fp < regs->sp)
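
The native 64-bit walk in perf_callchain_user() follows the same pattern with full-width fields, now calling copy_from_user_nmi() directly (the helper exists because the walk runs from NMI context, where an ordinary copy_from_user() cannot safely be used). Judging from how the loop assigns next_frame and return_address, the stack_frame type it reads is assumed to look roughly like:

/* Assumed 64-bit counterpart of stack_frame_ia32, as used by perf_callchain_user(). */
struct stack_frame {
	const void __user	*next_frame;	/* saved frame pointer of the caller */
	unsigned long		return_address;	/* return address for this frame */
};
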
@@ -1702,7 +1738,6 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
	return entry;
 }
 
-#ifdef CONFIG_EVENT_TRACING
 void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
 {
	regs->ip = ip;
@@ -1714,4 +1749,3 @@ void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
	regs->cs = __KERNEL_CS;
	local_save_flags(regs->flags);
 }
-#endif