|
@@ -11,10 +11,12 @@
|
|
|
#include <linux/oprofile.h>
|
|
|
#include <linux/sched.h>
|
|
|
#include <linux/mm.h>
|
|
|
+#include <linux/compat.h>
|
|
|
+#include <linux/highmem.h>
|
|
|
+
|
|
|
#include <asm/ptrace.h>
|
|
|
#include <asm/uaccess.h>
|
|
|
#include <asm/stacktrace.h>
|
|
|
-#include <linux/compat.h>
|
|
|
|
|
|
static int backtrace_stack(void *data, char *name)
|
|
|
{
|
|
@@ -36,17 +38,53 @@ static struct stacktrace_ops backtrace_ops = {
|
|
|
.walk_stack = print_context_stack,
|
|
|
};
|
|
|
|
|
|
+/* from arch/x86/kernel/cpu/perf_event.c: */
|
|
|
+
|
|
|
+/*
|
|
|
+ * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
|
|
|
+ */
|
|
|
+static unsigned long
|
|
|
+copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
|
|
|
+{
|
|
|
+ unsigned long offset, addr = (unsigned long)from;
|
|
|
+ unsigned long size, len = 0;
|
|
|
+ struct page *page;
|
|
|
+ void *map;
|
|
|
+ int ret;
|
|
|
+
|
|
|
+ do {
|
|
|
+ ret = __get_user_pages_fast(addr, 1, 0, &page);
|
|
|
+ if (!ret)
|
|
|
+ break;
|
|
|
+
|
|
|
+ offset = addr & (PAGE_SIZE - 1);
|
|
|
+ size = min(PAGE_SIZE - offset, n - len);
|
|
|
+
|
|
|
+ map = kmap_atomic(page);
|
|
|
+ memcpy(to, map+offset, size);
|
|
|
+ kunmap_atomic(map);
|
|
|
+ put_page(page);
|
|
|
+
|
|
|
+ len += size;
|
|
|
+ to += size;
|
|
|
+ addr += size;
|
|
|
+
|
|
|
+ } while (len < n);
|
|
|
+
|
|
|
+ return len;
|
|
|
+}
|
|
|
+
|
|
|
#ifdef CONFIG_COMPAT
|
|
|
static struct stack_frame_ia32 *
|
|
|
dump_user_backtrace_32(struct stack_frame_ia32 *head)
|
|
|
{
|
|
|
+ /* Also check accessibility of one struct stack_frame_ia32 beyond: */
|
|
|
struct stack_frame_ia32 bufhead[2];
|
|
|
struct stack_frame_ia32 *fp;
|
|
|
+ unsigned long bytes;
|
|
|
|
|
|
- /* Also check accessibility of one struct frame_head beyond */
|
|
|
- if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
|
|
|
- return NULL;
|
|
|
- if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
|
|
|
+ bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
|
|
|
+ if (bytes != sizeof(bufhead))
|
|
|
return NULL;
|
|
|
|
|
|
fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
|
|
@@ -87,12 +125,12 @@ x86_backtrace_32(struct pt_regs * const regs, unsigned int depth)
|
|
|
|
|
|
static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
|
|
|
{
|
|
|
+ /* Also check accessibility of one struct stack_frame beyond: */
|
|
|
struct stack_frame bufhead[2];
|
|
|
+ unsigned long bytes;
|
|
|
|
|
|
- /* Also check accessibility of one struct stack_frame beyond */
|
|
|
- if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
|
|
|
- return NULL;
|
|
|
- if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
|
|
|
+ bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
|
|
|
+ if (bytes != sizeof(bufhead))
|
|
|
return NULL;
|
|
|
|
|
|
oprofile_add_trace(bufhead[0].return_address);
|