@@ -529,7 +529,18 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
 }
 
 /**
- * dwarf_unwind_stack - recursively unwind the stack
+ * dwarf_free_frame - free the memory allocated for @frame
+ * @frame: the frame to free
+ */
+void dwarf_free_frame(struct dwarf_frame *frame)
+{
+	dwarf_frame_free_regs(frame);
+	mempool_free(frame, dwarf_frame_pool);
+}
+
+/**
+ * dwarf_unwind_stack - unwind the stack
+ *
  * @pc: address of the function to unwind
  * @prev: struct dwarf_frame of the previous stackframe on the callstack
  *
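The new helper is the release half of dwarf_unwind_stack(): callers walk the call stack one frame at a time and hand each frame they have finished with back to the mempool. The snippet below is a minimal caller sketch of that pairing, written to mirror the dwarf_unwinder_dump() hunks further down; the function name and loop are illustrative only, not part of this patch.

/* Illustrative only: walk the stack from 'pc', releasing each
 * frame with dwarf_free_frame() once the next one has been read. */
static void example_stack_walk(unsigned long pc)
{
	struct dwarf_frame *frame, *prev = NULL;

	while (1) {
		frame = dwarf_unwind_stack(pc, prev);

		/* The previous frame has served its purpose. */
		if (prev)
			dwarf_free_frame(prev);

		prev = frame;

		if (!frame || !frame->return_addr)
			break;

		pc = frame->return_addr;
	}

	/* Release whatever the final iteration left behind. */
	if (frame)
		dwarf_free_frame(frame);
}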
@@ -547,9 +558,9 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
 	unsigned long addr;
 
 	/*
-	 * If this is the first invocation of this recursive function we
-	 * need get the contents of a physical register to get the CFA
-	 * in order to begin the virtual unwinding of the stack.
+	 * If we're starting at the top of the stack we need to get the
+	 * contents of a physical register to get the CFA in order to
+	 * begin the virtual unwinding of the stack.
 	 *
 	 * NOTE: the return address is guaranteed to be setup by the
 	 * time this function makes its first function call.
@@ -571,9 +582,8 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
 	fde = dwarf_lookup_fde(pc);
 	if (!fde) {
 		/*
-		 * This is our normal exit path - the one that stops the
-		 * recursion. There's two reasons why we might exit
-		 * here,
+		 * This is our normal exit path. There are two reasons
+		 * why we might exit here,
 		 *
 		 * a) pc has no asscociated DWARF frame info and so
 		 * we don't know how to unwind this frame. This is
@@ -615,10 +625,10 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
 
 	} else {
 		/*
-		 * Again, this is the first invocation of this
-		 * recurisve function. We need to physically
-		 * read the contents of a register in order to
-		 * get the Canonical Frame Address for this
+		 * Again, we're starting from the top of the
+		 * stack. We need to physically read
+		 * the contents of a register in order to get
+		 * the Canonical Frame Address for this
		 * function.
		 */
		frame->cfa = dwarf_read_arch_reg(frame->cfa_register);
@@ -648,13 +658,12 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
 	return frame;
 
 bail:
-	dwarf_frame_free_regs(frame);
-	mempool_free(frame, dwarf_frame_pool);
+	dwarf_free_frame(frame);
 	return NULL;
 }
 
 static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
-			   unsigned char *end)
+			   unsigned char *end, struct module *mod)
 {
 	struct dwarf_cie *cie;
 	unsigned long flags;
@@ -750,6 +759,8 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
 	cie->initial_instructions = p;
 	cie->instructions_end = end;
 
+	cie->mod = mod;
+
 	/* Add to list */
 	spin_lock_irqsave(&dwarf_cie_lock, flags);
 	list_add_tail(&cie->link, &dwarf_cie_list);
@@ -760,7 +771,7 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
 
 static int dwarf_parse_fde(void *entry, u32 entry_type,
 			   void *start, unsigned long len,
-			   unsigned char *end)
+			   unsigned char *end, struct module *mod)
 {
 	struct dwarf_fde *fde;
 	struct dwarf_cie *cie;
@@ -809,6 +820,8 @@ static int dwarf_parse_fde(void *entry, u32 entry_type,
 	fde->instructions = p;
 	fde->end = end;
 
+	fde->mod = mod;
+
 	/* Add to list. */
 	spin_lock_irqsave(&dwarf_fde_lock, flags);
 	list_add_tail(&fde->link, &dwarf_fde_list);
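Both of these hunks store the owning module in the entry they have just parsed, which presupposes a companion change to the CIE and FDE definitions that is not shown in this section. The sketch below is an assumption about what that header change looks like; the structs are abridged to the members this diff actually touches, and only the new mod field is the point.

/* Assumed companion change in the DWARF header (abridged sketch):
 * each parsed entry remembers which module, if any, its .eh_frame
 * section came from, so dwarf_module_unload() can find and drop it
 * later. NULL means the entry belongs to the core kernel image. */
struct dwarf_cie {
	/* ... existing members ... */
	struct module *mod;	/* owning module, NULL for the kernel */
	struct list_head link;
};

struct dwarf_fde {
	/* ... existing members ... */
	struct module *mod;	/* owning module, NULL for the kernel */
	struct list_head link;
};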
@@ -832,10 +845,8 @@ static void dwarf_unwinder_dump(struct task_struct *task,
 	while (1) {
 		frame = dwarf_unwind_stack(return_addr, _frame);
 
-		if (_frame) {
-			dwarf_frame_free_regs(_frame);
-			mempool_free(_frame, dwarf_frame_pool);
-		}
+		if (_frame)
+			dwarf_free_frame(_frame);
 
 		_frame = frame;
@@ -845,6 +856,9 @@ static void dwarf_unwinder_dump(struct task_struct *task,
 		return_addr = frame->return_addr;
 		ops->address(data, return_addr, 1);
 	}
+
+	if (frame)
+		dwarf_free_frame(frame);
 }
 
 static struct unwinder dwarf_unwinder = {
@@ -874,15 +888,15 @@ static void dwarf_unwinder_cleanup(void)
 }
 
 /**
- * dwarf_unwinder_init - initialise the dwarf unwinder
+ * dwarf_parse_section - parse DWARF section
+ * @eh_frame_start: start address of the .eh_frame section
+ * @eh_frame_end: end address of the .eh_frame section
+ * @mod: the kernel module containing the .eh_frame section
  *
- * Build the data structures describing the .dwarf_frame section to
- * make it easier to lookup CIE and FDE entries. Because the
- * .eh_frame section is packed as tightly as possible it is not
- * easy to lookup the FDE for a given PC, so we build a list of FDE
- * and CIE entries that make it easier.
+ * Parse the information in a .eh_frame section.
  */
-static int __init dwarf_unwinder_init(void)
+int dwarf_parse_section(char *eh_frame_start, char *eh_frame_end,
+			struct module *mod)
 {
 	u32 entry_type;
 	void *p, *entry;
@@ -890,32 +904,12 @@ static int __init dwarf_unwinder_init(void)
 	unsigned long len;
 	unsigned int c_entries, f_entries;
 	unsigned char *end;
-	INIT_LIST_HEAD(&dwarf_cie_list);
-	INIT_LIST_HEAD(&dwarf_fde_list);
 
 	c_entries = 0;
 	f_entries = 0;
-	entry = &__start_eh_frame;
-
-	dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
-			sizeof(struct dwarf_frame), 0,
-			SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
-
-	dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
-			sizeof(struct dwarf_reg), 0,
-			SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
-
-	dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ,
-					  mempool_alloc_slab,
-					  mempool_free_slab,
-					  dwarf_frame_cachep);
+	entry = eh_frame_start;
 
-	dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ,
-					mempool_alloc_slab,
-					mempool_free_slab,
-					dwarf_reg_cachep);
-
-	while ((char *)entry < __stop_eh_frame) {
+	while ((char *)entry < eh_frame_end) {
 		p = entry;
 
 		count = dwarf_entry_len(p, &len);
@@ -927,6 +921,7 @@ static int __init dwarf_unwinder_init(void)
 		 * entry and move to the next one because 'len'
 		 * tells us where our next entry is.
 		 */
+			err = -EINVAL;
 			goto out;
 		} else
 			p += count;
@@ -938,13 +933,14 @@ static int __init dwarf_unwinder_init(void)
 		p += 4;
 
 		if (entry_type == DW_EH_FRAME_CIE) {
-			err = dwarf_parse_cie(entry, p, len, end);
+			err = dwarf_parse_cie(entry, p, len, end, mod);
 			if (err < 0)
 				goto out;
 			else
 				c_entries++;
 		} else {
-			err = dwarf_parse_fde(entry, entry_type, p, len, end);
+			err = dwarf_parse_fde(entry, entry_type, p, len,
+					      end, mod);
 			if (err < 0)
 				goto out;
 			else
@@ -957,6 +953,95 @@ static int __init dwarf_unwinder_init(void)
 	printk(KERN_INFO "DWARF unwinder initialised: read %u CIEs, %u FDEs\n",
 	       c_entries, f_entries);
 
+	return 0;
+
+out:
+	return err;
+}
+
+/**
+ * dwarf_module_unload - remove FDE/CIEs associated with @mod
+ * @mod: the module that is being unloaded
+ *
+ * Remove any FDEs and CIEs from the global lists that came from
+ * @mod's .eh_frame section because @mod is being unloaded.
+ */
+void dwarf_module_unload(struct module *mod)
+{
+	struct dwarf_fde *fde;
+	struct dwarf_cie *cie;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dwarf_cie_lock, flags);
+
+again_cie:
+	list_for_each_entry(cie, &dwarf_cie_list, link) {
+		if (cie->mod == mod)
+			break;
+	}
+
+	if (&cie->link != &dwarf_cie_list) {
+		list_del(&cie->link);
+		kfree(cie);
+		goto again_cie;
+	}
+
+	spin_unlock_irqrestore(&dwarf_cie_lock, flags);
+
+	spin_lock_irqsave(&dwarf_fde_lock, flags);
+
+again_fde:
+	list_for_each_entry(fde, &dwarf_fde_list, link) {
+		if (fde->mod == mod)
+			break;
+	}
+
+	if (&fde->link != &dwarf_fde_list) {
+		list_del(&fde->link);
+		kfree(fde);
+		goto again_fde;
+	}
+
+	spin_unlock_irqrestore(&dwarf_fde_lock, flags);
+}
+
+/**
+ * dwarf_unwinder_init - initialise the dwarf unwinder
+ *
+ * Build the data structures describing the .dwarf_frame section to
+ * make it easier to lookup CIE and FDE entries. Because the
+ * .eh_frame section is packed as tightly as possible it is not
+ * easy to lookup the FDE for a given PC, so we build a list of FDE
+ * and CIE entries that make it easier.
+ */
+static int __init dwarf_unwinder_init(void)
+{
+	int err;
+	INIT_LIST_HEAD(&dwarf_cie_list);
+	INIT_LIST_HEAD(&dwarf_fde_list);
+
+	dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
+			sizeof(struct dwarf_frame), 0,
+			SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
+
+	dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
+			sizeof(struct dwarf_reg), 0,
+			SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
+
+	dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ,
+					  mempool_alloc_slab,
+					  mempool_free_slab,
+					  dwarf_frame_cachep);
+
+	dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ,
+					mempool_alloc_slab,
+					mempool_free_slab,
+					dwarf_reg_cachep);
+
+	err = dwarf_parse_section(__start_eh_frame, __stop_eh_frame, NULL);
+	if (err)
+		goto out;
+
 	err = unwinder_register(&dwarf_unwinder);
 	if (err)
 		goto out;
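What this section does not show is who calls the two new entry points for modules: dwarf_parse_section() needs to run when a module carrying an .eh_frame section is loaded, and dwarf_module_unload() when it is removed. The sketch below is one plausible wiring through the architecture's module hooks; the section scan and its placement are assumptions for illustration, not a quote of the companion change.

/* Illustrative wiring only (not part of this diff): feed a freshly
 * loaded module's .eh_frame section to the DWARF unwinder, and drop
 * its CIEs/FDEs again when the module is unloaded. */
int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const char *secstrings;
	unsigned int i;

	secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (i = 1; i < hdr->e_shnum; i++) {
		/* Look for the module's own .eh_frame section. */
		if (!strcmp(secstrings + sechdrs[i].sh_name, ".eh_frame")) {
			char *start = (char *)sechdrs[i].sh_addr;
			char *end = start + sechdrs[i].sh_size;

			return dwarf_parse_section(start, end, me);
		}
	}

	return 0;
}

void module_arch_cleanup(struct module *mod)
{
	dwarf_module_unload(mod);
}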