@@ -655,7 +655,7 @@ bail:
 }
 
 static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
-			   unsigned char *end)
+			   unsigned char *end, struct module *mod)
 {
 	struct dwarf_cie *cie;
 	unsigned long flags;
@@ -751,6 +751,8 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
 	cie->initial_instructions = p;
 	cie->instructions_end = end;
 
+	cie->mod = mod;
+
 	/* Add to list */
 	spin_lock_irqsave(&dwarf_cie_lock, flags);
 	list_add_tail(&cie->link, &dwarf_cie_list);
@@ -761,7 +763,7 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
 
 static int dwarf_parse_fde(void *entry, u32 entry_type,
 			   void *start, unsigned long len,
-			   unsigned char *end)
+			   unsigned char *end, struct module *mod)
 {
 	struct dwarf_fde *fde;
 	struct dwarf_cie *cie;
@@ -810,6 +812,8 @@ static int dwarf_parse_fde(void *entry, u32 entry_type,
 	fde->instructions = p;
 	fde->end = end;
 
+	fde->mod = mod;
+
 	/* Add to list. */
 	spin_lock_irqsave(&dwarf_fde_lock, flags);
 	list_add_tail(&fde->link, &dwarf_fde_list);
@@ -875,15 +879,15 @@ static void dwarf_unwinder_cleanup(void)
 }
 
 /**
- * dwarf_unwinder_init - initialise the dwarf unwinder
+ * dwarf_parse_section - parse DWARF section
+ * @eh_frame_start: start address of the .eh_frame section
+ * @eh_frame_end: end address of the .eh_frame section
+ * @mod: the kernel module containing the .eh_frame section
  *
- * Build the data structures describing the .dwarf_frame section to
- * make it easier to lookup CIE and FDE entries. Because the
- * .eh_frame section is packed as tightly as possible it is not
- * easy to lookup the FDE for a given PC, so we build a list of FDE
- * and CIE entries that make it easier.
+ * Parse the information in a .eh_frame section.
  */
-static int __init dwarf_unwinder_init(void)
+int dwarf_parse_section(char *eh_frame_start, char *eh_frame_end,
+			struct module *mod)
 {
 	u32 entry_type;
 	void *p, *entry;
@@ -891,29 +895,12 @@ static int __init dwarf_unwinder_init(void)
 	unsigned long len;
 	unsigned int c_entries, f_entries;
 	unsigned char *end;
-	INIT_LIST_HEAD(&dwarf_cie_list);
-	INIT_LIST_HEAD(&dwarf_fde_list);
 
 	c_entries = 0;
 	f_entries = 0;
-	entry = &__start_eh_frame;
-
-	dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
-			sizeof(struct dwarf_frame), 0, SLAB_PANIC, NULL);
-	dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
-			sizeof(struct dwarf_reg), 0, SLAB_PANIC, NULL);
-
-	dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ,
-					  mempool_alloc_slab,
-					  mempool_free_slab,
-					  dwarf_frame_cachep);
+	entry = eh_frame_start;
 
-	dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ,
-					mempool_alloc_slab,
-					mempool_free_slab,
-					dwarf_reg_cachep);
-
-	while ((char *)entry < __stop_eh_frame) {
+	while ((char *)entry < eh_frame_end) {
 		p = entry;
 
 		count = dwarf_entry_len(p, &len);
@@ -925,6 +912,7 @@ static int __init dwarf_unwinder_init(void)
 			 * entry and move to the next one because 'len'
 			 * tells us where our next entry is.
 			 */
+			err = -EINVAL;
 			goto out;
 		} else
 			p += count;
@@ -936,13 +924,14 @@ static int __init dwarf_unwinder_init(void)
 		p += 4;
 
 		if (entry_type == DW_EH_FRAME_CIE) {
-			err = dwarf_parse_cie(entry, p, len, end);
+			err = dwarf_parse_cie(entry, p, len, end, mod);
 			if (err < 0)
 				goto out;
 			else
 				c_entries++;
 		} else {
-			err = dwarf_parse_fde(entry, entry_type, p, len, end);
+			err = dwarf_parse_fde(entry, entry_type, p, len,
+					      end, mod);
 			if (err < 0)
 				goto out;
 			else
@@ -955,6 +944,92 @@ static int __init dwarf_unwinder_init(void)
 	printk(KERN_INFO "DWARF unwinder initialised: read %u CIEs, %u FDEs\n",
 	       c_entries, f_entries);
 
+	return 0;
+
+out:
+	return err;
+}
+
+/**
+ * dwarf_module_unload - remove FDE/CIEs associated with @mod
+ * @mod: the module that is being unloaded
+ *
+ * Remove any FDEs and CIEs from the global lists that came from
+ * @mod's .eh_frame section because @mod is being unloaded.
+ */
+void dwarf_module_unload(struct module *mod)
+{
+	struct dwarf_fde *fde;
+	struct dwarf_cie *cie;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dwarf_cie_lock, flags);
+
+again_cie:
+	list_for_each_entry(cie, &dwarf_cie_list, link) {
+		if (cie->mod == mod)
+			break;
+	}
+
+	if (&cie->link != &dwarf_cie_list) {
+		list_del(&cie->link);
+		kfree(cie);
+		goto again_cie;
+	}
+
+	spin_unlock_irqrestore(&dwarf_cie_lock, flags);
+
+	spin_lock_irqsave(&dwarf_fde_lock, flags);
+
+again_fde:
+	list_for_each_entry(fde, &dwarf_fde_list, link) {
+		if (fde->mod == mod)
+			break;
+	}
+
+	if (&fde->link != &dwarf_fde_list) {
+		list_del(&fde->link);
+		kfree(fde);
+		goto again_fde;
+	}
+
+	spin_unlock_irqrestore(&dwarf_fde_lock, flags);
+}
+
+/**
+ * dwarf_unwinder_init - initialise the dwarf unwinder
+ *
+ * Build the data structures describing the .dwarf_frame section to
+ * make it easier to lookup CIE and FDE entries. Because the
+ * .eh_frame section is packed as tightly as possible it is not
+ * easy to lookup the FDE for a given PC, so we build a list of FDE
+ * and CIE entries that make it easier.
+ */
+static int __init dwarf_unwinder_init(void)
+{
+	int err;
+	INIT_LIST_HEAD(&dwarf_cie_list);
+	INIT_LIST_HEAD(&dwarf_fde_list);
+
+	dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
+			sizeof(struct dwarf_frame), 0, SLAB_PANIC, NULL);
+	dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
+			sizeof(struct dwarf_reg), 0, SLAB_PANIC, NULL);
+
+	dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ,
+					  mempool_alloc_slab,
+					  mempool_free_slab,
+					  dwarf_frame_cachep);
+
+	dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ,
+					mempool_alloc_slab,
+					mempool_free_slab,
+					dwarf_reg_cachep);
+
+	err = dwarf_parse_section(__start_eh_frame, __stop_eh_frame, NULL);
+	if (err)
+		goto out;
+
 	err = unwinder_register(&dwarf_unwinder);
 	if (err)
 		goto out;
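
A note on how these new entry points are intended to be consumed: dwarf_parse_section() runs once per .eh_frame region (the kernel's own at boot, a module's at load time), and because every CIE/FDE is tagged with its owning module at parse time, dwarf_module_unload() only has to sweep the two global lists for matching ->mod pointers at unload. The sketch below shows one plausible wiring through the standard module_finalize()/module_arch_cleanup() arch hooks; it is illustrative and not part of this patch, and it assumes the two entry points are declared in <asm/dwarf.h>.

/*
 * Hypothetical wiring, not part of this patch: feeding a module's
 * .eh_frame section to the DWARF unwinder from the arch module
 * loader hooks. The section scan below is a sketch.
 */
#include <linux/moduleloader.h>
#include <linux/module.h>
#include <linux/elf.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <asm/dwarf.h>

int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const char *secstrings = (const void *)hdr +
				 sechdrs[hdr->e_shstrndx].sh_offset;
	unsigned int i;

	/* Look for an allocated .eh_frame section in the module image. */
	for (i = 1; i < hdr->e_shnum; i++) {
		if ((sechdrs[i].sh_flags & SHF_ALLOC) &&
		    !strcmp(secstrings + sechdrs[i].sh_name, ".eh_frame")) {
			char *start = (char *)sechdrs[i].sh_addr;
			char *end = start + sechdrs[i].sh_size;
			int err;

			/* Tag every CIE/FDE we parse with this module. */
			err = dwarf_parse_section(start, end, me);
			if (err)
				printk(KERN_WARNING
				       "%s: failed to parse DWARF info\n",
				       me->name);
			break;
		}
	}

	return 0;
}

void module_arch_cleanup(struct module *mod)
{
	/* Drop the CIEs/FDEs that were tagged with this module at load. */
	dwarf_module_unload(mod);
}

Failing to parse a module's DWARF info is deliberately non-fatal here: the module still loads, it just cannot be unwound through, which mirrors how the unwinder degrades elsewhere.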