@@ -255,3 +255,40 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 	}
 }
 #endif
+
+#ifdef CONFIG_PROC_KCORE
+static struct kcore_list kcore_vmem;
+
+static int __init setup_kcore(void)
+{
+	int i;
+
+	for (i = 0; i < lmb.memory.cnt; i++) {
+		unsigned long base;
+		unsigned long size;
+		struct kcore_list *kcore_mem;
+
+		base = lmb.memory.region[i].base;
+		size = lmb.memory.region[i].size;
+
+		kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
+		if (!kcore_mem)
+			panic("%s: kmalloc failed\n", __FUNCTION__);
+
+		/* must stay under 32 bits */
+		if ( 0xfffffffful - (unsigned long)__va(base) < size) {
+			size = 0xfffffffful - (unsigned long)(__va(base));
+			printk(KERN_DEBUG "setup_kcore: restrict size=%lx\n",
+				size);
+		}
+
+		kclist_add(kcore_mem, __va(base), size);
+	}
+
+	kclist_add(&kcore_vmem, (void *)VMALLOC_START,
+		VMALLOC_END-VMALLOC_START);
+
+	return 0;
+}
+module_init(setup_kcore);
+#endif