@@ -21,6 +21,7 @@
 #include <linux/seq_file.h>
 #include <linux/device.h>
 #include <linux/pfn.h>
+#include <linux/mm.h>
 #include <asm/io.h>
 
 
@@ -50,6 +51,14 @@ struct resource_constraint {
 
 static DEFINE_RWLOCK(resource_lock);
 
+/*
+ * For memory hotplug, there is no way to free resource entries allocated
+ * by boot mem after the system is up. So for reusing the resource entry
+ * we need to remember the resource.
+ */
+static struct resource *bootmem_resource_free;
+static DEFINE_SPINLOCK(bootmem_resource_lock);
+
 static void *r_next(struct seq_file *m, void *v, loff_t *pos)
 {
 	struct resource *p = v;
@@ -151,6 +160,40 @@ __initcall(ioresources_init);
 
 #endif /* CONFIG_PROC_FS */
 
+static void free_resource(struct resource *res)
+{
+	if (!res)
+		return;
+
+	if (!PageSlab(virt_to_head_page(res))) {
+		spin_lock(&bootmem_resource_lock);
+		res->sibling = bootmem_resource_free;
+		bootmem_resource_free = res;
+		spin_unlock(&bootmem_resource_lock);
+	} else {
+		kfree(res);
+	}
+}
+
+static struct resource *alloc_resource(gfp_t flags)
+{
+	struct resource *res = NULL;
+
+	spin_lock(&bootmem_resource_lock);
+	if (bootmem_resource_free) {
+		res = bootmem_resource_free;
+		bootmem_resource_free = res->sibling;
+	}
+	spin_unlock(&bootmem_resource_lock);
+
+	if (res)
+		memset(res, 0, sizeof(struct resource));
+	else
+		res = kzalloc(sizeof(struct resource), flags);
+
+	return res;
+}
+
 /* Return the conflict entry if you can't request it */
 static struct resource * __request_resource(struct resource *root, struct resource *new)
 {
@@ -771,7 +814,7 @@ static void __init __reserve_region_with_split(struct resource *root,
 {
 	struct resource *parent = root;
 	struct resource *conflict;
-	struct resource *res = kzalloc(sizeof(*res), GFP_ATOMIC);
+	struct resource *res = alloc_resource(GFP_ATOMIC);
 	struct resource *next_res = NULL;
 
 	if (!res)
@@ -796,7 +839,7 @@ static void __init __reserve_region_with_split(struct resource *root,
 		/* conflict covered whole area */
 		if (conflict->start <= res->start &&
 				conflict->end >= res->end) {
-			kfree(res);
+			free_resource(res);
 			WARN_ON(next_res);
 			break;
 		}
@@ -806,10 +849,9 @@ static void __init __reserve_region_with_split(struct resource *root,
 			end = res->end;
 			res->end = conflict->start - 1;
 			if (conflict->end < end) {
-				next_res = kzalloc(sizeof(*next_res),
-						GFP_ATOMIC);
+				next_res = alloc_resource(GFP_ATOMIC);
 				if (!next_res) {
-					kfree(res);
+					free_resource(res);
 					break;
 				}
 				next_res->name = name;
@@ -899,7 +941,7 @@ struct resource * __request_region(struct resource *parent,
 				   const char *name, int flags)
 {
 	DECLARE_WAITQUEUE(wait, current);
-	struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);
+	struct resource *res = alloc_resource(GFP_KERNEL);
 
 	if (!res)
 		return NULL;
@@ -933,7 +975,7 @@ struct resource * __request_region(struct resource *parent,
 			continue;
 		}
 		/* Uhhuh, that didn't work out.. */
-		kfree(res);
+		free_resource(res);
 		res = NULL;
 		break;
 	}
@@ -967,7 +1009,7 @@ int __check_region(struct resource *parent, resource_size_t start,
 		return -EBUSY;
 
 	release_resource(res);
-	kfree(res);
+	free_resource(res);
 	return 0;
 }
 EXPORT_SYMBOL(__check_region);
@@ -1007,7 +1049,7 @@ void __release_region(struct resource *parent, resource_size_t start,
 			write_unlock(&resource_lock);
 			if (res->flags & IORESOURCE_MUXED)
 				wake_up(&muxed_resource_wait);
-			kfree(res);
+			free_resource(res);
 			return;
 		}
 		p = &res->sibling;
@@ -1055,8 +1097,8 @@ int release_mem_region_adjustable(struct resource *parent,
 	if ((start < parent->start) || (end > parent->end))
 		return ret;
 
-	/* The kzalloc() result gets checked later */
-	new_res = kzalloc(sizeof(struct resource), GFP_KERNEL);
+	/* The alloc_resource() result gets checked later */
+	new_res = alloc_resource(GFP_KERNEL);
 
 	p = &parent->child;
 	write_lock(&resource_lock);
@@ -1083,7 +1125,7 @@ int release_mem_region_adjustable(struct resource *parent,
 		if (res->start == start && res->end == end) {
 			/* free the whole entry */
 			*p = res->sibling;
-			kfree(res);
+			free_resource(res);
 			ret = 0;
 		} else if (res->start == start && res->end != end) {
 			/* adjust the start */
@@ -1119,7 +1161,7 @@ int release_mem_region_adjustable(struct resource *parent,
 	}
 
 	write_unlock(&resource_lock);
-	kfree(new_res);
+	free_resource(new_res);
 	return ret;
 }
 #endif /* CONFIG_MEMORY_HOTREMOVE */
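
The pattern this patch introduces is an intrusive free list: resource entries that came from boot memory can never be handed back to the slab allocator, so free_resource() recognizes them via PageSlab(virt_to_head_page()) and threads them onto a singly-linked list through their sibling pointer, while alloc_resource() recycles entries from that list before falling back to kzalloc(). The following is a minimal user-space sketch of the same idea, not kernel code: the names res_alloc/res_free/from_pool are hypothetical, a stored flag replaces the kernel's address-based PageSlab() test, and a pthread mutex stands in for the spinlock.

/* bootmem_freelist_demo.c: user-space sketch of the patch's free-list idea. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct res {
	const char *name;
	struct res *sibling;	/* doubles as the free-list link, as in the patch */
	int from_pool;		/* hypothetical stand-in for !PageSlab() */
};

static struct res *free_list;	/* plays the role of bootmem_resource_free */
static pthread_mutex_t free_lock = PTHREAD_MUTEX_INITIALIZER;

/* Counterpart of free_resource(): pool entries are recycled, heap entries freed. */
static void res_free(struct res *r)
{
	if (!r)
		return;
	if (r->from_pool) {
		pthread_mutex_lock(&free_lock);
		r->sibling = free_list;
		free_list = r;
		pthread_mutex_unlock(&free_lock);
	} else {
		free(r);
	}
}

/* Counterpart of alloc_resource(): prefer a recycled entry, else heap-allocate. */
static struct res *res_alloc(void)
{
	struct res *r = NULL;

	pthread_mutex_lock(&free_lock);
	if (free_list) {
		r = free_list;
		free_list = r->sibling;
	}
	pthread_mutex_unlock(&free_lock);

	if (r) {
		memset(r, 0, sizeof(*r));
		r->from_pool = 1;	/* only pool entries live on the list */
	} else {
		r = calloc(1, sizeof(*r));
	}
	return r;
}

int main(void)
{
	static struct res pool[2];	/* stands in for boot-time allocations */
	int i;

	for (i = 0; i < 2; i++) {
		pool[i].from_pool = 1;
		res_free(&pool[i]);	/* seed the free list */
	}

	struct res *a = res_alloc();	/* recycled from the pool */
	struct res *b = res_alloc();	/* recycled from the pool */
	struct res *c = res_alloc();	/* pool empty: falls back to calloc() */
	printf("a:%d b:%d c:%d\n", a->from_pool, b->from_pool, c->from_pool);

	res_free(a);
	res_free(b);
	res_free(c);	/* heap entry, really free()d */
	return 0;
}

One design point the sketch makes visible: the kernel decides pool membership from the entry's address alone, which is why alloc_resource() can safely memset() a recycled entry to zero; the sketch stores membership in the entry itself, so it has to restore the flag after clearing.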