@@ -19,31 +19,42 @@
 
 struct lmb lmb;
 
+static int lmb_debug;
+
+static int __init early_lmb(char *p)
+{
+        if (p && strstr(p, "debug"))
+                lmb_debug = 1;
+        return 0;
+}
+early_param("lmb", early_lmb);
+
 void lmb_dump_all(void)
 {
-#ifdef DEBUG
         unsigned long i;
 
-        pr_debug("lmb_dump_all:\n");
-        pr_debug("    memory.cnt              = 0x%lx\n", lmb.memory.cnt);
-        pr_debug("    memory.size             = 0x%llx\n",
+        if (!lmb_debug)
+                return;
+
+        pr_info("lmb_dump_all:\n");
+        pr_info("    memory.cnt              = 0x%lx\n", lmb.memory.cnt);
+        pr_info("    memory.size             = 0x%llx\n",
                 (unsigned long long)lmb.memory.size);
         for (i=0; i < lmb.memory.cnt ;i++) {
-                pr_debug("    memory.region[0x%x].base = 0x%llx\n",
+                pr_info("    memory.region[0x%lx].base = 0x%llx\n",
                         i, (unsigned long long)lmb.memory.region[i].base);
-                pr_debug("                 .size = 0x%llx\n",
+                pr_info("                 .size = 0x%llx\n",
                         (unsigned long long)lmb.memory.region[i].size);
         }
 
-        pr_debug("    reserved.cnt            = 0x%lx\n", lmb.reserved.cnt);
-        pr_debug("    reserved.size           = 0x%lx\n", lmb.reserved.size);
+        pr_info("    reserved.cnt            = 0x%lx\n", lmb.reserved.cnt);
+        pr_info("    reserved.size           = 0x%lx\n", lmb.reserved.size);
         for (i=0; i < lmb.reserved.cnt ;i++) {
-                pr_debug("    reserved.region[0x%x].base = 0x%llx\n",
+                pr_info("    reserved.region[0x%lx].base = 0x%llx\n",
                         i, (unsigned long long)lmb.reserved.region[i].base);
-                pr_debug("                 .size = 0x%llx\n",
+                pr_info("                 .size = 0x%llx\n",
                         (unsigned long long)lmb.reserved.region[i].size);
         }
-#endif /* DEBUG */
 }
 
 static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2,
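
With the hunk above, the dump is gated at run time by lmb_debug instead of
at compile time by #ifdef DEBUG, and the messages are promoted from
pr_debug() to pr_info() so they reach the log without a DEBUG build; the %x
specifiers also become %lx to match the unsigned long loop counter.
Assuming the usual early_param() semantics, the output is enabled from the
kernel command line:

        lmb=debug

(per the strstr() check, any value containing the substring "debug" would
enable it).
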
@@ -286,8 +297,7 @@ static u64 __init lmb_alloc_nid_unreserved(u64 start, u64 end,
                 j = lmb_overlaps_region(&lmb.reserved, base, size);
                 if (j < 0) {
                         /* this area isn't reserved, take it */
-                        if (lmb_add_region(&lmb.reserved, base,
-                                           lmb_align_up(size, align)) < 0)
+                        if (lmb_add_region(&lmb.reserved, base, size) < 0)
                                 base = ~(u64)0;
                         return base;
                 }
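
The reservation above no longer aligns size itself: the callers (see the
lmb_alloc_nid() and __lmb_alloc_base() hunks below) now round size up once
on entry, so the value arriving here is already aligned. For reference, a
minimal sketch of a power-of-two round-up helper in the shape of
lmb_align_up(); the real helper is defined elsewhere in the lmb code, and
the parameter names here are illustrative:

        /* Round addr up to the next multiple of align.
         * Assumes align is a nonzero power of two. */
        static inline u64 lmb_align_up(u64 addr, u64 align)
        {
                return (addr + (align - 1)) & ~(align - 1);
        }
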
@@ -333,6 +343,10 @@ u64 __init lmb_alloc_nid(u64 size, u64 align, int nid,
         struct lmb_region *mem = &lmb.memory;
         int i;
 
+        BUG_ON(0 == size);
+
+        size = lmb_align_up(size, align);
+
         for (i = 0; i < mem->cnt; i++) {
                 u64 ret = lmb_alloc_nid_region(&mem->region[i],
                                                nid_range,
@@ -370,6 +384,8 @@ u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
 
         BUG_ON(0 == size);
 
+        size = lmb_align_up(size, align);
+
         /* On some platforms, make sure we allocate lowmem */
         /* Note that LMB_REAL_LIMIT may be LMB_ALLOC_ANYWHERE */
         if (max_addr == LMB_ALLOC_ANYWHERE)
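
The point of the two hunks above: size is rounded up once at the top of
lmb_alloc_nid() and __lmb_alloc_base(), so the later lmb_overlaps_region()
check and the lmb_add_region() reservation operate on the same value.
Before, the overlap check used the raw size while the reservation used the
aligned one; for example, with size = 0x1001 and align = 0x1000, the old
code tested 0x1001 bytes for overlap but reserved
lmb_align_up(0x1001, 0x1000) = 0x2000, so the reserved tail could collide
with an existing region.
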
@@ -393,8 +409,7 @@ u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
                 j = lmb_overlaps_region(&lmb.reserved, base, size);
                 if (j < 0) {
                         /* this area isn't reserved, take it */
-                        if (lmb_add_region(&lmb.reserved, base,
-                                           lmb_align_up(size, align)) < 0)
+                        if (lmb_add_region(&lmb.reserved, base, size) < 0)
                                 return 0;
                         return base;
                 }
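
To see the resulting contract from a caller's side, a hypothetical
early-boot snippet (the sizes and the panic message are made up for the
example; per the hunk above, __lmb_alloc_base() returns 0 on failure and
now aligns size internally):

        /* Carve out a 64K, 64K-aligned buffer below 4GB at early boot. */
        u64 base = __lmb_alloc_base(0x10000, 0x10000, 0x100000000ULL);
        if (!base)
                panic("lmb: cannot allocate early buffer");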