@@ -28,6 +28,16 @@ static int __init emu_find_memblk_by_nid(int nid, const struct numa_meminfo *mi)
 	return -ENOENT;
 }
 
+static u64 __init mem_hole_size(u64 start, u64 end)
+{
+	unsigned long start_pfn = PFN_UP(start);
+	unsigned long end_pfn = PFN_DOWN(end);
+
+	if (start_pfn < end_pfn)
+		return PFN_PHYS(absent_pages_in_range(start_pfn, end_pfn));
+	return 0;
+}
+
 /*
  * Sets up nid to range from @start to @end.  The return value is -errno if
  * something went wrong, 0 otherwise.
@@ -89,7 +99,7 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei,
 	 * Calculate target node size.  x86_32 freaks on __udivdi3() so do
 	 * the division in ulong number of pages and convert back.
 	 */
-	size = max_addr - addr - memblock_x86_hole_size(addr, max_addr);
+	size = max_addr - addr - mem_hole_size(addr, max_addr);
 	size = PFN_PHYS((unsigned long)(size >> PAGE_SHIFT) / nr_nodes);
 
 	/*
@@ -135,8 +145,7 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei,
 		 * Continue to add memory to this fake node if its
 		 * non-reserved memory is less than the per-node size.
 		 */
-		while (end - start -
-		       memblock_x86_hole_size(start, end) < size) {
+		while (end - start - mem_hole_size(start, end) < size) {
 			end += FAKE_NODE_MIN_SIZE;
 			if (end > limit) {
 				end = limit;
@@ -150,7 +159,7 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei,
 		 * this one must extend to the boundary.
 		 */
 		if (end < dma32_end && dma32_end - end -
-		    memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
+		    mem_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
 			end = dma32_end;
 
 		/*
@@ -158,8 +167,7 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei,
 		 * next node, this one must extend to the end of the
 		 * physical node.
 		 */
-		if (limit - end -
-		    memblock_x86_hole_size(end, limit) < size)
+		if (limit - end - mem_hole_size(end, limit) < size)
 			end = limit;
 
 		ret = emu_setup_memblk(ei, pi, nid++ % nr_nodes,
@@ -180,7 +188,7 @@ static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size)
 {
 	u64 end = start + size;
 
-	while (end - start - memblock_x86_hole_size(start, end) < size) {
+	while (end - start - mem_hole_size(start, end) < size) {
 		end += FAKE_NODE_MIN_SIZE;
 		if (end > max_addr) {
 			end = max_addr;
@@ -211,8 +219,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
 	 * creates a uniform distribution of node sizes across the entire
 	 * machine (but not necessarily over physical nodes).
 	 */
-	min_size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) /
-		   MAX_NUMNODES;
+	min_size = (max_addr - addr - mem_hole_size(addr, max_addr)) / MAX_NUMNODES;
 	min_size = max(min_size, FAKE_NODE_MIN_SIZE);
 	if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size)
 		min_size = (min_size + FAKE_NODE_MIN_SIZE) &
@@ -252,7 +259,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
 		 * this one must extend to the boundary.
 		 */
 		if (end < dma32_end && dma32_end - end -
-		    memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
+		    mem_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
 			end = dma32_end;
 
 		/*
@@ -260,8 +267,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
 		 * next node, this one must extend to the end of the
 		 * physical node.
 		 */
-		if (limit - end -
-		    memblock_x86_hole_size(end, limit) < size)
+		if (limit - end - mem_hole_size(end, limit) < size)
 			end = limit;
 
 		ret = emu_setup_memblk(ei, pi, nid++ % MAX_NUMNODES,
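
Not part of the patch itself: below is a minimal, self-contained user-space sketch of the rounding contract the new helper relies on.  PFN_UP() rounds a byte address up to the next whole page frame and PFN_DOWN() rounds down, so only pages fully contained in [start, end) are counted.  The PFN macros are re-derived here for an assumed 4 KiB page size, and absent_pages_in_range() is a stub standing in for the kernel's real early-memory walk.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
#define PFN_PHYS(x)	((uint64_t)(x) << PAGE_SHIFT)

/* Toy model: pages below 1 MiB are present, everything above is a hole. */
static unsigned long absent_pages_in_range(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	unsigned long present_end = PFN_DOWN(1UL << 20);

	if (end_pfn <= present_end)
		return 0;
	return end_pfn - (start_pfn > present_end ? start_pfn : present_end);
}

/* Same shape as the helper this patch adds. */
static uint64_t mem_hole_size(uint64_t start, uint64_t end)
{
	unsigned long start_pfn = PFN_UP(start);
	unsigned long end_pfn = PFN_DOWN(end);

	if (start_pfn < end_pfn)
		return PFN_PHYS(absent_pages_in_range(start_pfn, end_pfn));
	return 0;
}

int main(void)
{
	/* Sub-page range: PFN_UP(100) == 1, PFN_DOWN(200) == 0, so no
	 * full page lies inside the range and the hole size is 0.
	 */
	printf("%llu\n", (unsigned long long)mem_hole_size(100, 200));

	/* 0..2 MiB with only the first 1 MiB present -> 1 MiB hole. */
	printf("%llu\n", (unsigned long long)mem_hole_size(0, 2ULL << 20));
	return 0;
}

The same page-granular arithmetic is what lets the interleave code above do its division on an unsigned long page count (size >> PAGE_SHIFT) and convert back with PFN_PHYS(), avoiding the u64 division that makes x86_32 "freak on __udivdi3()".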