@@ -932,6 +932,10 @@ const char *arch_vma_name(struct vm_area_struct *vma)
 /*
  * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
  */
+static long __meminitdata addr_start, addr_end;
+static void __meminitdata *p_start, *p_end;
+static int __meminitdata node_start;
+
 int __meminit
 vmemmap_populate(struct page *start_page, unsigned long size, int node)
 {
@@ -966,12 +970,32 @@ vmemmap_populate(struct page *start_page, unsigned long size, int node)
 							PAGE_KERNEL_LARGE);
 			set_pmd(pmd, __pmd(pte_val(entry)));
 
-			printk(KERN_DEBUG " [%lx-%lx] PMD ->%p on node %d\n",
-				addr, addr + PMD_SIZE - 1, p, node);
+			/* check to see if we have contiguous blocks */
+			if (p_end != p || node_start != node) {
+				if (p_start)
+					printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
+						addr_start, addr_end-1, p_start, p_end-1, node_start);
+				addr_start = addr;
+				node_start = node;
+				p_start = p;
+			}
+			addr_end = addr + PMD_SIZE;
+			p_end = p + PMD_SIZE;
 		} else {
 			vmemmap_verify((pte_t *)pmd, node, addr, next);
 		}
 	}
 	return 0;
 }
+
+void __meminit vmemmap_populate_print_last(void)
+{
+	if (p_start) {
+		printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
+			addr_start, addr_end-1, p_start, p_end-1, node_start);
+		p_start = NULL;
+		p_end = NULL;
+		node_start = 0;
+	}
+}
 #endif
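
For reference, a minimal userspace sketch (not part of the patch; names and the 2 MiB block size are illustrative) of the coalescing pattern the hunk introduces: instead of printing one debug line per PMD-sized block, the current run is tracked in addr_start/addr_end and p_start/p_end, and a line is emitted only when the next block is not contiguous with the previous one, plus once at the end.

/* coalesce.c - illustrative only, mirrors the patch's run-tracking logic */
#include <stdio.h>
#include <stddef.h>

#define BLK_SIZE (2UL << 20)	/* stand-in for PMD_SIZE (2 MiB) */

static unsigned long addr_start, addr_end;
static char *p_start, *p_end;
static int node_start;

/* Flush the pending run, as vmemmap_populate_print_last() does. */
static void print_last(void)
{
	if (p_start) {
		printf(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
		       addr_start, addr_end - 1,
		       (void *)p_start, (void *)(p_end - 1), node_start);
		p_start = NULL;
		p_end = NULL;
		node_start = 0;
	}
}

/* Record one mapped block; print the previous run only when continuity breaks. */
static void note_block(unsigned long addr, char *p, int node)
{
	if (p_end != p || node_start != node) {
		print_last();
		addr_start = addr;
		node_start = node;
		p_start = p;
	}
	addr_end = addr + BLK_SIZE;
	p_end = p + BLK_SIZE;
}

int main(void)
{
	static char backing[6 * (2 << 20)];	/* fake allocation pool */
	unsigned long va = 0xffffea0000000000UL;	/* arbitrary start address */
	int i;

	/* Three contiguous blocks on node 0 are reported as a single range... */
	for (i = 0; i < 3; i++)
		note_block(va + i * BLK_SIZE, backing + i * BLK_SIZE, 0);
	/* ...while a gap in the backing memory starts a new run. */
	note_block(va + 4 * BLK_SIZE, backing + 5 * BLK_SIZE, 0);

	print_last();	/* flush the final run */
	return 0;
}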