|
@@ -390,8 +390,65 @@ struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
|
|
|
PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION));
|
|
|
return map;
|
|
|
}
|
|
|
+void __init sparse_mem_maps_populate_node(struct page **map_map,
|
|
|
+ unsigned long pnum_begin,
|
|
|
+ unsigned long pnum_end,
|
|
|
+ unsigned long map_count, int nodeid)
|
|
|
+{
|
|
|
+ void *map;
|
|
|
+ unsigned long pnum;
|
|
|
+ unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
|
|
|
+
|
|
|
+ map = alloc_remap(nodeid, size * map_count);
|
|
|
+ if (map) {
|
|
|
+ for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
|
|
|
+ if (!present_section_nr(pnum))
|
|
|
+ continue;
|
|
|
+ map_map[pnum] = map;
|
|
|
+ map += size;
|
|
|
+ }
|
|
|
+ return;
|
|
|
+ }
|
|
|
+
|
|
|
+ size = PAGE_ALIGN(size);
|
|
|
+ map = alloc_bootmem_pages_node(NODE_DATA(nodeid), size * map_count);
|
|
|
+ if (map) {
|
|
|
+ for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
|
|
|
+ if (!present_section_nr(pnum))
|
|
|
+ continue;
|
|
|
+ map_map[pnum] = map;
|
|
|
+ map += size;
|
|
|
+ }
|
|
|
+ return;
|
|
|
+ }
|
|
|
+
|
|
|
+ /* fallback */
|
|
|
+ for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
|
|
|
+ struct mem_section *ms;
|
|
|
+
|
|
|
+ if (!present_section_nr(pnum))
|
|
|
+ continue;
|
|
|
+ map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
|
|
|
+ if (map_map[pnum])
|
|
|
+ continue;
|
|
|
+ ms = __nr_to_section(pnum);
|
|
|
+ printk(KERN_ERR "%s: sparsemem memory map backing failed "
|
|
|
+ "some memory will not be available.\n", __func__);
|
|
|
+ ms->section_mem_map = 0;
|
|
|
+ }
|
|
|
+}
|
|
|
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
|
|
|
|
|
|
+static void __init sparse_early_mem_maps_alloc_node(struct page **map_map,
|
|
|
+ unsigned long pnum_begin,
|
|
|
+ unsigned long pnum_end,
|
|
|
+ unsigned long map_count, int nodeid)
|
|
|
+{
|
|
|
+ sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
|
|
|
+ map_count, nodeid);
|
|
|
+}
|
|
|
+
|
|
|
+#ifndef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
|
|
|
static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
|
|
|
{
|
|
|
struct page *map;
|
|
@@ -407,6 +464,7 @@ static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
|
|
|
ms->section_mem_map = 0;
|
|
|
return NULL;
|
|
|
}
|
|
|
+#endif
|
|
|
|
|
|
void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
|
|
|
{
|
|
@@ -420,12 +478,14 @@ void __init sparse_init(void)
|
|
|
{
|
|
|
unsigned long pnum;
|
|
|
struct page *map;
|
|
|
+ struct page **map_map;
|
|
|
unsigned long *usemap;
|
|
|
unsigned long **usemap_map;
|
|
|
- int size;
|
|
|
+ int size, size2;
|
|
|
int nodeid_begin = 0;
|
|
|
unsigned long pnum_begin = 0;
|
|
|
unsigned long usemap_count;
|
|
|
+ unsigned long map_count;
|
|
|
|
|
|
/*
|
|
|
* map is using big page (aka 2M in x86 64 bit)
|
|
@@ -478,6 +538,48 @@ void __init sparse_init(void)
|
|
|
sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, NR_MEM_SECTIONS,
|
|
|
usemap_count, nodeid_begin);
|
|
|
|
|
|
+#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
|
|
|
+ size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
|
|
|
+ map_map = alloc_bootmem(size2);
|
|
|
+ if (!map_map)
|
|
|
+ panic("can not allocate map_map\n");
|
|
|
+
|
|
|
+ for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
|
|
|
+ struct mem_section *ms;
|
|
|
+
|
|
|
+ if (!present_section_nr(pnum))
|
|
|
+ continue;
|
|
|
+ ms = __nr_to_section(pnum);
|
|
|
+ nodeid_begin = sparse_early_nid(ms);
|
|
|
+ pnum_begin = pnum;
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ map_count = 1;
|
|
|
+ for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
|
|
|
+ struct mem_section *ms;
|
|
|
+ int nodeid;
|
|
|
+
|
|
|
+ if (!present_section_nr(pnum))
|
|
|
+ continue;
|
|
|
+ ms = __nr_to_section(pnum);
|
|
|
+ nodeid = sparse_early_nid(ms);
|
|
|
+ if (nodeid == nodeid_begin) {
|
|
|
+ map_count++;
|
|
|
+ continue;
|
|
|
+ }
|
|
|
+ /* ok, we need to take care of from pnum_begin to pnum - 1 */
|
|
|
+ sparse_early_mem_maps_alloc_node(map_map, pnum_begin, pnum,
|
|
|
+ map_count, nodeid_begin);
|
|
|
+ /* new start, update count etc. */
|
|
|
+ nodeid_begin = nodeid;
|
|
|
+ pnum_begin = pnum;
|
|
|
+ map_count = 1;
|
|
|
+ }
|
|
|
+ /* ok, last chunk */
|
|
|
+ sparse_early_mem_maps_alloc_node(map_map, pnum_begin, NR_MEM_SECTIONS,
|
|
|
+ map_count, nodeid_begin);
|
|
|
+#endif
|
|
|
+
|
|
|
for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
|
|
|
if (!present_section_nr(pnum))
|
|
|
continue;
|
|
@@ -486,7 +588,11 @@ void __init sparse_init(void)
|
|
|
if (!usemap)
|
|
|
continue;
|
|
|
|
|
|
+#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
|
|
|
+ map = map_map[pnum];
|
|
|
+#else
|
|
|
map = sparse_early_mem_map_alloc(pnum);
|
|
|
+#endif
|
|
|
if (!map)
|
|
|
continue;
|
|
|
|
|
@@ -496,6 +602,9 @@ void __init sparse_init(void)
|
|
|
|
|
|
vmemmap_populate_print_last();
|
|
|
|
|
|
+#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
|
|
|
+ free_bootmem(__pa(map_map), size2);
|
|
|
+#endif
|
|
|
free_bootmem(__pa(usemap_map), size);
|
|
|
}
|
|
|
|