|
@@ -58,8 +58,105 @@ static void release_memory_resource(struct resource *res)
|
|
return;
|
|
return;
|
|
}
|
|
}
|
|
|
|
|
|
-
|
|
|
|
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
|
|
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
|
|
|
|
+#ifndef CONFIG_SPARSEMEM_VMEMMAP
|
|
|
|
/*
 * Tag @page as holding bootmem-allocated hotplug metadata so it can be
 * identified and released at memory hot-remove time.
 *
 * @info:  which object the page belongs to (e.g. section number or
 *         node id — see the callers below).
 * @page:  the struct page describing the metadata page.
 * @magic: kind tag (SECTION_INFO, MIX_INFO, NODE_INFO) stashed in
 *         page->_mapcount and read back by put_page_bootmem().
 */
static void get_page_bootmem(unsigned long info, struct page *page, int magic)
{
	atomic_set(&page->_mapcount, magic);	/* type tag for put_page_bootmem() */
	SetPagePrivate(page);
	set_page_private(page, info);		/* section nr / node id */
	atomic_inc(&page->_count);		/* one reference per registration */
}
|
|
|
|
+
|
|
|
|
/*
 * Drop one reference taken by get_page_bootmem().  When the final
 * reference is released, clear the bootmem bookkeeping fields and hand
 * the page back to the allocator via __free_pages_bootmem().
 */
void put_page_bootmem(struct page *page)
{
	int magic;

	magic = atomic_read(&page->_mapcount);
	/*
	 * Sanity check: a registered bootmem-info page carries a type tag
	 * in _mapcount set by get_page_bootmem(); presumably the
	 * SECTION_INFO/MIX_INFO/NODE_INFO constants are all < -1, so a
	 * value >= -1 means the page was never tagged — TODO confirm the
	 * constants' values against their definitions.
	 */
	BUG_ON(magic >= -1);

	if (atomic_dec_return(&page->_count) == 1) {
		/* last reference: undo get_page_bootmem() and free the page */
		ClearPagePrivate(page);
		set_page_private(page, 0);
		reset_page_mapcount(page);
		__free_pages_bootmem(page, 0);
	}

}
|
|
|
|
+
|
|
|
|
/*
 * Register the bootmem-allocated pages backing one memory section's
 * metadata — its memmap and its usemap — so they can be located and
 * freed when the section is hot-removed.
 *
 * @start_pfn: first pfn of the section.  If the pfn is not valid there
 * is no metadata to register and the function returns silently.
 */
void register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long *usemap, mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;

	if (!pfn_valid(start_pfn))
		return;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	/* Get section's memmap address */
	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	/*
	 * Get page for the memmap's phys address
	 * XXX: need more consideration for sparse_vmemmap...
	 */
	page = virt_to_page(memmap);
	/* number of pages spanned by this section's memmap */
	mapsize = sizeof(struct page) * PAGES_PER_SECTION;
	mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;

	/* remember memmap's page */
	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, SECTION_INFO);

	/*
	 * Tag the usemap's pages as MIX_INFO — presumably because the
	 * usemap may share pages with other data rather than owning them
	 * outright; TODO confirm against the usemap allocation path.
	 */
	usemap = __nr_to_section(section_nr)->pageblock_flags;
	page = virt_to_page(usemap);

	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_INFO);

}
|
|
|
|
+
|
|
|
|
/*
 * Register every bootmem-allocated metadata page belonging to one node:
 * the pglist_data itself, each zone's wait table (when allocated), and
 * each memory section's memmap/usemap via
 * register_page_bootmem_info_section().
 *
 * @pgdat: the node whose metadata pages are to be tagged.
 */
void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
	unsigned long i, pfn, end_pfn, nr_pages;
	int node = pgdat->node_id;
	struct page *page;
	struct zone *zone;

	/* pages holding the pglist_data structure itself */
	nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
	page = virt_to_page(pgdat);

	for (i = 0; i < nr_pages; i++, page++)
		get_page_bootmem(node, page, NODE_INFO);

	/*
	 * Pages holding each zone's wait table, where one exists.
	 * NOTE(review): the bound is MAX_NR_ZONES - 1, so the last zone is
	 * skipped — presumably intentional, but worth confirming against
	 * the zone layout.
	 */
	zone = &pgdat->node_zones[0];
	for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) {
		if (zone->wait_table) {
			nr_pages = zone->wait_table_hash_nr_entries
				* sizeof(wait_queue_head_t);
			nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT;
			page = virt_to_page(zone->wait_table);

			for (i = 0; i < nr_pages; i++, page++)
				get_page_bootmem(node, page, NODE_INFO);
		}
	}

	pfn = pgdat->node_start_pfn;
	end_pfn = pfn + pgdat->node_spanned_pages;

	/* register_section info */
	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION)
		register_page_bootmem_info_section(pfn);

}
|
|
|
|
+#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
|
|
|
|
+
|
|
static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
|
|
static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
|
|
{
|
|
{
|
|
struct pglist_data *pgdat = zone->zone_pgdat;
|
|
struct pglist_data *pgdat = zone->zone_pgdat;
|