@@ -8,6 +8,7 @@
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/vmalloc.h>
+#include "internal.h"
 #include <asm/dma.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
@@ -376,6 +377,10 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
 {
 	return; /* XXX: Not implemented yet */
 }
+static void free_map_bootmem(struct page *page, unsigned long nr_pages)
+{
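+	/* XXX: Not implemented yet, as with __kfree_section_memmap() above */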
+}
 #else
 static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
 {
@@ -413,17 +418,47 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
 	free_pages((unsigned long)memmap,
 		   get_order(sizeof(struct page) * nr_pages));
 }
+
+static void free_map_bootmem(struct page *page, unsigned long nr_pages)
+{
+	unsigned long maps_section_nr, removing_section_nr, i;
+	int magic;
+
+	for (i = 0; i < nr_pages; i++, page++) {
+		magic = atomic_read(&page->_mapcount);
+
+		BUG_ON(magic == NODE_INFO);
+
+		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
+		removing_section_nr = page->private;
+
+		/*
+		 * When this function is called, the section being removed
+		 * has already been logically offlined, so all of its pages
+		 * are isolated from the page allocator.  If the memmap of
+		 * the removed section is placed on that same section, it
+		 * must not be freed here: the page allocator could hand it
+		 * out again shortly before the memory is physically removed.
+		 */
+		if (maps_section_nr != removing_section_nr)
+			put_page_bootmem(page);
+	}
+}
 #endif /* CONFIG_SPARSEMEM_VMEMMAP */

 static void free_section_usemap(struct page *memmap, unsigned long *usemap)
 {
+	struct page *usemap_page;
+	unsigned long nr_pages;
+
 	if (!usemap)
 		return;

+	usemap_page = virt_to_page(usemap);
 	/*
 	 * Check to see if allocation came from hot-plug-add
 	 */
-	if (PageSlab(virt_to_page(usemap))) {
+	if (PageSlab(usemap_page)) {
 		kfree(usemap);
 		if (memmap)
 			__kfree_section_memmap(memmap, PAGES_PER_SECTION);
@@ -431,10 +466,24 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap)
 	}

 	/*
-	 * TODO: Allocations came from bootmem - how do I free up ?
+	 * The usemap came from bootmem.  It is packed with the other usemaps
+	 * on the section holding the pgdat at boot time, so keep it as is now.
 	 */
-	printk(KERN_WARNING "Not freeing up allocations from bootmem "
-	       "- leaking memory\n");
+
+	if (memmap) {
+		struct page *memmap_page;
+		memmap_page = virt_to_page(memmap);
+
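+		/*
+		 * Pages spanned by the section's memmap: e.g. 512 pages,
+		 * assuming 4KiB pages, PAGES_PER_SECTION == 32768 and a
+		 * 64-byte struct page (all of these configuration dependent).
+		 */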
+		nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
+			>> PAGE_SHIFT;
+
+		free_map_bootmem(memmap_page, nr_pages);
+	}
 }

 /*
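
For reference, free_section_usemap() above is expected to be reached from
the section-removal path.  A minimal sketch of such a caller (assuming a
sparse_remove_one_section() entry point and the existing
sparse_decode_mem_map()/__section_nr() helpers in mm/sparse.c; this is not
part of the patch and the details may differ in the tree it applies to):

void sparse_remove_one_section(struct zone *zone, struct mem_section *ms)
{
	struct page *memmap = NULL;
	unsigned long *usemap = NULL;

	if (ms->section_mem_map) {
		/* Detach the usemap and memmap from the section... */
		usemap = ms->pageblock_flags;
		memmap = sparse_decode_mem_map(ms->section_mem_map,
						__section_nr(ms));
		ms->section_mem_map = 0;
		ms->pageblock_flags = NULL;
	}

	/* ...then hand both to the bootmem-aware teardown above. */
	free_section_usemap(memmap, usemap);
}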