@@ -1295,6 +1295,54 @@ extern void free_area_init_node(int nid, unsigned long * zones_size,
 		unsigned long zone_start_pfn, unsigned long *zholes_size);
 extern void free_initmem(void);
 
+/*
+ * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK)
+ * into the buddy system. The freed pages will be poisoned with pattern
+ * "poison" if it's non-zero.
+ * Returns the number of pages freed into the buddy system.
+ */
+extern unsigned long free_reserved_area(unsigned long start, unsigned long end,
+					int poison, char *s);
+
+static inline void adjust_managed_page_count(struct page *page, long count)
+{
+	totalram_pages += count;
+}
+
+/* Free the reserved page into the buddy system, so it gets managed. */
+static inline void __free_reserved_page(struct page *page)
+{
+	ClearPageReserved(page);
+	init_page_count(page);
+	__free_page(page);
+}
+
+static inline void free_reserved_page(struct page *page)
+{
+	__free_reserved_page(page);
+	adjust_managed_page_count(page, 1);
+}
+
+static inline void mark_page_reserved(struct page *page)
+{
+	SetPageReserved(page);
+	adjust_managed_page_count(page, -1);
+}
+
+/*
+ * Default method to free all the __init memory into the buddy system.
+ * The freed pages will be poisoned with pattern "poison" if it is
+ * non-zero. Returns the number of pages freed into the buddy system.
+ */
+static inline unsigned long free_initmem_default(int poison)
+{
+	extern char __init_begin[], __init_end[];
+
+	return free_reserved_area(PAGE_ALIGN((unsigned long)&__init_begin),
+				  ((unsigned long)&__init_end) & PAGE_MASK,
+				  poison, "unused kernel");
+}
+
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 /*
  * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
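
Not part of the hunk above, but for context: free_initmem_default() wraps the
ClearPageReserved()/init_page_count()/__free_page() sequence behind
free_reserved_area(), so an architecture whose free_initmem() only needs to
hand the __init pages back to the buddy allocator can become a one-line
wrapper. A minimal sketch (the poison value is illustrative; per the comment
above, 0 skips poisoning and any non-zero byte pattern poisons the freed
pages):

	void free_initmem(void)
	{
		/* pass a non-zero byte pattern instead of 0 to poison freed pages */
		free_initmem_default(0);
	}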