@@ -57,6 +57,7 @@
 #include <linux/ftrace_event.h>
 #include <linux/memcontrol.h>
 #include <linux/prefetch.h>
+#include <linux/page-debug-flags.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -388,6 +389,37 @@ static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
 		clear_highpage(page + i);
 }
 
+#ifdef CONFIG_DEBUG_PAGEALLOC
+unsigned int _debug_guardpage_minorder;
+
+static int __init debug_guardpage_minorder_setup(char *buf)
+{
+	unsigned long res;
+
+	if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
+		printk(KERN_ERR "Bad debug_guardpage_minorder value\n");
+		return 0;
+	}
+	_debug_guardpage_minorder = res;
+	printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res);
+	return 0;
+}
+__setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);
+
+static inline void set_page_guard_flag(struct page *page)
+{
+	__set_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
+}
+
+static inline void clear_page_guard_flag(struct page *page)
+{
+	__clear_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
+}
+#else
+static inline void set_page_guard_flag(struct page *page) { }
+static inline void clear_page_guard_flag(struct page *page) { }
+#endif
+
 static inline void set_page_order(struct page *page, int order)
 {
 	set_page_private(page, order);
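
With CONFIG_DEBUG_PAGEALLOC enabled, the __setup() handler above is driven from the kernel command line: booting with, say, debug_guardpage_minorder=2 (an illustrative value; anything above MAX_ORDER / 2 is rejected) sets _debug_guardpage_minorder, which the expand() hunk further down consults when deciding whether to turn split-off buddies into guard pages.
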
@@ -445,6 +477,11 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
 	if (page_zone_id(page) != page_zone_id(buddy))
 		return 0;
 
+	if (page_is_guard(buddy) && page_order(buddy) == order) {
+		VM_BUG_ON(page_count(buddy) != 0);
+		return 1;
+	}
+
 	if (PageBuddy(buddy) && page_order(buddy) == order) {
 		VM_BUG_ON(page_count(buddy) != 0);
 		return 1;
@@ -501,11 +538,19 @@ static inline void __free_one_page(struct page *page,
 		buddy = page + (buddy_idx - page_idx);
 		if (!page_is_buddy(page, buddy, order))
 			break;
-
-		/* Our buddy is free, merge with it and move up one order. */
-		list_del(&buddy->lru);
-		zone->free_area[order].nr_free--;
-		rmv_page_order(buddy);
+		/*
+		 * Our buddy is free or it is a CONFIG_DEBUG_PAGEALLOC guard
+		 * page; merge with it and move up one order.
+		 */
+		if (page_is_guard(buddy)) {
+			clear_page_guard_flag(buddy);
+			set_page_private(page, 0);
+			__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
+		} else {
+			list_del(&buddy->lru);
+			zone->free_area[order].nr_free--;
+			rmv_page_order(buddy);
+		}
 		combined_idx = buddy_idx & page_idx;
 		page = page + (combined_idx - page_idx);
 		page_idx = combined_idx;
@@ -731,6 +776,23 @@ static inline void expand(struct zone *zone, struct page *page,
 		high--;
 		size >>= 1;
 		VM_BUG_ON(bad_range(zone, &page[size]));
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+		if (high < debug_guardpage_minorder()) {
+			/*
+			 * Mark as a guard page (or pages) so the block can be
+			 * merged back into the allocator when its buddy is
+			 * freed. The corresponding page table entries are not
+			 * touched; the pages stay not present in virtual space.
+			 */
+			INIT_LIST_HEAD(&page[size].lru);
+			set_page_guard_flag(&page[size]);
+			set_page_private(&page[size], high);
+			/* Guard pages are not available for any usage */
+			__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << high));
+			continue;
+		}
+#endif
 		list_add(&page[size].lru, &area->free_list[migratetype]);
 		area->nr_free++;
 		set_page_order(&page[size], high);
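
Note the accounting symmetry between this hunk and the __free_one_page() hunk above: guard pages are kept off the free lists, so expand() subtracts 1 << high from NR_FREE_PAGES when it hides a buddy as a guard block, and __free_one_page() adds 1 << order back at the moment that guard buddy is merged, keeping the free-page counter balanced.
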
@@ -1754,7 +1816,8 @@ void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
 {
 	unsigned int filter = SHOW_MEM_FILTER_NODES;
 
-	if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs))
+	if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) ||
+	    debug_guardpage_minorder() > 0)
 		return;
 
 	/*
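
The hunks above call debug_guardpage_minorder() and page_is_guard(), which are not defined in this file. A minimal sketch of plausible definitions, consistent with the _debug_guardpage_minorder variable and the PAGE_DEBUG_FLAG_GUARD bit used above (the exact header location and signatures are assumptions, not shown in this diff):

/*
 * Sketch only: assumed helpers matching the callers in the hunks above;
 * they would live in a shared header (e.g. include/linux/mm.h), not here.
 */
#ifdef CONFIG_DEBUG_PAGEALLOC
extern unsigned int _debug_guardpage_minorder;

static inline unsigned int debug_guardpage_minorder(void)
{
	return _debug_guardpage_minorder;
}

static inline bool page_is_guard(struct page *page)
{
	return test_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
}
#else
static inline unsigned int debug_guardpage_minorder(void) { return 0; }
static inline bool page_is_guard(struct page *page) { return false; }
#endif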