@@ -231,9 +231,9 @@ static void bad_page(struct page *page)
 	printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
 		KERN_EMERG "Backtrace:\n");
 	dump_stack();
-	set_page_count(page, 0);
-	reset_page_mapcount(page);
-	page->mapping = NULL;
+
+	/* Leave bad fields for debug, except PageBuddy could make trouble */
+	__ClearPageBuddy(page);
 	add_taint(TAINT_BAD_PAGE);
 }
 
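Note: bad_page() now preserves the suspect page's state (count, mapcount, mapping) so the diagnostic printout and later inspection can see exactly what was corrupt; only PG_buddy is cleared, per the new comment, because a stale buddy flag is the one field the allocator itself would act on. A toy userspace model (not kernel code; the names are invented for illustration) of why that flag alone is dangerous:

	#include <stdio.h>

	#define PG_BUDDY (1UL << 0)

	struct toy_page {
		unsigned long flags;
	};

	/* coalescing trusts this flag when deciding whether to merge */
	static int toy_page_is_buddy(const struct toy_page *buddy)
	{
		return (buddy->flags & PG_BUDDY) != 0;
	}

	int main(void)
	{
		struct toy_page bad = { .flags = PG_BUDDY }; /* corrupt page, stale flag */

		printf("would merge: %d\n", toy_page_is_buddy(&bad));
		bad.flags &= ~PG_BUDDY;	/* the __ClearPageBuddy() step */
		printf("after clear: %d\n", toy_page_is_buddy(&bad));
		return 0;
	}

Left set, the flag would let a neighbouring free coalesce the corrupt page straight back into the free lists.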
@@ -290,25 +290,31 @@ void prep_compound_gigantic_page(struct page *page, unsigned long order)
 }
 #endif
 
-static void destroy_compound_page(struct page *page, unsigned long order)
+static int destroy_compound_page(struct page *page, unsigned long order)
 {
 	int i;
 	int nr_pages = 1 << order;
+	int bad = 0;
 
-	if (unlikely(compound_order(page) != order))
+	if (unlikely(compound_order(page) != order) ||
+	    unlikely(!PageHead(page))) {
 		bad_page(page);
+		bad++;
+	}
 
-	if (unlikely(!PageHead(page)))
-		bad_page(page);
 	__ClearPageHead(page);
+
 	for (i = 1; i < nr_pages; i++) {
 		struct page *p = page + i;
 
-		if (unlikely(!PageTail(p) |
-				(p->first_page != page)))
+		if (unlikely(!PageTail(p) | (p->first_page != page))) {
 			bad_page(page);
+			bad++;
+		}
 		__ClearPageTail(p);
 	}
+
+	return bad;
 }
 
 static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
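Note: destroy_compound_page() keeps scanning and clearing head/tail state even after a failure, but now counts the failures and hands the total back, so the caller can quarantine the whole block. A minimal userspace sketch of the same count-and-report pattern (toy types, not the kernel's struct page):

	#include <stdio.h>

	struct toy_page {
		int is_tail;
		struct toy_page *first_page;
	};

	/* validate every tail page; report how many were bad, clear state anyway */
	static int toy_destroy_compound(struct toy_page *pages, int nr)
	{
		int i, bad = 0;

		for (i = 1; i < nr; i++) {
			if (!pages[i].is_tail || pages[i].first_page != &pages[0])
				bad++;
			pages[i].is_tail = 0;	/* mirror of __ClearPageTail() */
		}
		return bad;
	}

	int main(void)
	{
		struct toy_page pages[4] = { { 0, NULL } };
		int i;

		for (i = 1; i < 4; i++) {
			pages[i].is_tail = 1;
			pages[i].first_page = &pages[0];
		}
		pages[2].first_page = NULL;	/* inject corruption */
		printf("bad = %d\n", toy_destroy_compound(pages, 4));
		return 0;
	}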
@@ -428,7 +434,8 @@ static inline void __free_one_page(struct page *page,
 	int migratetype = get_pageblock_migratetype(page);
 
 	if (unlikely(PageCompound(page)))
-		destroy_compound_page(page, order);
+		if (unlikely(destroy_compound_page(page, order)))
+			return;
 
 	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
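Note: this is the point of the new return value. If any page of the compound block failed validation, __free_one_page() now returns before touching the free lists, so the whole 1 << order block is deliberately leaked rather than recycled. Spelled out with braces (an equivalent form, shown only for readability):

	if (unlikely(PageCompound(page))) {
		if (unlikely(destroy_compound_page(page, order)))
			return;	/* leak the block; bad pages stay out of circulation */
	}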
@@ -465,15 +472,10 @@ static inline int free_pages_check(struct page *page)
 	if (unlikely(page_mapcount(page) |
 		(page->mapping != NULL) |
 		(page_count(page) != 0) |
-		(page->flags & PAGE_FLAGS_CHECK_AT_FREE)))
+		(page->flags & PAGE_FLAGS_CHECK_AT_FREE))) {
 		bad_page(page);
-	/*
-	 * For now, we report if PG_reserved was found set, but do not
-	 * clear it, and do not free the page. But we shall soon need
-	 * to do more, for when the ZERO_PAGE count wraps negative.
-	 */
-	if (PageReserved(page))
 		return 1;
+	}
 	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
 		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
 	return 0;
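Note: the PG_reserved special case is gone here. Any page that fails the combined check is now kept out of circulation, so free_pages_check() reports 1 for every bad page, not just reserved ones. Also worth noticing is the check's style, which the patch keeps: the conditions are combined with bitwise | rather than logical ||, so all four terms are evaluated and folded into a single test with no short-circuit branches. A standalone illustration of that idiom (values invented):

	#include <stdio.h>

	int main(void)
	{
		int mapcount = 0;
		void *mapping = NULL;
		int count = 0;
		unsigned long flags = 0x0;
		unsigned long bad_mask = 0xff;

		/* one branch covers all four conditions, as in free_pages_check() */
		if (mapcount |
		    (mapping != NULL) |
		    (count != 0) |
		    (flags & bad_mask))
			printf("bad page\n");
		else
			printf("page ok\n");
		return 0;
	}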
@@ -521,11 +523,11 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 {
 	unsigned long flags;
 	int i;
-	int reserved = 0;
+	int bad = 0;
 
 	for (i = 0 ; i < (1 << order) ; ++i)
-		reserved += free_pages_check(page + i);
-	if (reserved)
+		bad += free_pages_check(page + i);
+	if (bad)
 		return;
 
 	if (!PageHighMem(page)) {
@@ -610,17 +612,11 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
 	if (unlikely(page_mapcount(page) |
 		(page->mapping != NULL) |
 		(page_count(page) != 0) |
-		(page->flags & PAGE_FLAGS_CHECK_AT_PREP)))
+		(page->flags & PAGE_FLAGS_CHECK_AT_PREP))) {
 		bad_page(page);
-
-	/*
-	 * For now, we report if PG_reserved was found set, but do not
-	 * clear it, and do not allocate the page: as a safety net.
-	 */
-	if (PageReserved(page))
 		return 1;
+	}
 
-	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
 	set_page_private(page, 0);
 	set_page_refcounted(page);
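Note: symmetrically on the allocation side, prep_new_page() now returns 1 for any bad page, and the unconditional clearing of PAGE_FLAGS_CHECK_AT_PREP is dropped here, since free_pages_check() (above) already clears those flags at free time. The caller reacts by discarding the page and allocating another; from memory of the surrounding code in this kernel, buffered_rmqueue() does roughly the following (a sketch, treat the exact lines as an assumption rather than part of this patch):

	again:
		page = __rmqueue(zone, order, migratetype);
		if (!page)
			goto failed;
		/* ... statistics, unlock ... */
		if (prep_new_page(page, order, gfp_flags))
			goto again;	/* bad page: leave it out of circulation, take another */
		return page;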