@@ -1282,7 +1282,7 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			}
 			dec_mm_counter(mm, MM_ANONPAGES);
 			inc_mm_counter(mm, MM_SWAPENTS);
-		} else if (PAGE_MIGRATION) {
+		} else if (IS_ENABLED(CONFIG_MIGRATION)) {
 			/*
 			 * Store the pfn of the page in a special migration
 			 * pte. do_swap_page() will wait until the migration
@@ -1293,7 +1293,8 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 		}
 		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
 		BUG_ON(pte_file(*pte));
-	} else if (PAGE_MIGRATION && (TTU_ACTION(flags) == TTU_MIGRATION)) {
+	} else if (IS_ENABLED(CONFIG_MIGRATION) &&
+		   (TTU_ACTION(flags) == TTU_MIGRATION)) {
 		/* Establish migration entry for a file page */
 		swp_entry_t entry;
 		entry = make_migration_entry(page, pte_write(pteval));
@@ -1499,7 +1500,7 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
 		 * locking requirements of exec(), migration skips
 		 * temporary VMAs until after exec() completes.
 		 */
-		if (PAGE_MIGRATION && (flags & TTU_MIGRATION) &&
+		if (IS_ENABLED(CONFIG_MIGRATION) && (flags & TTU_MIGRATION) &&
 				is_vma_temporary_stack(vma))
 			continue;
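For readers unfamiliar with the idiom: IS_ENABLED() is defined in <linux/kconfig.h> and expands to a constant 1 when the given CONFIG_ option is built in (or modular) and 0 otherwise. Using it in a plain C conditional, as the hunks above do, keeps the migration-only branches visible to the compiler on every build, so they are always parsed and type-checked and then discarded as dead code when CONFIG_MIGRATION is off, whereas an #ifdef'd branch can silently bit-rot. The standalone sketch below illustrates the same pattern outside the kernel; CONFIG_MIGRATION_ENABLED and establish_migration_entry() are made-up names used only for illustration, not kernel symbols.

/*
 * Standalone sketch of the IS_ENABLED()-style idiom (userspace, not
 * kernel code).  CONFIG_MIGRATION_ENABLED is a hypothetical compile-time
 * constant standing in for the result of IS_ENABLED(CONFIG_MIGRATION).
 */
#include <stdio.h>

#define CONFIG_MIGRATION_ENABLED 0	/* flip to 1 to "enable" the feature */

static void establish_migration_entry(void)
{
	/* Placeholder for work that should run only when migration is on. */
	puts("migration path taken");
}

int main(void)
{
	/*
	 * Unlike an #ifdef block, this branch is always compiled, so it
	 * cannot silently break while the option is disabled; the compiler
	 * still removes it as dead code because the condition is a
	 * compile-time constant.
	 */
	if (CONFIG_MIGRATION_ENABLED)
		establish_migration_entry();
	else
		puts("migration disabled at compile time");

	return 0;
}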