- /*
- * Memory Migration functionality - linux/mm/migration.c
- *
- * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
- *
- * Page migration was first developed in the context of the memory hotplug
- * project. The main authors of the migration code are:
- *
- * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
- * Hirokazu Takahashi <taka@valinux.co.jp>
- * Dave Hansen <haveblue@us.ibm.com>
- * Christoph Lameter
- */
- #include <linux/migrate.h>
- #include <linux/export.h>
- #include <linux/swap.h>
- #include <linux/swapops.h>
- #include <linux/pagemap.h>
- #include <linux/buffer_head.h>
- #include <linux/mm_inline.h>
- #include <linux/nsproxy.h>
- #include <linux/pagevec.h>
- #include <linux/ksm.h>
- #include <linux/rmap.h>
- #include <linux/topology.h>
- #include <linux/cpu.h>
- #include <linux/cpuset.h>
- #include <linux/writeback.h>
- #include <linux/mempolicy.h>
- #include <linux/vmalloc.h>
- #include <linux/security.h>
- #include <linux/memcontrol.h>
- #include <linux/syscalls.h>
- #include <linux/hugetlb.h>
- #include <linux/hugetlb_cgroup.h>
- #include <linux/gfp.h>
- #include <linux/balloon_compaction.h>
- #include <asm/tlbflush.h>
- #define CREATE_TRACE_POINTS
- #include <trace/events/migrate.h>
- #include "internal.h"
- /*
- * migrate_prep() needs to be called before we start compiling a list of pages
- * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
- * undesirable, use migrate_prep_local(); a minimal caller sketch follows below.
- */
- int migrate_prep(void)
- {
- /*
- * Clear the LRU lists so pages can be isolated.
- * Note that pages may be moved off the LRU after we have
- * drained them. Those pages will fail to migrate like other
- * pages that may be busy.
- */
- lru_add_drain_all();
- return 0;
- }
- /* Do the necessary work of migrate_prep but not if it involves other CPUs */
- int migrate_prep_local(void)
- {
- lru_add_drain();
- return 0;
- }
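- /*
- * A minimal caller sketch (illustration only, not part of this file's API):
- * the calling order described above, modelled on do_move_page_to_node_array()
- * further down. example_alloc_target() and example_migrate_one() are
- * hypothetical names.
- */
- static struct page *example_alloc_target(struct page *p, unsigned long private,
- int **result)
- {
- /* Allocate the destination page on the requested node. */
- return alloc_pages_exact_node((int)private,
- GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
- }
- static int example_migrate_one(struct page *page, int target_nid)
- {
- LIST_HEAD(pagelist);
- int err;
- migrate_prep(); /* drain LRU pagevecs on all CPUs */
- err = isolate_lru_page(page); /* takes its own page reference */
- if (err)
- return err;
- list_add_tail(&page->lru, &pagelist);
- inc_zone_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page));
- err = migrate_pages(&pagelist, example_alloc_target,
- (unsigned long)target_nid, false, MIGRATE_SYNC, MR_SYSCALL);
- if (err) /* some pages were not migrated */
- putback_lru_pages(&pagelist);
- return err;
- }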
- /*
- * Add isolated pages on the list back to the LRU under page lock
- * to avoid leaking evictable pages back onto unevictable list.
- */
- void putback_lru_pages(struct list_head *l)
- {
- struct page *page;
- struct page *page2;
- list_for_each_entry_safe(page, page2, l, lru) {
- list_del(&page->lru);
- dec_zone_page_state(page, NR_ISOLATED_ANON +
- page_is_file_cache(page));
- putback_lru_page(page);
- }
- }
- /*
- * Put previously isolated pages back onto the appropriate lists
- * from where they were once taken off for compaction/migration.
- *
- * This function shall be used instead of putback_lru_pages(),
- * whenever the isolated pageset has been built by isolate_migratepages_range()
- */
- void putback_movable_pages(struct list_head *l)
- {
- struct page *page;
- struct page *page2;
- list_for_each_entry_safe(page, page2, l, lru) {
- list_del(&page->lru);
- dec_zone_page_state(page, NR_ISOLATED_ANON +
- page_is_file_cache(page));
- if (unlikely(balloon_page_movable(page)))
- balloon_page_putback(page);
- else
- putback_lru_page(page);
- }
- }
- /*
- * Restore a potential migration pte to a working pte entry
- */
- static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
- unsigned long addr, void *old)
- {
- struct mm_struct *mm = vma->vm_mm;
- swp_entry_t entry;
- pmd_t *pmd;
- pte_t *ptep, pte;
- spinlock_t *ptl;
- if (unlikely(PageHuge(new))) {
- ptep = huge_pte_offset(mm, addr);
- if (!ptep)
- goto out;
- ptl = &mm->page_table_lock;
- } else {
- pmd = mm_find_pmd(mm, addr);
- if (!pmd)
- goto out;
- if (pmd_trans_huge(*pmd))
- goto out;
- ptep = pte_offset_map(pmd, addr);
- /*
- * Peek to check is_swap_pte() before taking ptlock? No, we
- * can race mremap's move_ptes(), which skips anon_vma lock.
- */
- ptl = pte_lockptr(mm, pmd);
- }
- spin_lock(ptl);
- pte = *ptep;
- if (!is_swap_pte(pte))
- goto unlock;
- entry = pte_to_swp_entry(pte);
- if (!is_migration_entry(entry) ||
- migration_entry_to_page(entry) != old)
- goto unlock;
- get_page(new);
- pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
- if (is_write_migration_entry(entry))
- pte = pte_mkwrite(pte);
- #ifdef CONFIG_HUGETLB_PAGE
- if (PageHuge(new))
- pte = pte_mkhuge(pte);
- #endif
- flush_cache_page(vma, addr, pte_pfn(pte));
- set_pte_at(mm, addr, ptep, pte);
- if (PageHuge(new)) {
- if (PageAnon(new))
- hugepage_add_anon_rmap(new, vma, addr);
- else
- page_dup_rmap(new);
- } else if (PageAnon(new))
- page_add_anon_rmap(new, vma, addr);
- else
- page_add_file_rmap(new);
- /* No need to invalidate - it was non-present before */
- update_mmu_cache(vma, addr, ptep);
- unlock:
- pte_unmap_unlock(ptep, ptl);
- out:
- return SWAP_AGAIN;
- }
- /*
- * Get rid of all migration entries and replace them by
- * references to the indicated page.
- */
- static void remove_migration_ptes(struct page *old, struct page *new)
- {
- rmap_walk(new, remove_migration_pte, old);
- }
- /*
- * Something used the pte of a page under migration. We need to
- * get to the page and wait until migration is finished.
- * When we return from this function the fault will be retried.
- */
- void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
- unsigned long address)
- {
- pte_t *ptep, pte;
- spinlock_t *ptl;
- swp_entry_t entry;
- struct page *page;
- ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
- pte = *ptep;
- if (!is_swap_pte(pte))
- goto out;
- entry = pte_to_swp_entry(pte);
- if (!is_migration_entry(entry))
- goto out;
- page = migration_entry_to_page(entry);
- /*
- * Once radix-tree replacement of page migration has started, page_count
- * *must* be zero. And, we don't want to call wait_on_page_locked()
- * against a page without get_page().
- * So we use get_page_unless_zero() here. Even if it fails, the page
- * fault will simply occur again.
- */
- if (!get_page_unless_zero(page))
- goto out;
- pte_unmap_unlock(ptep, ptl);
- wait_on_page_locked(page);
- put_page(page);
- return;
- out:
- pte_unmap_unlock(ptep, ptl);
- }
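- /*
- * Hedged sketch of the caller side: roughly what a fault handler such as
- * do_swap_page() does when it trips over a migration entry (simplified, not
- * a verbatim copy). example_wait_if_migration_entry() is a hypothetical
- * helper name; it returns true if it waited and the fault should be retried.
- */
- static inline bool example_wait_if_migration_entry(struct mm_struct *mm,
- pmd_t *pmd, unsigned long address, pte_t orig_pte)
- {
- swp_entry_t entry;
- if (!is_swap_pte(orig_pte))
- return false;
- entry = pte_to_swp_entry(orig_pte);
- if (!is_migration_entry(entry))
- return false;
- /* Sleep until migration of the target page completes. */
- migration_entry_wait(mm, pmd, address);
- return true;
- }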
- #ifdef CONFIG_BLOCK
- /* Returns true if all buffers are successfully locked */
- static bool buffer_migrate_lock_buffers(struct buffer_head *head,
- enum migrate_mode mode)
- {
- struct buffer_head *bh = head;
- /* Simple case, sync compaction */
- if (mode != MIGRATE_ASYNC) {
- do {
- get_bh(bh);
- lock_buffer(bh);
- bh = bh->b_this_page;
- } while (bh != head);
- return true;
- }
- /* async case, we cannot block on lock_buffer so use trylock_buffer */
- do {
- get_bh(bh);
- if (!trylock_buffer(bh)) {
- /*
- * We failed to lock the buffer and cannot stall in
- * async migration. Release the taken locks
- */
- struct buffer_head *failed_bh = bh;
- put_bh(failed_bh);
- bh = head;
- while (bh != failed_bh) {
- unlock_buffer(bh);
- put_bh(bh);
- bh = bh->b_this_page;
- }
- return false;
- }
- bh = bh->b_this_page;
- } while (bh != head);
- return true;
- }
- #else
- static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
- enum migrate_mode mode)
- {
- return true;
- }
- #endif /* CONFIG_BLOCK */
- /*
- * Replace the page in the mapping.
- *
- * The number of remaining references must be:
- * 1 for anonymous pages without a mapping
- * 2 for pages with a mapping
- * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
- */
- static int migrate_page_move_mapping(struct address_space *mapping,
- struct page *newpage, struct page *page,
- struct buffer_head *head, enum migrate_mode mode)
- {
- int expected_count = 0;
- void **pslot;
- if (!mapping) {
- /* Anonymous page without mapping */
- if (page_count(page) != 1)
- return -EAGAIN;
- return MIGRATEPAGE_SUCCESS;
- }
- spin_lock_irq(&mapping->tree_lock);
- pslot = radix_tree_lookup_slot(&mapping->page_tree,
- page_index(page));
- expected_count = 2 + page_has_private(page);
- if (page_count(page) != expected_count ||
- radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
- spin_unlock_irq(&mapping->tree_lock);
- return -EAGAIN;
- }
- if (!page_freeze_refs(page, expected_count)) {
- spin_unlock_irq(&mapping->tree_lock);
- return -EAGAIN;
- }
- /*
- * In the async migration case of moving a page with buffers, lock the
- * buffers using trylock before the mapping is moved. If the mapping
- * were moved first and we then failed to lock the buffers, we could not
- * move the mapping back due to the elevated page count and would have
- * to block waiting on other references to be dropped.
- */
- if (mode == MIGRATE_ASYNC && head &&
- !buffer_migrate_lock_buffers(head, mode)) {
- page_unfreeze_refs(page, expected_count);
- spin_unlock_irq(&mapping->tree_lock);
- return -EAGAIN;
- }
- /*
- * Now we know that no one else is looking at the page.
- */
- get_page(newpage); /* add cache reference */
- if (PageSwapCache(page)) {
- SetPageSwapCache(newpage);
- set_page_private(newpage, page_private(page));
- }
- radix_tree_replace_slot(pslot, newpage);
- /*
- * Drop cache reference from old page by unfreezing
- * to one less reference.
- * We know this isn't the last reference.
- */
- page_unfreeze_refs(page, expected_count - 1);
- /*
- * If moved to a different zone then also account
- * the page for that zone. Other VM counters will be
- * taken care of when we establish references to the
- * new page and drop references to the old page.
- *
- * Note that anonymous pages are accounted for
- * via NR_FILE_PAGES and NR_ANON_PAGES if they
- * are mapped to swap space.
- */
- __dec_zone_page_state(page, NR_FILE_PAGES);
- __inc_zone_page_state(newpage, NR_FILE_PAGES);
- if (!PageSwapCache(page) && PageSwapBacked(page)) {
- __dec_zone_page_state(page, NR_SHMEM);
- __inc_zone_page_state(newpage, NR_SHMEM);
- }
- spin_unlock_irq(&mapping->tree_lock);
- return MIGRATEPAGE_SUCCESS;
- }
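- /*
- * Worked example of the expected_count arithmetic above (a reading aid, not
- * new behaviour): a page-cache page with buffer_heads attached is pinned by
- * one reference held by the isolating caller, one reference for its
- * radix-tree (page cache) slot, and one reference for PagePrivate (the
- * attached buffers), so expected_count = 2 + page_has_private(page) = 3,
- * matching the "3 for pages with a mapping and PagePrivate/PagePrivate2 set"
- * case listed above migrate_page_move_mapping().
- */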
- /*
- * The expected number of remaining references is the same as that
- * of migrate_page_move_mapping().
- */
- int migrate_huge_page_move_mapping(struct address_space *mapping,
- struct page *newpage, struct page *page)
- {
- int expected_count;
- void **pslot;
- if (!mapping) {
- if (page_count(page) != 1)
- return -EAGAIN;
- return MIGRATEPAGE_SUCCESS;
- }
- spin_lock_irq(&mapping->tree_lock);
- pslot = radix_tree_lookup_slot(&mapping->page_tree,
- page_index(page));
- expected_count = 2 + page_has_private(page);
- if (page_count(page) != expected_count ||
- radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
- spin_unlock_irq(&mapping->tree_lock);
- return -EAGAIN;
- }
- if (!page_freeze_refs(page, expected_count)) {
- spin_unlock_irq(&mapping->tree_lock);
- return -EAGAIN;
- }
- get_page(newpage);
- radix_tree_replace_slot(pslot, newpage);
- page_unfreeze_refs(page, expected_count - 1);
- spin_unlock_irq(&mapping->tree_lock);
- return MIGRATEPAGE_SUCCESS;
- }
- /*
- * Copy the page to its new location
- */
- void migrate_page_copy(struct page *newpage, struct page *page)
- {
- if (PageHuge(page) || PageTransHuge(page))
- copy_huge_page(newpage, page);
- else
- copy_highpage(newpage, page);
- if (PageError(page))
- SetPageError(newpage);
- if (PageReferenced(page))
- SetPageReferenced(newpage);
- if (PageUptodate(page))
- SetPageUptodate(newpage);
- if (TestClearPageActive(page)) {
- VM_BUG_ON(PageUnevictable(page));
- SetPageActive(newpage);
- } else if (TestClearPageUnevictable(page))
- SetPageUnevictable(newpage);
- if (PageChecked(page))
- SetPageChecked(newpage);
- if (PageMappedToDisk(page))
- SetPageMappedToDisk(newpage);
- if (PageDirty(page)) {
- clear_page_dirty_for_io(page);
- /*
- * Want to mark the page and the radix tree as dirty, and
- * redo the accounting that clear_page_dirty_for_io undid,
- * but we can't use set_page_dirty because that function
- * is actually a signal that all of the page has become dirty,
- * whereas only part of our page may be dirty.
- */
- if (PageSwapBacked(page))
- SetPageDirty(newpage);
- else
- __set_page_dirty_nobuffers(newpage);
- }
- mlock_migrate_page(newpage, page);
- ksm_migrate_page(newpage, page);
- ClearPageSwapCache(page);
- ClearPagePrivate(page);
- set_page_private(page, 0);
- /*
- * If any waiters have accumulated on the new page then
- * wake them up.
- */
- if (PageWriteback(newpage))
- end_page_writeback(newpage);
- }
- /************************************************************
- * Migration functions
- ***********************************************************/
- /* Always fail migration. Used for mappings that are not movable */
- int fail_migrate_page(struct address_space *mapping,
- struct page *newpage, struct page *page)
- {
- return -EIO;
- }
- EXPORT_SYMBOL(fail_migrate_page);
- /*
- * Common logic to directly migrate a single page suitable for
- * pages that do not use PagePrivate/PagePrivate2.
- *
- * Pages are locked upon entry and exit.
- */
- int migrate_page(struct address_space *mapping,
- struct page *newpage, struct page *page,
- enum migrate_mode mode)
- {
- int rc;
- BUG_ON(PageWriteback(page)); /* Writeback must be complete */
- rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode);
- if (rc != MIGRATEPAGE_SUCCESS)
- return rc;
- migrate_page_copy(newpage, page);
- return MIGRATEPAGE_SUCCESS;
- }
- EXPORT_SYMBOL(migrate_page);
- #ifdef CONFIG_BLOCK
- /*
- * Migration function for pages with buffers. This function can only be used
- * if the underlying filesystem guarantees that no other references to "page"
- * exist.
- */
- int buffer_migrate_page(struct address_space *mapping,
- struct page *newpage, struct page *page, enum migrate_mode mode)
- {
- struct buffer_head *bh, *head;
- int rc;
- if (!page_has_buffers(page))
- return migrate_page(mapping, newpage, page, mode);
- head = page_buffers(page);
- rc = migrate_page_move_mapping(mapping, newpage, page, head, mode);
- if (rc != MIGRATEPAGE_SUCCESS)
- return rc;
- /*
- * In the async case, migrate_page_move_mapping locked the buffers
- * with an IRQ-safe spinlock held. In the sync case, the buffers
- * need to be locked now
- */
- if (mode != MIGRATE_ASYNC)
- BUG_ON(!buffer_migrate_lock_buffers(head, mode));
- ClearPagePrivate(page);
- set_page_private(newpage, page_private(page));
- set_page_private(page, 0);
- put_page(page);
- get_page(newpage);
- bh = head;
- do {
- set_bh_page(bh, newpage, bh_offset(bh));
- bh = bh->b_this_page;
- } while (bh != head);
- SetPagePrivate(newpage);
- migrate_page_copy(newpage, page);
- bh = head;
- do {
- unlock_buffer(bh);
- put_bh(bh);
- bh = bh->b_this_page;
- } while (bh != head);
- return MIGRATEPAGE_SUCCESS;
- }
- EXPORT_SYMBOL(buffer_migrate_page);
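- /*
- * Hedged sketch of how a filesystem typically wires these helpers up. The
- * struct below is hypothetical, but the pattern is the common one: point
- * .migratepage at buffer_migrate_page for buffer_head-backed mappings, or
- * at migrate_page when the mapping keeps no PagePrivate data.
- */
- static const struct address_space_operations example_buffered_aops = {
- /* ... the filesystem's usual readpage/writepage methods ... */
- .migratepage = buffer_migrate_page,
- };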
- #endif
- /*
- * Writeback a page to clean the dirty state
- */
- static int writeout(struct address_space *mapping, struct page *page)
- {
- struct writeback_control wbc = {
- .sync_mode = WB_SYNC_NONE,
- .nr_to_write = 1,
- .range_start = 0,
- .range_end = LLONG_MAX,
- .for_reclaim = 1
- };
- int rc;
- if (!mapping->a_ops->writepage)
- /* No write method for the address space */
- return -EINVAL;
- if (!clear_page_dirty_for_io(page))
- /* Someone else already triggered a write */
- return -EAGAIN;
- /*
- * A dirty page may imply that the underlying filesystem has
- * the page on some queue. So the page must be clean for
- * migration. Writeout may mean we lose the lock and the
- * page state is no longer what we checked for earlier.
- * At this point we know that the migration attempt cannot
- * be successful.
- */
- remove_migration_ptes(page, page);
- rc = mapping->a_ops->writepage(page, &wbc);
- if (rc != AOP_WRITEPAGE_ACTIVATE)
- /* unlocked. Relock */
- lock_page(page);
- return (rc < 0) ? -EIO : -EAGAIN;
- }
- /*
- * Default handling if a filesystem does not provide a migration function.
- */
- static int fallback_migrate_page(struct address_space *mapping,
- struct page *newpage, struct page *page, enum migrate_mode mode)
- {
- if (PageDirty(page)) {
- /* Only writeback pages in full synchronous migration */
- if (mode != MIGRATE_SYNC)
- return -EBUSY;
- return writeout(mapping, page);
- }
- /*
- * Buffers may be managed in a filesystem specific way.
- * We must have no buffers or drop them.
- */
- if (page_has_private(page) &&
- !try_to_release_page(page, GFP_KERNEL))
- return -EAGAIN;
- return migrate_page(mapping, newpage, page, mode);
- }
- /*
- * Move a page to a newly allocated page.
- * The page is locked and all ptes have been successfully removed.
- *
- * The new page will have replaced the old page if this function
- * is successful.
- *
- * Return value:
- * < 0 - error code
- * MIGRATEPAGE_SUCCESS - success
- */
- static int move_to_new_page(struct page *newpage, struct page *page,
- int remap_swapcache, enum migrate_mode mode)
- {
- struct address_space *mapping;
- int rc;
- /*
- * Block others from accessing the page when we get around to
- * establishing additional references. We are the only one
- * holding a reference to the new page at this point.
- */
- if (!trylock_page(newpage))
- BUG();
- /* Prepare mapping for the new page.*/
- newpage->index = page->index;
- newpage->mapping = page->mapping;
- if (PageSwapBacked(page))
- SetPageSwapBacked(newpage);
- mapping = page_mapping(page);
- if (!mapping)
- rc = migrate_page(mapping, newpage, page, mode);
- else if (mapping->a_ops->migratepage)
- /*
- * Most pages have a mapping and most filesystems provide a
- * migratepage callback. Anonymous pages are part of swap
- * space which also has its own migratepage callback. This
- * is the most common path for page migration.
- */
- rc = mapping->a_ops->migratepage(mapping,
- newpage, page, mode);
- else
- rc = fallback_migrate_page(mapping, newpage, page, mode);
- if (rc != MIGRATEPAGE_SUCCESS) {
- newpage->mapping = NULL;
- } else {
- if (remap_swapcache)
- remove_migration_ptes(page, newpage);
- page->mapping = NULL;
- }
- unlock_page(newpage);
- return rc;
- }
- static int __unmap_and_move(struct page *page, struct page *newpage,
- int force, bool offlining, enum migrate_mode mode)
- {
- int rc = -EAGAIN;
- int remap_swapcache = 1;
- struct mem_cgroup *mem;
- struct anon_vma *anon_vma = NULL;
- if (!trylock_page(page)) {
- if (!force || mode == MIGRATE_ASYNC)
- goto out;
- /*
- * It's not safe for direct compaction to call lock_page.
- * For example, during page readahead pages are added locked
- * to the LRU. Later, when the IO completes the pages are
- * marked uptodate and unlocked. However, the queueing
- * could be merging multiple pages for one bio (e.g.
- * mpage_readpages). If an allocation happens for the
- * second or third page, the process can end up locking
- * the same page twice and deadlocking. Rather than
- * trying to be clever about what pages can be locked,
- * avoid the use of lock_page for direct compaction
- * altogether.
- */
- if (current->flags & PF_MEMALLOC)
- goto out;
- lock_page(page);
- }
- /*
- * Only memory hotplug's offline_pages() caller has locked out KSM,
- * and can safely migrate a KSM page. The other cases have skipped
- * PageKsm along with PageReserved - but it is only now when we have
- * the page lock that we can be certain it will not go KSM beneath us
- * (KSM will not upgrade a page from PageAnon to PageKsm when it sees
- * its pagecount raised, but only here do we take the page lock which
- * serializes that).
- */
- if (PageKsm(page) && !offlining) {
- rc = -EBUSY;
- goto unlock;
- }
- /* charge against new page */
- mem_cgroup_prepare_migration(page, newpage, &mem);
- if (PageWriteback(page)) {
- /*
- * Only in the case of a full synchronous migration is it
- * necessary to wait for PageWriteback. In the async case,
- * the retry loop is too short and in the sync-light case,
- * the overhead of stalling is too much
- */
- if (mode != MIGRATE_SYNC) {
- rc = -EBUSY;
- goto uncharge;
- }
- if (!force)
- goto uncharge;
- wait_on_page_writeback(page);
- }
- /*
- * By the time try_to_unmap() returns, page->mapcount has dropped to 0,
- * so we could not otherwise notice the anon_vma being freed while we
- * migrate the page. This get_anon_vma() delays freeing of the anon_vma
- * pointer until the end of migration. File-cache pages are no problem
- * because they are protected by the page lock; file mappings may use
- * write_page() or lock_page() during migration, so only anonymous
- * pages need this care.
- */
- if (PageAnon(page)) {
- /*
- * Only page_lock_anon_vma_read() understands the subtleties of
- * getting a hold on an anon_vma from outside one of its mms.
- */
- anon_vma = page_get_anon_vma(page);
- if (anon_vma) {
- /*
- * Anon page
- */
- } else if (PageSwapCache(page)) {
- /*
- * We cannot be sure that the anon_vma of an unmapped
- * swapcache page is safe to use because we don't
- * know in advance if the VMA that this page belonged
- * to still exists. If the VMA and others sharing the
- * data have been freed, then the anon_vma could
- * already be invalid.
- *
- * To avoid this possibility, swapcache pages get
- * migrated but are not remapped when migration
- * completes
- */
- remap_swapcache = 0;
- } else {
- goto uncharge;
- }
- }
- if (unlikely(balloon_page_movable(page))) {
- /*
- * A ballooned page does not need any special attention from
- * physical to virtual reverse mapping procedures.
- * Skip any attempt to unmap PTEs or to remap swap cache,
- * in order to avoid burning cycles at rmap level, and perform
- * the page migration right away (protected by page lock).
- */
- rc = balloon_page_migrate(newpage, page, mode);
- goto uncharge;
- }
- /*
- * Corner case handling:
- * 1. When a new swap-cache page is read in, it is added to the LRU
- * and treated as swapcache but it has no rmap yet.
- * Calling try_to_unmap() against a page->mapping==NULL page will
- * trigger a BUG. So handle it here.
- * 2. An orphaned page (see truncate_complete_page) might have
- * fs-private metadata. The page can be picked up due to memory
- * offlining. Everywhere else except page reclaim, the page is
- * invisible to the vm, so the page cannot be migrated. So try to
- * free the metadata so that the page can be freed.
- */
- if (!page->mapping) {
- VM_BUG_ON(PageAnon(page));
- if (page_has_private(page)) {
- try_to_free_buffers(page);
- goto uncharge;
- }
- goto skip_unmap;
- }
- /* Establish migration ptes or remove ptes */
- try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
- skip_unmap:
- if (!page_mapped(page))
- rc = move_to_new_page(newpage, page, remap_swapcache, mode);
- if (rc && remap_swapcache)
- remove_migration_ptes(page, page);
- /* Drop an anon_vma reference if we took one */
- if (anon_vma)
- put_anon_vma(anon_vma);
- uncharge:
- mem_cgroup_end_migration(mem, page, newpage,
- (rc == MIGRATEPAGE_SUCCESS ||
- rc == MIGRATEPAGE_BALLOON_SUCCESS));
- unlock:
- unlock_page(page);
- out:
- return rc;
- }
- /*
- * Obtain the lock on page, remove all ptes and migrate the page
- * to the newly allocated page in newpage.
- */
- static int unmap_and_move(new_page_t get_new_page, unsigned long private,
- struct page *page, int force, bool offlining,
- enum migrate_mode mode)
- {
- int rc = 0;
- int *result = NULL;
- struct page *newpage = get_new_page(page, private, &result);
- if (!newpage)
- return -ENOMEM;
- if (page_count(page) == 1) {
- /* page was freed from under us. So we are done. */
- goto out;
- }
- if (unlikely(PageTransHuge(page)))
- if (unlikely(split_huge_page(page)))
- goto out;
- rc = __unmap_and_move(page, newpage, force, offlining, mode);
- if (unlikely(rc == MIGRATEPAGE_BALLOON_SUCCESS)) {
- /*
- * A ballooned page has been migrated already.
- * Now, it's the time to wrap-up counters,
- * handle the page back to Buddy and return.
- */
- dec_zone_page_state(page, NR_ISOLATED_ANON +
- page_is_file_cache(page));
- balloon_page_free(page);
- return MIGRATEPAGE_SUCCESS;
- }
- out:
- if (rc != -EAGAIN) {
- /*
- * A page that has been migrated has all references
- * removed and will be freed. A page that has not been
- * migrated will have kept its references and be
- * restored.
- */
- list_del(&page->lru);
- dec_zone_page_state(page, NR_ISOLATED_ANON +
- page_is_file_cache(page));
- putback_lru_page(page);
- }
- /*
- * Move the new page to the LRU. If migration was not successful
- * then this will free the page.
- */
- putback_lru_page(newpage);
- if (result) {
- if (rc)
- *result = rc;
- else
- *result = page_to_nid(newpage);
- }
- return rc;
- }
- /*
- * Counterpart of unmap_and_move() for hugepage migration.
- *
- * This function doesn't wait for the completion of hugepage I/O
- * because there is no race between I/O and migration for hugepages.
- * Note that currently hugepage I/O occurs only in direct I/O
- * where no lock is held and PG_writeback is irrelevant,
- * and the writeback status of all subpages is counted in the reference
- * count of the head page (i.e. if all subpages of a 2MB hugepage are
- * under direct I/O, the reference count of the head page is 512 and a bit more.)
- * This means that when we try to migrate a hugepage whose subpages are
- * doing direct I/O, some references remain after try_to_unmap() and
- * hugepage migration fails without data corruption.
- *
- * There is also no race when direct I/O is issued on the page under migration,
- * because then pte is replaced with migration swap entry and direct I/O code
- * will wait in the page fault for migration to complete.
- */
- static int unmap_and_move_huge_page(new_page_t get_new_page,
- unsigned long private, struct page *hpage,
- int force, bool offlining,
- enum migrate_mode mode)
- {
- int rc = 0;
- int *result = NULL;
- struct page *new_hpage = get_new_page(hpage, private, &result);
- struct anon_vma *anon_vma = NULL;
- if (!new_hpage)
- return -ENOMEM;
- rc = -EAGAIN;
- if (!trylock_page(hpage)) {
- if (!force || mode != MIGRATE_SYNC)
- goto out;
- lock_page(hpage);
- }
- if (PageAnon(hpage))
- anon_vma = page_get_anon_vma(hpage);
- try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
- if (!page_mapped(hpage))
- rc = move_to_new_page(new_hpage, hpage, 1, mode);
- if (rc)
- remove_migration_ptes(hpage, hpage);
- if (anon_vma)
- put_anon_vma(anon_vma);
- if (!rc)
- hugetlb_cgroup_migrate(hpage, new_hpage);
- unlock_page(hpage);
- out:
- put_page(new_hpage);
- if (result) {
- if (rc)
- *result = rc;
- else
- *result = page_to_nid(new_hpage);
- }
- return rc;
- }
- /*
- * migrate_pages
- *
- * The function takes one list of pages to migrate and a callback
- * that, given a page to be migrated and the private data, determines
- * the target of the move and allocates the new page.
- *
- * The function returns after 10 attempts or if no pages
- * are movable anymore because the list has become empty
- * or because no retryable pages exist anymore.
- * The caller should call putback_lru_pages() to return the pages to the
- * LRU or free list only if ret != 0.
- *
- * Return: Number of pages not migrated or error code.
- */
- int migrate_pages(struct list_head *from,
- new_page_t get_new_page, unsigned long private, bool offlining,
- enum migrate_mode mode, int reason)
- {
- int retry = 1;
- int nr_failed = 0;
- int nr_succeeded = 0;
- int pass = 0;
- struct page *page;
- struct page *page2;
- int swapwrite = current->flags & PF_SWAPWRITE;
- int rc;
- if (!swapwrite)
- current->flags |= PF_SWAPWRITE;
- for(pass = 0; pass < 10 && retry; pass++) {
- retry = 0;
- list_for_each_entry_safe(page, page2, from, lru) {
- cond_resched();
- rc = unmap_and_move(get_new_page, private,
- page, pass > 2, offlining,
- mode);
- switch(rc) {
- case -ENOMEM:
- goto out;
- case -EAGAIN:
- retry++;
- break;
- case MIGRATEPAGE_SUCCESS:
- nr_succeeded++;
- break;
- default:
- /* Permanent failure */
- nr_failed++;
- break;
- }
- }
- }
- rc = nr_failed + retry;
- out:
- if (nr_succeeded)
- count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
- if (nr_failed)
- count_vm_events(PGMIGRATE_FAIL, nr_failed);
- trace_mm_migrate_pages(nr_succeeded, nr_failed, mode, reason);
- if (!swapwrite)
- current->flags &= ~PF_SWAPWRITE;
- return rc;
- }
- int migrate_huge_page(struct page *hpage, new_page_t get_new_page,
- unsigned long private, bool offlining,
- enum migrate_mode mode)
- {
- int pass, rc;
- for (pass = 0; pass < 10; pass++) {
- rc = unmap_and_move_huge_page(get_new_page,
- private, hpage, pass > 2, offlining,
- mode);
- switch (rc) {
- case -ENOMEM:
- goto out;
- case -EAGAIN:
- /* try again */
- cond_resched();
- break;
- case MIGRATEPAGE_SUCCESS:
- goto out;
- default:
- rc = -EIO;
- goto out;
- }
- }
- out:
- return rc;
- }
- #ifdef CONFIG_NUMA
- /*
- * Move a list of individual pages
- */
- struct page_to_node {
- unsigned long addr;
- struct page *page;
- int node;
- int status;
- };
- static struct page *new_page_node(struct page *p, unsigned long private,
- int **result)
- {
- struct page_to_node *pm = (struct page_to_node *)private;
- while (pm->node != MAX_NUMNODES && pm->page != p)
- pm++;
- if (pm->node == MAX_NUMNODES)
- return NULL;
- *result = &pm->status;
- return alloc_pages_exact_node(pm->node,
- GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
- }
- /*
- * Move a set of pages as indicated in the pm array. The addr
- * field must be set to the virtual address of the page to be moved
- * and the node number must contain a valid target node.
- * The pm array ends with node = MAX_NUMNODES.
- */
- static int do_move_page_to_node_array(struct mm_struct *mm,
- struct page_to_node *pm,
- int migrate_all)
- {
- int err;
- struct page_to_node *pp;
- LIST_HEAD(pagelist);
- down_read(&mm->mmap_sem);
- /*
- * Build a list of pages to migrate
- */
- for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
- struct vm_area_struct *vma;
- struct page *page;
- err = -EFAULT;
- vma = find_vma(mm, pp->addr);
- if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma))
- goto set_status;
- page = follow_page(vma, pp->addr, FOLL_GET|FOLL_SPLIT);
- err = PTR_ERR(page);
- if (IS_ERR(page))
- goto set_status;
- err = -ENOENT;
- if (!page)
- goto set_status;
- /* Use PageReserved to check for zero page */
- if (PageReserved(page) || PageKsm(page))
- goto put_and_set;
- pp->page = page;
- err = page_to_nid(page);
- if (err == pp->node)
- /*
- * Node already in the right place
- */
- goto put_and_set;
- err = -EACCES;
- if (page_mapcount(page) > 1 &&
- !migrate_all)
- goto put_and_set;
- err = isolate_lru_page(page);
- if (!err) {
- list_add_tail(&page->lru, &pagelist);
- inc_zone_page_state(page, NR_ISOLATED_ANON +
- page_is_file_cache(page));
- }
- put_and_set:
- /*
- * Either remove the duplicate refcount from
- * isolate_lru_page() or drop the page ref if it was
- * not isolated.
- */
- put_page(page);
- set_status:
- pp->status = err;
- }
- err = 0;
- if (!list_empty(&pagelist)) {
- err = migrate_pages(&pagelist, new_page_node,
- (unsigned long)pm, 0, MIGRATE_SYNC,
- MR_SYSCALL);
- if (err)
- putback_lru_pages(&pagelist);
- }
- up_read(&mm->mmap_sem);
- return err;
- }
- /*
- * Migrate an array of page addresses onto an array of nodes and fill
- * in the corresponding array of status values.
- */
- static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
- unsigned long nr_pages,
- const void __user * __user *pages,
- const int __user *nodes,
- int __user *status, int flags)
- {
- struct page_to_node *pm;
- unsigned long chunk_nr_pages;
- unsigned long chunk_start;
- int err;
- err = -ENOMEM;
- pm = (struct page_to_node *)__get_free_page(GFP_KERNEL);
- if (!pm)
- goto out;
- migrate_prep();
- /*
- * Store a chunk of page_to_node array in a page,
- * but keep the last one as a marker
- */
- chunk_nr_pages = (PAGE_SIZE / sizeof(struct page_to_node)) - 1;
- for (chunk_start = 0;
- chunk_start < nr_pages;
- chunk_start += chunk_nr_pages) {
- int j;
- if (chunk_start + chunk_nr_pages > nr_pages)
- chunk_nr_pages = nr_pages - chunk_start;
- /* fill the chunk pm with addrs and nodes from user-space */
- for (j = 0; j < chunk_nr_pages; j++) {
- const void __user *p;
- int node;
- err = -EFAULT;
- if (get_user(p, pages + j + chunk_start))
- goto out_pm;
- pm[j].addr = (unsigned long) p;
- if (get_user(node, nodes + j + chunk_start))
- goto out_pm;
- err = -ENODEV;
- if (node < 0 || node >= MAX_NUMNODES)
- goto out_pm;
- if (!node_state(node, N_MEMORY))
- goto out_pm;
- err = -EACCES;
- if (!node_isset(node, task_nodes))
- goto out_pm;
- pm[j].node = node;
- }
- /* End marker for this chunk */
- pm[chunk_nr_pages].node = MAX_NUMNODES;
- /* Migrate this chunk */
- err = do_move_page_to_node_array(mm, pm,
- flags & MPOL_MF_MOVE_ALL);
- if (err < 0)
- goto out_pm;
- /* Return status information */
- for (j = 0; j < chunk_nr_pages; j++)
- if (put_user(pm[j].status, status + j + chunk_start)) {
- err = -EFAULT;
- goto out_pm;
- }
- }
- err = 0;
- out_pm:
- free_page((unsigned long)pm);
- out:
- return err;
- }
- /*
- * Determine the nodes of an array of pages and store them in an array of status values.
- */
- static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
- const void __user **pages, int *status)
- {
- unsigned long i;
- down_read(&mm->mmap_sem);
- for (i = 0; i < nr_pages; i++) {
- unsigned long addr = (unsigned long)(*pages);
- struct vm_area_struct *vma;
- struct page *page;
- int err = -EFAULT;
- vma = find_vma(mm, addr);
- if (!vma || addr < vma->vm_start)
- goto set_status;
- page = follow_page(vma, addr, 0);
- err = PTR_ERR(page);
- if (IS_ERR(page))
- goto set_status;
- err = -ENOENT;
- /* Use PageReserved to check for zero page */
- if (!page || PageReserved(page) || PageKsm(page))
- goto set_status;
- err = page_to_nid(page);
- set_status:
- *status = err;
- pages++;
- status++;
- }
- up_read(&mm->mmap_sem);
- }
- /*
- * Determine the nodes of a user array of pages and store them in
- * a user array of status values.
- */
- static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
- const void __user * __user *pages,
- int __user *status)
- {
- #define DO_PAGES_STAT_CHUNK_NR 16
- const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
- int chunk_status[DO_PAGES_STAT_CHUNK_NR];
- while (nr_pages) {
- unsigned long chunk_nr;
- chunk_nr = nr_pages;
- if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
- chunk_nr = DO_PAGES_STAT_CHUNK_NR;
- if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
- break;
- do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
- if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
- break;
- pages += chunk_nr;
- status += chunk_nr;
- nr_pages -= chunk_nr;
- }
- return nr_pages ? -EFAULT : 0;
- }
- /*
- * Move a list of pages in the address space of the currently executing
- * process.
- */
- SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
- const void __user * __user *, pages,
- const int __user *, nodes,
- int __user *, status, int, flags)
- {
- const struct cred *cred = current_cred(), *tcred;
- struct task_struct *task;
- struct mm_struct *mm;
- int err;
- nodemask_t task_nodes;
- /* Check flags */
- if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
- return -EINVAL;
- if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
- return -EPERM;
- /* Find the mm_struct */
- rcu_read_lock();
- task = pid ? find_task_by_vpid(pid) : current;
- if (!task) {
- rcu_read_unlock();
- return -ESRCH;
- }
- get_task_struct(task);
- /*
- * Check if this process has the right to modify the specified
- * process. The right exists if the process has administrative
- * capabilities, superuser privileges or the same
- * userid as the target process.
- */
- tcred = __task_cred(task);
- if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
- !capable(CAP_SYS_NICE)) {
- rcu_read_unlock();
- err = -EPERM;
- goto out;
- }
- rcu_read_unlock();
- err = security_task_movememory(task);
- if (err)
- goto out;
- task_nodes = cpuset_mems_allowed(task);
- mm = get_task_mm(task);
- put_task_struct(task);
- if (!mm)
- return -EINVAL;
- if (nodes)
- err = do_pages_move(mm, task_nodes, nr_pages, pages,
- nodes, status, flags);
- else
- err = do_pages_stat(mm, nr_pages, pages, status);
- mmput(mm);
- return err;
- out:
- put_task_struct(task);
- return err;
- }
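- /*
- * Hedged userspace sketch of the syscall above, using the libnuma wrapper
- * declared in <numaif.h> (build with -lnuma). It asks the kernel to move one
- * page of the calling process to node 0 (assumed to exist) and prints the
- * resulting status, which is the destination node on success or a negative
- * errno value on failure:
- *
- * #include <numaif.h>
- * #include <stdio.h>
- * #include <stdlib.h>
- * int main(void)
- * {
- * void *pages[1];
- * int nodes[1] = { 0 };
- * int status[1] = { 0 };
- * pages[0] = aligned_alloc(4096, 4096);
- * *(char *)pages[0] = 1; // touch so the page is faulted in
- * if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE) < 0)
- * perror("move_pages");
- * printf("status[0] = %d\n", status[0]);
- * return 0;
- * }
- */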
- /*
- * Call migration functions in the vma_ops that may prepare
- * memory in a vm for migration. Migration functions may perform
- * the migration for vmas that do not have an underlying page struct.
- */
- int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
- const nodemask_t *from, unsigned long flags)
- {
- struct vm_area_struct *vma;
- int err = 0;
- for (vma = mm->mmap; vma && !err; vma = vma->vm_next) {
- if (vma->vm_ops && vma->vm_ops->migrate) {
- err = vma->vm_ops->migrate(vma, to, from, flags);
- if (err)
- break;
- }
- }
- return err;
- }
- #ifdef CONFIG_NUMA_BALANCING
- /*
- * Returns true if this is a safe migration target node for misplaced NUMA
- * pages. Currently it only checks the watermarks, which is crude.
- */
- static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
- int nr_migrate_pages)
- {
- int z;
- for (z = pgdat->nr_zones - 1; z >= 0; z--) {
- struct zone *zone = pgdat->node_zones + z;
- if (!populated_zone(zone))
- continue;
- if (zone->all_unreclaimable)
- continue;
- /* Avoid waking kswapd by allocating nr_migrate_pages pages. */
- if (!zone_watermark_ok(zone, 0,
- high_wmark_pages(zone) +
- nr_migrate_pages,
- 0, 0))
- continue;
- return true;
- }
- return false;
- }
- static struct page *alloc_misplaced_dst_page(struct page *page,
- unsigned long data,
- int **result)
- {
- int nid = (int) data;
- struct page *newpage;
- newpage = alloc_pages_exact_node(nid,
- (GFP_HIGHUSER_MOVABLE | GFP_THISNODE |
- __GFP_NOMEMALLOC | __GFP_NORETRY |
- __GFP_NOWARN) &
- ~GFP_IOFS, 0);
- if (newpage)
- page_xchg_last_nid(newpage, page_last_nid(page));
- return newpage;
- }
- /*
- * Page migration rate limiting control.
- * Do not migrate more than @ratelimit_pages pages in a @migrate_interval_millisecs
- * window of time. Default here says do not migrate more than 1280M per second.
- * If a node is rate-limited then PTE NUMA updates are also rate-limited. However
- * as it is faults that reset the window, pte updates will happen unconditionally
- * if there has not been a fault since @pteupdate_interval_millisecs after the
- * throttle window closed.
- */
- static unsigned int migrate_interval_millisecs __read_mostly = 100;
- static unsigned int pteupdate_interval_millisecs __read_mostly = 1000;
- static unsigned int ratelimit_pages __read_mostly = 128 << (20 - PAGE_SHIFT);
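- /*
- * Worked example of the defaults above, assuming PAGE_SHIFT == 12 (4KB
- * pages): ratelimit_pages = 128 << (20 - 12) = 32768 pages = 128MB per
- * 100ms window, which is the "1280M per second" ceiling mentioned above.
- */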
- /* Returns true if NUMA migration is currently rate limited */
- bool migrate_ratelimited(int node)
- {
- pg_data_t *pgdat = NODE_DATA(node);
- if (time_after(jiffies, pgdat->numabalancing_migrate_next_window +
- msecs_to_jiffies(pteupdate_interval_millisecs)))
- return false;
- if (pgdat->numabalancing_migrate_nr_pages < ratelimit_pages)
- return false;
- return true;
- }
- /* Returns true if the node is migrate rate-limited after the update */
- bool numamigrate_update_ratelimit(pg_data_t *pgdat, unsigned long nr_pages)
- {
- bool rate_limited = false;
- /*
- * Rate-limit the amount of data that is being migrated to a node.
- * Optimal placement is no good if the memory bus is saturated and
- * all the time is being spent migrating!
- */
- spin_lock(&pgdat->numabalancing_migrate_lock);
- if (time_after(jiffies, pgdat->numabalancing_migrate_next_window)) {
- pgdat->numabalancing_migrate_nr_pages = 0;
- pgdat->numabalancing_migrate_next_window = jiffies +
- msecs_to_jiffies(migrate_interval_millisecs);
- }
- if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages)
- rate_limited = true;
- else
- pgdat->numabalancing_migrate_nr_pages += nr_pages;
- spin_unlock(&pgdat->numabalancing_migrate_lock);
-
- return rate_limited;
- }
- int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
- {
- int ret = 0;
- /* Avoid migrating to a node that is nearly full */
- if (migrate_balanced_pgdat(pgdat, 1)) {
- int page_lru;
- if (isolate_lru_page(page)) {
- put_page(page);
- return 0;
- }
- /* Page is isolated */
- ret = 1;
- page_lru = page_is_file_cache(page);
- if (!PageTransHuge(page))
- inc_zone_page_state(page, NR_ISOLATED_ANON + page_lru);
- else
- mod_zone_page_state(page_zone(page),
- NR_ISOLATED_ANON + page_lru,
- HPAGE_PMD_NR);
- }
- /*
- * Page is either isolated or there is not enough space on the target
- * node. If it is isolated, then it has taken a reference count and the
- * caller's reference can be safely dropped without the page
- * disappearing underneath us during migration. Otherwise the page is
- * not to be migrated but the caller's reference should still be
- * dropped so it does not leak.
- */
- put_page(page);
- return ret;
- }
- /*
- * Attempt to migrate a misplaced page to the specified destination
- * node. Caller is expected to have an elevated reference count on
- * the page that will be dropped by this function before returning.
- */
- int migrate_misplaced_page(struct page *page, int node)
- {
- pg_data_t *pgdat = NODE_DATA(node);
- int isolated = 0;
- int nr_remaining;
- LIST_HEAD(migratepages);
- /*
- * Don't migrate pages that are mapped in multiple processes.
- * TODO: Handle false sharing detection instead of this hammer
- */
- if (page_mapcount(page) != 1) {
- put_page(page);
- goto out;
- }
- /*
- * Rate-limit the amount of data that is being migrated to a node.
- * Optimal placement is no good if the memory bus is saturated and
- * all the time is being spent migrating!
- */
- if (numamigrate_update_ratelimit(pgdat, 1)) {
- put_page(page);
- goto out;
- }
- isolated = numamigrate_isolate_page(pgdat, page);
- if (!isolated)
- goto out;
- list_add(&page->lru, &migratepages);
- nr_remaining = migrate_pages(&migratepages,
- alloc_misplaced_dst_page,
- node, false, MIGRATE_ASYNC,
- MR_NUMA_MISPLACED);
- if (nr_remaining) {
- putback_lru_pages(&migratepages);
- isolated = 0;
- } else
- count_vm_numa_event(NUMA_PAGE_MIGRATE);
- BUG_ON(!list_empty(&migratepages));
- out:
- return isolated;
- }
- #endif /* CONFIG_NUMA_BALANCING */
- #if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
- int migrate_misplaced_transhuge_page(struct mm_struct *mm,
- struct vm_area_struct *vma,
- pmd_t *pmd, pmd_t entry,
- unsigned long address,
- struct page *page, int node)
- {
- unsigned long haddr = address & HPAGE_PMD_MASK;
- pg_data_t *pgdat = NODE_DATA(node);
- int isolated = 0;
- struct page *new_page = NULL;
- struct mem_cgroup *memcg = NULL;
- int page_lru = page_is_file_cache(page);
- /*
- * Don't migrate pages that are mapped in multiple processes.
- * TODO: Handle false sharing detection instead of this hammer
- */
- if (page_mapcount(page) != 1)
- goto out_dropref;
- /*
- * Rate-limit the amount of data that is being migrated to a node.
- * Optimal placement is no good if the memory bus is saturated and
- * all the time is being spent migrating!
- */
- if (numamigrate_update_ratelimit(pgdat, HPAGE_PMD_NR))
- goto out_dropref;
- new_page = alloc_pages_node(node,
- (GFP_TRANSHUGE | GFP_THISNODE) & ~__GFP_WAIT, HPAGE_PMD_ORDER);
- if (!new_page) {
- count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
- goto out_dropref;
- }
- page_xchg_last_nid(new_page, page_last_nid(page));
- isolated = numamigrate_isolate_page(pgdat, page);
- if (!isolated) {
- count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
- put_page(new_page);
- goto out_keep_locked;
- }
- /* Prepare a page as a migration target */
- __set_page_locked(new_page);
- SetPageSwapBacked(new_page);
- /* anon mapping, we can simply copy page->mapping to the new page: */
- new_page->mapping = page->mapping;
- new_page->index = page->index;
- migrate_page_copy(new_page, page);
- WARN_ON(PageLRU(new_page));
- /* Recheck the target PMD */
- spin_lock(&mm->page_table_lock);
- if (unlikely(!pmd_same(*pmd, entry))) {
- spin_unlock(&mm->page_table_lock);
- /* Reverse changes made by migrate_page_copy() */
- if (TestClearPageActive(new_page))
- SetPageActive(page);
- if (TestClearPageUnevictable(new_page))
- SetPageUnevictable(page);
- mlock_migrate_page(page, new_page);
- unlock_page(new_page);
- put_page(new_page); /* Free it */
- unlock_page(page);
- putback_lru_page(page);
- count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
- goto out;
- }
- /*
- * Traditional migration needs to prepare the memcg charge
- * transaction early to prevent the old page from being
- * uncharged when installing migration entries. Here we can
- * save the potential rollback and start the charge transfer
- * only when migration is already known to end successfully.
- */
- mem_cgroup_prepare_migration(page, new_page, &memcg);
- entry = mk_pmd(new_page, vma->vm_page_prot);
- entry = pmd_mknonnuma(entry);
- entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
- entry = pmd_mkhuge(entry);
- page_add_new_anon_rmap(new_page, vma, haddr);
- set_pmd_at(mm, haddr, pmd, entry);
- update_mmu_cache_pmd(vma, address, entry);
- page_remove_rmap(page);
- /*
- * Finish the charge transaction under the page table lock to
- * prevent split_huge_page() from dividing up the charge
- * before it's fully transferred to the new page.
- */
- mem_cgroup_end_migration(memcg, page, new_page, true);
- spin_unlock(&mm->page_table_lock);
- unlock_page(new_page);
- unlock_page(page);
- put_page(page); /* Drop the rmap reference */
- put_page(page); /* Drop the LRU isolation reference */
- count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
- count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);
- out:
- mod_zone_page_state(page_zone(page),
- NR_ISOLATED_ANON + page_lru,
- -HPAGE_PMD_NR);
- return isolated;
- out_dropref:
- put_page(page);
- out_keep_locked:
- return 0;
- }
- #endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */
- #endif /* CONFIG_NUMA */