/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins <hugh@veritas.com> 2003, 2004
 */

/*
 * Lock ordering in mm (each lock nests inside the ones above it):
 *
 * inode->i_mutex	(while writing or truncating, not reading or faulting)
 *   inode->i_alloc_sem (vmtruncate_range)
 *   mm->mmap_sem
 *     page->flags PG_locked (lock_page)
 *       mapping->i_mmap_lock
 *         anon_vma->lock
 *           mm->page_table_lock or pte_lock
 *             zone->lru_lock (in mark_page_accessed, isolate_lru_page)
 *             swap_lock (in swap_duplicate, swap_info_get)
 *               mmlist_lock (in mmput, drain_mmlist and others)
 *               mapping->private_lock (in __set_page_dirty_buffers)
 *               inode_lock (in set_page_dirty's __mark_inode_dirty)
 *                 sb_lock (within inode_lock in fs/fs-writeback.c)
 *                 mapping->tree_lock (widely used, in set_page_dirty,
 *                           in arch-dependent flush_dcache_mmap_lock,
 *                           within inode_lock in __sync_single_inode)
 */

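/*
 * Illustrative sketch (not part of the original file): page_mkclean_file()
 * below follows this ordering when it walks a file's vmas. The i_mmap_lock
 * sits above the pte lock in the hierarchy, so it is taken first and
 * released last; taking them in the other order would risk deadlock:
 *
 *	spin_lock(&mapping->i_mmap_lock);
 *	pte = page_check_address(page, mm, address, &ptl);  (takes pte lock)
 *	...
 *	pte_unmap_unlock(pte, ptl);
 *	spin_unlock(&mapping->i_mmap_lock);
 */
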
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/memcontrol.h>

#include <asm/tlbflush.h>

struct kmem_cache *anon_vma_cachep;

/* This must be called under the mmap_sem. */
int anon_vma_prepare(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	might_sleep();
	if (unlikely(!anon_vma)) {
		struct mm_struct *mm = vma->vm_mm;
		struct anon_vma *allocated, *locked;

		anon_vma = find_mergeable_anon_vma(vma);
		if (anon_vma) {
			allocated = NULL;
			locked = anon_vma;
			spin_lock(&locked->lock);
		} else {
			anon_vma = anon_vma_alloc();
			if (unlikely(!anon_vma))
				return -ENOMEM;
			allocated = anon_vma;
			locked = NULL;
		}

		/* page_table_lock to protect against threads */
		spin_lock(&mm->page_table_lock);
		if (likely(!vma->anon_vma)) {
			vma->anon_vma = anon_vma;
			list_add_tail(&vma->anon_vma_node, &anon_vma->head);
			allocated = NULL;
		}
		spin_unlock(&mm->page_table_lock);

		if (locked)
			spin_unlock(&locked->lock);
		if (unlikely(allocated))
			anon_vma_free(allocated);
	}
	return 0;
}

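/*
 * Illustrative sketch (not part of the original file): the anonymous fault
 * path prepares the vma's anon_vma before installing a new page, roughly:
 *
 *	if (unlikely(anon_vma_prepare(vma)))
 *		goto oom;
 *	page = alloc_zeroed_user_highpage_movable(vma, address);
 *	...
 *	page_add_new_anon_rmap(page, vma, address);
 *
 * (Names follow mm/memory.c's do_anonymous_page; details vary by version.)
 */
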
void __anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next)
{
	BUG_ON(vma->anon_vma != next->anon_vma);
	list_del(&next->anon_vma_node);
}

void __anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma)
		list_add_tail(&vma->anon_vma_node, &anon_vma->head);
}

void anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma) {
		spin_lock(&anon_vma->lock);
		list_add_tail(&vma->anon_vma_node, &anon_vma->head);
		spin_unlock(&anon_vma->lock);
	}
}

void anon_vma_unlink(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int empty;

	if (!anon_vma)
		return;

	spin_lock(&anon_vma->lock);
	list_del(&vma->anon_vma_node);

	/* We must garbage collect the anon_vma if it's empty */
	empty = list_empty(&anon_vma->head);
	spin_unlock(&anon_vma->lock);

	if (empty)
		anon_vma_free(anon_vma);
}

static void anon_vma_ctor(struct kmem_cache *cachep, void *data)
{
	struct anon_vma *anon_vma = data;

	spin_lock_init(&anon_vma->lock);
	INIT_LIST_HEAD(&anon_vma->head);
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is
 * tricky: page_lock_anon_vma relies on RCU to guard against the races.
 */
static struct anon_vma *page_lock_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long) page->mapping;
	if (!(anon_mapping & PAGE_MAPPING_ANON))
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	spin_lock(&anon_vma->lock);
	return anon_vma;
out:
	rcu_read_unlock();
	return NULL;
}

static void page_unlock_anon_vma(struct anon_vma *anon_vma)
{
	spin_unlock(&anon_vma->lock);
	rcu_read_unlock();
}

/*
 * At what user virtual address is page expected in @vma?
 * Returns virtual address or -EFAULT if page's index/offset is not
 * within the range mapped by the @vma.
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
		/* page should be within @vma mapping range */
		return -EFAULT;
	}
	return address;
}

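/*
 * Worked example (illustrative, not part of the original file): with 4K
 * pages, a page with page->index 100 in a vma with vm_pgoff 64 and
 * vm_start 0x08048000 is expected at
 *
 *	0x08048000 + ((100 - 64) << 12) = 0x08048000 + 0x24000 = 0x0806c000
 *
 * An index below vm_pgoff wraps the unsigned subtraction, fails the range
 * check, and returns -EFAULT.
 */
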
/*
 * At what user virtual address is page expected in vma? checking that the
 * page matches the vma: currently only used on anon pages, by unuse_vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	if (PageAnon(page)) {
		if ((void *)vma->anon_vma !=
		    (void *)page->mapping - PAGE_MAPPING_ANON)
			return -EFAULT;
	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
		if (!vma->vm_file ||
		    vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	return vma_address(page, vma);
}

/*
 * Check that @page is mapped at @address into @mm.
 *
 * On success returns with pte mapped and locked.
 */
pte_t *page_check_address(struct page *page, struct mm_struct *mm,
			  unsigned long address, spinlock_t **ptlp)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return NULL;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL;

	pte = pte_offset_map(pmd, address);
	/* Make a quick check before getting the lock */
	if (!pte_present(*pte)) {
		pte_unmap(pte);
		return NULL;
	}

	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
		*ptlp = ptl;
		return pte;
	}
	pte_unmap_unlock(pte, ptl);
	return NULL;
}

/*
 * Subfunctions of page_referenced: page_referenced_one called
 * repeatedly from either page_referenced_anon or page_referenced_file.
 */
static int page_referenced_one(struct page *page,
	struct vm_area_struct *vma, unsigned int *mapcount)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;
	int referenced = 0;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		goto out;

	if (vma->vm_flags & VM_LOCKED) {
		referenced++;
		*mapcount = 1;	/* break early from loop */
	} else if (ptep_clear_flush_young(vma, address, pte))
		referenced++;

	/* Pretend the page is referenced if the task has the
	   swap token and is in the middle of a page fault. */
	if (mm != current->mm && has_swap_token(mm) &&
			rwsem_is_locked(&mm->mmap_sem))
		referenced++;

	(*mapcount)--;
	pte_unmap_unlock(pte, ptl);
out:
	return referenced;
}

static int page_referenced_anon(struct page *page,
				struct mem_cgroup *mem_cont)
{
	unsigned int mapcount;
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	int referenced = 0;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return referenced;

	mapcount = page_mapcount(page);
	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		/*
		 * If we are reclaiming on behalf of a cgroup, skip
		 * counting on behalf of references from different
		 * cgroups
		 */
		if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
			continue;
		referenced += page_referenced_one(page, vma, &mapcount);
		if (!mapcount)
			break;
	}

	page_unlock_anon_vma(anon_vma);
	return referenced;
}

/**
 * page_referenced_file - referenced check for object-based rmap
 * @page: the page we're checking references on.
 * @mem_cont: target memory controller
 *
 * For an object-based mapped page, find all the places it is mapped and
 * check/clear the referenced flag. This is done by following the page->mapping
 * pointer, then walking the chain of vmas it holds. It returns the number
 * of references it found.
 *
 * This function is only called from page_referenced for object-based pages.
 */
static int page_referenced_file(struct page *page,
				struct mem_cgroup *mem_cont)
{
	unsigned int mapcount;
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int referenced = 0;

	/*
	 * The caller's checks on page->mapping and !PageAnon have made
	 * sure that this is a file page: the check for page->mapping
	 * excludes the case just before it gets set on an anon page.
	 */
	BUG_ON(PageAnon(page));

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_lock.
	 */
	BUG_ON(!PageLocked(page));

	spin_lock(&mapping->i_mmap_lock);

	/*
	 * i_mmap_lock does not stabilize mapcount at all, but mapcount
	 * is more likely to be accurate if we note it after spinning.
	 */
	mapcount = page_mapcount(page);

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * If we are reclaiming on behalf of a cgroup, skip
		 * counting on behalf of references from different
		 * cgroups
		 */
		if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
			continue;
		if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE))
				  == (VM_LOCKED|VM_MAYSHARE)) {
			referenced++;
			break;
		}
		referenced += page_referenced_one(page, vma, &mapcount);
		if (!mapcount)
			break;
	}

	spin_unlock(&mapping->i_mmap_lock);
	return referenced;
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 * @mem_cont: target memory controller
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page, int is_locked,
			struct mem_cgroup *mem_cont)
{
	int referenced = 0;

	if (TestClearPageReferenced(page))
		referenced++;

	if (page_mapped(page) && page->mapping) {
		if (PageAnon(page))
			referenced += page_referenced_anon(page, mem_cont);
		else if (is_locked)
			referenced += page_referenced_file(page, mem_cont);
		else if (TestSetPageLocked(page))
			referenced++;
		else {
			if (page->mapping)
				referenced +=
					page_referenced_file(page, mem_cont);
			unlock_page(page);
		}
	}

	if (page_test_and_clear_young(page))
		referenced++;

	return referenced;
}

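/*
 * Illustrative sketch (not part of the original file): the reclaim path in
 * mm/vmscan.c consults this to decide whether a page is worth keeping,
 * along the lines of:
 *
 *	referenced = page_referenced(page, 1, sc->mem_cgroup);
 *	if (referenced && page_mapping_inuse(page))
 *		goto activate_locked;
 *
 * (Names follow shrink_page_list; details vary by kernel version.)
 */
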
static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;
	int ret = 0;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		goto out;

	if (pte_dirty(*pte) || pte_write(*pte)) {
		pte_t entry;

		flush_cache_page(vma, address, pte_pfn(*pte));
		entry = ptep_clear_flush(vma, address, pte);
		entry = pte_wrprotect(entry);
		entry = pte_mkclean(entry);
		set_pte_at(mm, address, pte, entry);
		ret = 1;
	}

	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}

static int page_mkclean_file(struct address_space *mapping, struct page *page)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = 0;

	BUG_ON(PageAnon(page));

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if (vma->vm_flags & VM_SHARED)
			ret += page_mkclean_one(page, vma);
	}
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}

int page_mkclean(struct page *page)
{
	int ret = 0;

	BUG_ON(!PageLocked(page));

	if (page_mapped(page)) {
		struct address_space *mapping = page_mapping(page);
		if (mapping) {
			ret = page_mkclean_file(mapping, page);
			if (page_test_dirty(page)) {
				page_clear_dirty(page);
				ret = 1;
			}
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(page_mkclean);

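/*
 * Illustrative sketch (not part of the original file): writeback uses this
 * when transferring dirty state from the ptes to the page, roughly as in
 * clear_page_dirty_for_io():
 *
 *	if (page_mkclean(page))
 *		set_page_dirty(page);
 *
 * i.e. any pte that was dirty or writable is cleaned and write-protected,
 * and the dirtiness is re-expressed on the struct page itself.
 */
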
/**
 * __page_set_anon_rmap - setup new anonymous rmap
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 */
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);
	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;

	page->index = linear_page_index(vma, address);

	/*
	 * nr_mapped state can be updated without turning off
	 * interrupts because it is not modified via interrupt.
	 */
	__inc_zone_page_state(page, NR_ANON_PAGES);
}

/**
 * __page_check_anon_rmap - sanity check anonymous rmap addition
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 */
static void __page_check_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * The page's anon-rmap details (mapping and index) are guaranteed to
	 * be set up correctly at this point.
	 *
	 * We have exclusion against page_add_anon_rmap because the caller
	 * always holds the page locked, except if called from page_dup_rmap,
	 * in which case the page is already known to be setup.
	 *
	 * We have exclusion against page_add_new_anon_rmap because those pages
	 * are initially only visible via the pagetables, and the pte is locked
	 * over the call to page_add_new_anon_rmap.
	 */
	struct anon_vma *anon_vma = vma->anon_vma;
	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	BUG_ON(page->mapping != (struct address_space *)anon_vma);
	BUG_ON(page->index != linear_page_index(vma, address));
#endif
}

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * The caller needs to hold the pte lock and the page must be locked.
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	if (atomic_inc_and_test(&page->_mapcount))
		__page_set_anon_rmap(page, vma, address);
	else {
		__page_check_anon_rmap(page, vma, address);
		/*
		 * We unconditionally charged during prepare; we uncharge
		 * here to balance the reference counts.
		 */
		mem_cgroup_uncharge_page(page);
	}
}

/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 * Page does not have to be locked.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	atomic_set(&page->_mapcount, 0);	/* elevate count by 1 (starts at -1) */
	__page_set_anon_rmap(page, vma, address);
}

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page)
{
	if (atomic_inc_and_test(&page->_mapcount))
		__inc_zone_page_state(page, NR_FILE_MAPPED);
	else
		/*
		 * We unconditionally charged during prepare; we uncharge
		 * here to balance the reference counts.
		 */
		mem_cgroup_uncharge_page(page);
}

#ifdef CONFIG_DEBUG_VM
/**
 * page_dup_rmap - duplicate pte mapping to a page
 * @page: the page to add the mapping to
 * @vma: the vm area being duplicated
 * @address: the user virtual address mapped
 *
 * For copy_page_range only: minimal extract from page_add_file_rmap /
 * page_add_anon_rmap, avoiding unnecessary tests (already checked) so it's
 * quicker.
 *
 * The caller needs to hold the pte lock.
 */
void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
{
	BUG_ON(page_mapcount(page) == 0);
	if (PageAnon(page))
		__page_check_anon_rmap(page, vma, address);
	atomic_inc(&page->_mapcount);
}
#endif

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 * @vma: the vm area in which the mapping is removed
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
{
	if (atomic_add_negative(-1, &page->_mapcount)) {
		if (unlikely(page_mapcount(page) < 0)) {
			printk(KERN_EMERG "Eeek! page_mapcount(page) went negative! (%d)\n", page_mapcount(page));
			printk(KERN_EMERG "  page pfn = %lx\n", page_to_pfn(page));
			printk(KERN_EMERG "  page->flags = %lx\n", page->flags);
			printk(KERN_EMERG "  page->count = %x\n", page_count(page));
			printk(KERN_EMERG "  page->mapping = %p\n", page->mapping);
			print_symbol(KERN_EMERG "  vma->vm_ops = %s\n", (unsigned long)vma->vm_ops);
			if (vma->vm_ops) {
				print_symbol(KERN_EMERG "  vma->vm_ops->fault = %s\n", (unsigned long)vma->vm_ops->fault);
			}
			if (vma->vm_file && vma->vm_file->f_op)
				print_symbol(KERN_EMERG "  vma->vm_file->f_op->mmap = %s\n", (unsigned long)vma->vm_file->f_op->mmap);
			BUG();
		}

		/*
		 * It would be tidy to reset the PageAnon mapping here,
		 * but that might overwrite a racing page_add_anon_rmap
		 * which increments mapcount after us but sets mapping
		 * before us: so leave the reset to free_hot_cold_page,
		 * and remember that it's only reliable while mapped.
		 * Leaving it set also helps swapoff to reinstate ptes
		 * faster for those pages still in swapcache.
		 */
		if (page_test_dirty(page)) {
			page_clear_dirty(page);
			set_page_dirty(page);
		}
		mem_cgroup_uncharge_page(page);
		__dec_zone_page_state(page,
			PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
	}
}

/*
 * Subfunctions of try_to_unmap: try_to_unmap_one called
 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
 */
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
				int migration)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	int ret = SWAP_AGAIN;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		goto out;

	/*
	 * If the page is mlock()d, we cannot swap it out.
	 * If it's recently referenced (perhaps page_referenced
	 * skipped over this mm) then we should reactivate it.
	 */
	if (!migration && ((vma->vm_flags & VM_LOCKED) ||
			(ptep_clear_flush_young(vma, address, pte)))) {
		ret = SWAP_FAIL;
		goto out_unmap;
	}

	/* Nuke the page table entry. */
	flush_cache_page(vma, address, page_to_pfn(page));
	pteval = ptep_clear_flush(vma, address, pte);

	/* Move the dirty bit to the physical page now the pte is gone. */
	if (pte_dirty(pteval))
		set_page_dirty(page);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	if (PageAnon(page)) {
		swp_entry_t entry = { .val = page_private(page) };

		if (PageSwapCache(page)) {
			/*
			 * Store the swap location in the pte.
			 * See handle_pte_fault() ...
			 */
			swap_duplicate(entry);
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
			dec_mm_counter(mm, anon_rss);
#ifdef CONFIG_MIGRATION
		} else {
			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			BUG_ON(!migration);
			entry = make_migration_entry(page, pte_write(pteval));
#endif
		}
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
		BUG_ON(pte_file(*pte));
	} else
#ifdef CONFIG_MIGRATION
	if (migration) {
		/* Establish migration entry for a file page */
		swp_entry_t entry;
		entry = make_migration_entry(page, pte_write(pteval));
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
	} else
#endif
		dec_mm_counter(mm, file_rss);

	page_remove_rmap(page, vma);
	page_cache_release(page);

out_unmap:
	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}

/*
 * objrmap doesn't work for nonlinear VMAs because the assumption that
 * offset-into-file correlates with offset-into-virtual-addresses does not hold.
 * Consequently, given a particular page and its ->index, we cannot locate the
 * ptes which are mapping that page without an exhaustive linear search.
 *
 * So what this code does is a mini "virtual scan" of each nonlinear VMA which
 * maps the file to which the target page belongs. The ->vm_private_data field
 * holds the current cursor into that scan. Successive searches will circulate
 * around the vma's virtual address space.
 *
 * So as more replacement pressure is applied to the pages in a nonlinear VMA,
 * more scanning pressure is placed against them as well. Eventually pages
 * will become fully unmapped and are eligible for eviction.
 *
 * For very sparsely populated VMAs this is a little inefficient - chances are
 * there won't be many ptes located within the scan cluster. In this case
 * maybe we could scan further - to the end of the pte page, perhaps.
 */
#define CLUSTER_SIZE	min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK	(~(CLUSTER_SIZE - 1))

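/*
 * Worked example (illustrative, not part of the original file): with 4K
 * pages and 2M PMDs, CLUSTER_SIZE is min(128K, 2M) = 128K, so each call to
 * try_to_unmap_cluster() scans up to 32 ptes. CLUSTER_MASK rounds the scan
 * start down to a 128K boundary: a start of 0x21000 yields a scan window
 * of [0x20000, 0x40000), clamped to the vma's bounds.
 */
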
static void try_to_unmap_cluster(unsigned long cursor,
	unsigned int *mapcount, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;
	unsigned long address;
	unsigned long end;

	address = (vma->vm_start + cursor) & CLUSTER_MASK;
	end = address + CLUSTER_SIZE;
	if (address < vma->vm_start)
		address = vma->vm_start;
	if (end > vma->vm_end)
		end = vma->vm_end;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return;

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	for (; address < end; pte++, address += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, address, *pte);
		BUG_ON(!page || PageAnon(page));

		if (ptep_clear_flush_young(vma, address, pte))
			continue;

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pte_pfn(*pte));
		pteval = ptep_clear_flush(vma, address, pte);

		/* If nonlinear, store the file page offset in the pte. */
		if (page->index != linear_page_index(vma, address))
			set_pte_at(mm, address, pte, pgoff_to_pte(page->index));

		/* Move the dirty bit to the physical page now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		page_remove_rmap(page, vma);
		page_cache_release(page);
		dec_mm_counter(mm, file_rss);
		(*mapcount)--;
	}
	pte_unmap_unlock(pte - 1, ptl);
}

static int try_to_unmap_anon(struct page *page, int migration)
{
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	int ret = SWAP_AGAIN;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return ret;

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		ret = try_to_unmap_one(page, vma, migration);
		if (ret == SWAP_FAIL || !page_mapped(page))
			break;
	}

	page_unlock_anon_vma(anon_vma);
	return ret;
}

/**
 * try_to_unmap_file - unmap file page using the object-based rmap method
 * @page: the page to unmap
 * @migration: migration flag
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * This function is only called from try_to_unmap for object-based pages.
 */
static int try_to_unmap_file(struct page *page, int migration)
{
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = SWAP_AGAIN;
	unsigned long cursor;
	unsigned long max_nl_cursor = 0;
	unsigned long max_nl_size = 0;
	unsigned int mapcount;

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		ret = try_to_unmap_one(page, vma, migration);
		if (ret == SWAP_FAIL || !page_mapped(page))
			goto out;
	}

	if (list_empty(&mapping->i_mmap_nonlinear))
		goto out;

	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
		if ((vma->vm_flags & VM_LOCKED) && !migration)
			continue;
		cursor = (unsigned long) vma->vm_private_data;
		if (cursor > max_nl_cursor)
			max_nl_cursor = cursor;
		cursor = vma->vm_end - vma->vm_start;
		if (cursor > max_nl_size)
			max_nl_size = cursor;
	}

	if (max_nl_size == 0) {	/* all nonlinears locked or reserved ? */
		ret = SWAP_FAIL;
		goto out;
	}

	/*
	 * We don't try to search for this page in the nonlinear vmas,
	 * and page_referenced wouldn't have found it anyway. Instead
	 * just walk the nonlinear vmas trying to age and unmap some.
	 * The mapcount of the page we came in with is irrelevant,
	 * but even so use it as a guide to how hard we should try?
	 */
	mapcount = page_mapcount(page);
	if (!mapcount)
		goto out;
	cond_resched_lock(&mapping->i_mmap_lock);

	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
	if (max_nl_cursor == 0)
		max_nl_cursor = CLUSTER_SIZE;

	do {
		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
			if ((vma->vm_flags & VM_LOCKED) && !migration)
				continue;
			cursor = (unsigned long) vma->vm_private_data;
			while (cursor < max_nl_cursor &&
				cursor < vma->vm_end - vma->vm_start) {
				try_to_unmap_cluster(cursor, &mapcount, vma);
				cursor += CLUSTER_SIZE;
				vma->vm_private_data = (void *) cursor;
				if ((int)mapcount <= 0)
					goto out;
			}
			vma->vm_private_data = (void *) max_nl_cursor;
		}
		cond_resched_lock(&mapping->i_mmap_lock);
		max_nl_cursor += CLUSTER_SIZE;
	} while (max_nl_cursor <= max_nl_size);

	/*
	 * Don't loop forever (perhaps all the remaining pages are
	 * in locked vmas). Reset cursor on all unreserved nonlinear
	 * vmas, now forgetting on which ones it had fallen behind.
	 */
	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
		vma->vm_private_data = NULL;
out:
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}

/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 * @migration: migration flag
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path. Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS	- we succeeded in removing all mappings
 * SWAP_AGAIN	- we missed a mapping, try again later
 * SWAP_FAIL	- the page is unswappable
 */
int try_to_unmap(struct page *page, int migration)
{
	int ret;

	BUG_ON(!PageLocked(page));

	if (PageAnon(page))
		ret = try_to_unmap_anon(page, migration);
	else
		ret = try_to_unmap_file(page, migration);

	if (!page_mapped(page))
		ret = SWAP_SUCCESS;
	return ret;
}

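/*
 * Illustrative sketch (not part of the original file): the pageout path in
 * mm/vmscan.c dispatches on the return value, roughly:
 *
 *	switch (try_to_unmap(page, 0)) {
 *	case SWAP_FAIL:
 *		goto activate_locked;
 *	case SWAP_AGAIN:
 *		goto keep_locked;
 *	case SWAP_SUCCESS:
 *		;	// fall through: page is unmapped, try to free it
 *	}
 *
 * (Names follow shrink_page_list; details vary by kernel version.)
 */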