@@ -27,6 +27,7 @@
 #include <linux/mutex.h>
 #include <linux/capability.h>
 #include <linux/syscalls.h>
+#include <linux/memcontrol.h>
 
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
@@ -506,9 +507,12 @@ unsigned int count_swap_pages(int type, int free)
  * just let do_wp_page work it out if a write is requested later - to
  * force COW, vm_page_prot omits write permission from any private vma.
  */
-static void unuse_pte(struct vm_area_struct *vma, pte_t *pte,
+static int unuse_pte(struct vm_area_struct *vma, pte_t *pte,
 		unsigned long addr, swp_entry_t entry, struct page *page)
 {
+	if (mem_cgroup_charge(page, vma->vm_mm))
+		return -ENOMEM;
+
 	inc_mm_counter(vma->vm_mm, anon_rss);
 	get_page(page);
 	set_pte_at(vma->vm_mm, addr, pte,
@@ -520,6 +524,7 @@ static void unuse_pte(struct vm_area_struct *vma, pte_t *pte,
 	 * immediately swapped out again after swapon.
 	 */
 	activate_page(page);
+	return 1;
 }
 
 static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
@@ -529,7 +534,7 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 	pte_t swp_pte = swp_entry_to_pte(entry);
 	pte_t *pte;
 	spinlock_t *ptl;
-	int found = 0;
+	int ret = 0;
 
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	do {
@@ -538,13 +543,12 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 		 * Test inline before going to call unuse_pte.
 		 */
 		if (unlikely(pte_same(*pte, swp_pte))) {
-			unuse_pte(vma, pte++, addr, entry, page);
-			found = 1;
+			ret = unuse_pte(vma, pte++, addr, entry, page);
 			break;
 		}
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 	pte_unmap_unlock(pte - 1, ptl);
-	return found;
+	return ret;
 }
 
 static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
@@ -553,14 +557,16 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 {
 	pmd_t *pmd;
 	unsigned long next;
+	int ret;
 
 	pmd = pmd_offset(pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
 		if (pmd_none_or_clear_bad(pmd))
 			continue;
-		if (unuse_pte_range(vma, pmd, addr, next, entry, page))
-			return 1;
+		ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
+		if (ret)
+			return ret;
 	} while (pmd++, addr = next, addr != end);
 	return 0;
 }
@@ -571,14 +577,16 @@ static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
 {
 	pud_t *pud;
 	unsigned long next;
+	int ret;
 
 	pud = pud_offset(pgd, addr);
 	do {
 		next = pud_addr_end(addr, end);
 		if (pud_none_or_clear_bad(pud))
 			continue;
-		if (unuse_pmd_range(vma, pud, addr, next, entry, page))
-			return 1;
+		ret = unuse_pmd_range(vma, pud, addr, next, entry, page);
+		if (ret)
+			return ret;
 	} while (pud++, addr = next, addr != end);
 	return 0;
 }
@@ -588,6 +596,7 @@ static int unuse_vma(struct vm_area_struct *vma,
 {
 	pgd_t *pgd;
 	unsigned long addr, end, next;
+	int ret;
 
 	if (page->mapping) {
 		addr = page_address_in_vma(page, vma);
@@ -605,8 +614,9 @@ static int unuse_vma(struct vm_area_struct *vma,
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd))
 			continue;
-		if (unuse_pud_range(vma, pgd, addr, next, entry, page))
-			return 1;
+		ret = unuse_pud_range(vma, pgd, addr, next, entry, page);
+		if (ret)
+			return ret;
 	} while (pgd++, addr = next, addr != end);
 	return 0;
 }
@@ -615,6 +625,7 @@ static int unuse_mm(struct mm_struct *mm,
 				swp_entry_t entry, struct page *page)
 {
 	struct vm_area_struct *vma;
+	int ret = 0;
 
 	if (!down_read_trylock(&mm->mmap_sem)) {
 		/*
@@ -627,15 +638,11 @@ static int unuse_mm(struct mm_struct *mm,
 		lock_page(page);
 	}
 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
-		if (vma->anon_vma && unuse_vma(vma, entry, page))
+		if (vma->anon_vma && (ret = unuse_vma(vma, entry, page)))
 			break;
 	}
 	up_read(&mm->mmap_sem);
-	/*
-	 * Currently unuse_mm cannot fail, but leave error handling
-	 * at call sites for now, since we change it from time to time.
-	 */
-	return 0;
+	return (ret < 0)? ret: 0;
 }
 
 /*
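
For reference, a minimal userspace sketch (hypothetical names, not kernel code) of the tri-state return convention this patch threads through the page-table walkers: 0 means the swap entry was not found and the walk continues, 1 means it was found and handled, and a negative errno (here -ENOMEM, as when mem_cgroup_charge() fails in unuse_pte()) aborts the walk and is passed up unchanged.

#include <errno.h>
#include <stdio.h>

/* Stands in for unuse_pte(): 0 = not our entry, 1 = found and
 * handled, -ENOMEM = the charge failed. */
static int walk_leaf(int slot, int target, int fail_charge)
{
	if (slot != target)
		return 0;
	if (fail_charge)
		return -ENOMEM;
	return 1;
}

/* Stands in for unuse_pte_range()/unuse_pmd_range()/unuse_pud_range():
 * any nonzero result, success or error, stops the walk and is
 * returned unchanged; 0 keeps scanning. */
static int walk_range(int start, int end, int target, int fail_charge)
{
	int slot;
	int ret;

	for (slot = start; slot < end; slot++) {
		ret = walk_leaf(slot, target, fail_charge);
		if (ret)
			return ret;
	}
	return 0;
}

int main(void)
{
	printf("found:  %d\n", walk_range(0, 8, 3, 0));	/* prints 1 */
	printf("missed: %d\n", walk_range(0, 8, 9, 0));	/* prints 0 */
	printf("error:  %d\n", walk_range(0, 8, 3, 1));	/* prints -ENOMEM */
	return 0;
}

The top-level caller then behaves like unuse_mm() above: a positive "found" result folds back to 0 (success) while a negative value propagates as the error, which is what "return (ret < 0)? ret: 0;" does.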