@@ -874,11 +874,17 @@ unsigned int count_swap_pages(int type, int free)
 static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 		unsigned long addr, swp_entry_t entry, struct page *page)
 {
+	struct page *swapcache;
 	struct mem_cgroup *memcg;
 	spinlock_t *ptl;
 	pte_t *pte;
 	int ret = 1;
 
+	swapcache = page;
+	page = ksm_might_need_to_copy(page, vma, addr);
+	if (unlikely(!page))
+		return -ENOMEM;
+
 	if (mem_cgroup_try_charge_swapin(vma->vm_mm, page,
 					 GFP_KERNEL, &memcg)) {
 		ret = -ENOMEM;
@@ -897,7 +903,10 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 	get_page(page);
 	set_pte_at(vma->vm_mm, addr, pte,
 		   pte_mkold(mk_pte(page, vma->vm_page_prot)));
-	page_add_anon_rmap(page, vma, addr);
+	if (page == swapcache)
+		page_add_anon_rmap(page, vma, addr);
+	else /* ksm created a completely new copy */
+		page_add_new_anon_rmap(page, vma, addr);
 	mem_cgroup_commit_charge_swapin(page, memcg);
 	swap_free(entry);
 	/*
@@ -908,6 +917,10 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 out:
 	pte_unmap_unlock(pte, ptl);
 out_nolock:
+	if (page != swapcache) {
+		unlock_page(page);
+		put_page(page);
+	}
 	return ret;
 }
 