@@ -1593,7 +1593,7 @@ static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 	/* Ok, finally just insert the thing.. */
 	entry = pte_mkspecial(pfn_pte(pfn, prot));
 	set_pte_at(mm, addr, pte, entry);
-	update_mmu_cache(vma, addr, entry); /* XXX: why not for insert_page? */
+	update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
 
 	retval = 0;
 out_unlock:
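Every hunk here follows the same pattern: the third argument to update_mmu_cache() changes from the pte_t value that was just written (entry, or pte in do_swap_page) to the pte_t pointer it was written through (pte or page_table, whichever name the function uses locally). For context, a minimal sketch of the interface change these call sites track, assuming the common no-op case -- the exact per-architecture definitions live under arch/*/include/asm and vary:

/*
 * Sketch only, not part of this patch: an architecture with nothing to
 * do simply stubs the hook out under the new prototype. The point of
 * taking ptep instead of a pte_t value is that the hook can now re-read
 * (or fix up) the entry that was just set.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long address, pte_t *ptep)
{
}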
@@ -2116,7 +2116,7 @@ reuse:
 		entry = pte_mkyoung(orig_pte);
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 		if (ptep_set_access_flags(vma, address, page_table, entry,1))
-			update_mmu_cache(vma, address, entry);
+			update_mmu_cache(vma, address, page_table);
 		ret |= VM_FAULT_WRITE;
 		goto unlock;
 	}
@@ -2185,7 +2185,7 @@ gotten:
 		 * new page to be mapped directly into the secondary page table.
 		 */
 		set_pte_at_notify(mm, address, page_table, entry);
-		update_mmu_cache(vma, address, entry);
+		update_mmu_cache(vma, address, page_table);
 		if (old_page) {
 			/*
 			 * Only after switching the pte to the new page may
@@ -2629,7 +2629,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	}
 
 	/* No need to invalidate - it was non-present before */
-	update_mmu_cache(vma, address, pte);
+	update_mmu_cache(vma, address, page_table);
 unlock:
 	pte_unmap_unlock(page_table, ptl);
 out:
@@ -2694,7 +2694,7 @@ setpte:
 	set_pte_at(mm, address, page_table, entry);
 
 	/* No need to invalidate - it was non-present before */
-	update_mmu_cache(vma, address, entry);
+	update_mmu_cache(vma, address, page_table);
 unlock:
 	pte_unmap_unlock(page_table, ptl);
 	return 0;
@@ -2855,7 +2855,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		set_pte_at(mm, address, page_table, entry);
 
 		/* no need to invalidate: a not-present page won't be cached */
-		update_mmu_cache(vma, address, entry);
+		update_mmu_cache(vma, address, page_table);
 	} else {
 		if (charged)
 			mem_cgroup_uncharge_page(page);
@@ -2992,7 +2992,7 @@ static inline int handle_pte_fault(struct mm_struct *mm,
 	}
 	entry = pte_mkyoung(entry);
 	if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) {
-		update_mmu_cache(vma, address, entry);
+		update_mmu_cache(vma, address, pte);
 	} else {
 		/*
 		 * This is needed only for protection faults but the arch code
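Why a pointer rather than the value: with only the pte_t value, the hook could inspect the new entry but had no way to reach the page-table slot it came from. Passing the pte_t * lets architecture code both recover the value and, where cache coherency demands it (the motivating case was reportedly VIVT ARM), rewrite the entry in place. A hedged sketch of what a hook can now do, with the arch-specific work left as a placeholder:

/*
 * Sketch (an assumption, not from this patch): recover the value the
 * old prototype passed, and keep ptep around for any in-place fix-up.
 */
void update_mmu_cache(struct vm_area_struct *vma,
		      unsigned long address, pte_t *ptep)
{
	pte_t entry = *ptep;	/* what the old prototype received */

	if (!pte_present(entry))
		return;
	/* arch-specific TLB preload or cache fix-up would go here,
	 * rewriting *ptep through the usual pte accessors if needed */
}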