|
@@ -433,7 +433,7 @@ static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
|
|
|
{
|
|
|
struct mm_struct *mm = vma->vm_mm;
|
|
|
unsigned long address;
|
|
|
- pte_t *pte, entry;
|
|
|
+ pte_t *pte;
|
|
|
spinlock_t *ptl;
|
|
|
int ret = 0;
|
|
|
|
|
@@ -445,17 +445,18 @@ static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
|
|
|
if (!pte)
|
|
|
goto out;
|
|
|
|
|
|
- if (!pte_dirty(*pte) && !pte_write(*pte))
|
|
|
- goto unlock;
|
|
|
+ if (pte_dirty(*pte) || pte_write(*pte)) {
|
|
|
+ pte_t entry;
|
|
|
|
|
|
- entry = ptep_get_and_clear(mm, address, pte);
|
|
|
- entry = pte_mkclean(entry);
|
|
|
- entry = pte_wrprotect(entry);
|
|
|
- ptep_establish(vma, address, pte, entry);
|
|
|
- lazy_mmu_prot_update(entry);
|
|
|
- ret = 1;
|
|
|
+ flush_cache_page(vma, address, pte_pfn(*pte));
|
|
|
+ entry = ptep_clear_flush(vma, address, pte);
|
|
|
+ entry = pte_wrprotect(entry);
|
|
|
+ entry = pte_mkclean(entry);
|
|
|
+ set_pte_at(mm, address, pte, entry);
|
|
|
+ lazy_mmu_prot_update(entry);
|
|
|
+ ret = 1;
|
|
|
+ }
|
|
|
|
|
|
-unlock:
|
|
|
pte_unmap_unlock(pte, ptl);
|
|
|
out:
|
|
|
return ret;
|
|
@@ -490,6 +491,8 @@ int page_mkclean(struct page *page)
|
|
|
if (mapping)
|
|
|
ret = page_mkclean_file(mapping, page);
|
|
|
}
|
|
|
+ if (page_test_and_clear_dirty(page))
|
|
|
+ ret = 1;
|
|
|
|
|
|
return ret;
|
|
|
}
|