@@ -37,7 +37,7 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
 	pgd_t *pgd;
 	pmd_t *pmd;
 	pte_t *pte, entry;
-	int ret = 0;
+	int ret;
 
 	pgd = pgd_offset(vma->vm_mm, address);
 	if (pgd_none(*pgd))
@@ -54,16 +54,20 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
 
 	pte = pte_offset_map(pmd, address);
 	entry = *pte;
 
+	/*
+	 * If this page is present, it's actually being shared.
+	 */
+	ret = pte_present(entry);
+
 	/*
 	 * If this page isn't present, or is already setup to
 	 * fault (ie, is old), we can safely ignore any issues.
 	 */
-	if (pte_present(entry) && pte_val(entry) & shared_pte_mask) {
+	if (ret && pte_val(entry) & shared_pte_mask) {
 		flush_cache_page(vma, address, pte_pfn(entry));
 		pte_val(entry) &= ~shared_pte_mask;
 		set_pte_at(vma->vm_mm, address, pte, entry);
 		flush_tlb_page(vma, address);
-		ret = 1;
 	}
 	pte_unmap(pte);
 	return ret;
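
For reference, here is a sketch of how adjust_pte() reads with the patch
applied. It is reconstructed purely from the hunk context above; the
pgd/pmd validity checks and error paths that fall between the two hunks
are not part of this diff, so they are only marked with placeholder
comments rather than reproduced.

static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte, entry;
	int ret;

	pgd = pgd_offset(vma->vm_mm, address);
	if (pgd_none(*pgd))
		/* ... error path elided by the diff ... */;

	/* ... pgd/pmd validity checks and pmd lookup, outside the hunks ... */

	pte = pte_offset_map(pmd, address);
	entry = *pte;

	/*
	 * If this page is present, it's actually being shared.
	 */
	ret = pte_present(entry);

	/*
	 * If this page isn't present, or is already setup to
	 * fault (ie, is old), we can safely ignore any issues.
	 */
	if (ret && pte_val(entry) & shared_pte_mask) {
		flush_cache_page(vma, address, pte_pfn(entry));
		pte_val(entry) &= ~shared_pte_mask;
		set_pte_at(vma->vm_mm, address, pte, entry);
		flush_tlb_page(vma, address);
	}
	pte_unmap(pte);
	return ret;
}

Note the change in the return contract: adjust_pte() previously
returned 1 only when it actually rewrote a shared PTE, whereas with
pte_present() hoisted into ret it now returns 1 whenever the PTE is
present, whether or not shared_pte_mask was set.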