@@ -224,10 +224,14 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 /*
  * Check that @page is mapped at @address into @mm.
  *
+ * If @sync is false, page_check_address may perform a racy check to avoid
+ * the page table lock when the pte is not present (helpful when reclaiming
+ * highly shared pages).
+ *
  * On success returns with pte mapped and locked.
  */
 pte_t *page_check_address(struct page *page, struct mm_struct *mm,
-			  unsigned long address, spinlock_t **ptlp)
+			  unsigned long address, spinlock_t **ptlp, int sync)
 {
 	pgd_t *pgd;
 	pud_t *pud;
@@ -249,7 +253,7 @@ pte_t *page_check_address(struct page *page, struct mm_struct *mm,
 
 	pte = pte_offset_map(pmd, address);
 	/* Make a quick check before getting the lock */
-	if (!pte_present(*pte)) {
+	if (!sync && !pte_present(*pte)) {
 		pte_unmap(pte);
 		return NULL;
 	}
@@ -281,7 +285,7 @@ static int page_referenced_one(struct page *page,
 	if (address == -EFAULT)
 		goto out;
 
-	pte = page_check_address(page, mm, address, &ptl);
+	pte = page_check_address(page, mm, address, &ptl, 0);
 	if (!pte)
 		goto out;
 
@@ -450,7 +454,7 @@ static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
 	if (address == -EFAULT)
 		goto out;
 
-	pte = page_check_address(page, mm, address, &ptl);
+	pte = page_check_address(page, mm, address, &ptl, 1);
 	if (!pte)
 		goto out;
 
@@ -704,7 +708,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	if (address == -EFAULT)
 		goto out;
 
-	pte = page_check_address(page, mm, address, &ptl);
+	pte = page_check_address(page, mm, address, &ptl, 0);
 	if (!pte)
 		goto out;
 