|
@@ -1552,23 +1552,33 @@ int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
+/*
|
|
|
+ * This function checks whether a given @page is mapped at @address
|
|
|
+ * in the virtual address space of @mm.
|
|
|
+ *
|
|
|
+ * When it's true, this function returns *pmd while holding the page table lock
|
|
|
+ * and passes it back to the caller via @ptl.
|
|
|
+ * If it's false, returns NULL without holding the page table lock.
|
|
|
+ */
|
|
|
pmd_t *page_check_address_pmd(struct page *page,
|
|
|
struct mm_struct *mm,
|
|
|
unsigned long address,
|
|
|
- enum page_check_address_pmd_flag flag)
|
|
|
+ enum page_check_address_pmd_flag flag,
|
|
|
+ spinlock_t **ptl)
|
|
|
{
|
|
|
- pmd_t *pmd, *ret = NULL;
|
|
|
+ pmd_t *pmd;
|
|
|
|
|
|
if (address & ~HPAGE_PMD_MASK)
|
|
|
- goto out;
|
|
|
+ return NULL;
|
|
|
|
|
|
pmd = mm_find_pmd(mm, address);
|
|
|
if (!pmd)
|
|
|
- goto out;
|
|
|
+ return NULL;
|
|
|
+ *ptl = pmd_lock(mm, pmd);
|
|
|
if (pmd_none(*pmd))
|
|
|
- goto out;
|
|
|
+ goto unlock;
|
|
|
if (pmd_page(*pmd) != page)
|
|
|
- goto out;
|
|
|
+ goto unlock;
|
|
|
/*
|
|
|
* split_vma() may create temporary aliased mappings. There is
|
|
|
* no risk as long as all huge pmd are found and have their
|
|
@@ -1578,14 +1588,15 @@ pmd_t *page_check_address_pmd(struct page *page,
|
|
|
*/
|
|
|
if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG &&
|
|
|
pmd_trans_splitting(*pmd))
|
|
|
- goto out;
|
|
|
+ goto unlock;
|
|
|
if (pmd_trans_huge(*pmd)) {
|
|
|
VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG &&
|
|
|
!pmd_trans_splitting(*pmd));
|
|
|
- ret = pmd;
|
|
|
+ return pmd;
|
|
|
}
|
|
|
-out:
|
|
|
- return ret;
|
|
|
+unlock:
|
|
|
+ spin_unlock(*ptl);
|
|
|
+ return NULL;
|
|
|
}
|
|
|
|
|
|
static int __split_huge_page_splitting(struct page *page,
|
|
@@ -1593,6 +1604,7 @@ static int __split_huge_page_splitting(struct page *page,
|
|
|
unsigned long address)
|
|
|
{
|
|
|
struct mm_struct *mm = vma->vm_mm;
|
|
|
+ spinlock_t *ptl;
|
|
|
pmd_t *pmd;
|
|
|
int ret = 0;
|
|
|
/* For mmu_notifiers */
|
|
@@ -1600,9 +1612,8 @@ static int __split_huge_page_splitting(struct page *page,
|
|
|
const unsigned long mmun_end = address + HPAGE_PMD_SIZE;
|
|
|
|
|
|
mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
|
|
|
- spin_lock(&mm->page_table_lock);
|
|
|
pmd = page_check_address_pmd(page, mm, address,
|
|
|
- PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG);
|
|
|
+ PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG, &ptl);
|
|
|
if (pmd) {
|
|
|
/*
|
|
|
* We can't temporarily set the pmd to null in order
|
|
@@ -1613,8 +1624,8 @@ static int __split_huge_page_splitting(struct page *page,
|
|
|
*/
|
|
|
pmdp_splitting_flush(vma, address, pmd);
|
|
|
ret = 1;
|
|
|
+ spin_unlock(ptl);
|
|
|
}
|
|
|
- spin_unlock(&mm->page_table_lock);
|
|
|
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
|
|
|
|
|
|
return ret;
|
|
@@ -1745,14 +1756,14 @@ static int __split_huge_page_map(struct page *page,
|
|
|
unsigned long address)
|
|
|
{
|
|
|
struct mm_struct *mm = vma->vm_mm;
|
|
|
+ spinlock_t *ptl;
|
|
|
pmd_t *pmd, _pmd;
|
|
|
int ret = 0, i;
|
|
|
pgtable_t pgtable;
|
|
|
unsigned long haddr;
|
|
|
|
|
|
- spin_lock(&mm->page_table_lock);
|
|
|
pmd = page_check_address_pmd(page, mm, address,
|
|
|
- PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG);
|
|
|
+ PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG, &ptl);
|
|
|
if (pmd) {
|
|
|
pgtable = pgtable_trans_huge_withdraw(mm, pmd);
|
|
|
pmd_populate(mm, &_pmd, pgtable);
|
|
@@ -1807,8 +1818,8 @@ static int __split_huge_page_map(struct page *page,
|
|
|
pmdp_invalidate(vma, address, pmd);
|
|
|
pmd_populate(mm, pmd, pgtable);
|
|
|
ret = 1;
|
|
|
+ spin_unlock(ptl);
|
|
|
}
|
|
|
- spin_unlock(&mm->page_table_lock);
|
|
|
|
|
|
return ret;
|
|
|
}
|