@@ -3103,6 +3103,43 @@ int in_gate_area_no_task(unsigned long addr)
 
 #endif /* __HAVE_ARCH_GATE_AREA */
 
+static int follow_pte(struct mm_struct *mm, unsigned long address,
+		pte_t **ptepp, spinlock_t **ptlp)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *ptep;
+
+	pgd = pgd_offset(mm, address);
+	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
+		goto out;
+
+	pud = pud_offset(pgd, address);
+	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
+		goto out;
+
+	pmd = pmd_offset(pud, address);
+	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
+		goto out;
+
+	/* We cannot handle huge page PFN maps. Luckily they don't exist. */
+	if (pmd_huge(*pmd))
+		goto out;
+
+	ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
+	if (!ptep)
+		goto out;
+	if (!pte_present(*ptep))
+		goto unlock;
+	*ptepp = ptep;
+	return 0;
+unlock:
+	pte_unmap_unlock(ptep, *ptlp);
+out:
+	return -EINVAL;
+}
+
 #ifdef CONFIG_HAVE_IOREMAP_PROT
 int follow_phys(struct vm_area_struct *vma,
 		unsigned long address, unsigned int flags,
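
For context (not part of the hunk above): a minimal sketch of how a caller might consume the locked PTE that follow_pte() hands back. The helper name sketch_lookup_phys and its exact checks are hypothetical and assume code living inside mm/memory.c (follow_pte() is static there); the pattern is simply: call follow_pte(), read the PTE while the lock is held, then release it with pte_unmap_unlock().

	/*
	 * Illustrative sketch only -- a hypothetical caller inside
	 * mm/memory.c that resolves the physical address backing a user
	 * virtual address in a raw PFN mapping.
	 */
	static int sketch_lookup_phys(struct vm_area_struct *vma,
			unsigned long address, unsigned long *phys)
	{
		spinlock_t *ptl;
		pte_t *ptep;
		int ret;

		/* Only VM_IO/VM_PFNMAP mappings make sense here. */
		if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
			return -EINVAL;

		/* On success, follow_pte() returns with the PTE mapped and its lock held. */
		ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
		if (ret)
			return ret;

		*phys = (unsigned long)pte_pfn(*ptep) << PAGE_SHIFT;

		/* Drop the mapping and the PTE lock taken by follow_pte(). */
		pte_unmap_unlock(ptep, ptl);
		return 0;
	}

The key design point the sketch illustrates is the locking contract: a caller that gets 0 back owns the PTE lock and the kmap of the page table, and must balance follow_pte() with pte_unmap_unlock() after it has finished inspecting *ptep.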