@@ -1448,6 +1448,100 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 }
 EXPORT_SYMBOL(remap_pfn_range);
 
+static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
+				     unsigned long addr, unsigned long end,
+				     pte_fn_t fn, void *data)
+{
+	pte_t *pte;
+	int err;
+	struct page *pmd_page;
+	spinlock_t *ptl = ptl;		/* Suppress gcc warning */
+
+	pte = (mm == &init_mm) ?
+		pte_alloc_kernel(pmd, addr) :
+		pte_alloc_map_lock(mm, pmd, addr, &ptl);
+	if (!pte)
+		return -ENOMEM;
+
+	BUG_ON(pmd_huge(*pmd));
+
+	pmd_page = pmd_page(*pmd);
+
+	do {
+		err = fn(pte, pmd_page, addr, data);
+		if (err)
+			break;
+	} while (pte++, addr += PAGE_SIZE, addr != end);
+
+	if (mm != &init_mm)
+		pte_unmap_unlock(pte-1, ptl);
+	return err;
+}
+
+static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
+				     unsigned long addr, unsigned long end,
+				     pte_fn_t fn, void *data)
+{
+	pmd_t *pmd;
+	unsigned long next;
+	int err;
+
+	pmd = pmd_alloc(mm, pud, addr);
+	if (!pmd)
+		return -ENOMEM;
+	do {
+		next = pmd_addr_end(addr, end);
+		err = apply_to_pte_range(mm, pmd, addr, next, fn, data);
+		if (err)
+			break;
+	} while (pmd++, addr = next, addr != end);
+	return err;
+}
+
+static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
+				     unsigned long addr, unsigned long end,
+				     pte_fn_t fn, void *data)
+{
+	pud_t *pud;
+	unsigned long next;
+	int err;
+
+	pud = pud_alloc(mm, pgd, addr);
+	if (!pud)
+		return -ENOMEM;
+	do {
+		next = pud_addr_end(addr, end);
+		err = apply_to_pmd_range(mm, pud, addr, next, fn, data);
+		if (err)
+			break;
+	} while (pud++, addr = next, addr != end);
+	return err;
+}
+
+/*
+ * Scan a region of virtual memory, filling in page tables as necessary
+ * and calling a provided function on each leaf page table.
+ */
+int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
+			unsigned long size, pte_fn_t fn, void *data)
+{
+	pgd_t *pgd;
+	unsigned long next;
+	unsigned long end = addr + size;
+	int err;
+
+	BUG_ON(addr >= end);
+	pgd = pgd_offset(mm, addr);
+	do {
+		next = pgd_addr_end(addr, end);
+		err = apply_to_pud_range(mm, pgd, addr, next, fn, data);
+		if (err)
+			break;
+	} while (pgd++, addr = next, addr != end);
+	return err;
+}
+EXPORT_SYMBOL_GPL(apply_to_page_range);
+
 /*
  * handle_pte_fault chooses page fault handler according to an entry
  * which was read non-atomically. Before making any commitment, on
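For illustration only, and not part of the patch above: a minimal sketch of how a caller might use the new apply_to_page_range() interface. The callback follows the fn(pte, pmd_page, addr, data) shape invoked from apply_to_pte_range(); the names wrprotect_pte() and wrprotect_kernel_range() are hypothetical, and a real caller would also need to flush the TLB for the range (e.g. with flush_tlb_kernel_range()) after changing protections.

#include <linux/mm.h>

/*
 * Hypothetical pte_fn_t callback: write-protect every present pte
 * visited by the walk.  Returning non-zero would abort the walk and
 * be propagated back from apply_to_page_range().
 */
static int wrprotect_pte(pte_t *pte, struct page *pmd_page,
			 unsigned long addr, void *data)
{
	pte_t entry = *pte;

	if (pte_present(entry))
		set_pte_at(&init_mm, addr, pte, pte_wrprotect(entry));
	return 0;
}

/* Hypothetical caller: walk a kernel virtual range with the callback. */
static int wrprotect_kernel_range(unsigned long addr, unsigned long size)
{
	/* Passing &init_mm selects the kernel page tables (pte_alloc_kernel path). */
	return apply_to_page_range(&init_mm, addr, size, wrprotect_pte, NULL);
}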