@@ -2255,6 +2255,54 @@ oom:
 	return VM_FAULT_OOM;
 }
 
+/*
+ * do_no_pfn() tries to create a new page mapping for a page without
+ * a struct_page backing it
+ *
+ * As this is called only for pages that do not currently exist, we
+ * do not need to flush old virtual caches or the TLB.
+ *
+ * We enter with non-exclusive mmap_sem (to exclude vma changes,
+ * but allow concurrent faults), and pte mapped but not yet locked.
+ * We return with mmap_sem still held, but pte unmapped and unlocked.
+ *
+ * It is expected that the ->nopfn handler always returns the same pfn
+ * for a given virtual mapping.
+ *
+ * Mark this `noinline' to prevent it from bloating the main pagefault code.
+ */
+static noinline int do_no_pfn(struct mm_struct *mm, struct vm_area_struct *vma,
+		     unsigned long address, pte_t *page_table, pmd_t *pmd,
+		     int write_access)
+{
+	spinlock_t *ptl;
+	pte_t entry;
+	unsigned long pfn;
+	int ret = VM_FAULT_MINOR;
+
+	pte_unmap(page_table);
+	BUG_ON(!(vma->vm_flags & VM_PFNMAP));
+	BUG_ON(is_cow_mapping(vma->vm_flags));
+
+	pfn = vma->vm_ops->nopfn(vma, address & PAGE_MASK);
+	if (pfn == NOPFN_OOM)
+		return VM_FAULT_OOM;
+	if (pfn == NOPFN_SIGBUS)
+		return VM_FAULT_SIGBUS;
+
+	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
+
+	/* Only go through if we didn't race with anybody else... */
+	if (pte_none(*page_table)) {
+		entry = pfn_pte(pfn, vma->vm_page_prot);
+		if (write_access)
+			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+		set_pte_at(mm, address, page_table, entry);
+	}
+	pte_unmap_unlock(page_table, ptl);
+	return ret;
+}
+
 /*
  * Fault of a previously existing named mapping. Repopulate the pte
  * from the encoded file_pte if possible. This enables swappable
@@ -2317,11 +2365,17 @@ static inline int handle_pte_fault(struct mm_struct *mm,
 	old_entry = entry = *pte;
 	if (!pte_present(entry)) {
 		if (pte_none(entry)) {
-			if (!vma->vm_ops || !vma->vm_ops->nopage)
-				return do_anonymous_page(mm, vma, address,
-					pte, pmd, write_access);
-			return do_no_page(mm, vma, address,
-					pte, pmd, write_access);
+			if (vma->vm_ops) {
+				if (vma->vm_ops->nopage)
+					return do_no_page(mm, vma, address,
+							  pte, pmd,
+							  write_access);
+				if (unlikely(vma->vm_ops->nopfn))
+					return do_no_pfn(mm, vma, address, pte,
+							 pmd, write_access);
+			}
+			return do_anonymous_page(mm, vma, address,
+						 pte, pmd, write_access);
 		}
 		if (pte_file(entry))
 			return do_file_page(mm, vma, address,
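
For illustration only, and not part of the patch above: a minimal sketch of how a driver might implement the new ->nopfn callback, assuming a hypothetical device that exports one physically contiguous region with no struct page backing. The mydev_* names and the mydev_region bookkeeping are invented for this sketch; only the ->nopfn signature, the NOPFN_SIGBUS/NOPFN_OOM return values and the VM_PFNMAP requirement come from the patch.

	#include <linux/mm.h>

	/* Hypothetical per-mapping bookkeeping kept in vma->vm_private_data. */
	struct mydev_region {
		unsigned long base_pfn;		/* first pfn of the device region */
		unsigned long nr_pages;		/* region size in pages */
	};

	static unsigned long mydev_nopfn(struct vm_area_struct *vma,
					 unsigned long address)
	{
		struct mydev_region *region = vma->vm_private_data;
		unsigned long pgoff = (address - vma->vm_start) >> PAGE_SHIFT;

		/* Fault outside the backing region: do_no_pfn() turns this into SIGBUS. */
		if (pgoff >= region->nr_pages)
			return NOPFN_SIGBUS;

		/*
		 * Deterministic: the same virtual address always yields the
		 * same pfn, as the do_no_pfn() comment requires.
		 */
		return region->base_pfn + pgoff;
	}

	static struct vm_operations_struct mydev_vm_ops = {
		.nopfn	= mydev_nopfn,
	};

	/*
	 * The driver's mmap() handler would mark the vma as a pfn mapping
	 * (do_no_pfn() BUG_ONs without VM_PFNMAP) and install the ops:
	 *
	 *	vma->vm_flags |= VM_IO | VM_PFNMAP;
	 *	vma->vm_ops = &mydev_vm_ops;
	 *	vma->vm_private_data = region;
	 */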