@@ -1277,6 +1277,51 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *
 }
 EXPORT_SYMBOL(vm_insert_page);
 
+/**
+ * vm_insert_pfn - insert single pfn into user vma
+ * @vma: user vma to map to
+ * @addr: target user address of this page
+ * @pfn: source kernel pfn
+ *
+ * Similar to vm_insert_page, this allows drivers to insert individual pages
+ * they've allocated into a user vma. Same comments apply.
+ *
+ * This function should only be called from a vm_ops->fault handler, and
+ * in that case the handler should return NULL.
+ */
+int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+                unsigned long pfn)
+{
+        struct mm_struct *mm = vma->vm_mm;
+        int retval;
+        pte_t *pte, entry;
+        spinlock_t *ptl;
+
+        BUG_ON(!(vma->vm_flags & VM_PFNMAP));
+        BUG_ON(is_cow_mapping(vma->vm_flags));
+
+        retval = -ENOMEM;
+        pte = get_locked_pte(mm, addr, &ptl);
+        if (!pte)
+                goto out;
+        retval = -EBUSY;
+        if (!pte_none(*pte))
+                goto out_unlock;
+
+        /* Ok, finally just insert the thing.. */
+        entry = pfn_pte(pfn, vma->vm_page_prot);
+        set_pte_at(mm, addr, pte, entry);
+        update_mmu_cache(vma, addr, entry);
+
+        retval = 0;
+out_unlock:
+        pte_unmap_unlock(pte, ptl);
+
+out:
+        return retval;
+}
+EXPORT_SYMBOL(vm_insert_pfn);
+
 /*
  * maps a range of physical memory into the requested pages. the old
  * mappings are removed. any references to nonexistent pages results
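
For context, the intended call pattern (not part of the patch): the driver's
->mmap sets VM_PFNMAP on the vma (and the mapping must not be a private COW
one, or the BUG_ON()s above fire), and the fault path then installs the one
pfn backing the faulting address. A minimal sketch of such a fault-path
helper, assuming a hypothetical device exposing a physically contiguous
region starting at base_pfn that the vma maps linearly; the helper and its
names are illustrative, not from this patch:

#include <linux/mm.h>

/*
 * Hypothetical helper, for illustration only: install the single pfn
 * backing the faulting address @addr in @vma, where the device region
 * starts at @base_pfn and the vma maps it linearly from vm_start.
 */
static int mydrv_fault_one(struct vm_area_struct *vma, unsigned long addr,
                unsigned long base_pfn)
{
        /* page offset within the vma selects the pfn in the region */
        unsigned long pfn = base_pfn + ((addr - vma->vm_start) >> PAGE_SHIFT);
        int err;

        err = vm_insert_pfn(vma, addr & PAGE_MASK, pfn);

        /* -EBUSY: a racing fault already installed this pte; that is fine */
        if (err == -EBUSY)
                err = 0;
        return err;
}

On success (including the benign -EBUSY race) the vm_ops->fault handler
would return NULL, per the comment above; a real driver would translate any
other error into its fault handler's failure return instead.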