|
@@ -98,7 +98,7 @@ extern unsigned int kobjsize(const void *objp);
|
|
|
#define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
|
|
|
#define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
|
|
|
#define VM_MAPPED_COPY 0x01000000 /* T if mapped copy of data (nommu mmap) */
|
|
|
-#define VM_INSERTPAGE 0x02000000 /* The vma has had "vm_insert_page()" done on it */
|
|
|
+#define VM_INSERTPAGE	0x02000000	/* The vma has had "vm_insert_page()" done on it. Refer to the note at VM_PFNMAP_AT_MMAP below */
|
|
|
#define VM_ALWAYSDUMP 0x04000000 /* Always include in core dumps */
|
|
|
|
|
|
#define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
|
|
@@ -126,6 +126,17 @@ extern unsigned int kobjsize(const void *objp);
|
|
|
*/
|
|
|
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)
|
|
|
|
|
|
+/*
|
|
|
+ * pfnmap vmas that are fully mapped at mmap time (not mapped on fault).
|
|
|
+ * Used by x86 PAT to identify such PFNMAP mappings and optimize their handling.
|
|
|
+ * Note that the VM_INSERTPAGE flag is overloaded here, i.e.,
|
|
|
+ * VM_INSERTPAGE && !VM_PFNMAP implies
|
|
|
+ * The vma has had "vm_insert_page()" done on it
|
|
|
+ * VM_INSERTPAGE && VM_PFNMAP implies
|
|
|
+ * The vma is PFNMAP with full mapping at mmap time
|
|
|
+ */
|
|
|
+#define VM_PFNMAP_AT_MMAP (VM_INSERTPAGE | VM_PFNMAP)
|
|
|
+
|
|
|
/*
|
|
|
* mapping from the currently active vm_flags protection bits (the
|
|
|
* low four bits) to a page protection mask..
|
|
@@ -145,7 +156,7 @@ extern pgprot_t protection_map[16];
|
|
|
*/
|
|
|
static inline int is_linear_pfn_mapping(struct vm_area_struct *vma)
|
|
|
{
|
|
|
- return ((vma->vm_flags & VM_PFNMAP) && vma->vm_pgoff);
|
|
|
+ return ((vma->vm_flags & VM_PFNMAP_AT_MMAP) == VM_PFNMAP_AT_MMAP);
|
|
|
}
|
|
|
|
|
|
static inline int is_pfn_mapping(struct vm_area_struct *vma)
|