@@ -23,6 +23,7 @@ struct vm_area_struct;
 #define ___GFP_REPEAT		0x400u
 #define ___GFP_NOFAIL		0x800u
 #define ___GFP_NORETRY		0x1000u
+#define ___GFP_MEMALLOC		0x2000u
 #define ___GFP_COMP		0x4000u
 #define ___GFP_ZERO		0x8000u
 #define ___GFP_NOMEMALLOC	0x10000u
@@ -76,9 +77,14 @@ struct vm_area_struct;
 #define __GFP_REPEAT	((__force gfp_t)___GFP_REPEAT)	/* See above */
 #define __GFP_NOFAIL	((__force gfp_t)___GFP_NOFAIL)	/* See above */
 #define __GFP_NORETRY	((__force gfp_t)___GFP_NORETRY) /* See above */
+#define __GFP_MEMALLOC	((__force gfp_t)___GFP_MEMALLOC)/* Allow access to emergency reserves */
 #define __GFP_COMP	((__force gfp_t)___GFP_COMP)	/* Add compound page metadata */
 #define __GFP_ZERO	((__force gfp_t)___GFP_ZERO)	/* Return zeroed page on success */
-#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) /* Don't use emergency reserves */
+#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) /* Don't use emergency reserves.
+							 * This takes precedence over the
+							 * __GFP_MEMALLOC flag if both are
+							 * set
+							 */
 #define __GFP_HARDWALL   ((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */
 #define __GFP_THISNODE	((__force gfp_t)___GFP_THISNODE)/* No fallback, no policies */
 #define __GFP_RECLAIMABLE	((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
@@ -129,7 +135,7 @@ struct vm_area_struct;
 /* Control page allocator reclaim behavior */
 #define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
 			__GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
-			__GFP_NORETRY|__GFP_NOMEMALLOC)
+			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC)
 
 /* Control slab gfp mask during early boot */
 #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS))