@@ -32,6 +32,7 @@ typedef unsigned __bitwise xfs_km_flags_t;
 #define KM_NOSLEEP	((__force xfs_km_flags_t)0x0002u)
 #define KM_NOFS		((__force xfs_km_flags_t)0x0004u)
 #define KM_MAYFAIL	((__force xfs_km_flags_t)0x0008u)
+#define KM_ZERO		((__force xfs_km_flags_t)0x0010u)
 
 /*
  * We use a special process flag to avoid recursive callbacks into
@@ -43,7 +44,7 @@ kmem_flags_convert(xfs_km_flags_t flags)
 {
 	gfp_t	lflags;
 
-	BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL));
+	BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL|KM_ZERO));
 
 	if (flags & KM_NOSLEEP) {
 		lflags = GFP_ATOMIC | __GFP_NOWARN;
@@ -52,11 +53,14 @@ kmem_flags_convert(xfs_km_flags_t flags)
 		if ((current->flags & PF_FSTRANS) || (flags & KM_NOFS))
 			lflags &= ~__GFP_FS;
 	}
+
+	if (flags & KM_ZERO)
+		lflags |= __GFP_ZERO;
+
 	return lflags;
 }
 
 extern void *kmem_alloc(size_t, xfs_km_flags_t);
-extern void *kmem_zalloc(size_t, xfs_km_flags_t);
 extern void *kmem_zalloc_large(size_t size, xfs_km_flags_t);
 extern void *kmem_realloc(const void *, size_t, size_t, xfs_km_flags_t);
 extern void  kmem_free(const void *);
@@ -64,6 +68,12 @@ extern void kmem_free(const void *);
 
 extern void *kmem_zalloc_greedy(size_t *, size_t, size_t);
 
+static inline void *
+kmem_zalloc(size_t size, xfs_km_flags_t flags)
+{
+	return kmem_alloc(size, flags | KM_ZERO);
+}
+
 /*
  * Zone interfaces
  */
@@ -102,6 +112,11 @@ kmem_zone_destroy(kmem_zone_t *zone)
 }
 
 extern void *kmem_zone_alloc(kmem_zone_t *, xfs_km_flags_t);
-extern void *kmem_zone_zalloc(kmem_zone_t *, xfs_km_flags_t);
+
+static inline void *
+kmem_zone_zalloc(kmem_zone_t *zone, xfs_km_flags_t flags)
+{
+	return kmem_zone_alloc(zone, flags | KM_ZERO);
+}
+
 #endif /* __XFS_SUPPORT_KMEM_H__ */
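
For context, a minimal usage sketch follows (not part of the patch). With KM_ZERO
understood by kmem_flags_convert(), kmem_alloc(size, flags | KM_ZERO) maps to
__GFP_ZERO and returns zeroed memory, which is why the out-of-line kmem_zalloc()
and kmem_zone_zalloc() definitions collapse into the inline wrappers above. The
struct type and error handling below are hypothetical, for illustration only:

	struct xfs_foo	*foo;

	/* zeroed on return; NULL is possible only because of KM_MAYFAIL */
	foo = kmem_zalloc(sizeof(struct xfs_foo), KM_SLEEP | KM_MAYFAIL);
	if (!foo)
		return -ENOMEM;

	/* equivalent to kmem_alloc(sizeof(*foo), KM_SLEEP | KM_MAYFAIL | KM_ZERO) */

	kmem_free(foo);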