@@ -482,6 +482,8 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 	void *ret;
 
+	gfp &= gfp_allowed_mask;
+
 	lockdep_trace_alloc(gfp);
 
 	if (size < PAGE_SIZE - align) {
@@ -608,6 +610,10 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 {
 	void *b;
 
+	flags &= gfp_allowed_mask;
+
+	lockdep_trace_alloc(flags);
+
 	if (c->size < PAGE_SIZE) {
 		b = slob_alloc(c->size, flags, c->align, node);
 		trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
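
Both hunks apply the same fix: mask the caller's gfp flags with gfp_allowed_mask before lockdep_trace_alloc() sees them, so a flag that is globally disallowed at the time (for instance __GFP_WAIT while the mask is restricted around suspend) cannot trigger a spurious lockdep warning. The following is a minimal userspace sketch of that masking pattern only; the flag values, the mask handling, and the helper names are illustrative stand-ins, not the kernel's real definitions.

	#include <stdio.h>

	/* Illustrative stand-ins for kernel gfp bits; the values are made up. */
	#define __GFP_WAIT	0x01u	/* allocation may sleep */
	#define __GFP_IO	0x02u
	#define __GFP_FS	0x04u
	#define GFP_KERNEL	(__GFP_WAIT | __GFP_IO | __GFP_FS)

	/*
	 * Models gfp_allowed_mask: during early boot or suspend the kernel
	 * clears bits such as __GFP_WAIT from this mask.
	 */
	static unsigned int gfp_allowed_mask = GFP_KERNEL;

	/*
	 * Models lockdep_trace_alloc(): in the kernel, lockdep complains if
	 * it sees a sleeping-allocation flag in a context where sleeping is
	 * not allowed.
	 */
	static void lockdep_trace_alloc(unsigned int gfp)
	{
		if (gfp & __GFP_WAIT)
			printf("lockdep: saw __GFP_WAIT (could warn)\n");
		else
			printf("lockdep: no sleeping flags seen\n");
	}

	/* Models the fixed allocation path: mask first, then trace. */
	static void kmalloc_like(unsigned int gfp)
	{
		gfp &= gfp_allowed_mask;	/* the fix: mask before lockdep looks */
		lockdep_trace_alloc(gfp);
		/* ... allocation would proceed with the masked flags ... */
	}

	int main(void)
	{
		kmalloc_like(GFP_KERNEL);	 /* normal: __GFP_WAIT passes through */

		gfp_allowed_mask &= ~__GFP_WAIT; /* e.g. suspend restricts the mask */
		kmalloc_like(GFP_KERNEL);	 /* lockdep never sees __GFP_WAIT */
		return 0;
	}

Running the sketch shows why the ordering matters: once the mask is restricted, the flags reach the lockdep model with __GFP_WAIT already cleared, which is what the two hunks arrange for slob's __kmalloc_node() and kmem_cache_alloc_node().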