@@ -433,11 +433,6 @@ static int obj_offset(struct kmem_cache *cachep)
 	return cachep->obj_offset;
 }
 
-static int obj_size(struct kmem_cache *cachep)
-{
-	return cachep->object_size;
-}
-
 static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
 {
 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
@@ -465,7 +460,6 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 #else
 
 #define obj_offset(x)			0
-#define obj_size(cachep)		(cachep->size)
 #define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
 #define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
 #define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})
@@ -1853,7 +1847,7 @@ static void kmem_rcu_free(struct rcu_head *head)
 static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
 			    unsigned long caller)
 {
-	int size = obj_size(cachep);
+	int size = cachep->object_size;
 
 	addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
 
@@ -1885,7 +1879,7 @@ static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
 
 static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
 {
-	int size = obj_size(cachep);
+	int size = cachep->object_size;
 	addr = &((char *)addr)[obj_offset(cachep)];
 
 	memset(addr, val, size);
@@ -1945,7 +1939,7 @@ static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
 		printk("\n");
 	}
 	realobj = (char *)objp + obj_offset(cachep);
-	size = obj_size(cachep);
+	size = cachep->object_size;
 	for (i = 0; i < size && lines; i += 16, lines--) {
 		int limit;
 		limit = 16;
@@ -1962,7 +1956,7 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
 	int lines = 0;
 
 	realobj = (char *)objp + obj_offset(cachep);
-	size = obj_size(cachep);
+	size = cachep->object_size;
 
 	for (i = 0; i < size; i++) {
 		char exp = POISON_FREE;
@@ -3265,7 +3259,7 @@ static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
 	if (cachep == &cache_cache)
 		return false;
 
-	return should_failslab(obj_size(cachep), flags, cachep->flags);
+	return should_failslab(cachep->object_size, flags, cachep->flags);
 }
 
 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
@@ -3525,14 +3519,14 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
   out:
 	local_irq_restore(save_flags);
 	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
-	kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags,
+	kmemleak_alloc_recursive(ptr, cachep->object_size, 1, cachep->flags,
 				 flags);
 
 	if (likely(ptr))
-		kmemcheck_slab_alloc(cachep, flags, ptr, obj_size(cachep));
+		kmemcheck_slab_alloc(cachep, flags, ptr, cachep->object_size);
 
 	if (unlikely((flags & __GFP_ZERO) && ptr))
-		memset(ptr, 0, obj_size(cachep));
+		memset(ptr, 0, cachep->object_size);
 
 	return ptr;
 }
@@ -3587,15 +3581,15 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
 	objp = __do_cache_alloc(cachep, flags);
 	local_irq_restore(save_flags);
 	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
-	kmemleak_alloc_recursive(objp, obj_size(cachep), 1, cachep->flags,
+	kmemleak_alloc_recursive(objp, cachep->object_size, 1, cachep->flags,
 				 flags);
 	prefetchw(objp);
 
 	if (likely(objp))
-		kmemcheck_slab_alloc(cachep, flags, objp, obj_size(cachep));
+		kmemcheck_slab_alloc(cachep, flags, objp, cachep->object_size);
 
 	if (unlikely((flags & __GFP_ZERO) && objp))
-		memset(objp, 0, obj_size(cachep));
+		memset(objp, 0, cachep->object_size);
 
 	return objp;
 }
@@ -3711,7 +3705,7 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
 	kmemleak_free_recursive(objp, cachep->flags);
 	objp = cache_free_debugcheck(cachep, objp, caller);
 
-	kmemcheck_slab_free(cachep, objp, obj_size(cachep));
+	kmemcheck_slab_free(cachep, objp, cachep->object_size);
 
 	/*
 	 * Skip calling cache_free_alien() when the platform is not numa.
@@ -3746,7 +3740,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 	void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
 
 	trace_kmem_cache_alloc(_RET_IP_, ret,
-			       obj_size(cachep), cachep->size, flags);
+			       cachep->object_size, cachep->size, flags);
 
 	return ret;
 }
@@ -3774,7 +3768,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 				       __builtin_return_address(0));
 
 	trace_kmem_cache_alloc_node(_RET_IP_, ret,
-				    obj_size(cachep), cachep->size,
+				    cachep->object_size, cachep->size,
 				    flags, nodeid);
 
 	return ret;
@@ -3896,9 +3890,9 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 	unsigned long flags;
 
 	local_irq_save(flags);
-	debug_check_no_locks_freed(objp, obj_size(cachep));
+	debug_check_no_locks_freed(objp, cachep->object_size);
 	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
-		debug_check_no_obj_freed(objp, obj_size(cachep));
+		debug_check_no_obj_freed(objp, cachep->object_size);
 	__cache_free(cachep, objp, __builtin_return_address(0));
 	local_irq_restore(flags);
 
@@ -3927,8 +3921,9 @@ void kfree(const void *objp)
 	local_irq_save(flags);
 	kfree_debugcheck(objp);
 	c = virt_to_cache(objp);
-	debug_check_no_locks_freed(objp, obj_size(c));
-	debug_check_no_obj_freed(objp, obj_size(c));
+	debug_check_no_locks_freed(objp, c->object_size);
+
+	debug_check_no_obj_freed(objp, c->object_size);
 	__cache_free(c, (void *)objp, __builtin_return_address(0));
 	local_irq_restore(flags);
 }
@@ -3936,7 +3931,7 @@ EXPORT_SYMBOL(kfree);
 
 unsigned int kmem_cache_size(struct kmem_cache *cachep)
 {
-	return obj_size(cachep);
+	return cachep->object_size;
 }
 EXPORT_SYMBOL(kmem_cache_size);
 
@@ -4657,6 +4652,6 @@ size_t ksize(const void *objp)
 	if (unlikely(objp == ZERO_SIZE_PTR))
 		return 0;
 
-	return obj_size(virt_to_cache(objp));
+	return virt_to_cache(objp)->object_size;
 }
 EXPORT_SYMBOL(ksize);