@@ -137,6 +137,7 @@
 
 /* Shouldn't this be in a header file somewhere? */
 #define BYTES_PER_WORD		sizeof(void *)
+#define REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))
 
 #ifndef cache_line_size
 #define cache_line_size()	L1_CACHE_BYTES
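For a quick sanity check of what the new macro evaluates to, here is a small userspace sketch (illustration only, not kernel code: the local MAX() stands in for the kernel's max(), and the two #defines are repeated so it compiles on its own). On typical 64-bit builds both operands are 8; the interesting case is a 32-bit platform where BYTES_PER_WORD is 4 but __alignof__(unsigned long long) reports 8, which is exactly what REDZONE_ALIGN is meant to capture.

/*
 * Illustration only, not kernel code: MAX() stands in for the kernel's
 * max(); builds with gcc or clang.
 */
#include <stdio.h>

#define BYTES_PER_WORD	sizeof(void *)
#define MAX(a, b)	((a) > (b) ? (a) : (b))
#define REDZONE_ALIGN	MAX(BYTES_PER_WORD, __alignof__(unsigned long long))

int main(void)
{
	printf("BYTES_PER_WORD              = %zu\n", BYTES_PER_WORD);
	printf("alignof(unsigned long long) = %zu\n",
	       (size_t)__alignof__(unsigned long long));
	printf("REDZONE_ALIGN               = %zu\n", (size_t)REDZONE_ALIGN);
	return 0;
}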
@@ -547,7 +548,7 @@ static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
 	if (cachep->flags & SLAB_STORE_USER)
 		return (unsigned long long *)(objp + cachep->buffer_size -
 					      sizeof(unsigned long long) -
-					      BYTES_PER_WORD);
+					      REDZONE_ALIGN);
 	return (unsigned long long *) (objp + cachep->buffer_size -
 				       sizeof(unsigned long long));
 }
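With both SLAB_RED_ZONE and SLAB_STORE_USER set, the debug area behind the object ends with the 64-bit second redzone followed by the caller-address reservation, which the last hunk below grows from one word to REDZONE_ALIGN bytes. dbg_redzone2() therefore steps back over that whole reservation plus the redzone word itself. A minimal sketch of just that offset arithmetic (struct fake_cache and redzone2_offset() are made-up stand-ins, not the real struct kmem_cache helpers):

#include <stdio.h>
#include <stddef.h>

#define BYTES_PER_WORD	sizeof(void *)
#define MAX(a, b)	((a) > (b) ? (a) : (b))
#define REDZONE_ALIGN	MAX(BYTES_PER_WORD, __alignof__(unsigned long long))

/* Made-up stand-in for struct kmem_cache; only the fields used here. */
struct fake_cache {
	size_t buffer_size;	/* per-object footprint, debug words included */
	int store_user;		/* SLAB_STORE_USER set? */
};

/* Mirrors the pointer arithmetic in dbg_redzone2(), as an offset. */
static size_t redzone2_offset(const struct fake_cache *c)
{
	if (c->store_user)
		return c->buffer_size - sizeof(unsigned long long) -
		       REDZONE_ALIGN;
	return c->buffer_size - sizeof(unsigned long long);
}

int main(void)
{
	struct fake_cache c = { .buffer_size = 128, .store_user = 1 };

	/* On a 64-bit build: 128 - 8 - 8 = 112, a 64-bit aligned offset. */
	printf("second redzone at object offset %zu\n", redzone2_offset(&c));
	return 0;
}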
@@ -2178,7 +2179,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	 * above the next power of two: caches with object sizes just above a
 	 * power of two have a significant amount of internal fragmentation.
 	 */
-	if (size < 4096 || fls(size - 1) == fls(size-1 + 3 * BYTES_PER_WORD))
+	if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
+						2 * sizeof(unsigned long long)))
 		flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
 	if (!(flags & SLAB_DESTROY_BY_RCU))
 		flags |= SLAB_POISON;
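The heuristic above keeps the debugging flags on for anything under 4096 bytes, and otherwise only when the extra debug bytes (two 64-bit redzone words plus the REDZONE_ALIGN-sized user-store reservation) leave fls(size - 1) unchanged, i.e. do not push the allocation into the next power-of-two bracket, since objects sitting just above a power of two waste a lot of slab space. A userspace sketch of the same check, with fls_stub() as a stand-in for the kernel's fls():

#include <stdio.h>
#include <stddef.h>

#define BYTES_PER_WORD	sizeof(void *)
#define MAX(a, b)	((a) > (b) ? (a) : (b))
#define REDZONE_ALIGN	MAX(BYTES_PER_WORD, __alignof__(unsigned long long))

/* Stand-in for the kernel's fls(): index of the highest set bit, 1-based. */
static int fls_stub(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

/* Same shape as the condition in the hunk above. */
static int debug_worthwhile(size_t size)
{
	size_t overhead = REDZONE_ALIGN + 2 * sizeof(unsigned long long);

	return size < 4096 ||
	       fls_stub(size - 1) == fls_stub(size - 1 + overhead);
}

int main(void)
{
	/* 5000 has plenty of slack below 8192, so debugging stays on. */
	printf("size 5000: %s\n", debug_worthwhile(5000) ? "debug" : "skip");
	/* 8192 is exactly a power of two; any overhead crosses the bracket. */
	printf("size 8192: %s\n", debug_worthwhile(8192) ? "debug" : "skip");
	return 0;
}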
@@ -2219,12 +2221,20 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	}
 
 	/*
-	 * Redzoning and user store require word alignment. Note this will be
-	 * overridden by architecture or caller mandated alignment if either
-	 * is greater than BYTES_PER_WORD.
+	 * Redzoning and user store require word alignment or possibly larger.
+	 * Note this will be overridden by architecture or caller mandated
+	 * alignment if either is greater than BYTES_PER_WORD.
 	 */
-	if (flags & SLAB_RED_ZONE || flags & SLAB_STORE_USER)
-		ralign = __alignof__(unsigned long long);
+	if (flags & SLAB_STORE_USER)
+		ralign = BYTES_PER_WORD;
+
+	if (flags & SLAB_RED_ZONE) {
+		ralign = REDZONE_ALIGN;
+		/* If redzoning, ensure that the second redzone is suitably
+		 * aligned, by adjusting the object size accordingly. */
+		size += REDZONE_ALIGN - 1;
+		size &= ~(REDZONE_ALIGN - 1);
+	}
 
 	/* 2) arch mandated alignment */
 	if (ralign < ARCH_SLAB_MINALIGN) {
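The two added size lines are the usual round-up-to-a-multiple trick for power-of-two alignments (the same rounding the kernel's ALIGN() helper expresses), so the object size becomes a multiple of REDZONE_ALIGN and the second redzone can land on a 64-bit boundary. A tiny standalone check, with align_up() as an illustrative local helper, not a kernel function:

#include <assert.h>
#include <stdio.h>
#include <stddef.h>

/* Round size up to the next multiple of align; align must be a power
 * of two, which REDZONE_ALIGN always is (both max() operands are). */
static size_t align_up(size_t size, size_t align)
{
	size += align - 1;
	size &= ~(align - 1);
	return size;
}

int main(void)
{
	assert(align_up(100, 8) == 104);	/* rounded up to a multiple of 8 */
	assert(align_up(104, 8) == 104);	/* already aligned: unchanged */
	assert(align_up(1, 4) == 4);
	printf("ok\n");
	return 0;
}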
@@ -2261,9 +2271,13 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	}
 	if (flags & SLAB_STORE_USER) {
 		/* user store requires one word storage behind the end of
-		 * the real object.
+		 * the real object. But if the second red zone needs to be
+		 * aligned to 64 bits, we must allow that much space.
 		 */
-		size += BYTES_PER_WORD;
+		if (flags & SLAB_RED_ZONE)
+			size += REDZONE_ALIGN;
+		else
+			size += BYTES_PER_WORD;
 	}
 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
 	if (size >= malloc_sizes[INDEX_L3 + 1].cs_size