@@ -109,6 +109,7 @@
 #include <linux/mutex.h>
 #include <linux/fault-inject.h>
 #include <linux/rtmutex.h>
+#include <linux/reciprocal_div.h>
 
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
@@ -386,6 +387,7 @@ struct kmem_cache {
 	unsigned int shared;
 
 	unsigned int buffer_size;
+	u32 reciprocal_buffer_size;
 /* 3) touched by every alloc & free from the backend */
 	struct kmem_list3 *nodelists[MAX_NUMNODES];
 
@@ -627,10 +629,17 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
 	return slab->s_mem + cache->buffer_size * idx;
 }
 
-static inline unsigned int obj_to_index(struct kmem_cache *cache,
-					struct slab *slab, void *obj)
+/*
+ * We want to avoid an expensive divide : (offset / cache->buffer_size)
+ * Using the fact that buffer_size is a constant for a particular cache,
+ * we can replace (offset / cache->buffer_size) by
+ * reciprocal_divide(offset, cache->reciprocal_buffer_size)
+ */
+static inline unsigned int obj_to_index(const struct kmem_cache *cache,
+					const struct slab *slab, void *obj)
 {
-	return (unsigned)(obj - slab->s_mem) / cache->buffer_size;
+	u32 offset = (obj - slab->s_mem);
+	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
 }
 
 /*
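
The comment in the hunk above is the heart of the change: a divide by a value that is constant for the cache can be replaced by a multiply against a precomputed reciprocal plus a shift. Below is a minimal userspace sketch of that trick, with the two helpers re-implemented locally for illustration; the kernel's versions live in include/linux/reciprocal_div.h and lib/reciprocal_div.c, and the offset range exercised here (small, slab-sized offsets) is an assumption of the example rather than a claim about the kernel helpers.

/* Minimal userspace sketch of the reciprocal-divide trick (illustrative
 * re-implementation, not the kernel code). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Precompute once per divisor: roughly ceil(2^32 / k). */
static uint32_t reciprocal_value(uint32_t k)
{
	uint64_t val = (1ULL << 32) + (k - 1);
	return (uint32_t)(val / k);
}

/* Divide by the original k with one multiply and one shift. */
static uint32_t reciprocal_divide(uint32_t a, uint32_t r)
{
	return (uint32_t)(((uint64_t)a * r) >> 32);
}

int main(void)
{
	uint32_t buffer_size = 192;	/* stands in for cache->buffer_size */
	uint32_t r = reciprocal_value(buffer_size);

	/* Object offsets inside a slab are small (a few pages at most),
	 * the range exercised here; each offset maps back to its object
	 * index without a runtime divide. */
	for (uint32_t off = 0; off < 64 * 1024; off += buffer_size)
		assert(reciprocal_divide(off, r) == off / buffer_size);

	printf("offset 960 -> index %u\n", reciprocal_divide(960, r));
	return 0;
}

The payoff is in obj_to_index(): the per-call integer division becomes a 32x32->64 multiply and a shift, with the reciprocal computed once when the cache is set up.
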
@@ -1427,6 +1436,8 @@ void __init kmem_cache_init(void)
 
 	cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
 					cache_line_size());
+	cache_cache.reciprocal_buffer_size =
+		reciprocal_value(cache_cache.buffer_size);
 
 	for (order = 0; order < MAX_ORDER; order++) {
 		cache_estimate(order, cache_cache.buffer_size,
@@ -2313,6 +2324,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	if (flags & SLAB_CACHE_DMA)
 		cachep->gfpflags |= GFP_DMA;
 	cachep->buffer_size = size;
+	cachep->reciprocal_buffer_size = reciprocal_value(size);
 
 	if (flags & CFLGS_OFF_SLAB) {
 		cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
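
Both assignments of buffer_size touched by this patch (the cache_cache bootstrap in kmem_cache_init() and the general path in kmem_cache_create()) now store the matching reciprocal right next to it. Since buffer_size is fixed for the lifetime of a cache, reciprocal_buffer_size can be computed once at setup time and stays valid for every subsequent obj_to_index() call.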