@@ -280,15 +280,32 @@ static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s, int cpu)
 #endif
 }
 
+/*
+ * The end pointer in a slab is special. It points to the first object in the
+ * slab but has bit 0 set to mark it.
+ *
+ * Note that SLUB relies on page_mapping returning NULL for pages with bit 0
+ * in the mapping set.
+ */
+static inline int is_end(void *addr)
+{
+	return (unsigned long)addr & PAGE_MAPPING_ANON;
+}
+
+void *slab_address(struct page *page)
+{
+	return page->end - PAGE_MAPPING_ANON;
+}
+
 static inline int check_valid_pointer(struct kmem_cache *s,
 				struct page *page, const void *object)
 {
 	void *base;
 
-	if (!object)
+	if (object == page->end)
 		return 1;
 
-	base = page_address(page);
+	base = slab_address(page);
 	if (object < base || object >= base + s->objects * s->size ||
 		(object - base) % s->size) {
 		return 0;
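The two helpers above are the whole encoding: a slab's end marker is the page-aligned slab base with bit 0 set, and since PAGE_MAPPING_ANON is bit 0 (value 1), testing that bit identifies the marker and subtracting it recovers the base. A minimal stand-alone sketch of the round trip — the scaffolding below is illustrative user-space code, not part of the patch:

#include <assert.h>
#include <stdint.h>

#define PAGE_MAPPING_ANON 1UL		/* bit 0, as in the kernel */

int main(void)
{
	/* Stand-in for a page-aligned slab base; bit 0 is guaranteed clear. */
	static char slab[64] __attribute__((aligned(64)));
	char *end = slab + PAGE_MAPPING_ANON;	/* what page->end holds */

	/* is_end(): the tag bit separates the marker from real objects. */
	assert(((uintptr_t)end & PAGE_MAPPING_ANON) != 0);
	assert(((uintptr_t)slab & PAGE_MAPPING_ANON) == 0);

	/* slab_address(): strip the tag to get back the slab base. */
	assert(end - PAGE_MAPPING_ANON == slab);
	return 0;
}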
@@ -321,7 +338,8 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
 
 /* Scan freelist */
 #define for_each_free_object(__p, __s, __free) \
-	for (__p = (__free); __p; __p = get_freepointer((__s), __p))
+	for (__p = (__free); (__p) != page->end; __p = get_freepointer((__s),\
+			__p))
 
 /* Determine object index from a given position */
 static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
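With this change every freelist walk terminates on the tagged end pointer instead of NULL; note that the macro now picks up a `page` variable from the caller's scope. A sketch of such a walk, using hypothetical stand-in types rather than the kernel's:

/* Hypothetical stand-ins, for illustration only. */
struct fake_page {
	void *freelist;		/* first free object, or the tagged end */
	void *end;		/* slab base + 1 (bit 0 set) */
};

static int count_free(struct fake_page *page, unsigned int offset)
{
	void **p;
	int nr = 0;

	/* Mirrors for_each_free_object(): stop at page->end, not NULL. */
	for (p = page->freelist; (void *)p != page->end; p = p[offset])
		nr++;
	return nr;
}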
@@ -473,7 +491,7 @@ static void slab_fix(struct kmem_cache *s, char *fmt, ...)
 static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
 {
 	unsigned int off;	/* Offset of last byte */
-	u8 *addr = page_address(page);
+	u8 *addr = slab_address(page);
 
 	print_tracking(s, p);
 
@@ -651,7 +669,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
 	if (!(s->flags & SLAB_POISON))
 		return 1;
 
-	start = page_address(page);
+	start = slab_address(page);
 	end = start + (PAGE_SIZE << s->order);
 	length = s->objects * s->size;
 	remainder = end - (start + length);
@@ -718,7 +736,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
 		 * of the free objects in this slab. May cause
 		 * another error because the object count is now wrong.
 		 */
-		set_freepointer(s, p, NULL);
+		set_freepointer(s, p, page->end);
 		return 0;
 	}
 	return 1;
@@ -752,18 +770,18 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 	void *fp = page->freelist;
 	void *object = NULL;
 
-	while (fp && nr <= s->objects) {
+	while (fp != page->end && nr <= s->objects) {
 		if (fp == search)
 			return 1;
 		if (!check_valid_pointer(s, page, fp)) {
 			if (object) {
 				object_err(s, page, object,
 					"Freechain corrupt");
-				set_freepointer(s, object, NULL);
+				set_freepointer(s, object, page->end);
 				break;
 			} else {
 				slab_err(s, page, "Freepointer corrupt");
-				page->freelist = NULL;
+				page->freelist = page->end;
 				page->inuse = s->objects;
 				slab_fix(s, "Freelist cleared");
 				return 0;
@@ -869,7 +887,7 @@ bad:
 		 */
 		slab_fix(s, "Marking all objects used");
 		page->inuse = s->objects;
-		page->freelist = NULL;
+		page->freelist = page->end;
 	}
 	return 0;
 }
@@ -910,7 +928,7 @@ static int free_debug_processing(struct kmem_cache *s, struct page *page,
 	}
 
 	/* Special debug activities for freeing objects */
-	if (!SlabFrozen(page) && !page->freelist)
+	if (!SlabFrozen(page) && page->freelist == page->end)
 		remove_full(s, page);
 	if (s->flags & SLAB_STORE_USER)
 		set_track(s, object, TRACK_FREE, addr);
@@ -1102,6 +1120,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 		SetSlabDebug(page);
 
 	start = page_address(page);
+	page->end = start + 1;
 
 	if (unlikely(s->flags & SLAB_POISON))
 		memset(start, POISON_INUSE, PAGE_SIZE << s->order);
@@ -1113,7 +1132,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 		last = p;
 	}
 	setup_object(s, page, last);
-	set_freepointer(s, last, NULL);
+	set_freepointer(s, last, page->end);
 
 	page->freelist = start;
 	page->inuse = 0;
@@ -1129,7 +1148,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 		void *p;
 
 		slab_pad_check(s, page);
-		for_each_object(p, s, page_address(page))
+		for_each_object(p, s, slab_address(page))
 			check_object(s, page, p, 0);
 		ClearSlabDebug(page);
 	}
@@ -1139,6 +1158,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
 		-pages);
 
+	page->mapping = NULL;
 	__free_pages(page, s->order);
 }
 
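The marker lives in the word that struct page otherwise uses for `mapping` — that is why the comment at the top of this patch insists that page_mapping() return NULL when bit 0 is set, and why the field must be cleared here before the pages go back to the page allocator. A simplified model of that check (assumed shape, not the kernel's actual page_mapping()):

struct address_space;			/* opaque, as in the kernel */

#define PAGE_MAPPING_ANON 1UL

/* Model: a tagged mapping word (anon page or SLUB end marker) reads as NULL. */
static inline struct address_space *page_mapping_model(unsigned long mapping)
{
	if (mapping & PAGE_MAPPING_ANON)
		return (struct address_space *)0;
	return (struct address_space *)mapping;
}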
@@ -1341,7 +1361,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 	ClearSlabFrozen(page);
 	if (page->inuse) {
 
-		if (page->freelist)
+		if (page->freelist != page->end)
 			add_partial(n, page, tail);
 		else if (SlabDebug(page) && (s->flags & SLAB_STORE_USER))
 			add_full(n, page);
@@ -1377,8 +1397,12 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 	 * Merge cpu freelist into freelist. Typically we get here
 	 * because both freelists are empty. So this is unlikely
 	 * to occur.
+	 *
+	 * We need to use is_end() here because deactivate_slab()
+	 * may be called for a debug slab. Then c->freelist may
+	 * contain a dummy pointer.
 	 */
-	while (unlikely(c->freelist)) {
+	while (unlikely(!is_end(c->freelist))) {
 		void **object;
 
 		tail = 0;	/* Hot objects. Put the slab first */
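is_end() matters here rather than a comparison against c->page->end: as the comment above notes, a debug slab never populates the per-cpu freelist, so c->freelist may still hold the bare dummy tag installed by init_kmem_cache_cpu() below instead of this page's end marker. Both "empty" values have bit 0 set, so the single bit test covers them. A stand-alone check of that property (user-space scaffolding, same assumptions as the earlier sketch):

#include <assert.h>

#define PAGE_MAPPING_ANON 1UL

static inline int is_end(void *addr)	/* as defined earlier in the patch */
{
	return (unsigned long)addr & PAGE_MAPPING_ANON;
}

int main(void)
{
	static char slab[64] __attribute__((aligned(64)));
	void *real_end = slab + 1;			/* page->end */
	void *dummy = (void *)PAGE_MAPPING_ANON;	/* empty cpu freelist */

	assert(is_end(real_end) && is_end(dummy));	/* one test, both cases */
	assert(!is_end(slab));				/* live object pointer */
	return 0;
}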
@@ -1478,7 +1502,7 @@ static void *__slab_alloc(struct kmem_cache *s,
 		goto another_slab;
 load_freelist:
 	object = c->page->freelist;
-	if (unlikely(!object))
+	if (unlikely(object == c->page->end))
 		goto another_slab;
 	if (unlikely(SlabDebug(c->page)))
 		goto debug;
@@ -1486,7 +1510,7 @@ load_freelist:
 	object = c->page->freelist;
 	c->freelist = object[c->offset];
 	c->page->inuse = s->objects;
-	c->page->freelist = NULL;
+	c->page->freelist = c->page->end;
 	c->node = page_to_nid(c->page);
 	slab_unlock(c->page);
 	return object;
@@ -1550,7 +1574,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 
 	local_irq_save(flags);
 	c = get_cpu_slab(s, smp_processor_id());
-	if (unlikely(!c->freelist || !node_match(c, node)))
+	if (unlikely(is_end(c->freelist) || !node_match(c, node)))
 
 		object = __slab_alloc(s, gfpflags, node, addr, c);
 
@@ -1614,7 +1638,7 @@ checks_ok:
 	 * was not on the partial list before
 	 * then add it.
 	 */
-	if (unlikely(!prior))
+	if (unlikely(prior == page->end))
 		add_partial(get_node(s, page_to_nid(page)), page, 1);
 
 out_unlock:
@@ -1622,7 +1646,7 @@ out_unlock:
 	return;
 
 slab_empty:
-	if (prior)
+	if (prior != page->end)
 		/*
 		 * Slab still on the partial list.
 		 */
@@ -1842,7 +1866,7 @@ static void init_kmem_cache_cpu(struct kmem_cache *s,
 			struct kmem_cache_cpu *c)
 {
 	c->page = NULL;
-	c->freelist = NULL;
+	c->freelist = (void *)PAGE_MAPPING_ANON;
 	c->node = 0;
 	c->offset = s->offset / sizeof(void *);
 	c->objsize = s->objsize;
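A freshly initialized per-cpu structure has no slab page, so there is no page->end to borrow; the bare tag (void *)PAGE_MAPPING_ANON plays that role. Any value with bit 0 set satisfies is_end(), so the fast-path test in slab_alloc() above falls through to __slab_alloc() on first use, exactly as a NULL c->freelist did before this patch.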
@@ -3105,7 +3129,7 @@ static int validate_slab(struct kmem_cache *s, struct page *page,
 						unsigned long *map)
 {
 	void *p;
-	void *addr = page_address(page);
+	void *addr = slab_address(page);
 
 	if (!check_slab(s, page) ||
 			!on_freelist(s, page, NULL))
@@ -3385,7 +3409,7 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
 static void process_slab(struct loc_track *t, struct kmem_cache *s,
 		struct page *page, enum track_item alloc)
 {
-	void *addr = page_address(page);
+	void *addr = slab_address(page);
 	DECLARE_BITMAP(map, s->objects);
 	void *p;
 