@@ -59,6 +59,8 @@
 
 #include <linux/kernel.h>
 #include <linux/slab.h>
+#include "slab.h"
+
 #include <linux/mm.h>
 #include <linux/swap.h> /* struct reclaim_state */
 #include <linux/cache.h>
@@ -91,36 +93,6 @@ struct slob_block {
 };
 typedef struct slob_block slob_t;
 
-/*
- * We use struct page fields to manage some slob allocation aspects,
- * however to avoid the horrible mess in include/linux/mm_types.h, we'll
- * just define our own struct page type variant here.
- */
-struct slob_page {
-	union {
-		struct {
-			unsigned long flags;	/* mandatory */
-			atomic_t _count;	/* mandatory */
-			slobidx_t units;	/* free units left in page */
-			unsigned long pad[2];
-			slob_t *free;		/* first free slob_t in page */
-			struct list_head list;	/* linked list of free pages */
-		};
-		struct page page;
-	};
-};
-static inline void struct_slob_page_wrong_size(void)
-{ BUILD_BUG_ON(sizeof(struct slob_page) != sizeof(struct page)); }
-
-/*
- * free_slob_page: call before a slob_page is returned to the page allocator.
- */
-static inline void free_slob_page(struct slob_page *sp)
-{
-	reset_page_mapcount(&sp->page);
-	sp->page.mapping = NULL;
-}
-
 /*
  * All partially free slob pages go on these lists.
  */
@@ -130,47 +102,24 @@ static LIST_HEAD(free_slob_small);
 static LIST_HEAD(free_slob_medium);
 static LIST_HEAD(free_slob_large);
 
-/*
- * is_slob_page: True for all slob pages (false for bigblock pages)
- */
-static inline int is_slob_page(struct slob_page *sp)
-{
-	return PageSlab((struct page *)sp);
-}
-
-static inline void set_slob_page(struct slob_page *sp)
-{
-	__SetPageSlab((struct page *)sp);
-}
-
-static inline void clear_slob_page(struct slob_page *sp)
-{
-	__ClearPageSlab((struct page *)sp);
-}
-
-static inline struct slob_page *slob_page(const void *addr)
-{
-	return (struct slob_page *)virt_to_page(addr);
-}
-
 /*
  * slob_page_free: true for pages on free_slob_pages list.
  */
-static inline int slob_page_free(struct slob_page *sp)
+static inline int slob_page_free(struct page *sp)
 {
-	return PageSlobFree((struct page *)sp);
+	return PageSlobFree(sp);
 }
 
-static void set_slob_page_free(struct slob_page *sp, struct list_head *list)
+static void set_slob_page_free(struct page *sp, struct list_head *list)
 {
 	list_add(&sp->list, list);
-	__SetPageSlobFree((struct page *)sp);
+	__SetPageSlobFree(sp);
 }
 
-static inline void clear_slob_page_free(struct slob_page *sp)
+static inline void clear_slob_page_free(struct page *sp)
 {
 	list_del(&sp->list);
-	__ClearPageSlobFree((struct page *)sp);
+	__ClearPageSlobFree(sp);
 }
 
 #define SLOB_UNIT sizeof(slob_t)
@@ -267,12 +216,12 @@ static void slob_free_pages(void *b, int order)
 /*
  * Allocate a slob block within a given slob_page sp.
  */
-static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
+static void *slob_page_alloc(struct page *sp, size_t size, int align)
 {
 	slob_t *prev, *cur, *aligned = NULL;
 	int delta = 0, units = SLOB_UNITS(size);
 
-	for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
+	for (prev = NULL, cur = sp->freelist; ; prev = cur, cur = slob_next(cur)) {
 		slobidx_t avail = slob_units(cur);
 
 		if (align) {
@@ -296,12 +245,12 @@ static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
 			if (prev)
 				set_slob(prev, slob_units(prev), next);
 			else
-				sp->free = next;
+				sp->freelist = next;
 		} else { /* fragment */
 			if (prev)
 				set_slob(prev, slob_units(prev), cur + units);
 			else
-				sp->free = cur + units;
+				sp->freelist = cur + units;
 			set_slob(cur + units, avail - units, next);
 		}
 
@@ -320,7 +269,7 @@ static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
  */
 static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 {
-	struct slob_page *sp;
+	struct page *sp;
 	struct list_head *prev;
 	struct list_head *slob_list;
 	slob_t *b = NULL;
@@ -341,7 +290,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 		 * If there's a node specification, search for a partial
 		 * page with a matching node id in the freelist.
 		 */
-		if (node != -1 && page_to_nid(&sp->page) != node)
+		if (node != -1 && page_to_nid(sp) != node)
 			continue;
 #endif
 		/* Enough room on this page? */
@@ -369,12 +318,12 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
 		if (!b)
 			return NULL;
-		sp = slob_page(b);
-		set_slob_page(sp);
+		sp = virt_to_page(b);
+		__SetPageSlab(sp);
 
 		spin_lock_irqsave(&slob_lock, flags);
 		sp->units = SLOB_UNITS(PAGE_SIZE);
-		sp->free = b;
+		sp->freelist = b;
 		INIT_LIST_HEAD(&sp->list);
 		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
 		set_slob_page_free(sp, slob_list);
@@ -392,7 +341,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
  */
 static void slob_free(void *block, int size)
 {
-	struct slob_page *sp;
+	struct page *sp;
 	slob_t *prev, *next, *b = (slob_t *)block;
 	slobidx_t units;
 	unsigned long flags;
@@ -402,7 +351,7 @@ static void slob_free(void *block, int size)
 		return;
 	BUG_ON(!size);
 
-	sp = slob_page(block);
+	sp = virt_to_page(block);
 	units = SLOB_UNITS(size);
 
 	spin_lock_irqsave(&slob_lock, flags);
@@ -412,8 +361,8 @@ static void slob_free(void *block, int size)
 		if (slob_page_free(sp))
 			clear_slob_page_free(sp);
 		spin_unlock_irqrestore(&slob_lock, flags);
-		clear_slob_page(sp);
-		free_slob_page(sp);
+		__ClearPageSlab(sp);
+		reset_page_mapcount(sp);
 		slob_free_pages(b, 0);
 		return;
 	}
@@ -421,7 +370,7 @@ static void slob_free(void *block, int size)
 	if (!slob_page_free(sp)) {
 		/* This slob page is about to become partially free. Easy! */
 		sp->units = units;
-		sp->free = b;
+		sp->freelist = b;
 		set_slob(b, units,
 			(void *)((unsigned long)(b +
 					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
@@ -441,15 +390,15 @@ static void slob_free(void *block, int size)
 	 */
 	sp->units += units;
 
-	if (b < sp->free) {
-		if (b + units == sp->free) {
-			units += slob_units(sp->free);
-			sp->free = slob_next(sp->free);
+	if (b < (slob_t *)sp->freelist) {
+		if (b + units == sp->freelist) {
+			units += slob_units(sp->freelist);
+			sp->freelist = slob_next(sp->freelist);
 		}
-		set_slob(b, units, sp->free);
-		sp->free = b;
+		set_slob(b, units, sp->freelist);
+		sp->freelist = b;
 	} else {
-		prev = sp->free;
+		prev = sp->freelist;
 		next = slob_next(prev);
 		while (b > next) {
 			prev = next;
@@ -522,7 +471,7 @@ EXPORT_SYMBOL(__kmalloc_node);
 
 void kfree(const void *block)
 {
-	struct slob_page *sp;
+	struct page *sp;
 
 	trace_kfree(_RET_IP_, block);
 
@@ -530,43 +479,36 @@ void kfree(const void *block)
 		return;
 	kmemleak_free(block);
 
-	sp = slob_page(block);
-	if (is_slob_page(sp)) {
+	sp = virt_to_page(block);
+	if (PageSlab(sp)) {
 		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 		unsigned int *m = (unsigned int *)(block - align);
 		slob_free(m, *m + align);
 	} else
-		put_page(&sp->page);
+		put_page(sp);
 }
 EXPORT_SYMBOL(kfree);
 
 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
 size_t ksize(const void *block)
 {
-	struct slob_page *sp;
+	struct page *sp;
 
 	BUG_ON(!block);
 	if (unlikely(block == ZERO_SIZE_PTR))
 		return 0;
 
-	sp = slob_page(block);
-	if (is_slob_page(sp)) {
+	sp = virt_to_page(block);
+	if (PageSlab(sp)) {
 		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 		unsigned int *m = (unsigned int *)(block - align);
 		return SLOB_UNITS(*m) * SLOB_UNIT;
 	} else
-		return sp->page.private;
+		return sp->private;
 }
 EXPORT_SYMBOL(ksize);
 
-struct kmem_cache {
-	unsigned int size, align;
-	unsigned long flags;
-	const char *name;
-	void (*ctor)(void *);
-};
-
-struct kmem_cache *kmem_cache_create(const char *name, size_t size,
+struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 	size_t align, unsigned long flags, void (*ctor)(void *))
 {
 	struct kmem_cache *c;
@@ -589,13 +531,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 		c->align = ARCH_SLAB_MINALIGN;
 		if (c->align < align)
 			c->align = align;
-	} else if (flags & SLAB_PANIC)
-		panic("Cannot create slab cache %s\n", name);
 
-	kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
+		kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
+		c->refcount = 1;
+	}
 	return c;
 }
-EXPORT_SYMBOL(kmem_cache_create);
 
 void kmem_cache_destroy(struct kmem_cache *c)
 {
@@ -678,19 +619,12 @@ int kmem_cache_shrink(struct kmem_cache *d)
 }
 EXPORT_SYMBOL(kmem_cache_shrink);
 
-static unsigned int slob_ready __read_mostly;
-
-int slab_is_available(void)
-{
-	return slob_ready;
-}
-
 void __init kmem_cache_init(void)
 {
-	slob_ready = 1;
+	slab_state = UP;
 }
 
 void __init kmem_cache_init_late(void)
 {
-	/* Nothing to do */
+	slab_state = FULL;
 }