- /*
- * SLOB Allocator: Simple List Of Blocks
- *
- * Matt Mackall <mpm@selenic.com> 12/30/03
- *
- * How SLOB works:
- *
- * The core of SLOB is a traditional K&R style heap allocator, with
- * support for returning aligned objects. The granularity of this
- * allocator is 8 bytes on x86, though it's perhaps possible to reduce
- * this to 4 if it's deemed worth the effort. The slob heap is a
- * singly-linked list of pages from __get_free_page, grown on demand,
- * and allocation from the heap is currently first-fit.
- *
- * Above this is an implementation of kmalloc/kfree. Blocks returned
- * from kmalloc are 8-byte aligned and prepended with an 8-byte header.
- * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
- * __get_free_pages directly so that it can return page-aligned blocks
- * and keeps a linked list of such pages and their orders. These
- * objects are detected in kfree() by their page alignment.
- *
- * SLAB is emulated on top of SLOB by simply calling constructors and
- * destructors for every SLAB allocation. Objects are returned with
- * the 8-byte alignment unless the SLAB_MUST_HWCACHE_ALIGN flag is
- * set, in which case the low-level allocator will fragment blocks to
- * create the proper alignment. Again, objects of page-size or greater
- * are allocated by calling __get_free_pages. As SLAB objects know
- * their size, no separate size bookkeeping is necessary and there is
- * essentially no allocation space overhead.
- */
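- /*
- * Editor's illustration, not part of the original file: assuming the
- * 8-byte slob_t of 32-bit x86, kmalloc(100) becomes slob_alloc(108),
- * which rounds up to SLOB_UNITS(108) == 14 units, i.e. 112 bytes:
- *
- *   [ slob_t header, units = 14 ][ 104 bytes usable by the caller ]
- *     ^ 8 bytes                   ^ pointer returned by kmalloc()
- *
- * kfree() steps back one unit from that pointer to recover the header;
- * ksize() reports 112, as the stored unit count includes the header.
- */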
- #include <linux/slab.h>
- #include <linux/mm.h>
- #include <linux/cache.h>
- #include <linux/init.h>
- #include <linux/module.h>
- #include <linux/timer.h>
- struct slob_block {
- int units;
- struct slob_block *next;
- };
- typedef struct slob_block slob_t;
- #define SLOB_UNIT sizeof(slob_t)
- #define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
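- /* Editor's note: sizes round up to whole units, e.g. with SLOB_UNIT == 8,
- * SLOB_UNITS(1) == 1, SLOB_UNITS(8) == 1 and SLOB_UNITS(9) == 2. */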
- #define SLOB_ALIGN L1_CACHE_BYTES
- struct bigblock {
- int order;
- void *pages;
- struct bigblock *next;
- };
- typedef struct bigblock bigblock_t;
- static slob_t arena = { .next = &arena, .units = 1 };
- static slob_t *slobfree = &arena;
- static bigblock_t *bigblocks;
- static DEFINE_SPINLOCK(slob_lock);
- static DEFINE_SPINLOCK(block_lock);
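- /* Editor's note: slob_lock serializes all free-list walks in slob_alloc()
- * and slob_free(); block_lock guards the bigblocks list of page-or-larger
- * allocations. */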
- static void slob_free(void *b, int size);
- static void *slob_alloc(size_t size, gfp_t gfp, int align)
- {
- slob_t *prev, *cur, *aligned = NULL;
- int delta = 0, units = SLOB_UNITS(size);
- unsigned long flags;
- spin_lock_irqsave(&slob_lock, flags);
- prev = slobfree;
- for (cur = prev->next; ; prev = cur, cur = cur->next) {
- if (align) {
- aligned = (slob_t *)ALIGN((unsigned long)cur, align);
- delta = aligned - cur;
- }
- if (cur->units >= units + delta) { /* room enough? */
- if (delta) { /* need to fragment head to align? */
- aligned->units = cur->units - delta;
- aligned->next = cur->next;
- cur->next = aligned;
- cur->units = delta;
- prev = cur;
- cur = aligned;
- }
- if (cur->units == units) /* exact fit? */
- prev->next = cur->next; /* unlink */
- else { /* fragment */
- prev->next = cur + units;
- prev->next->units = cur->units - units;
- prev->next->next = cur->next;
- cur->units = units;
- }
- slobfree = prev;
- spin_unlock_irqrestore(&slob_lock, flags);
- return cur;
- }
- if (cur == slobfree) {
- spin_unlock_irqrestore(&slob_lock, flags);
- if (size == PAGE_SIZE) /* trying to shrink arena? */
- return NULL;
- cur = (slob_t *)__get_free_page(gfp);
- if (!cur)
- return NULL;
- slob_free(cur, PAGE_SIZE);
- spin_lock_irqsave(&slob_lock, flags);
- cur = slobfree;
- }
- }
- }
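- /*
- * Worked example of the alignment path above (editor's addition): for
- * slob_alloc(64, gfp, 32) with cur sitting 8 bytes below a 32-byte
- * boundary, delta == 1 unit. The head fragment keeps that one unit on
- * the free list, and the aligned remainder is then carved exactly as
- * in the unaligned case.
- */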
- static void slob_free(void *block, int size)
- {
- slob_t *cur, *b = (slob_t *)block;
- unsigned long flags;
- if (!block)
- return;
- if (size)
- b->units = SLOB_UNITS(size);
- /* Find the reinsertion point: the free list is kept sorted by
- * address, so walk until b falls between cur and cur->next. The
- * second test catches the wrap-around at the highest-addressed
- * block, where cur >= cur->next. */
- spin_lock_irqsave(&slob_lock, flags);
- for (cur = slobfree; !(b > cur && b < cur->next); cur = cur->next)
- if (cur >= cur->next && (b > cur || b < cur->next))
- break;
- if (b + b->units == cur->next) { /* merge with the following block? */
- b->units += cur->next->units;
- b->next = cur->next->next;
- } else
- b->next = cur->next;
- if (cur + cur->units == b) { /* merge with the preceding block? */
- cur->units += b->units;
- cur->next = b->next;
- } else
- cur->next = b;
- slobfree = cur;
- spin_unlock_irqrestore(&slob_lock, flags);
- }
- static int FASTCALL(find_order(int size));
- static int fastcall find_order(int size)
- {
- int order = 0;
- for (; size > 4096; size >>= 1)
- order++;
- return order;
- }
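- /* Editor's note, assuming 4 KiB pages: find_order(4096) == 0,
- * find_order(4097) == 1 and find_order(16384) == 2, so the allocation
- * covers PAGE_SIZE << order bytes. */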
- void *kmalloc(size_t size, gfp_t gfp)
- {
- slob_t *m;
- bigblock_t *bb;
- unsigned long flags;
- if (size < PAGE_SIZE - SLOB_UNIT) {
- m = slob_alloc(size + SLOB_UNIT, gfp, 0);
- return m ? (void *)(m + 1) : NULL;
- }
- bb = slob_alloc(sizeof(bigblock_t), gfp, 0);
- if (!bb)
- return NULL;
- bb->order = find_order(size);
- bb->pages = (void *)__get_free_pages(gfp, bb->order);
- if (bb->pages) {
- spin_lock_irqsave(&block_lock, flags);
- bb->next = bigblocks;
- bigblocks = bb;
- spin_unlock_irqrestore(&block_lock, flags);
- return bb->pages;
- }
- slob_free(bb, sizeof(bigblock_t));
- return NULL;
- }
- EXPORT_SYMBOL(kmalloc);
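- /*
- * Editor's sketch of the two kmalloc() paths (hypothetical values, not
- * from the original file):
- *
- *   void *p = kmalloc(100, GFP_KERNEL);  // heap path: 14 units w/ header
- *   void *q = kmalloc(8192, GFP_KERNEL); // bigblock path: order-1 pages
- *   kfree(q);  // page-aligned: found and unlinked from bigblocks
- *   kfree(p);  // heap block: header at p - SLOB_UNIT rejoins the list
- */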
- void kfree(const void *block)
- {
- bigblock_t *bb, **last = &bigblocks;
- unsigned long flags;
- if (!block)
- return;
- if (!((unsigned long)block & (PAGE_SIZE-1))) {
- /* might be on the big block list */
- spin_lock_irqsave(&block_lock, flags);
- for (bb = bigblocks; bb; last = &bb->next, bb = bb->next) {
- if (bb->pages == block) {
- *last = bb->next;
- spin_unlock_irqrestore(&block_lock, flags);
- free_pages((unsigned long)block, bb->order);
- slob_free(bb, sizeof(bigblock_t));
- return;
- }
- }
- spin_unlock_irqrestore(&block_lock, flags);
- }
- slob_free((slob_t *)block - 1, 0);
- return;
- }
- EXPORT_SYMBOL(kfree);
- unsigned int ksize(const void *block)
- {
- bigblock_t *bb;
- unsigned long flags;
- if (!block)
- return 0;
- if (!((unsigned long)block & (PAGE_SIZE-1))) {
- spin_lock_irqsave(&block_lock, flags);
- for (bb = bigblocks; bb; bb = bb->next)
- if (bb->pages == block) {
- spin_unlock_irqrestore(&block_lock, flags);
- return PAGE_SIZE << bb->order;
- }
- spin_unlock_irqrestore(&block_lock, flags);
- }
- /* Heap object: the stored unit count includes the one-unit header. */
- return ((slob_t *)block - 1)->units * SLOB_UNIT;
- }
- struct kmem_cache {
- unsigned int size, align;
- const char *name;
- void (*ctor)(void *, struct kmem_cache *, unsigned long);
- void (*dtor)(void *, struct kmem_cache *, unsigned long);
- };
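- /* Editor's note: SLAB emulation keeps no per-cache storage beyond this
- * descriptor; objects come straight from the slob heap or the page
- * allocator. */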
- struct kmem_cache *kmem_cache_create(const char *name, size_t size,
- size_t align, unsigned long flags,
- void (*ctor)(void*, struct kmem_cache *, unsigned long),
- void (*dtor)(void*, struct kmem_cache *, unsigned long))
- {
- struct kmem_cache *c;
- c = slob_alloc(sizeof(struct kmem_cache), flags, 0);
- if (c) {
- c->name = name;
- c->size = size;
- c->ctor = ctor;
- c->dtor = dtor;
- /* ignore alignment unless it's forced */
- c->align = (flags & SLAB_MUST_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
- if (c->align < align)
- c->align = align;
- }
- return c;
- }
- EXPORT_SYMBOL(kmem_cache_create);
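- /*
- * Editor's usage sketch ("foo_cache" and struct foo are hypothetical):
- *
- *   struct kmem_cache *cp = kmem_cache_create("foo_cache",
- *   		sizeof(struct foo), 0, 0, NULL, NULL);
- *   struct foo *f = kmem_cache_alloc(cp, GFP_KERNEL);
- *   ...
- *   kmem_cache_free(cp, f);
- *   kmem_cache_destroy(cp);
- */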
- void kmem_cache_destroy(struct kmem_cache *c)
- {
- slob_free(c, sizeof(struct kmem_cache));
- }
- EXPORT_SYMBOL(kmem_cache_destroy);
- void *kmem_cache_alloc(struct kmem_cache *c, gfp_t flags)
- {
- void *b;
- if (c->size < PAGE_SIZE)
- b = slob_alloc(c->size, flags, c->align);
- else
- b = (void *)__get_free_pages(flags, find_order(c->size));
- if (b && c->ctor) /* don't run the constructor on a failed allocation */
- c->ctor(b, c, SLAB_CTOR_CONSTRUCTOR);
- return b;
- }
- EXPORT_SYMBOL(kmem_cache_alloc);
- void *kmem_cache_zalloc(struct kmem_cache *c, gfp_t flags)
- {
- void *ret = kmem_cache_alloc(c, flags);
- if (ret)
- memset(ret, 0, c->size);
- return ret;
- }
- EXPORT_SYMBOL(kmem_cache_zalloc);
- void kmem_cache_free(struct kmem_cache *c, void *b)
- {
- if (c->dtor)
- c->dtor(b, c, 0);
- if (c->size < PAGE_SIZE)
- slob_free(b, c->size);
- else
- free_pages((unsigned long)b, find_order(c->size));
- }
- EXPORT_SYMBOL(kmem_cache_free);
- unsigned int kmem_cache_size(struct kmem_cache *c)
- {
- return c->size;
- }
- EXPORT_SYMBOL(kmem_cache_size);
- const char *kmem_cache_name(struct kmem_cache *c)
- {
- return c->name;
- }
- EXPORT_SYMBOL(kmem_cache_name);
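- /*
- * Editor's note: the timer below fires once a second and re-runs
- * kmem_cache_init(), which tries to carve a page-aligned, page-sized
- * block out of the free list. When it succeeds, the page is handed
- * back to the page allocator, shrinking the slob arena; slob_alloc()
- * deliberately refuses to grow the heap for such requests.
- */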
- static struct timer_list slob_timer = TIMER_INITIALIZER(
- (void (*)(unsigned long))kmem_cache_init, 0, 0);
- void kmem_cache_init(void)
- {
- void *p = slob_alloc(PAGE_SIZE, 0, PAGE_SIZE-1);
- if (p)
- free_page((unsigned long)p);
- mod_timer(&slob_timer, jiffies + HZ);
- }