slob.c

/*
 * SLOB Allocator: Simple List Of Blocks
 *
 * Matt Mackall <mpm@selenic.com> 12/30/03
 *
 * How SLOB works:
 *
 * The core of SLOB is a traditional K&R style heap allocator, with
 * support for returning aligned objects. The granularity of this
 * allocator is 4 bytes on 32-bit and 8 bytes on 64-bit, though it
 * could be as low as 2 if the compiler alignment requirements allow.
 *
 * The slob heap is a linked list of pages from __get_free_page, and
 * within each page, there is a singly-linked list of free blocks (slob_t).
 * The heap is grown on demand and allocation from the heap is currently
 * first-fit.
 *
 * Above this is an implementation of kmalloc/kfree. Blocks returned
 * from kmalloc are aligned to the granularity above and prepended with
 * a one-unit (SLOB_UNIT) header.
 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
 * __get_free_pages directly so that it can return page-aligned blocks
 * and keeps a linked list of such pages and their orders. These
 * objects are detected in kfree() by their page alignment.
 *
 * SLAB is emulated on top of SLOB by simply calling constructors and
 * destructors for every SLAB allocation. Objects are returned with this
 * minimum alignment unless the SLAB_HWCACHE_ALIGN flag is set, in which
 * case the low-level allocator will fragment blocks to create the proper
 * alignment. Again, objects of page-size or greater are allocated by
 * calling __get_free_pages. As SLAB objects know their size, no separate
 * size bookkeeping is necessary and there is essentially no allocation
 * space overhead.
 */
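
/*
 * Illustrative picture (not used by the code): a slob page is carved into
 * used and free regions, and the free regions are threaded into a
 * singly-linked list inside the page itself:
 *
 *	| used | free A | used | free B | used | free C |
 *
 * The page's free pointer (sp->free below) points at A; A's header
 * records its size in units and the offset of B from the page base;
 * B likewise points at C; C is the last free block in the page.
 * slob_page_alloc() walks this list first-fit.
 */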

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <asm/atomic.h>

/* SLOB_MIN_ALIGN == sizeof(long) */
#if BITS_PER_LONG == 32
#define SLOB_MIN_ALIGN 4
#else
#define SLOB_MIN_ALIGN 8
#endif

/*
 * slob_block has a field 'units', which indicates size of block if +ve,
 * or offset of next block if -ve (in SLOB_UNITs).
 *
 * Free blocks of size 1 unit simply contain the offset of the next block.
 * Those with larger size contain their size in the first SLOB_UNIT of
 * memory, and the offset of the next free block in the second SLOB_UNIT.
 */
#if PAGE_SIZE <= (32767 * SLOB_MIN_ALIGN)
typedef s16 slobidx_t;
#else
typedef s32 slobidx_t;
#endif
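
/*
 * Worked example of the encoding above (illustrative values): suppose a
 * free block s sits 10 units into its page, is 5 units long, and the next
 * free block starts 40 units into the page.  Then s[0].units == 5 and
 * s[1].units == 40.  If s were instead a single free unit, it would be
 * encoded in one slobidx_t as s[0].units == -40, i.e. the negated offset
 * of the next free block.  set_slob() writes this encoding and
 * slob_units()/slob_next() read it back.
 */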

/*
 * Align struct slob_block to long for now, but can some embedded
 * architectures get away with less?
 */
struct slob_block {
	slobidx_t units;
} __attribute__((aligned(SLOB_MIN_ALIGN)));
typedef struct slob_block slob_t;

/*
 * We use struct page fields to manage some slob allocation aspects,
 * however to avoid the horrible mess in include/linux/mm_types.h, we'll
 * just define our own struct page type variant here.
 */
struct slob_page {
	union {
		struct {
			unsigned long flags;	/* mandatory */
			atomic_t _count;	/* mandatory */
			slobidx_t units;	/* free units left in page */
			unsigned long pad[2];
			slob_t *free;		/* first free slob_t in page */
			struct list_head list;	/* linked list of free pages */
		};
		struct page page;
	};
};
static inline void struct_slob_page_wrong_size(void)
{ BUILD_BUG_ON(sizeof(struct slob_page) != sizeof(struct page)); }

/*
 * free_slob_page: call before a slob_page is returned to the page allocator.
 */
static inline void free_slob_page(struct slob_page *sp)
{
	reset_page_mapcount(&sp->page);
	sp->page.mapping = NULL;
}

/*
 * All (partially) free slob pages go on this list.
 */
static LIST_HEAD(free_slob_pages);

/*
 * slob_page: True for all slob pages (false for bigblock pages)
 */
static inline int slob_page(struct slob_page *sp)
{
	return test_bit(PG_active, &sp->flags);
}

static inline void set_slob_page(struct slob_page *sp)
{
	__set_bit(PG_active, &sp->flags);
}

static inline void clear_slob_page(struct slob_page *sp)
{
	__clear_bit(PG_active, &sp->flags);
}

/*
 * slob_page_free: true for pages on free_slob_pages list.
 */
static inline int slob_page_free(struct slob_page *sp)
{
	return test_bit(PG_private, &sp->flags);
}

static inline void set_slob_page_free(struct slob_page *sp)
{
	list_add(&sp->list, &free_slob_pages);
	__set_bit(PG_private, &sp->flags);
}

static inline void clear_slob_page_free(struct slob_page *sp)
{
	list_del(&sp->list);
	__clear_bit(PG_private, &sp->flags);
}

#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
#define SLOB_ALIGN L1_CACHE_BYTES
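
/*
 * For concreteness (example values, not definitions): with SLOB_MIN_ALIGN
 * of 8 on a 64-bit build, sizeof(slob_t) == 8, so SLOB_UNIT == 8 and
 * SLOB_UNITS(100) == 13; on a 32-bit build with SLOB_MIN_ALIGN of 4 they
 * are 4 and 25 respectively.  SLOB_ALIGN follows L1_CACHE_BYTES, which is
 * typically 32 or 64 depending on the architecture.
 */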

/*
 * struct slob_rcu is inserted at the tail of allocated slob blocks, which
 * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
 * the block using call_rcu.
 */
struct slob_rcu {
	struct rcu_head head;
	int size;
};
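
/*
 * Resulting layout of an object from a SLAB_DESTROY_BY_RCU cache: the
 * usable object occupies [base, base + c->size - sizeof(struct slob_rcu))
 * and the struct slob_rcu footer sits in the remaining bytes up to
 * base + c->size (c->size already includes the footer, see
 * kmem_cache_create()).  kmem_cache_free() fills in the footer and defers
 * the real free to kmem_rcu_free() via call_rcu().
 */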

/*
 * slob_lock protects all slob allocator structures.
 */
static DEFINE_SPINLOCK(slob_lock);

/*
 * Encode the given size and next info into a free slob block s.
 */
static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t offset = next - base;

	if (size > 1) {
		s[0].units = size;
		s[1].units = offset;
	} else
		s[0].units = -offset;
}

/*
 * Return the size of a slob block.
 */
static slobidx_t slob_units(slob_t *s)
{
	if (s->units > 0)
		return s->units;
	return 1;
}

/*
 * Return the next free slob block pointer after this one.
 */
static slob_t *slob_next(slob_t *s)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t next;

	if (s[0].units < 0)
		next = -s[0].units;
	else
		next = s[1].units;
	return base+next;
}

/*
 * Returns true if s is the last free block in its page.
 */
static int slob_last(slob_t *s)
{
	return !((unsigned long)slob_next(s) & ~PAGE_MASK);
}

/*
 * Allocate a slob block within a given slob_page sp.
 */
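
/*
 * The search below is first-fit.  When a non-zero alignment is requested,
 * the candidate block 'cur' may need a head fragment of 'delta' units so
 * that the returned pointer 'aligned' meets the alignment; that head
 * fragment is split off and left on the free list.  An exact fit is then
 * unlinked, while a larger block is split and its tail re-linked.
 */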
static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
{
	slob_t *prev, *cur, *aligned = 0;
	int delta = 0, units = SLOB_UNITS(size);

	for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
		slobidx_t avail = slob_units(cur);

		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
		if (avail >= units + delta) { /* room enough? */
			slob_t *next;

			if (delta) { /* need to fragment head to align? */
				next = slob_next(cur);
				set_slob(aligned, avail - delta, next);
				set_slob(cur, delta, aligned);
				prev = cur;
				cur = aligned;
				avail = slob_units(cur);
			}

			next = slob_next(cur);
			if (avail == units) { /* exact fit? unlink. */
				if (prev)
					set_slob(prev, slob_units(prev), next);
				else
					sp->free = next;
			} else { /* fragment */
				if (prev)
					set_slob(prev, slob_units(prev), cur + units);
				else
					sp->free = cur + units;
				set_slob(cur + units, avail - units, next);
			}

			sp->units -= units;
			if (!sp->units)
				clear_slob_page_free(sp);
			return cur;
		}
		if (slob_last(cur))
			return NULL;
	}
}

/*
 * slob_alloc: entry point into the slob allocator.
 */
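
/*
 * The scan of the partially-free page list happens under slob_lock.  If no
 * page has room, the lock is dropped while a fresh page is taken from
 * __get_free_page(), then retaken to initialise that page as a single free
 * block and satisfy the request from it.
 */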
static void *slob_alloc(size_t size, gfp_t gfp, int align)
{
	struct slob_page *sp;
	slob_t *b = NULL;
	unsigned long flags;

	spin_lock_irqsave(&slob_lock, flags);
	/* Iterate through each partially free page, try to find room */
	list_for_each_entry(sp, &free_slob_pages, list) {
		if (sp->units >= SLOB_UNITS(size)) {
			b = slob_page_alloc(sp, size, align);
			if (b)
				break;
		}
	}
	spin_unlock_irqrestore(&slob_lock, flags);

	/* Not enough space: must allocate a new page */
	if (!b) {
		b = (slob_t *)__get_free_page(gfp);
		if (!b)
			return 0;
		sp = (struct slob_page *)virt_to_page(b);
		set_slob_page(sp);

		spin_lock_irqsave(&slob_lock, flags);
		sp->units = SLOB_UNITS(PAGE_SIZE);
		sp->free = b;
		INIT_LIST_HEAD(&sp->list);
		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
		set_slob_page_free(sp);
		b = slob_page_alloc(sp, size, align);
		BUG_ON(!b);
		spin_unlock_irqrestore(&slob_lock, flags);
	}
	return b;
}

/*
 * slob_free: entry point into the slob allocator.
 */
static void slob_free(void *block, int size)
{
	struct slob_page *sp;
	slob_t *prev, *next, *b = (slob_t *)block;
	slobidx_t units;
	unsigned long flags;

	if (!block)
		return;
	BUG_ON(!size);

	sp = (struct slob_page *)virt_to_page(block);
	units = SLOB_UNITS(size);

	spin_lock_irqsave(&slob_lock, flags);

	if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
		/* Go directly to page allocator. Do not pass slob allocator */
		if (slob_page_free(sp))
			clear_slob_page_free(sp);
		clear_slob_page(sp);
		free_slob_page(sp);
		free_page((unsigned long)b);
		goto out;
	}

	if (!slob_page_free(sp)) {
		/* This slob page is about to become partially free. Easy! */
		sp->units = units;
		sp->free = b;
		set_slob(b, units,
			(void *)((unsigned long)(b +
					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
		set_slob_page_free(sp);
		goto out;
	}

	/*
	 * Otherwise the page is already partially free, so find reinsertion
	 * point.
	 */
	sp->units += units;

	if (b < sp->free) {
		set_slob(b, units, sp->free);
		sp->free = b;
	} else {
		prev = sp->free;
		next = slob_next(prev);
		while (b > next) {
			prev = next;
			next = slob_next(prev);
		}

		if (!slob_last(prev) && b + units == next) {
			/* coalesce with the following free block */
			units += slob_units(next);
			set_slob(b, units, slob_next(next));
		} else
			set_slob(b, units, next);

		if (prev + slob_units(prev) == b) {
			/* coalesce with the preceding free block */
			units = slob_units(b) + slob_units(prev);
			set_slob(prev, units, slob_next(b));
		} else
			set_slob(prev, slob_units(prev), b);
	}
out:
	spin_unlock_irqrestore(&slob_lock, flags);
}

/*
 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
 */

struct bigblock {
	int order;
	void *pages;
	struct bigblock *next;
};
typedef struct bigblock bigblock_t;

static bigblock_t *bigblocks;
static DEFINE_SPINLOCK(block_lock);
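
/*
 * Small kmalloc allocations get one extra SLOB_UNIT in front of the
 * object; that header's 'units' field records the requested size in
 * bytes so that kfree()/ksize() can recover it.  Requests of
 * PAGE_SIZE - SLOB_UNIT and up bypass the slob heap: their pages come
 * straight from __get_free_pages() and are tracked on the bigblocks
 * list above, keyed by the page address.
 */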
void *__kmalloc(size_t size, gfp_t gfp)
{
	slob_t *m;
	bigblock_t *bb;
	unsigned long flags;

	if (size < PAGE_SIZE - SLOB_UNIT) {
		m = slob_alloc(size + SLOB_UNIT, gfp, 0);
		if (!m)
			return NULL;
		m->units = size;
		return m + 1;
	}

	bb = slob_alloc(sizeof(bigblock_t), gfp, 0);
	if (!bb)
		return 0;

	bb->order = get_order(size);
	bb->pages = (void *)__get_free_pages(gfp, bb->order);

	if (bb->pages) {
		spin_lock_irqsave(&block_lock, flags);
		bb->next = bigblocks;
		bigblocks = bb;
		spin_unlock_irqrestore(&block_lock, flags);
		return bb->pages;
	}

	slob_free(bb, sizeof(bigblock_t));
	return 0;
}
EXPORT_SYMBOL(__kmalloc);

/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 *
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes. If @p is %NULL, krealloc()
 * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!p))
		return kmalloc_track_caller(new_size, flags);

	if (unlikely(!new_size)) {
		kfree(p);
		return NULL;
	}

	ret = kmalloc_track_caller(new_size, flags);
	if (ret) {
		memcpy(ret, p, min(new_size, ksize(p)));
		kfree(p);
	}
	return ret;
}
EXPORT_SYMBOL(krealloc);

void kfree(const void *block)
{
	struct slob_page *sp;
	slob_t *m;
	bigblock_t *bb, **last = &bigblocks;
	unsigned long flags;

	if (!block)
		return;

	sp = (struct slob_page *)virt_to_page(block);
	if (!slob_page(sp)) {
		/* on the big block list */
		spin_lock_irqsave(&block_lock, flags);
		for (bb = bigblocks; bb; last = &bb->next, bb = bb->next) {
			if (bb->pages == block) {
				*last = bb->next;
				spin_unlock_irqrestore(&block_lock, flags);
				free_pages((unsigned long)block, bb->order);
				slob_free(bb, sizeof(bigblock_t));
				return;
			}
		}
		spin_unlock_irqrestore(&block_lock, flags);
		WARN_ON(1);
		return;
	}

	m = (slob_t *)block - 1;
	slob_free(m, m->units + SLOB_UNIT);
	return;
}
EXPORT_SYMBOL(kfree);

size_t ksize(const void *block)
{
	struct slob_page *sp;
	bigblock_t *bb;
	unsigned long flags;

	if (!block)
		return 0;

	sp = (struct slob_page *)virt_to_page(block);
	if (!slob_page(sp)) {
		spin_lock_irqsave(&block_lock, flags);
		for (bb = bigblocks; bb; bb = bb->next)
			if (bb->pages == block) {
				spin_unlock_irqrestore(&block_lock, flags);
				return PAGE_SIZE << bb->order;
			}
		spin_unlock_irqrestore(&block_lock, flags);
	}

	return ((slob_t *)block - 1)->units + SLOB_UNIT;
}

struct kmem_cache {
	unsigned int size, align;
	unsigned long flags;
	const char *name;
	void (*ctor)(void *, struct kmem_cache *, unsigned long);
};

struct kmem_cache *kmem_cache_create(const char *name, size_t size,
	size_t align, unsigned long flags,
	void (*ctor)(void*, struct kmem_cache *, unsigned long),
	void (*dtor)(void*, struct kmem_cache *, unsigned long))
{
	struct kmem_cache *c;

	c = slob_alloc(sizeof(struct kmem_cache), flags, 0);

	if (c) {
		c->name = name;
		c->size = size;
		if (flags & SLAB_DESTROY_BY_RCU) {
			/* leave room for rcu footer at the end of object */
			c->size += sizeof(struct slob_rcu);
		}
		c->flags = flags;
		c->ctor = ctor;
		/* ignore alignment unless it's forced */
		c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
		if (c->align < align)
			c->align = align;
	} else if (flags & SLAB_PANIC)
		panic("Cannot create slab cache %s\n", name);

	return c;
}
EXPORT_SYMBOL(kmem_cache_create);

void kmem_cache_destroy(struct kmem_cache *c)
{
	slob_free(c, sizeof(struct kmem_cache));
}
EXPORT_SYMBOL(kmem_cache_destroy);

void *kmem_cache_alloc(struct kmem_cache *c, gfp_t flags)
{
	void *b;

	if (c->size < PAGE_SIZE)
		b = slob_alloc(c->size, flags, c->align);
	else
		b = (void *)__get_free_pages(flags, get_order(c->size));

	if (c->ctor)
		c->ctor(b, c, 0);

	return b;
}
EXPORT_SYMBOL(kmem_cache_alloc);

void *kmem_cache_zalloc(struct kmem_cache *c, gfp_t flags)
{
	void *ret = kmem_cache_alloc(c, flags);
	if (ret)
		memset(ret, 0, c->size);

	return ret;
}
EXPORT_SYMBOL(kmem_cache_zalloc);

static void __kmem_cache_free(void *b, int size)
{
	if (size < PAGE_SIZE)
		slob_free(b, size);
	else
		free_pages((unsigned long)b, get_order(size));
}

static void kmem_rcu_free(struct rcu_head *head)
{
	struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
	/* step back from the rcu footer to the start of the object */
	void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));

	__kmem_cache_free(b, slob_rcu->size);
}

void kmem_cache_free(struct kmem_cache *c, void *b)
{
	if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
		struct slob_rcu *slob_rcu;
		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
		INIT_RCU_HEAD(&slob_rcu->head);
		slob_rcu->size = c->size;
		call_rcu(&slob_rcu->head, kmem_rcu_free);
	} else {
		__kmem_cache_free(b, c->size);
	}
}
EXPORT_SYMBOL(kmem_cache_free);

unsigned int kmem_cache_size(struct kmem_cache *c)
{
	return c->size;
}
EXPORT_SYMBOL(kmem_cache_size);

const char *kmem_cache_name(struct kmem_cache *c)
{
	return c->name;
}
EXPORT_SYMBOL(kmem_cache_name);

int kmem_cache_shrink(struct kmem_cache *d)
{
	return 0;
}
EXPORT_SYMBOL(kmem_cache_shrink);

int kmem_ptr_validate(struct kmem_cache *a, const void *b)
{
	return 0;
}

void __init kmem_cache_init(void)
{
}