slub_def.h

#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter <clameter@sgi.com>
 */
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <linux/kobject.h>

enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	NR_SLUB_STAT_ITEMS };
struct kmem_cache_cpu {
	void **freelist;	/* Pointer to first free per cpu object */
	struct page *page;	/* The slab from which we are allocating */
	int node;		/* The node of the page (or -1 for debug) */
	unsigned int offset;	/* Freepointer offset (in word units) */
	unsigned int objsize;	/* Size of an object (from kmem_cache) */
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};
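/*
 * Illustrative sketch, not part of this header: the allocation fast path in
 * mm/slub.c uses the fields above roughly as follows. Because "offset" is
 * given in word units, indexing the object as a void ** yields the free
 * pointer that is stored inside each free object. The function name is made
 * up for the example; the real code also handles node matching, interrupt
 * disabling and the slow path.
 */
static __always_inline void *slub_fastpath_sketch(struct kmem_cache_cpu *c)
{
	void **object = c->freelist;	/* first free object on the cpu slab */

	if (!object)
		return NULL;		/* real code falls back to the slow path */

	c->freelist = object[c->offset];	/* link to the next free object */
#ifdef CONFIG_SLUB_STATS
	c->stat[ALLOC_FASTPATH]++;	/* per-cpu counter, no atomics needed */
#endif
	return object;
}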
struct kmem_cache_node {
	spinlock_t list_lock;	/* Protect partial list and nr_partial */
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
};

/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned long x;
};
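/*
 * Illustrative sketch, not part of this header: the helpers that pack and
 * unpack this word live in mm/slub.c. Assuming the object count sits in the
 * low bits and the page order is shifted above it (a 16-bit split is used
 * here as an example), they look roughly like this:
 */
#define OO_SHIFT_SKETCH	16
#define OO_MASK_SKETCH	((1 << OO_SHIFT_SKETCH) - 1)

static inline struct kmem_cache_order_objects oo_make_sketch(int order,
						unsigned long objects)
{
	struct kmem_cache_order_objects x = {
		((unsigned long)order << OO_SHIFT_SKETCH) | objects
	};
	return x;
}

static inline int oo_order_sketch(struct kmem_cache_order_objects x)
{
	return x.x >> OO_SHIFT_SKETCH;		/* high bits: page order */
}

static inline int oo_objects_sketch(struct kmem_cache_order_objects x)
{
	return x.x & OO_MASK_SKETCH;		/* low bits: objects per slab */
}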
/*
 * Slab cache management.
 */
struct kmem_cache {
	/* Used for retrieving partial slabs etc */
	unsigned long flags;
	int size;		/* The size of an object including meta data */
	int objsize;		/* The size of an object without meta data */
	int offset;		/* Free pointer offset. */
	struct kmem_cache_order_objects oo;

	/*
	 * Avoid an extra cache line for UP, SMP and for the node local to
	 * struct kmem_cache.
	 */
	struct kmem_cache_node local_node;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects max;
	struct kmem_cache_order_objects min;
	gfp_t allocflags;	/* gfp flags to use on each alloc */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(struct kmem_cache *, void *);
	int inuse;		/* Offset to metadata */
	int align;		/* Alignment */
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
#ifdef CONFIG_SLUB_DEBUG
	struct kobject kobj;	/* For sysfs */
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	int remote_node_defrag_ratio;
	struct kmem_cache_node *node[MAX_NUMNODES];
#endif
#ifdef CONFIG_SMP
	struct kmem_cache_cpu *cpu_slab[NR_CPUS];
#else
	struct kmem_cache_cpu cpu_slab;
#endif
};
/*
 * Kmalloc subsystem.
 */
#if defined(ARCH_KMALLOC_MINALIGN) && ARCH_KMALLOC_MINALIGN > 8
#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
#else
#define KMALLOC_MIN_SIZE 8
#endif

#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)

/*
 * We keep the general caches in an array of slab caches that are used for
 * 2^x bytes of allocations.
 */
extern struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1];

/*
 * Sorry that the following has to be that ugly but some versions of GCC
 * have trouble with constant propagation and loops.
 */
static __always_inline int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (size > 64 && size <= 96)
		return 1;
	if (size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
/*
 * The following is only needed to support architectures with a larger page
 * size than 4k.
 */
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;
	return -1;

/*
 * What we really wanted to do and cannot do because of compiler issues is:
 *	int i;
 *	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
 *		if (size <= (1 << i))
 *			return i;
 */
}
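/*
 * Illustrative examples, not part of this header. Assuming
 * KMALLOC_MIN_SIZE == 8, the mapping above gives:
 *
 *	kmalloc_index(0)    == 0	(no cache; callers get ZERO_SIZE_PTR)
 *	kmalloc_index(8)    == 3	(8 byte cache)
 *	kmalloc_index(24)   == 5	(rounded up to the 32 byte cache)
 *	kmalloc_index(96)   == 1	(dedicated 96 byte cache)
 *	kmalloc_index(192)  == 2	(dedicated 192 byte cache)
 *	kmalloc_index(4096) == 12	(one page on 4k page architectures)
 */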
/*
 * Find the slab cache for a given combination of allocation flags and size.
 *
 * This ought to end up with a global pointer to the right cache
 * in kmalloc_caches.
 */
static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
{
	int index = kmalloc_index(size);

	if (index == 0)
		return NULL;

	return &kmalloc_caches[index];
}

#ifdef CONFIG_ZONE_DMA
#define SLUB_DMA __GFP_DMA
#else
/* Disable DMA functionality */
#define SLUB_DMA (__force gfp_t)0
#endif

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	return (void *)__get_free_pages(flags | __GFP_COMP, get_order(size));
}

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		if (size > PAGE_SIZE)
			return kmalloc_large(size, flags);

		if (!(flags & SLUB_DMA)) {
			struct kmem_cache *s = kmalloc_slab(size);

			if (!s)
				return ZERO_SIZE_PTR;

			return kmem_cache_alloc(s, flags);
		}
	}
	return __kmalloc(size, flags);
}
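/*
 * Illustrative usage sketch, not part of this header. When the size is a
 * compile-time constant and no DMA memory is requested, the call below
 * resolves at compile time to kmem_cache_alloc() on the matching kmalloc
 * cache; otherwise it falls back to __kmalloc(). The struct name is made up
 * for the example.
 *
 *	struct foo_sketch *p = kmalloc(sizeof(*p), GFP_KERNEL);
 *	if (!p)
 *		return -ENOMEM;
 *	...
 *	kfree(p);
 */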
#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	if (__builtin_constant_p(size) &&
	    size <= PAGE_SIZE && !(flags & SLUB_DMA)) {
		struct kmem_cache *s = kmalloc_slab(size);

		if (!s)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node(s, flags, node);
	}
	return __kmalloc_node(size, flags, node);
}
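/*
 * Illustrative usage sketch, not part of this header: kmalloc_node() behaves
 * like kmalloc() but asks for memory from a specific NUMA node, for example
 * the node a device is attached to. The variable names are made up for the
 * example; dev_to_node() is assumed to be available via linux/device.h.
 *
 *	int nid = dev_to_node(dev);
 *	struct bar_sketch *b = kmalloc_node(sizeof(*b), GFP_KERNEL, nid);
 *	if (!b)
 *		return -ENOMEM;
 */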
#endif

#endif /* _LINUX_SLUB_DEF_H */