slub_def.h
#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <linux/kobject.h>
#include <linux/kmemtrace.h>
enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	NR_SLUB_STAT_ITEMS };
struct kmem_cache_cpu {
	void **freelist;	/* Pointer to first free per cpu object */
	struct page *page;	/* The slab from which we are allocating */
	int node;		/* The node of the page (or -1 for debug) */
	unsigned int offset;	/* Freepointer offset (in word units) */
	unsigned int objsize;	/* Size of an object (from kmem_cache) */
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};
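
/*
 * Illustration only (hypothetical helper, not part of this header): a
 * simplified sketch of how the allocation fast path in mm/slub.c uses the
 * fields above.  The real code is slab_alloc(); this sketch omits interrupt
 * disabling, node checks, debugging and the slow-path refill.
 */
static __always_inline void *slub_alloc_fastpath_sketch(struct kmem_cache_cpu *c)
{
	void **object = c->freelist;

	if (!object)
		return NULL;	/* would fall back to the slow path */

	/*
	 * The pointer to the next free object is stored inside the free
	 * object itself, c->offset words from its start.
	 */
	c->freelist = object[c->offset];
	return object;
}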
struct kmem_cache_node {
	spinlock_t list_lock;	/* Protect partial list and nr_partial */
	unsigned long nr_partial;
	unsigned long min_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
};
/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned long x;
};
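
/*
 * Illustration only (hypothetical helpers, not part of this header):
 * mm/slub.c packs the page order into the upper bits of x and the object
 * count into the lower bits.  The real oo_order()/oo_objects() helpers and
 * their shift/mask are private to mm/slub.c; the 16-bit split below is an
 * assumption used just to show the idea.
 */
#define OO_SKETCH_SHIFT	16

static inline int oo_order_sketch(struct kmem_cache_order_objects x)
{
	return x.x >> OO_SKETCH_SHIFT;			/* slab order */
}

static inline int oo_objects_sketch(struct kmem_cache_order_objects x)
{
	return x.x & ((1 << OO_SKETCH_SHIFT) - 1);	/* objects per slab */
}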
/*
 * Slab cache management.
 */
struct kmem_cache {
	/* Used for retrieving partial slabs etc */
	unsigned long flags;
	int size;		/* The size of an object including meta data */
	int objsize;		/* The size of an object without meta data */
	int offset;		/* Free pointer offset. */
	struct kmem_cache_order_objects oo;

	/*
	 * Avoid an extra cache line for UP, SMP and for the node local to
	 * struct kmem_cache.
	 */
	struct kmem_cache_node local_node;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects max;
	struct kmem_cache_order_objects min;
	gfp_t allocflags;	/* gfp flags to use on each alloc */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(void *);
	int inuse;		/* Offset to metadata */
	int align;		/* Alignment */
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
#ifdef CONFIG_SLUB_DEBUG
	struct kobject kobj;	/* For sysfs */
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	int remote_node_defrag_ratio;
	struct kmem_cache_node *node[MAX_NUMNODES];
#endif
#ifdef CONFIG_SMP
	struct kmem_cache_cpu *cpu_slab[NR_CPUS];
#else
	struct kmem_cache_cpu cpu_slab;
#endif
};
/*
 * Kmalloc subsystem.
 */
#if defined(ARCH_KMALLOC_MINALIGN) && ARCH_KMALLOC_MINALIGN > 8
#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
#else
#define KMALLOC_MIN_SIZE 8
#endif

#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)

/*
 * We keep the general caches in an array of slab caches that are used for
 * 2^x bytes of allocations.
 */
extern struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1];
/*
 * Sorry that the following has to be that ugly but some versions of GCC
 * have trouble with constant propagation and loops.
 */
static __always_inline int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

#if KMALLOC_MIN_SIZE <= 64
	if (size > 64 && size <= 96)
		return 1;
	if (size > 128 && size <= 192)
		return 2;
#endif
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
/*
 * The following is only needed to support architectures with a larger page
 * size than 4k.
 */
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <= 2 * 1024 * 1024) return 21;
	return -1;

/*
 * What we really wanted to do and cannot do because of compiler issues is:
 *	int i;
 *	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
 *		if (size <= (1 << i))
 *			return i;
 */
}
/*
 * Find the slab cache for a given combination of allocation flags and size.
 *
 * This ought to end up with a global pointer to the right cache
 * in kmalloc_caches.
 */
static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
{
	int index = kmalloc_index(size);

	if (index == 0)
		return NULL;

	return &kmalloc_caches[index];
}
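
/*
 * Worked example (illustrative, assuming KMALLOC_MIN_SIZE == 8): because
 * kmalloc_index() folds to a constant for constant sizes, kmalloc_slab(8)
 * reduces to &kmalloc_caches[3] (the 8-byte cache), kmalloc_slab(30) to
 * &kmalloc_caches[5] (the 32-byte cache) and kmalloc_slab(192) to
 * &kmalloc_caches[2] (the odd-sized 192-byte cache).  kmalloc_slab(0)
 * returns NULL, which kmalloc() below turns into ZERO_SIZE_PTR.
 */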
#ifdef CONFIG_ZONE_DMA
#define SLUB_DMA __GFP_DMA
#else
/* Disable DMA functionality */
#define SLUB_DMA (__force gfp_t)0
#endif

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

#ifdef CONFIG_KMEMTRACE
extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
#else
static __always_inline void *
kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
{
	return kmem_cache_alloc(s, gfpflags);
}
#endif
static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);

	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
			     size, PAGE_SIZE << order, flags);

	return ret;
}
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	void *ret;

	if (__builtin_constant_p(size)) {
		if (size > PAGE_SIZE)
			return kmalloc_large(size, flags);

		if (!(flags & SLUB_DMA)) {
			struct kmem_cache *s = kmalloc_slab(size);

			if (!s)
				return ZERO_SIZE_PTR;

			ret = kmem_cache_alloc_notrace(s, flags);

			kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC,
					     _THIS_IP_, ret,
					     size, s->size, flags);

			return ret;
		}
	}
	return __kmalloc(size, flags);
}
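
/*
 * Examples of how the constant folding above plays out (illustrative only,
 * hypothetical call sites):
 *
 *	kmalloc(64, GFP_KERNEL)		resolves to kmem_cache_alloc_notrace()
 *					on the 64-byte general cache;
 *	kmalloc(2 * PAGE_SIZE, GFP_KERNEL) becomes kmalloc_large(), i.e. a
 *					direct page allocator call;
 *	kmalloc(len, GFP_KERNEL)	with a non-constant len, and
 *	kmalloc(64, GFP_KERNEL | GFP_DMA) both fall through to __kmalloc().
 */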
#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_KMEMTRACE
extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
					   gfp_t gfpflags,
					   int node);
#else
static __always_inline void *
kmem_cache_alloc_node_notrace(struct kmem_cache *s,
			      gfp_t gfpflags,
			      int node)
{
	return kmem_cache_alloc_node(s, gfpflags, node);
}
#endif

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	void *ret;

	if (__builtin_constant_p(size) &&
	    size <= PAGE_SIZE && !(flags & SLUB_DMA)) {
		struct kmem_cache *s = kmalloc_slab(size);

		if (!s)
			return ZERO_SIZE_PTR;

		ret = kmem_cache_alloc_node_notrace(s, flags, node);

		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
					  _THIS_IP_, ret,
					  size, s->size, flags, node);

		return ret;
	}
	return __kmalloc_node(size, flags, node);
}
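
/*
 * Note that, unlike kmalloc() above, the node-aware variant has no
 * kmalloc_large() shortcut: sizes above PAGE_SIZE, DMA requests and
 * non-constant sizes all fall through to __kmalloc_node().
 */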
#endif

#endif /* _LINUX_SLUB_DEF_H */