slab_def.h

#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H

/*
 * Definitions unique to the original Linux SLAB allocator.
 *
 * What we provide here is a way to optimize the frequent kmalloc
 * calls in the kernel by selecting the appropriate general cache
 * if kmalloc was called with a size that can be established at
 * compile time.
 */

#include <linux/init.h>
#include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
#include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
#include <linux/compiler.h>
#include <linux/kmemtrace.h>

#ifndef ARCH_KMALLOC_MINALIGN
/*
 * Enforce a minimum alignment for the kmalloc caches.
 * Usually, the kmalloc caches are cache_line_size() aligned, except when
 * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * ARCH_KMALLOC_MINALIGN allows that.
 * Note that increasing this value may disable some debug features.
 */
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif
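/*
 * Illustrative sketch only, not part of slab_def.h: an architecture
 * that performs DMA straight into kmalloc() buffers can raise the
 * minimum alignment from its own headers before the default above is
 * applied, for example to a full cache line. The value below is just
 * an example and is not taken from any particular architecture.
 */
#if 0
#define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES	/* hypothetical arch override */
#endif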
#ifndef ARCH_SLAB_MINALIGN
/*
 * Enforce a minimum alignment for all caches.
 * Intended for archs that get misalignment faults even for BYTES_PER_WORD
 * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
 * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
 * some debug features.
 */
#define ARCH_SLAB_MINALIGN 0
#endif

/*
 * struct kmem_cache
 *
 * manages a cache.
 */

struct kmem_cache {
/* 1) per-cpu data, touched during every alloc/free */
	struct array_cache *array[NR_CPUS];
/* 2) Cache tunables. Protected by cache_chain_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	unsigned int buffer_size;
	u32 reciprocal_buffer_size;
/* 3) touched by every alloc & free from the backend */

	unsigned int flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 4) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t gfpflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	struct kmem_cache *slabp_cache;
	unsigned int slab_size;
	unsigned int dflags;		/* dynamic flags */

	/* constructor func */
	void (*ctor)(void *obj);

/* 5) cache creation/removal */
	const char *name;
	struct list_head next;

/* 6) statistics */
#ifdef CONFIG_DEBUG_SLAB
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;

	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. buffer_size contains the total
	 * object size including these internal fields, the following two
	 * variables contain the offset to the user object and its size.
	 */
	int obj_offset;
	int obj_size;
#endif /* CONFIG_DEBUG_SLAB */

	/*
	 * We put nodelists[] at the end of kmem_cache, because we want to size
	 * this array to nr_node_ids slots instead of MAX_NUMNODES
	 * (see kmem_cache_init())
	 * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
	 * is statically defined, so we reserve the max number of nodes.
	 */
	struct kmem_list3 *nodelists[MAX_NUMNODES];
	/*
	 * Do not add fields after nodelists[]
	 */
};
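/*
 * Illustrative sketch only, not part of slab_def.h: how a client of the
 * SLAB allocator typically obtains and uses a kmem_cache like the one
 * defined above. "struct foo" and foo_cachep are hypothetical names;
 * the kmem_cache_* calls are the regular slab API declared via
 * <linux/slab.h>.
 */
#if 0
struct foo {
	unsigned long cookie;
	char name[32];
};

static struct kmem_cache *foo_cachep;

static int __init foo_cache_init(void)
{
	/* SLAB_HWCACHE_ALIGN requests cache_line_size() alignment */
	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo),
				       0, SLAB_HWCACHE_ALIGN, NULL);
	return foo_cachep ? 0 : -ENOMEM;
}

static void foo_example(void)
{
	struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);

	if (f)
		kmem_cache_free(foo_cachep, f);
}
#endif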
/* Size description struct for general caches. */
struct cache_sizes {
	size_t			cs_size;
	struct kmem_cache	*cs_cachep;
#ifdef CONFIG_ZONE_DMA
	struct kmem_cache	*cs_dmacachep;
#endif
};
extern struct cache_sizes malloc_sizes[];
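/*
 * Illustrative sketch only, not part of slab_def.h: malloc_sizes[] is
 * indexed in the same order as the CACHE(x) entries in
 * <linux/kmalloc_sizes.h>, which is why the counter i in kmalloc()
 * below can be used directly as an index into it. With the CACHE(x)
 * definition used there, the include expands to a chain of comparisons
 * roughly like the following (the sizes are examples; the real ladder
 * depends on PAGE_SIZE and the architecture):
 */
#if 0
	if (size <= 32)  goto found; else i++;
	if (size <= 64)  goto found; else i++;
	if (size <= 96)  goto found; else i++;
	if (size <= 128) goto found; else i++;
	/* ... continues up to the largest general cache ... */
#endif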
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
extern size_t slab_buffer_size(struct kmem_cache *cachep);
#else
static __always_inline void *
kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
{
	return kmem_cache_alloc(cachep, flags);
}

static inline size_t slab_buffer_size(struct kmem_cache *cachep)
{
	return 0;
}
#endif

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	struct kmem_cache *cachep;
	void *ret;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		ret = kmem_cache_alloc_notrace(cachep, flags);

		trace_kmalloc(_THIS_IP_, ret,
			      size, slab_buffer_size(cachep), flags);

		return ret;
	}
	return __kmalloc(size, flags);
}
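/*
 * Illustrative sketch only, not part of slab_def.h: the first call
 * below passes a compile-time constant size, so the CACHE() ladder
 * above resolves it to a general cache with no runtime search; the
 * second size is only known at run time, so it falls back to
 * __kmalloc(). "struct bar" and the lengths are hypothetical.
 */
#if 0
struct bar {
	unsigned long id;
	char payload[24];
};

static void kmalloc_example(size_t runtime_len)
{
	/* constant size: picks malloc_sizes[i].cs_cachep at compile time */
	struct bar *b = kmalloc(sizeof(*b), GFP_KERNEL);

	/* non-constant size: handled by __kmalloc() */
	char *buf = kmalloc(runtime_len, GFP_KERNEL);

	kfree(buf);
	kfree(b);
}
#endif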
#ifdef CONFIG_NUMA
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
					   gfp_t flags,
					   int nodeid);
#else
static __always_inline void *
kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
			      gfp_t flags,
			      int nodeid)
{
	return kmem_cache_alloc_node(cachep, flags, nodeid);
}
#endif

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	struct kmem_cache *cachep;
	void *ret;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		ret = kmem_cache_alloc_node_notrace(cachep, flags, node);

		trace_kmalloc_node(_THIS_IP_, ret,
				   size, slab_buffer_size(cachep),
				   flags, node);

		return ret;
	}
	return __kmalloc_node(size, flags, node);
}
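/*
 * Illustrative sketch only, not part of slab_def.h: kmalloc_node()
 * follows the same compile-time cache selection as kmalloc(), but the
 * allocation is satisfied from the nodelists[] entry of the requested
 * NUMA node. numa_node_id() is the usual way to name the local node;
 * the size below is just an example.
 */
#if 0
static void *alloc_on_local_node(void)
{
	return kmalloc_node(256, GFP_KERNEL, numa_node_id());
}
#endif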
#endif	/* CONFIG_NUMA */

#endif	/* _LINUX_SLAB_DEF_H */