slub_def.h

#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter <clameter@sgi.com>
 */
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <linux/kobject.h>

struct kmem_cache_node {
        spinlock_t list_lock;           /* Protect partial list and nr_partial */
        unsigned long nr_partial;
        atomic_long_t nr_slabs;
        struct list_head partial;
};

/*
 * Slab cache management.
 */
struct kmem_cache {
        /* Used for retrieving partial slabs etc */
        unsigned long flags;
        int size;               /* The size of an object including meta data */
        int objsize;            /* The size of an object without meta data */
        int offset;             /* Free pointer offset. */
        unsigned int order;

        /*
         * Avoid an extra cache line for UP, SMP and for the node local to
         * struct kmem_cache.
         */
        struct kmem_cache_node local_node;

        /* Allocation and freeing of slabs */
        int objects;            /* Number of objects in slab */
        int refcount;           /* Refcount for slab cache destroy */
        void (*ctor)(void *, struct kmem_cache *, unsigned long);
        void (*dtor)(void *, struct kmem_cache *, unsigned long);
        int inuse;              /* Offset to metadata */
        int align;              /* Alignment */
        const char *name;       /* Name (only for display!) */
        struct list_head list;  /* List of slab caches */
        struct kobject kobj;    /* For sysfs */

#ifdef CONFIG_NUMA
        int defrag_ratio;
        struct kmem_cache_node *node[MAX_NUMNODES];
#endif
        struct page *cpu_slab[NR_CPUS];
};
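
/*
 * Illustrative example (hypothetical cache, not from this header):
 *
 *	struct kmem_cache *s = kmem_cache_create("foo", 100, 0,
 *						  SLAB_HWCACHE_ALIGN, NULL, NULL);
 *
 * objsize records the requested 100 bytes, size is that value rounded up
 * for alignment plus any metadata (such as the free pointer kept at
 * offset), and inuse marks where the per-object metadata begins.
 */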

/*
 * Kmalloc subsystem.
 */
#define KMALLOC_SHIFT_LOW 3

#ifdef CONFIG_LARGE_ALLOCS
#define KMALLOC_SHIFT_HIGH 25
#else
#if !defined(CONFIG_MMU) || NR_CPUS > 512 || MAX_NUMNODES > 256
#define KMALLOC_SHIFT_HIGH 20
#else
#define KMALLOC_SHIFT_HIGH 18
#endif
#endif

/*
 * We keep the general caches in an array of slab caches that are used for
 * 2^x bytes of allocations.
 */
extern struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
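
/*
 * In other words, the general caches cover object sizes from
 * 2^KMALLOC_SHIFT_LOW (8 bytes) up to 2^KMALLOC_SHIFT_HIGH: 256 KB by
 * default, 1 MB on !MMU or very large NR_CPUS/MAX_NUMNODES configurations,
 * and 32 MB with CONFIG_LARGE_ALLOCS.
 */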

/*
 * Sorry that the following has to be that ugly but some versions of GCC
 * have trouble with constant propagation and loops.
 */
static inline int kmalloc_index(int size)
{
        /*
         * We should return 0 if size == 0 but we use the smallest object
         * here for SLAB legacy reasons.
         */
        WARN_ON_ONCE(size == 0);

        if (size > 64 && size <= 96)
                return 1;
        if (size > 128 && size <= 192)
                return 2;
        if (size <= 8) return 3;
        if (size <= 16) return 4;
        if (size <= 32) return 5;
        if (size <= 64) return 6;
        if (size <= 128) return 7;
        if (size <= 256) return 8;
        if (size <= 512) return 9;
        if (size <= 1024) return 10;
        if (size <= 2 * 1024) return 11;
        if (size <= 4 * 1024) return 12;
        if (size <= 8 * 1024) return 13;
        if (size <= 16 * 1024) return 14;
        if (size <= 32 * 1024) return 15;
        if (size <= 64 * 1024) return 16;
        if (size <= 128 * 1024) return 17;
        if (size <= 256 * 1024) return 18;
#if KMALLOC_SHIFT_HIGH > 18
        if (size <= 512 * 1024) return 19;
        if (size <= 1024 * 1024) return 20;
#endif
#if KMALLOC_SHIFT_HIGH > 20
        if (size <= 2 * 1024 * 1024) return 21;
        if (size <= 4 * 1024 * 1024) return 22;
        if (size <= 8 * 1024 * 1024) return 23;
        if (size <= 16 * 1024 * 1024) return 24;
        if (size <= 32 * 1024 * 1024) return 25;
#endif
        return -1;

/*
 * What we really wanted to do and cannot do because of compiler issues is:
 *	int i;
 *	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
 *		if (size <= (1 << i))
 *			return i;
 */
}
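
/*
 * A few worked examples of the mapping above (illustrative only):
 *
 *	kmalloc_index(8)    == 3	(8-byte cache)
 *	kmalloc_index(100)  == 7	(rounded up to the 128-byte cache)
 *	kmalloc_index(192)  == 2	(the special 192-byte cache)
 *	kmalloc_index(4096) == 12	(4 KB cache)
 *
 * Indexes 1 and 2 are the odd 96- and 192-byte caches that fill the gaps
 * between the power-of-two sizes.
 */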

/*
 * Find the slab cache for a given combination of allocation flags and size.
 *
 * This ought to end up with a global pointer to the right cache
 * in kmalloc_caches.
 */
static inline struct kmem_cache *kmalloc_slab(size_t size)
{
        int index = kmalloc_index(size);

        if (index == 0)
                return NULL;

        if (index < 0) {
                /*
                 * Generate a link failure. Would be great if we could
                 * do something to stop the compile here.
                 */
                extern void __kmalloc_size_too_large(void);
                __kmalloc_size_too_large();
        }
        return &kmalloc_caches[index];
}
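
/*
 * For example, kmalloc_slab(100) folds to &kmalloc_caches[7] at compile
 * time, while a constant size larger than 1 << KMALLOC_SHIFT_HIGH leaves
 * an unresolved reference to __kmalloc_size_too_large() and fails the
 * final link instead of silently misbehaving at runtime.
 */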

#ifdef CONFIG_ZONE_DMA
#define SLUB_DMA __GFP_DMA
#else
/* Disable DMA functionality */
#define SLUB_DMA 0
#endif

static inline void *kmalloc(size_t size, gfp_t flags)
{
        if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
                struct kmem_cache *s = kmalloc_slab(size);

                if (!s)
                        return NULL;

                return kmem_cache_alloc(s, flags);
        } else
                return __kmalloc(size, flags);
}
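
/*
 * Usage sketch (hypothetical caller, not part of this header): with a
 * compile-time constant, non-DMA request the branch above collapses to a
 * direct kmem_cache_alloc() on the matching general cache, e.g.
 *
 *	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *
 * whereas a runtime-sized or GFP_DMA request goes through __kmalloc().
 */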

static inline void *kzalloc(size_t size, gfp_t flags)
{
        if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
                struct kmem_cache *s = kmalloc_slab(size);

                if (!s)
                        return NULL;

                return kmem_cache_zalloc(s, flags);
        } else
                return __kzalloc(size, flags);
}
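
/*
 * kzalloc() follows the same constant-folding path as kmalloc() above,
 * only routed through kmem_cache_zalloc()/__kzalloc() so the memory comes
 * back zeroed, e.g. (hypothetical caller):
 *
 *	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
 */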

#ifdef CONFIG_NUMA
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);

static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
        if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
                struct kmem_cache *s = kmalloc_slab(size);

                if (!s)
                        return NULL;

                return kmem_cache_alloc_node(s, flags, node);
        } else
                return __kmalloc_node(size, flags, node);
}
#endif
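
/*
 * On NUMA kernels a caller can pin the allocation to a particular node,
 * e.g. (hypothetical):
 *
 *	void *buf = kmalloc_node(512, GFP_KERNEL, numa_node_id());
 *
 * Constant sizes still resolve to the general caches; only the slab the
 * object comes from is constrained to the requested node.
 */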

#endif /* _LINUX_SLUB_DEF_H */