slub_def.h

#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter <clameter@sgi.com>
 */
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <linux/kobject.h>

struct kmem_cache_node {
	spinlock_t list_lock;	/* Protect partial list and nr_partial */
	unsigned long nr_partial;
	atomic_long_t nr_slabs;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	struct list_head full;
#endif
};
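
/*
 * Illustrative sketch (not part of this header): the SLUB core in
 * mm/slub.c takes list_lock around any manipulation of the partial
 * list, roughly like:
 *
 *	spin_lock(&n->list_lock);
 *	n->nr_partial++;
 *	list_add(&page->lru, &n->partial);
 *	spin_unlock(&n->list_lock);
 *
 * nr_slabs is atomic so the per-node slab count can be updated
 * without holding list_lock.
 */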

/*
 * Slab cache management.
 */
struct kmem_cache {
	/* Used for retrieving partial slabs etc */
	unsigned long flags;
	int size;		/* The size of an object including meta data */
	int objsize;		/* The size of an object without meta data */
	int offset;		/* Free pointer offset. */
	int order;

	/*
	 * Avoid an extra cache line for UP, SMP and for the node local to
	 * struct kmem_cache.
	 */
	struct kmem_cache_node local_node;

	/* Allocation and freeing of slabs */
	int objects;		/* Number of objects in slab */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(void *, struct kmem_cache *, unsigned long);
	int inuse;		/* Offset to metadata */
	int align;		/* Alignment */
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
#ifdef CONFIG_SLUB_DEBUG
	struct kobject kobj;	/* For sysfs */
#endif

#ifdef CONFIG_NUMA
	int defrag_ratio;
	struct kmem_cache_node *node[MAX_NUMNODES];
#endif
	struct page *cpu_slab[NR_CPUS];
};
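
/*
 * A reading of the size fields above (descriptive, not normative):
 * objsize is the payload the caller asked for; inuse is typically
 * objsize padded to pointer alignment, and marks where debugging
 * metadata such as red zones begins; size additionally covers that
 * metadata plus final alignment. So objsize <= inuse <= size, and
 * offset locates the free pointer that chains unused objects within
 * a slab page.
 */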

/*
 * Kmalloc subsystem.
 */
#if defined(ARCH_KMALLOC_MINALIGN) && ARCH_KMALLOC_MINALIGN > 8
#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
#else
#define KMALLOC_MIN_SIZE 8
#endif

#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
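
/*
 * Worked example (assuming no ARCH_KMALLOC_MINALIGN override):
 * KMALLOC_MIN_SIZE is 8, so KMALLOC_SHIFT_LOW is ilog2(8) == 3 and
 * the smallest general cache is kmalloc_caches[3], for 8-byte objects.
 */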

/*
 * We keep the general caches in an array of slab caches that are used for
 * 2^x bytes of allocations.
 */
extern struct kmem_cache kmalloc_caches[PAGE_SHIFT];

/*
 * Sorry that the following has to be that ugly but some versions of GCC
 * have trouble with constant propagation and loops.
 */
static __always_inline int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (size > 64 && size <= 96)
		return 1;
	if (size > 128 && size <= 192)
		return 2;
	if (size <= 8) return 3;
	if (size <= 16) return 4;
	if (size <= 32) return 5;
	if (size <= 64) return 6;
	if (size <= 128) return 7;
	if (size <= 256) return 8;
	if (size <= 512) return 9;
	if (size <= 1024) return 10;
	if (size <= 2 * 1024) return 11;
/*
 * The following is only needed to support architectures with a larger page
 * size than 4k.
 */
	if (size <= 4 * 1024) return 12;
	if (size <= 8 * 1024) return 13;
	if (size <= 16 * 1024) return 14;
	if (size <= 32 * 1024) return 15;
	if (size <= 64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <= 2 * 1024 * 1024) return 21;
	return -1;

/*
 * What we really wanted to do and cannot do because of compiler issues is:
 *	int i;
 *	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
 *		if (size <= (1 << i))
 *			return i;
 */
}
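
/*
 * Worked examples of the mapping above (with the default
 * KMALLOC_MIN_SIZE of 8):
 *
 *	kmalloc_index(0)   == 0   (no allocation; handled by callers)
 *	kmalloc_index(8)   == 3   (smallest power-of-two cache)
 *	kmalloc_index(96)  == 1   (special 96-byte cache)
 *	kmalloc_index(100) == 7   (falls through to the 128-byte cache)
 *	kmalloc_index(192) == 2   (special 192-byte cache)
 */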

/*
 * Find the slab cache for a given combination of allocation flags and size.
 *
 * This ought to end up with a global pointer to the right cache
 * in kmalloc_caches.
 */
static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
{
	int index = kmalloc_index(size);

	if (index == 0)
		return NULL;

	return &kmalloc_caches[index];
}
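
/*
 * Note that kmalloc_index() returns -1 for sizes that fit no general
 * cache; this function does not check for that, so callers are
 * expected to bound the size first (the kmalloc() fast path below only
 * calls it for constant sizes <= PAGE_SIZE / 2). For example,
 * kmalloc_slab(512) resolves to &kmalloc_caches[9] at compile time.
 */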

#ifdef CONFIG_ZONE_DMA
#define SLUB_DMA __GFP_DMA
#else
/* Disable DMA functionality */
#define SLUB_DMA (__force gfp_t)0
#endif

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		if (size > PAGE_SIZE / 2)
			return (void *)__get_free_pages(flags | __GFP_COMP,
							get_order(size));

		if (!(flags & SLUB_DMA)) {
			struct kmem_cache *s = kmalloc_slab(size);

			if (!s)
				return ZERO_SIZE_PTR;

			return kmem_cache_alloc(s, flags);
		}
	}
	return __kmalloc(size, flags);
}
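
/*
 * Illustrative calls (assuming 4k pages and no __GFP_DMA); with a
 * compile-time-constant size the dispatch above folds away entirely:
 *
 *	kmalloc(128, GFP_KERNEL);	becomes kmem_cache_alloc(&kmalloc_caches[7], ...)
 *	kmalloc(8192, GFP_KERNEL);	becomes __get_free_pages(..., get_order(8192))
 *	kmalloc(len, GFP_KERNEL);	variable size: falls back to __kmalloc()
 */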

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	if (__builtin_constant_p(size) &&
			size <= PAGE_SIZE / 2 && !(flags & SLUB_DMA)) {
		struct kmem_cache *s = kmalloc_slab(size);

		if (!s)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node(s, flags, node);
	}
	return __kmalloc_node(size, flags, node);
}
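
/*
 * Illustrative call (struct foo is a hypothetical caller's type):
 * allocating a constant-sized object on a specific NUMA node takes the
 * same compile-time path as kmalloc() above:
 *
 *	struct foo *p = kmalloc_node(sizeof(*p), GFP_KERNEL, node);
 *
 * whereas a variable-sized or DMA request falls back to __kmalloc_node().
 */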
#endif

#endif /* _LINUX_SLUB_DEF_H */