#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H

/*
 * Definitions unique to the original Linux SLAB allocator.
 *
 * What we provide here is a way to optimize the frequent kmalloc
 * calls in the kernel by selecting the appropriate general cache
 * if kmalloc was called with a size that can be established at
 * compile time.
 */

#include <linux/init.h>
#include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
#include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
#include <linux/compiler.h>
#include <linux/kmemtrace.h>

/* Size description struct for general caches. */
struct cache_sizes {
	size_t			cs_size;
	struct kmem_cache	*cs_cachep;
#ifdef CONFIG_ZONE_DMA
	struct kmem_cache	*cs_dmacachep;
#endif
};
extern struct cache_sizes malloc_sizes[];
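
/*
 * malloc_sizes[] is assumed to be indexed in the same ascending-size
 * order as the CACHE() entries in <linux/kmalloc_sizes.h>, so the index
 * computed by the CACHE() macro in kmalloc() below lands on the matching
 * cache. Illustrative layout (actual sizes are config-dependent):
 *
 *	{ .cs_size =  32, .cs_cachep = <32-byte general cache>,  ... },
 *	{ .cs_size =  64, .cs_cachep = <64-byte general cache>,  ... },
 *	{ .cs_size = 128, .cs_cachep = <128-byte general cache>, ... },
 *	...
 */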

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

#ifdef CONFIG_KMEMTRACE
extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
extern size_t slab_buffer_size(struct kmem_cache *cachep);
#else
static __always_inline void *
kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
{
	return kmem_cache_alloc(cachep, flags);
}
static inline size_t slab_buffer_size(struct kmem_cache *cachep)
{
	return 0;
}
#endif
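
/*
 * With kmemtrace disabled, the stubs above make kmem_cache_alloc_notrace()
 * a plain kmem_cache_alloc() and slab_buffer_size() a compile-time 0, so
 * the trace_kmalloc() call in kmalloc() below should add no bookkeeping of
 * its own on the fast path.
 */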

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	struct kmem_cache *cachep;
	void *ret;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
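		/*
		 * Illustrative expansion (the exact sizes come from
		 * <linux/kmalloc_sizes.h> and depend on PAGE_SIZE and
		 * L1_CACHE_BYTES):
		 *
		 *	if (size <= 32) goto found; else i++;
		 *	if (size <= 64) goto found; else i++;
		 *	if (size <= 128) goto found; else i++;
		 *	...
		 *
		 * For a compile-time-constant size the compiler folds the
		 * whole chain down to a constant i.
		 */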
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		ret = kmem_cache_alloc_notrace(cachep, flags);

		trace_kmalloc(_THIS_IP_, ret,
			      size, slab_buffer_size(cachep), flags);

		return ret;
	}
	return __kmalloc(size, flags);
}
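
/*
 * Usage sketch (hypothetical caller, not part of this header): a constant
 * size resolves to a general cache at compile time, while a runtime size
 * falls through to __kmalloc():
 *
 *	buf = kmalloc(128, GFP_KERNEL);	// inlined cache lookup
 *	buf = kmalloc(len, GFP_KERNEL);	// len not constant: __kmalloc()
 */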

#ifdef CONFIG_NUMA
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_KMEMTRACE
extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
					   gfp_t flags,
					   int nodeid);
#else
static __always_inline void *
kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
			      gfp_t flags,
			      int nodeid)
{
	return kmem_cache_alloc_node(cachep, flags, nodeid);
}
#endif

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	struct kmem_cache *cachep;
	void *ret;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		ret = kmem_cache_alloc_node_notrace(cachep, flags, node);

		trace_kmalloc_node(_THIS_IP_, ret,
				   size, slab_buffer_size(cachep),
				   flags, node);

		return ret;
	}
	return __kmalloc_node(size, flags, node);
}
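
/*
 * Usage sketch (hypothetical caller): the same constant-size fast path as
 * kmalloc(), but the allocation is placed on the given NUMA node:
 *
 *	data = kmalloc_node(sizeof(*data), GFP_KERNEL, cpu_to_node(cpu));
 */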

#endif	/* CONFIG_NUMA */

#endif	/* _LINUX_SLAB_DEF_H */