  1. #ifndef _LINUX_SLAB_DEF_H
  2. #define _LINUX_SLAB_DEF_H
  3. /*
  4. * Definitions unique to the original Linux SLAB allocator.
  5. *
  6. * What we provide here is a way to optimize the frequent kmalloc
  7. * calls in the kernel by selecting the appropriate general cache
  8. * if kmalloc was called with a size that can be established at
  9. * compile time.
  10. */
  11. #include <linux/init.h>
  12. #include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */
  13. #include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */
  14. #include <linux/compiler.h>
/*
 * Size description struct for general caches.
 *
 * malloc_sizes[] is an array of these, one entry per power-of-two-ish
 * kmalloc size class (the classes come from kmalloc_sizes.h).
 */
struct cache_sizes {
	size_t			cs_size;	/* max object size served by this class */
	struct kmem_cache	*cs_cachep;	/* cache backing normal allocations */
#ifdef CONFIG_ZONE_DMA
	struct kmem_cache	*cs_dmacachep;	/* cache backing GFP_DMA allocations */
#endif
};
/* General-cache table, indexed in the same order as kmalloc_sizes.h. */
extern struct cache_sizes malloc_sizes[];

/* Allocator entry points defined in mm/slab.c. */
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);
/*
 * kmalloc - allocate memory
 * @size: bytes required
 * @flags: GFP allocation flags
 *
 * When @size is a compile-time constant, the size-class index @i is
 * resolved entirely at compile time: CACHE(x) is expanded once per size
 * class by including kmalloc_sizes.h, and the first class that fits
 * jumps to 'found' with @i holding its index into malloc_sizes[].
 * Non-constant sizes fall through to the out-of-line __kmalloc().
 */
static inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

/*
 * Expanded once per entry in kmalloc_sizes.h; the whole chain is folded
 * by the compiler, leaving @i as a constant.
 */
#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include "kmalloc_sizes.h"
#undef CACHE
		{
			/*
			 * Deliberately undefined symbol: if no size class
			 * fits, this call survives to link time and fails
			 * the build with a self-describing error.
			 */
			extern void __you_cannot_kmalloc_that_much(void);
			__you_cannot_kmalloc_that_much();
		}
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			return kmem_cache_alloc(malloc_sizes[i].cs_dmacachep,
						flags);
#endif
		return kmem_cache_alloc(malloc_sizes[i].cs_cachep, flags);
	}
	/* Size not known at compile time: generic slow path. */
	return __kmalloc(size, flags);
}
/*
 * kzalloc - allocate zeroed memory
 * @size: bytes required
 * @flags: GFP allocation flags
 *
 * Identical compile-time size-class selection to kmalloc() above, but
 * routed through the zeroing variants (kmem_cache_zalloc/__kzalloc).
 * NOTE(review): kmem_cache_zalloc() and __kzalloc() are not declared in
 * this header — presumably provided by <linux/slab.h>, which includes
 * this file; confirm against the including header.
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

/* Compile-time size-class walk; see kmalloc() above. */
#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include "kmalloc_sizes.h"
#undef CACHE
		{
			/* Link-time build failure for oversized constants. */
			extern void __you_cannot_kzalloc_that_much(void);
			__you_cannot_kzalloc_that_much();
		}
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			return kmem_cache_zalloc(malloc_sizes[i].cs_dmacachep,
						flags);
#endif
		return kmem_cache_zalloc(malloc_sizes[i].cs_cachep, flags);
	}
	/* Size not known at compile time: generic zeroing slow path. */
	return __kzalloc(size, flags);
}
#ifdef CONFIG_NUMA
/* NUMA-aware allocator entry points defined in mm/slab.c. */
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

/*
 * kmalloc_node - allocate memory from a specific NUMA node
 * @size: bytes required
 * @flags: GFP allocation flags
 * @node: NUMA node id to allocate from
 *
 * Same compile-time size-class selection as kmalloc() above, routed
 * through the node-aware allocation paths.
 */
static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

/* Compile-time size-class walk; see kmalloc() above. */
#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include "kmalloc_sizes.h"
#undef CACHE
		{
			/* Link-time build failure for oversized constants. */
			extern void __you_cannot_kmalloc_that_much(void);
			__you_cannot_kmalloc_that_much();
		}
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			return kmem_cache_alloc_node(malloc_sizes[i].cs_dmacachep,
						flags, node);
#endif
		return kmem_cache_alloc_node(malloc_sizes[i].cs_cachep,
						flags, node);
	}
	/* Size not known at compile time: generic node-aware slow path. */
	return __kmalloc_node(size, flags, node);
}
#endif /* CONFIG_NUMA */
/* /proc/slabinfo seq_file interface (implemented in mm/slab.c). */
extern const struct seq_operations slabinfo_op;
ssize_t slabinfo_write(struct file *, const char __user *, size_t, loff_t *);
  114. #endif /* _LINUX_SLAB_DEF_H */