/* kmemcheck.h (4.0 KB) */
  1. #ifndef LINUX_KMEMCHECK_H
  2. #define LINUX_KMEMCHECK_H
  3. #include <linux/mm_types.h>
  4. #include <linux/types.h>
  5. #ifdef CONFIG_KMEMCHECK
  6. extern int kmemcheck_enabled;
  7. /* The slab-related functions. */
  8. void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node);
  9. void kmemcheck_free_shadow(struct page *page, int order);
  10. void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
  11. size_t size);
  12. void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size);
  13. void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order,
  14. gfp_t gfpflags);
  15. void kmemcheck_show_pages(struct page *p, unsigned int n);
  16. void kmemcheck_hide_pages(struct page *p, unsigned int n);
  17. bool kmemcheck_page_is_tracked(struct page *p);
  18. void kmemcheck_mark_unallocated(void *address, unsigned int n);
  19. void kmemcheck_mark_uninitialized(void *address, unsigned int n);
  20. void kmemcheck_mark_initialized(void *address, unsigned int n);
  21. void kmemcheck_mark_freed(void *address, unsigned int n);
  22. void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n);
  23. void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n);
  24. void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);
  25. int kmemcheck_show_addr(unsigned long address);
  26. int kmemcheck_hide_addr(unsigned long address);
  27. #else
  28. #define kmemcheck_enabled 0
  29. static inline void
  30. kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
  31. {
  32. }
  33. static inline void
  34. kmemcheck_free_shadow(struct page *page, int order)
  35. {
  36. }
  37. static inline void
  38. kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
  39. size_t size)
  40. {
  41. }
  42. static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object,
  43. size_t size)
  44. {
  45. }
  46. static inline void kmemcheck_pagealloc_alloc(struct page *p,
  47. unsigned int order, gfp_t gfpflags)
  48. {
  49. }
  50. static inline bool kmemcheck_page_is_tracked(struct page *p)
  51. {
  52. return false;
  53. }
  54. static inline void kmemcheck_mark_unallocated(void *address, unsigned int n)
  55. {
  56. }
  57. static inline void kmemcheck_mark_uninitialized(void *address, unsigned int n)
  58. {
  59. }
  60. static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
  61. {
  62. }
  63. static inline void kmemcheck_mark_freed(void *address, unsigned int n)
  64. {
  65. }
  66. static inline void kmemcheck_mark_unallocated_pages(struct page *p,
  67. unsigned int n)
  68. {
  69. }
  70. static inline void kmemcheck_mark_uninitialized_pages(struct page *p,
  71. unsigned int n)
  72. {
  73. }
  74. static inline void kmemcheck_mark_initialized_pages(struct page *p,
  75. unsigned int n)
  76. {
  77. }
  78. #endif /* CONFIG_KMEMCHECK */
  79. /*
  80. * Bitfield annotations
  81. *
  82. * How to use: If you have a struct using bitfields, for example
  83. *
  84. * struct a {
  85. * int x:8, y:8;
  86. * };
  87. *
  88. * then this should be rewritten as
  89. *
  90. * struct a {
  91. * kmemcheck_bitfield_begin(flags);
  92. * int x:8, y:8;
  93. * kmemcheck_bitfield_end(flags);
  94. * };
  95. *
  96. * Now the "flags_begin" and "flags_end" members may be used to refer to the
  97. * beginning and end, respectively, of the bitfield (and things like
  98. * &x.flags_begin is allowed). As soon as the struct is allocated, the bit-
  99. * fields should be annotated:
  100. *
  101. * struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
  102. * kmemcheck_annotate_bitfield(a, flags);
  103. *
  104. * Note: We provide the same definitions for both kmemcheck and non-
  105. * kmemcheck kernels. This makes it harder to introduce accidental errors. It
  106. * is also allowed to pass NULL pointers to kmemcheck_annotate_bitfield().
  107. */
  108. #define kmemcheck_bitfield_begin(name) \
  109. int name##_begin[0];
  110. #define kmemcheck_bitfield_end(name) \
  111. int name##_end[0];
  112. #define kmemcheck_annotate_bitfield(ptr, name) \
  113. do if (ptr) { \
  114. int _n = (long) &((ptr)->name##_end) \
  115. - (long) &((ptr)->name##_begin); \
  116. BUILD_BUG_ON(_n < 0); \
  117. \
  118. kmemcheck_mark_initialized(&((ptr)->name##_begin), _n); \
  119. } while (0)
  120. #define kmemcheck_annotate_variable(var) \
  121. do { \
  122. kmemcheck_mark_initialized(&(var), sizeof(var)); \
  123. } while (0) \
  124. #endif /* LINUX_KMEMCHECK_H */