#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#ifndef __ASSEMBLY__

#ifdef __CHECKER__
# define __user __attribute__((noderef, address_space(1)))
# define __kernel /* default address space */
# define __safe __attribute__((safe))
# define __force __attribute__((force))
# define __nocast __attribute__((nocast))
# define __iomem __attribute__((noderef, address_space(2)))
# define __acquires(x) __attribute__((context(x,0,1)))
# define __releases(x) __attribute__((context(x,1,0)))
# define __acquire(x) __context__(x,1)
# define __release(x) __context__(x,-1)
# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
extern void __chk_user_ptr(const volatile void __user *);
extern void __chk_io_ptr(const volatile void __iomem *);
#else
# define __user
# define __kernel
# define __safe
# define __force
# define __nocast
# define __iomem
# define __chk_user_ptr(x) (void)0
# define __chk_io_ptr(x) (void)0
# define __builtin_warning(x, y...) (1)
# define __acquires(x)
# define __releases(x)
# define __acquire(x) (void)0
# define __release(x) (void)0
# define __cond_lock(x,c) (c)
#endif
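
/*
 * Usage sketch (illustrative only; the declarations below use hypothetical
 * names and are kept inside #if 0 so they are never compiled): this is how
 * the sparse annotations above are typically attached to declarations.
 */
#if 0
/* pointer into user space: sparse warns if it is dereferenced directly */
extern long example_copy_to_user(void __user *dst, const void *src,
                                 unsigned long n);

/* lock-context annotations checked by sparse */
extern void example_lock(struct example_lock *l)   __acquires(l);
extern void example_unlock(struct example_lock *l) __releases(l);
#endif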
#ifdef __KERNEL__

#ifdef __GNUC__
#include <linux/compiler-gcc.h>
#endif

#define notrace __attribute__((no_instrument_function))

/*
 * The Intel compiler also defines __GNUC__, so the implementations coming
 * from the header files above are overridden here.
 */
#ifdef __INTEL_COMPILER
# include <linux/compiler-intel.h>
#endif

/*
 * Generic compiler-dependent macros required for the kernel build go below
 * this comment.  Actual compiler- and compiler-version-specific
 * implementations come from the header files included above.
 */
struct ftrace_branch_data {
        const char *func;
        const char *file;
        unsigned line;
        union {
                struct {
                        unsigned long correct;
                        unsigned long incorrect;
                };
                struct {
                        unsigned long miss;
                        unsigned long hit;
                };
                unsigned long miss_hit[2];
        };
};
/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
 * to disable branch tracing on a per file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);

#define likely_notrace(x) __builtin_expect(!!(x), 1)
#define unlikely_notrace(x) __builtin_expect(!!(x), 0)

#define __branch_check__(x, expect) ({ \
                int ______r; \
                static struct ftrace_branch_data \
                        __attribute__((__aligned__(4))) \
                        __attribute__((section("_ftrace_annotated_branch"))) \
                        ______f = { \
                                .func = __func__, \
                                .file = __FILE__, \
                                .line = __LINE__, \
                        }; \
                ______r = likely_notrace(x); \
                ftrace_likely_update(&______f, ______r, expect); \
                ______r; \
        })

/*
 * Using __builtin_constant_p(x) to ignore cases where the return
 * value is always the same.  This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
# define likely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1))
# endif
# ifndef unlikely
# define unlikely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
#define __trace_if(cond) \
        if (__builtin_constant_p((cond)) ? !!(cond) : \
        ({ \
                int ______r; \
                static struct ftrace_branch_data \
                        __attribute__((__aligned__(4))) \
                        __attribute__((section("_ftrace_branch"))) \
                        ______f = { \
                                .func = __func__, \
                                .file = __FILE__, \
                                .line = __LINE__, \
                        }; \
                ______r = !!(cond); \
                ______f.miss_hit[______r]++; \
                ______r; \
        }))
#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x) __builtin_expect(!!(x), 1)
# define unlikely(x) __builtin_expect(!!(x), 0)
#endif
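
/*
 * Usage sketch (illustrative only; example_fault is a hypothetical flag,
 * never compiled): likely()/unlikely() wrap a condition whose usual outcome
 * is known, so the compiler can lay out the common path first.
 */
#if 0
if (unlikely(example_fault))
        return -EFAULT;
#endif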
/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif
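
/*
 * Usage sketch (illustrative only; example_data and example_flag are
 * hypothetical variables, never compiled): barrier() keeps the compiler from
 * reordering or caching accesses across this point; it does not constrain
 * the CPU.
 */
#if 0
example_data = 42;
barrier();              /* don't let the compiler sink the store below */
example_flag = 1;
#endif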
/* Unreachable code */
#ifndef unreachable
# define unreachable() do { } while (1)
#endif

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off) \
  ({ unsigned long __ptr; \
     __ptr = (unsigned long) (ptr); \
    (typeof(ptr)) (__ptr + (off)); })
#endif
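
/*
 * Usage sketch (illustrative only; example_per_cpu_ptr is a hypothetical
 * macro, never compiled): RELOC_HIDE() adds an offset to a pointer while
 * hiding the arithmetic from gcc, so the compiler cannot make assumptions
 * about the resulting address.
 */
#if 0
#define example_per_cpu_ptr(ptr, cpu_offset) RELOC_HIDE((ptr), (cpu_offset))
#endif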
#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */

#ifdef __KERNEL__
/*
 * Allow us to mark functions as 'deprecated' and have gcc emit a nice
 * warning for each use, in hopes of speeding the function's removal.
 * Usage is:
 *              int __deprecated foo(void)
 */
#ifndef __deprecated
# define __deprecated /* unimplemented */
#endif

#ifdef MODULE
#define __deprecated_for_modules __deprecated
#else
#define __deprecated_for_modules
#endif

#ifndef __must_check
#define __must_check
#endif

#ifndef CONFIG_ENABLE_MUST_CHECK
#undef __must_check
#define __must_check
#endif
#ifndef CONFIG_ENABLE_WARN_DEPRECATED
#undef __deprecated
#undef __deprecated_for_modules
#define __deprecated
#define __deprecated_for_modules
#endif
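
/*
 * Usage sketch (illustrative only; function names are hypothetical, never
 * compiled): typical placement of the annotations above on declarations.
 */
#if 0
extern int __deprecated example_old_api(void);  /* warns at each caller */
extern int __must_check example_reserve(void);  /* warns if result is ignored */
#endif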
/*
 * Allow us to avoid 'defined but not used' warnings on functions and data,
 * as well as force them to be emitted to the assembly file.
 *
 * As of gcc 3.4, static functions that are not marked with attribute((used))
 * may be elided from the assembly file.  As of gcc 3.4, static data not so
 * marked will not be elided, but this may change in a future gcc version.
 *
 * NOTE: Because distributions shipped with a backported unit-at-a-time
 * compiler in gcc 3.3, we must define __used to be __attribute__((used))
 * for gcc >=3.3 instead of 3.4.
 *
 * In prior versions of gcc, such functions and data would be emitted, but
 * would be warned about except with attribute((unused)).
 *
 * Mark functions that are referenced only in inline assembly as __used so
 * the code is emitted even though it appears to be unreferenced.
 */
#ifndef __used
# define __used /* unimplemented */
#endif

#ifndef __maybe_unused
# define __maybe_unused /* unimplemented */
#endif

#ifndef __always_unused
# define __always_unused /* unimplemented */
#endif
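
/*
 * Usage sketch (illustrative only; names are hypothetical, never compiled):
 * __used keeps a symbol that is referenced only from inline assembly;
 * __maybe_unused silences the warning for a symbol that may be compiled out.
 */
#if 0
static void __used example_asm_helper(void);    /* referenced only from asm */
static int example_debug_level __maybe_unused;  /* unused in some configs */
#endif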
#ifndef noinline
#define noinline
#endif

/*
 * Rather than using noinline to prevent stack consumption, use
 * noinline_for_stack instead.  For documentation reasons.
 */
#define noinline_for_stack noinline

#ifndef __always_inline
#define __always_inline inline
#endif

#endif /* __KERNEL__ */
/*
 * From the GCC manual:
 *
 * Many functions do not examine any values except their arguments,
 * and have no effects except the return value.  Basically this is
 * just slightly more strict class than the `pure' attribute above,
 * since function is not allowed to read global memory.
 *
 * Note that a function that has pointer arguments and examines the
 * data pointed to must _not_ be declared `const'.  Likewise, a
 * function that calls a non-`const' function usually must not be
 * `const'.  It does not make sense for a `const' function to return
 * `void'.
 */
#ifndef __attribute_const__
# define __attribute_const__ /* unimplemented */
#endif
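
/*
 * Usage sketch (illustrative only; the function name is hypothetical, never
 * compiled): a function whose result depends only on its arguments and which
 * reads no global memory is a candidate for __attribute_const__.
 */
#if 0
extern int __attribute_const__ example_hash32(int x);
#endif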
/*
 * Tell gcc if a function is cold.  The compiler will assume any path
 * directly leading to the call is unlikely.
 */
#ifndef __cold
#define __cold
#endif
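
/*
 * Usage sketch (illustrative only; the function name is hypothetical, never
 * compiled): error-reporting paths are a typical place for __cold.
 */
#if 0
extern void __cold example_report_failure(const char *why);
#endif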
/* Simple shorthand for a section definition */
#ifndef __section
# define __section(S) __attribute__ ((__section__(#S)))
#endif
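
/*
 * Usage sketch (illustrative only; the variable name is hypothetical, never
 * compiled): __section() places a symbol into a named ELF section; note that
 * the macro stringifies its argument.
 */
#if 0
static int example_setup_data __section(.init.data);
#endif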
/* Are two types/vars the same type (ignoring qualifiers)? */
#ifndef __same_type
# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
#endif
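
/*
 * Usage sketch (illustrative only, never compiled): __same_type() is handy
 * for compile-time type checks, e.g. rejecting a pointer where an array is
 * expected.  example_must_be_array is a hypothetical macro, and
 * BUILD_BUG_ON_ZERO() comes from <linux/kernel.h>, outside this header.
 */
#if 0
#define example_must_be_array(a) \
        BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
#endif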
/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
#endif
/*
 * Prevent the compiler from merging or refetching accesses.  The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering.  One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * This macro does absolutely -nothing- to prevent the CPU from reordering,
 * merging, or refetching absolutely anything at any time.  Its main intended
 * use is to mediate communication between process-level code and irq/NMI
 * handlers, all running on the same CPU.
 */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
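
/*
 * Usage sketch (illustrative only; example_done is a hypothetical variable,
 * never compiled): ACCESS_ONCE() forces one real load per use, e.g. when
 * process-level code polls a flag that an interrupt handler updates;
 * cpu_relax() is declared elsewhere in the kernel.
 */
#if 0
while (!ACCESS_ONCE(example_done))
        cpu_relax();
#endif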
#endif /* __LINUX_COMPILER_H */