/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct ftrace_raw_<call> {
 *	struct trace_entry	ent;
 *	<type>			<item>;
 *	<type2>			<item2>[<len>];
 *	[...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */
#include <linux/ftrace_event.h>

#undef __field
#define __field(type, item)		type	item;

#undef __field_ext
#define __field_ext(type, item, filter_type)	type	item;

#undef __array
#define __array(type, item, len)	type	item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\
	struct ftrace_raw_##name {				\
		struct trace_entry	ent;			\
		tstruct						\
		char			__data[0];		\
	};							\
	static struct ftrace_event_call event_##name

#undef __cpparg
#define __cpparg(arg...) arg

/* Callbacks are meaningless to ftrace. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct,		\
		assign, print, reg, unreg)			\
	TRACE_EVENT(name, __cpparg(proto), __cpparg(args),	\
		__cpparg(tstruct), __cpparg(assign), __cpparg(print)) \

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
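
/*
 * As a concrete illustration (a hypothetical event, not defined in this
 * file), a definition such as:
 *
 *	TRACE_EVENT(foo_bar,
 *		TP_PROTO(int bar, const char *name),
 *		TP_ARGS(bar, name),
 *		TP_STRUCT__entry(
 *			__field(int, bar)
 *			__string(name, name)
 *		),
 *		TP_fast_assign(
 *			__entry->bar = bar;
 *			__assign_str(name, name);
 *		),
 *		TP_printk("bar=%d name=%s", __entry->bar, __get_str(name))
 *	);
 *
 * would expand at this stage to roughly:
 *
 *	struct ftrace_raw_foo_bar {
 *		struct trace_entry	ent;
 *		int			bar;
 *		u32			__data_loc_name;
 *		char			__data[0];
 *	};
 *	static struct ftrace_event_call event_foo_bar;
 *
 * The later stages below reuse this same foo_bar sketch.
 */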

/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct ftrace_data_offsets_<call> {
 *	u32				<item1>;
 *	u32				<item2>;
 *	[...]
 * };
 *
 * The __dynamic_array() macro will create each u32 <item>; this is
 * to keep the offset of each array from the beginning of the event.
 * The size of an array is also encoded, in the higher 16 bits of <item>.
 */
#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)	u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)	\
	struct ftrace_data_offsets_##call {			\
		tstruct;					\
	};

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
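
/*
 * For the hypothetical foo_bar event sketched in stage 1, this stage
 * would generate:
 *
 *	struct ftrace_data_offsets_foo_bar {
 *		u32	name;
 *	};
 *
 * (__field(int, bar) contributes nothing here; only dynamic arrays
 * and strings need an offset slot.)
 */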

/*
 * Set up the display format of the trace point:
 *
 * int
 * ftrace_format_##call(struct trace_seq *s)
 * {
 *	struct ftrace_raw_##call field;
 *	int ret;
 *
 *	ret = trace_seq_printf(s, #type " " #item ";"
 *			       " offset:%u; size:%u;\n",
 *			       offsetof(struct ftrace_raw_##call, item),
 *			       sizeof(field.type));
 *
 * }
 */
#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef __field
#define __field(type, item)						\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t"	\
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",	\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item),	\
			       (unsigned int)is_signed_type(type));	\
	if (!ret)							\
		return 0;

#undef __field_ext
#define __field_ext(type, item, filter_type)	__field(type, item)

#undef __array
#define __array(type, item, len)					\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",	\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item),	\
			       (unsigned int)is_signed_type(type));	\
	if (!ret)							\
		return 0;

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	ret = trace_seq_printf(s, "\tfield:__data_loc " #type "[] " #item ";\t"\
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",	\
			       (unsigned int)offsetof(typeof(field),	\
					__data_loc_##item),		\
			       (unsigned int)sizeof(field.__data_loc_##item), \
			       (unsigned int)is_signed_type(type));	\
	if (!ret)							\
		return 0;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __entry
#define __entry REC

#undef __print_symbolic
#undef __get_dynamic_array
#undef __get_str

#undef TP_printk
#define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args)

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef TP_perf_assign
#define TP_perf_assign(args...)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print)		\
static int								\
ftrace_format_##call(struct ftrace_event_call *unused,			\
		     struct trace_seq *s)				\
{									\
	struct ftrace_raw_##call field __attribute__((unused));	\
	int ret = 0;							\
									\
	tstruct;							\
									\
	trace_seq_printf(s, "\nprint fmt: " print);			\
									\
	return ret;							\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
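
/*
 * This is what produces the per-event "format" file that tools read
 * from debugfs. For the hypothetical foo_bar event, the generated
 * function would emit lines along these lines (offsets, sizes and
 * signedness are illustrative and architecture-dependent):
 *
 *	field:int bar;	offset:12;	size:4;	signed:1;
 *	field:__data_loc char[] name;	offset:16;	size:4;	signed:1;
 *
 *	print fmt: "bar=%d name=%s", REC->bar, __get_str(name)
 */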

/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *	struct trace_seq *s = &iter->seq;
 *	struct ftrace_raw_<call> *field; <-- defined in stage 1
 *	struct trace_entry *entry;
 *	struct trace_seq *p;
 *	int ret;
 *
 *	entry = iter->ent;
 *
 *	if (entry->type != event_<call>.id) {
 *		WARN_ON_ONCE(1);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
 *
 *	field = (typeof(field))entry;
 *
 *	p = &get_cpu_var(ftrace_event_seq);
 *	trace_seq_init(p);
 *	ret = trace_seq_printf(s, <TP_printk> "\n");
 *	put_cpu();
 *	if (!ret)
 *		return TRACE_TYPE_PARTIAL_LINE;
 *
 *	return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */
#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __print_flags
#define __print_flags(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags __flags[] =	\
			{ flag_array, { -1, NULL }};			\
		ftrace_print_flags_seq(p, delim, flag, __flags);	\
	})

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags symbols[] =	\
			{ symbol_array, { -1, NULL }};			\
		ftrace_print_symbols_seq(p, value, symbols);		\
	})

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
static enum print_line_t						\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags)	\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##call *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p;						\
	int ret;							\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_##call.id) {				\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	p = &get_cpu_var(ftrace_event_seq);				\
	trace_seq_init(p);						\
	ret = trace_seq_printf(s, #call ": " print);			\
	put_cpu();							\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
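
/*
 * Note how the dynamic-array accessors resolve at this stage: for the
 * hypothetical foo_bar event, __get_str(name) inside TP_printk expands to
 *
 *	(char *)((void *)field + (field->__data_loc_name & 0xffff))
 *
 * i.e. the low 16 bits of __data_loc_name give the offset of the string
 * from the start of the entry (the high 16 bits hold its length; see the
 * ftrace_get_offsets_<call> stage below).
 */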
#undef __field_ext
#define __field_ext(type, item, filter_type)				\
	ret = trace_define_field(event_call, #type, #item,		\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), filter_type);	\
	if (ret)							\
		return ret;

#undef __field
#define __field(type, item)	__field_ext(type, item, FILTER_OTHER)

#undef __array
#define __array(type, item, len)					\
	BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);				\
	ret = trace_define_field(event_call, #type "[" #len "]", #item,	\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item), 0, FILTER_OTHER);	\
	if (ret)							\
		return ret;

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
				 offsetof(typeof(field), __data_loc_##item), \
				 sizeof(field.__data_loc_##item), 0,	\
				 FILTER_OTHER);

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print)		\
static int								\
ftrace_define_fields_##call(struct ftrace_event_call *event_call)	\
{									\
	struct ftrace_raw_##call field;					\
	int ret;							\
									\
	ret = trace_define_common_fields(event_call);			\
	if (ret)							\
		return ret;						\
									\
	tstruct;							\
									\
	return ret;							\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
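
/*
 * These definitions feed the event filtering code. For the hypothetical
 * foo_bar event, ftrace_define_fields_foo_bar() would boil down to:
 *
 *	trace_define_field(event_call, "int", "bar",
 *			   offsetof(struct ftrace_raw_foo_bar, bar),
 *			   sizeof(int), is_signed_type(int), FILTER_OTHER);
 *	trace_define_field(event_call, "__data_loc char[]", "name",
 *			   offsetof(struct ftrace_raw_foo_bar, __data_loc_name),
 *			   sizeof(u32), 0, FILTER_OTHER);
 */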

/*
 * remember the offset of each array from the beginning of the event.
 */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__data_offsets->item = __data_size +				\
			       offsetof(typeof(*entry), __data);	\
	__data_offsets->item |= (len * sizeof(type)) << 16;		\
	__data_size += (len) * sizeof(type);

#undef __string
#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
static inline int ftrace_get_offsets_##call(				\
	struct ftrace_data_offsets_##call *__data_offsets, proto)	\
{									\
	int __data_size = 0;						\
	struct ftrace_raw_##call __maybe_unused *entry;			\
									\
	tstruct;							\
									\
	return __data_size;						\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
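
/*
 * Worked example of the packing above: for __string(name, name) with
 * strlen(name) == 6, __dynamic_array(char, name, 7) stores in
 * __data_offsets->name the offset of the string from the start of the
 * entry in the low 16 bits, OR'ed with (7 * sizeof(char)) << 16 in the
 * high 16 bits; the length includes the terminating NUL.
 */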
#ifdef CONFIG_EVENT_PROFILE

/*
 * Generate the functions needed for tracepoint perf_event support.
 *
 * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
 *
 * static int ftrace_profile_enable_<call>(void)
 * {
 *	return register_trace_<call>(ftrace_profile_<call>);
 * }
 *
 * static void ftrace_profile_disable_<call>(void)
 * {
 *	unregister_trace_<call>(ftrace_profile_<call>);
 * }
 *
 */

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
									\
static void ftrace_profile_##call(proto);				\
									\
static int ftrace_profile_enable_##call(struct ftrace_event_call *unused)\
{									\
	return register_trace_##call(ftrace_profile_##call);		\
}									\
									\
static void ftrace_profile_disable_##call(struct ftrace_event_call *unused)\
{									\
	unregister_trace_##call(ftrace_profile_##call);			\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#endif /* CONFIG_EVENT_PROFILE */

/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * static void ftrace_event_<call>(proto)
 * {
 *	event_trace_printk(_RET_IP_, "<call>: " <fmt>);
 * }
 *
 * static int ftrace_reg_event_<call>(struct ftrace_event_call *unused)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_event_<call>);
 *	if (ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
 * {
 *	unregister_trace_<call>(ftrace_event_<call>);
 * }
 *
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(proto)
 * {
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
 *	struct ring_buffer *buffer;
 *	unsigned long irq_flags;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	event = trace_current_buffer_lock_reserve(&buffer,
 *				  event_<call>.id,
 *				  sizeof(struct ftrace_raw_<call>),
 *				  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *
 *	<assign>;  <-- Here we assign the entries by the __field and
 *		       __array macros.
 *
 *	trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
 * }
 *
 * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_raw_event_<call>);
 *	if (ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
 * {
 *	unregister_trace_<call>(ftrace_raw_event_<call>);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace			= ftrace_raw_output_<call>, <-- stage 3
 * };
 *
 * static int ftrace_raw_init_event_<call>(struct ftrace_event_call *unused)
 * {
 *	int id;
 *
 *	id = register_ftrace_event(&ftrace_event_type_<call>);
 *	if (!id)
 *		return -ENODEV;
 *	event_<call>.id = id;
 *	return 0;
 * }
 *
 * static struct ftrace_event_call __used
 * __attribute__((__aligned__(4)))
 * __attribute__((section("_ftrace_events"))) event_<call> = {
 *	.name			= "<call>",
 *	.system			= "<system>",
 *	.raw_init		= ftrace_raw_init_event_<call>,
 *	.regfunc		= ftrace_reg_event_<call>,
 *	.unregfunc		= ftrace_unreg_event_<call>,
 *	.show_format		= ftrace_format_<call>,
 * }
 *
 */
#undef TP_FMT
#define TP_FMT(fmt, args...)	fmt "\n", ##args

#ifdef CONFIG_EVENT_PROFILE

#define _TRACE_PROFILE_INIT(call)					\
	.profile_count = ATOMIC_INIT(-1),				\
	.profile_enable = ftrace_profile_enable_##call,			\
	.profile_disable = ftrace_profile_disable_##call,

#else
#define _TRACE_PROFILE_INIT(call)
#endif
#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __assign_str
#define __assign_str(dst, src)						\
	strcpy(__get_str(dst), src);
#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
									\
static struct ftrace_event_call event_##call;				\
									\
static void ftrace_raw_event_##call(proto)				\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_event_call *event_call = &event_##call;		\
	struct ring_buffer_event *event;				\
	struct ftrace_raw_##call *entry;				\
	struct ring_buffer *buffer;					\
	unsigned long irq_flags;					\
	int __data_size;						\
	int pc;								\
									\
	local_save_flags(irq_flags);					\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	event = trace_current_buffer_lock_reserve(&buffer,		\
				 event_##call.id,			\
				 sizeof(*entry) + __data_size,		\
				 irq_flags, pc);			\
	if (!event)							\
		return;							\
	entry = ring_buffer_event_data(event);				\
									\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	if (!filter_current_check_discard(buffer, event_call, entry, event)) \
		trace_nowake_buffer_unlock_commit(buffer,		\
						  event, irq_flags, pc); \
}									\
									\
static int ftrace_raw_reg_event_##call(struct ftrace_event_call *unused)\
{									\
	int ret;							\
									\
	ret = register_trace_##call(ftrace_raw_event_##call);		\
	if (ret)							\
		pr_info("event trace: Could not activate trace point "	\
			"probe to " #call "\n");			\
	return ret;							\
}									\
									\
static void ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused)\
{									\
	unregister_trace_##call(ftrace_raw_event_##call);		\
}									\
									\
static struct trace_event ftrace_event_type_##call = {			\
	.trace			= ftrace_raw_output_##call,		\
};									\
									\
static int ftrace_raw_init_event_##call(struct ftrace_event_call *unused)\
{									\
	int id;								\
									\
	id = register_ftrace_event(&ftrace_event_type_##call);		\
	if (!id)							\
		return -ENODEV;						\
	event_##call.id = id;						\
	INIT_LIST_HEAD(&event_##call.fields);				\
	return 0;							\
}									\
									\
static struct ftrace_event_call __used					\
__attribute__((__aligned__(4)))						\
__attribute__((section("_ftrace_events"))) event_##call = {		\
	.name			= #call,				\
	.system			= __stringify(TRACE_SYSTEM),		\
	.event			= &ftrace_event_type_##call,		\
	.raw_init		= ftrace_raw_init_event_##call,		\
	.regfunc		= ftrace_raw_reg_event_##call,		\
	.unregfunc		= ftrace_raw_unreg_event_##call,	\
	.show_format		= ftrace_format_##call,		\
	.define_fields		= ftrace_define_fields_##call,		\
	_TRACE_PROFILE_INIT(call)					\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
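
/*
 * Tying the stages together for the hypothetical foo_bar event: inside
 * ftrace_raw_event_foo_bar(), the tstruct/assign pair above expands to
 * roughly
 *
 *	entry->__data_loc_name = __data_offsets.name;	<- __dynamic_array
 *	{
 *		entry->bar = bar;			<- TP_fast_assign
 *		strcpy(__get_str(name), name);		<- __assign_str
 *	}
 *
 * so the offset word is written first, then the assign block copies the
 * string into the reserved ring buffer space behind the fixed fields.
 */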

/*
 * Define the insertion callback to profile events
 *
 * The job is very similar to ftrace_raw_event_<call> except that we don't
 * insert in the ring buffer but in a perf counter.
 *
 * static void ftrace_profile_<call>(proto)
 * {
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ftrace_event_call *event_call = &event_<call>;
 *	extern void perf_tp_event(int, u64, u64, void *, int);
 *	struct ftrace_raw_<call> *entry;
 *	struct perf_trace_buf *trace_buf;
 *	u64 __addr = 0, __count = 1;
 *	unsigned long irq_flags;
 *	struct trace_entry *ent;
 *	int __entry_size;
 *	int __data_size;
 *	char *raw_data;
 *	int __cpu;
 *	int pc;
 *
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	// Below we want to get the aligned size by taking into account
 *	// the u32 field that will later store the buffer size
 *	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
 *			     sizeof(u64));
 *	__entry_size -= sizeof(u32);
 *
 *	// Protect the non nmi buffer
 *	// This also protects the rcu read side
 *	local_irq_save(irq_flags);
 *	__cpu = smp_processor_id();
 *
 *	if (in_nmi())
 *		trace_buf = rcu_dereference(perf_trace_buf_nmi);
 *	else
 *		trace_buf = rcu_dereference(perf_trace_buf);
 *
 *	if (!trace_buf)
 *		goto end;
 *
 *	trace_buf = per_cpu_ptr(trace_buf, __cpu);
 *
 *	// Avoid recursion from perf that could mess up the buffer
 *	if (trace_buf->recursion++)
 *		goto end_recursion;
 *
 *	raw_data = trace_buf->buf;
 *
 *	// Make recursion update visible before entering perf_tp_event
 *	// so that we protect from perf recursions.
 *
 *	barrier();
 *
 *	// zero dead bytes from alignment to avoid stack leak to userspace:
 *	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
 *	entry = (struct ftrace_raw_<call> *)raw_data;
 *	ent = &entry->ent;
 *	tracing_generic_entry_update(ent, irq_flags, pc);
 *	ent->type = event_call->id;
 *
 *	<tstruct> <- do some jobs with dynamic arrays
 *
 *	<assign>  <- assign our values
 *
 *	perf_tp_event(event_call->id, __addr, __count, entry,
 *		      __entry_size);  <- submit them to perf counter
 *
 * }
 */
#ifdef CONFIG_EVENT_PROFILE

#undef __perf_addr
#define __perf_addr(a) __addr = (a)

#undef __perf_count
#define __perf_count(c) __count = (c)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
static void ftrace_profile_##call(proto)				\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_event_call *event_call = &event_##call;		\
	extern void perf_tp_event(int, u64, u64, void *, int);		\
	struct ftrace_raw_##call *entry;				\
	struct perf_trace_buf *trace_buf;				\
	u64 __addr = 0, __count = 1;					\
	unsigned long irq_flags;					\
	struct trace_entry *ent;					\
	int __entry_size;						\
	int __data_size;						\
	char *raw_data;							\
	int __cpu;							\
	int pc;								\
									\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64));				\
	__entry_size -= sizeof(u32);					\
									\
	if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE,		\
		      "profile buffer not large enough"))		\
		return;							\
									\
	local_irq_save(irq_flags);					\
	__cpu = smp_processor_id();					\
									\
	if (in_nmi())							\
		trace_buf = rcu_dereference(perf_trace_buf_nmi);	\
	else								\
		trace_buf = rcu_dereference(perf_trace_buf);		\
									\
	if (!trace_buf)							\
		goto end;						\
									\
	trace_buf = per_cpu_ptr(trace_buf, __cpu);			\
	if (trace_buf->recursion++)					\
		goto end_recursion;					\
									\
	barrier();							\
									\
	raw_data = trace_buf->buf;					\
									\
	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;	\
	entry = (struct ftrace_raw_##call *)raw_data;			\
	ent = &entry->ent;						\
	tracing_generic_entry_update(ent, irq_flags, pc);		\
	ent->type = event_call->id;					\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	perf_tp_event(event_call->id, __addr, __count, entry,		\
		      __entry_size);					\
									\
end_recursion:								\
	trace_buf->recursion--;						\
end:									\
	local_irq_restore(irq_flags);					\
									\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_EVENT_PROFILE */
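
/*
 * __perf_addr() and __perf_count() above let an event override the
 * address and count values handed to perf_tp_event() (they default to
 * 0 and 1). A hypothetical use, modeled on the scheduler stat events
 * (not defined in this file):
 *
 *	TP_perf_assign(
 *		__perf_count(runtime);
 *	)
 *
 * which would make each sample weigh in with the runtime value instead
 * of a plain count of 1.
 */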
#undef _TRACE_PROFILE_INIT