ftrace.h

/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct ftrace_raw_<call> {
 *         struct trace_entry ent;
 *         <type> <item>;
 *         <type2> <item2>[<len>];
 *         [...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */

#include <linux/ftrace_event.h>

#undef __field
#define __field(type, item) type item;

#undef __field_ext
#define __field_ext(type, item, filter_type) type item;

#undef __array
#define __array(type, item, len) type item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
        struct ftrace_raw_##name { \
                struct trace_entry ent; \
                tstruct \
                char __data[0]; \
        }; \
        static struct ftrace_event_call event_##name

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
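
/*
 * Illustration (not part of the original header): assuming a hypothetical
 * event declared as TRACE_EVENT(foo_bar, ...) whose TP_STRUCT__entry uses
 * __array(char, foo, 10) and __field(int, bar), stage 1 would expand to
 * roughly:
 *
 *         struct ftrace_raw_foo_bar {
 *                 struct trace_entry ent;
 *                 char foo[10];
 *                 int bar;
 *                 char __data[0];
 *         };
 *         static struct ftrace_event_call event_foo_bar;
 */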

/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct ftrace_data_offsets_<call> {
 *         u32 <item1>;
 *         u32 <item2>;
 *         [...]
 * };
 *
 * The __dynamic_array() macro creates each u32 <item>; it holds the offset
 * of that array from the beginning of the event. The size of the array is
 * also encoded, in the upper 16 bits of <item>.
 */

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
        struct ftrace_data_offsets_##call { \
                tstruct; \
        };

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
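
/*
 * Illustration (not part of the original header): if the hypothetical
 * foo_bar event above were extended with __string(msg, msg), stage 2 would
 * produce roughly:
 *
 *         struct ftrace_data_offsets_foo_bar {
 *                 u32 msg;
 *         };
 *
 * Only dynamic arrays and strings contribute members here; plain __field()
 * and __array() entries expand to nothing at this stage.
 */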

/*
 * Set up the format for printing the trace point.
 *
 * int
 * ftrace_format_##call(struct trace_seq *s)
 * {
 *         struct ftrace_raw_##call field;
 *         int ret;
 *
 *         ret = trace_seq_printf(s, #type " " #item ";"
 *                                " offset:%u; size:%u;\n",
 *                                offsetof(struct ftrace_raw_##call, item),
 *                                sizeof(field.type));
 *
 * }
 */

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef __field
#define __field(type, item) \
        ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \
                               "offset:%u;\tsize:%u;\n", \
                               (unsigned int)offsetof(typeof(field), item), \
                               (unsigned int)sizeof(field.item)); \
        if (!ret) \
                return 0;

#undef __field_ext
#define __field_ext(type, item, filter_type) __field(type, item)

#undef __array
#define __array(type, item, len) \
        ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
                               "offset:%u;\tsize:%u;\n", \
                               (unsigned int)offsetof(typeof(field), item), \
                               (unsigned int)sizeof(field.item)); \
        if (!ret) \
                return 0;

#undef __dynamic_array
#define __dynamic_array(type, item, len) \
        ret = trace_seq_printf(s, "\tfield:__data_loc " #type "[] " #item ";\t" \
                               "offset:%u;\tsize:%u;\n", \
                               (unsigned int)offsetof(typeof(field), \
                                                      __data_loc_##item), \
                               (unsigned int)sizeof(field.__data_loc_##item)); \
        if (!ret) \
                return 0;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __entry
#define __entry REC

#undef __print_symbolic
#undef __get_dynamic_array
#undef __get_str

#undef TP_printk
#define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args)

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef TP_perf_assign
#define TP_perf_assign(args...)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print) \
static int \
ftrace_format_##call(struct ftrace_event_call *unused, \
                     struct trace_seq *s) \
{ \
        struct ftrace_raw_##call field __attribute__((unused)); \
        int ret = 0; \
\
        tstruct; \
\
        trace_seq_printf(s, "\nprint fmt: " print); \
\
        return ret; \
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
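
/*
 * Illustration (not part of the original header): for the hypothetical
 * foo_bar event, the generated ftrace_format_foo_bar() would emit text
 * along these lines (the offsets and sizes here are made up):
 *
 *         field:char foo[10];     offset:12;      size:10;
 *         field:int bar;          offset:24;      size:4;
 *
 *         print fmt: "foo %s %d", REC->foo, REC->bar
 */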

/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *         struct trace_seq *s = &iter->seq;
 *         struct ftrace_raw_<call> *field; <-- defined in stage 1
 *         struct trace_entry *entry;
 *         struct trace_seq *p;
 *         int ret;
 *
 *         entry = iter->ent;
 *
 *         if (entry->type != event_<call>.id) {
 *                 WARN_ON_ONCE(1);
 *                 return TRACE_TYPE_UNHANDLED;
 *         }
 *
 *         field = (typeof(field))entry;
 *
 *         p = &get_cpu_var(ftrace_event_seq);
 *         trace_seq_init(p);
 *         ret = trace_seq_printf(s, <TP_printk> "\n");
 *         put_cpu();
 *         if (!ret)
 *                 return TRACE_TYPE_PARTIAL_LINE;
 *
 *         return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */

#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field) \
        ((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __print_flags
#define __print_flags(flag, delim, flag_array...) \
        ({ \
                static const struct trace_print_flags flags[] = \
                        { flag_array, { -1, NULL }}; \
                ftrace_print_flags_seq(p, delim, flag, flags); \
        })

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...) \
        ({ \
                static const struct trace_print_flags symbols[] = \
                        { symbol_array, { -1, NULL }}; \
                ftrace_print_symbols_seq(p, value, symbols); \
        })

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
enum print_line_t \
ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
{ \
        struct trace_seq *s = &iter->seq; \
        struct ftrace_raw_##call *field; \
        struct trace_entry *entry; \
        struct trace_seq *p; \
        int ret; \
\
        entry = iter->ent; \
\
        if (entry->type != event_##call.id) { \
                WARN_ON_ONCE(1); \
                return TRACE_TYPE_UNHANDLED; \
        } \
\
        field = (typeof(field))entry; \
\
        p = &get_cpu_var(ftrace_event_seq); \
        trace_seq_init(p); \
        ret = trace_seq_printf(s, #call ": " print); \
        put_cpu(); \
        if (!ret) \
                return TRACE_TYPE_PARTIAL_LINE; \
\
        return TRACE_TYPE_HANDLED; \
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
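
/*
 * Illustration (not part of the original header): __print_flags() and
 * __print_symbolic() are meant to be used inside TP_printk(). A made-up
 * example for an event carrying a "flags" field might look like:
 *
 *         TP_printk("flags=%s", __print_flags(__entry->flags, "|",
 *                 { 1, "READ" }, { 2, "WRITE" }))
 *
 * At output time this expands, via the macro above, into a call to
 * ftrace_print_flags_seq() on the per-cpu trace_seq "p".
 */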

#undef __field_ext
#define __field_ext(type, item, filter_type) \
        ret = trace_define_field(event_call, #type, #item, \
                                 offsetof(typeof(field), item), \
                                 sizeof(field.item), \
                                 is_signed_type(type), filter_type); \
        if (ret) \
                return ret;

#undef __field
#define __field(type, item) __field_ext(type, item, FILTER_OTHER)

#undef __array
#define __array(type, item, len) \
        BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
        ret = trace_define_field(event_call, #type "[" #len "]", #item, \
                                 offsetof(typeof(field), item), \
                                 sizeof(field.item), 0, FILTER_OTHER); \
        if (ret) \
                return ret;

#undef __dynamic_array
#define __dynamic_array(type, item, len) \
        ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
                                 offsetof(typeof(field), __data_loc_##item), \
                                 sizeof(field.__data_loc_##item), 0, \
                                 FILTER_OTHER);

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print) \
int \
ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
{ \
        struct ftrace_raw_##call field; \
        int ret; \
\
        ret = trace_define_common_fields(event_call); \
        if (ret) \
                return ret; \
\
        tstruct; \
\
        return ret; \
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
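
/*
 * Illustration (not part of the original header): for the hypothetical
 * __field(int, bar) above, ftrace_define_fields_foo_bar() would contain
 * roughly:
 *
 *         ret = trace_define_field(event_call, "int", "bar",
 *                                  offsetof(typeof(field), bar),
 *                                  sizeof(field.bar),
 *                                  is_signed_type(int), FILTER_OTHER);
 *
 * These field definitions are what the event filter code later uses to
 * parse and apply filter expressions against recorded entries.
 */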

/*
 * Remember the offset of each array from the beginning of the event.
 */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len) \
        __data_offsets->item = __data_size + \
                               offsetof(typeof(*entry), __data); \
        __data_offsets->item |= (len * sizeof(type)) << 16; \
        __data_size += (len) * sizeof(type);

#undef __string
#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
static inline int ftrace_get_offsets_##call( \
        struct ftrace_data_offsets_##call *__data_offsets, proto) \
{ \
        int __data_size = 0; \
        struct ftrace_raw_##call __maybe_unused *entry; \
\
        tstruct; \
\
        return __data_size; \
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
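
/*
 * Illustration (not part of the original header): suppose the hypothetical
 * foo_bar event carries __string(msg, msg) and records the string "hi"
 * (3 bytes including the NUL), and suppose the static part of the record
 * ends at offset 28 (i.e. offsetof(struct ftrace_raw_foo_bar, __data) == 28).
 * The generated ftrace_get_offsets_foo_bar() then computes:
 *
 *         __data_offsets.msg  = 28;          // offset of the string data
 *         __data_offsets.msg |= 3 << 16;     // length in the upper 16 bits
 *         __data_size         = 3;           // extra bytes to reserve
 *
 * Stage 3's __get_dynamic_array() later masks off the upper 16 bits to
 * recover the offset.
 */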

#ifdef CONFIG_EVENT_PROFILE

/*
 * Generate the functions needed for tracepoint perf_counter support.
 *
 * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
 *
 * static int ftrace_profile_enable_<call>(struct ftrace_event_call *event_call)
 * {
 *         int ret = 0;
 *
 *         if (!atomic_inc_return(&event_call->profile_count))
 *                 ret = register_trace_<call>(ftrace_profile_<call>);
 *
 *         return ret;
 * }
 *
 * static void ftrace_profile_disable_<call>(struct ftrace_event_call *event_call)
 * {
 *         if (atomic_add_negative(-1, &event_call->profile_count))
 *                 unregister_trace_<call>(ftrace_profile_<call>);
 * }
 *
 */

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
\
static void ftrace_profile_##call(proto); \
\
static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
{ \
        int ret = 0; \
\
        if (!atomic_inc_return(&event_call->profile_count)) \
                ret = register_trace_##call(ftrace_profile_##call); \
\
        return ret; \
} \
\
static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
{ \
        if (atomic_add_negative(-1, &event_call->profile_count)) \
                unregister_trace_##call(ftrace_profile_##call); \
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#endif

/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * static void ftrace_event_<call>(proto)
 * {
 *         event_trace_printk(_RET_IP_, "<call>: " <fmt>);
 * }
 *
 * static int ftrace_reg_event_<call>(void)
 * {
 *         int ret;
 *
 *         ret = register_trace_<call>(ftrace_event_<call>);
 *         if (ret)
 *                 pr_info("event trace: Could not activate trace point "
 *                         "probe to <call>");
 *         return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(void)
 * {
 *         unregister_trace_<call>(ftrace_event_<call>);
 * }
 *
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(proto)
 * {
 *         struct ring_buffer_event *event;
 *         struct ftrace_raw_<call> *entry; <-- defined in stage 1
 *         unsigned long irq_flags;
 *         int pc;
 *
 *         local_save_flags(irq_flags);
 *         pc = preempt_count();
 *
 *         event = trace_current_buffer_lock_reserve(event_<call>.id,
 *                                 sizeof(struct ftrace_raw_<call>),
 *                                 irq_flags, pc);
 *         if (!event)
 *                 return;
 *         entry = ring_buffer_event_data(event);
 *
 *         <assign>;  <-- Here we assign the entries by the __field and
 *                        __array macros.
 *
 *         trace_current_buffer_unlock_commit(event, irq_flags, pc);
 * }
 *
 * static int ftrace_raw_reg_event_<call>(void)
 * {
 *         int ret;
 *
 *         ret = register_trace_<call>(ftrace_raw_event_<call>);
 *         if (ret)
 *                 pr_info("event trace: Could not activate trace point "
 *                         "probe to <call>");
 *         return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(void)
 * {
 *         unregister_trace_<call>(ftrace_raw_event_<call>);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *         .trace = ftrace_raw_output_<call>, <-- stage 3
 * };
 *
 * static int ftrace_raw_init_event_<call>(void)
 * {
 *         int id;
 *
 *         id = register_ftrace_event(&ftrace_event_type_<call>);
 *         if (!id)
 *                 return -ENODEV;
 *         event_<call>.id = id;
 *         return 0;
 * }
 *
 * static struct ftrace_event_call __used
 * __attribute__((__aligned__(4)))
 * __attribute__((section("_ftrace_events"))) event_<call> = {
 *         .name = "<call>",
 *         .system = "<system>",
 *         .raw_init = ftrace_raw_init_event_<call>,
 *         .regfunc = ftrace_reg_event_<call>,
 *         .unregfunc = ftrace_unreg_event_<call>,
 *         .show_format = ftrace_format_<call>,
 * }
 *
 */

#undef TP_FMT
#define TP_FMT(fmt, args...) fmt "\n", ##args

#ifdef CONFIG_EVENT_PROFILE
#define _TRACE_PROFILE_INIT(call) \
        .profile_count = ATOMIC_INIT(-1), \
        .profile_enable = ftrace_profile_enable_##call, \
        .profile_disable = ftrace_profile_disable_##call,

#else
#define _TRACE_PROFILE_INIT(call)
#endif

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len) \
        __entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __assign_str
#define __assign_str(dst, src) \
        strcpy(__get_str(dst), src);

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
\
static struct ftrace_event_call event_##call; \
\
static void ftrace_raw_event_##call(proto) \
{ \
        struct ftrace_data_offsets_##call __maybe_unused __data_offsets; \
        struct ftrace_event_call *event_call = &event_##call; \
        struct ring_buffer_event *event; \
        struct ftrace_raw_##call *entry; \
        unsigned long irq_flags; \
        int __data_size; \
        int pc; \
\
        local_save_flags(irq_flags); \
        pc = preempt_count(); \
\
        __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
\
        event = trace_current_buffer_lock_reserve(event_##call.id, \
                                 sizeof(*entry) + __data_size, \
                                 irq_flags, pc); \
        if (!event) \
                return; \
        entry = ring_buffer_event_data(event); \
\
\
        tstruct \
\
        { assign; } \
\
        if (!filter_current_check_discard(event_call, entry, event)) \
                trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \
} \
\
static int ftrace_raw_reg_event_##call(void *ptr) \
{ \
        int ret; \
\
        ret = register_trace_##call(ftrace_raw_event_##call); \
        if (ret) \
                pr_info("event trace: Could not activate trace point " \
                        "probe to " #call "\n"); \
        return ret; \
} \
\
static void ftrace_raw_unreg_event_##call(void *ptr) \
{ \
        unregister_trace_##call(ftrace_raw_event_##call); \
} \
\
static struct trace_event ftrace_event_type_##call = { \
        .trace = ftrace_raw_output_##call, \
}; \
\
static int ftrace_raw_init_event_##call(void) \
{ \
        int id; \
\
        id = register_ftrace_event(&ftrace_event_type_##call); \
        if (!id) \
                return -ENODEV; \
        event_##call.id = id; \
        INIT_LIST_HEAD(&event_##call.fields); \
        init_preds(&event_##call); \
        return 0; \
} \
\
static struct ftrace_event_call __used \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_events"))) event_##call = { \
        .name = #call, \
        .system = __stringify(TRACE_SYSTEM), \
        .event = &ftrace_event_type_##call, \
        .raw_init = ftrace_raw_init_event_##call, \
        .regfunc = ftrace_raw_reg_event_##call, \
        .unregfunc = ftrace_raw_unreg_event_##call, \
        .show_format = ftrace_format_##call, \
        .define_fields = ftrace_define_fields_##call, \
        _TRACE_PROFILE_INIT(call) \
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
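
/*
 * Illustration (not part of the original header): a minimal event
 * definition as it might appear in a trace header that gets run through
 * the stages above. All names here (foo_bar, foo, bar) are made up:
 *
 *         TRACE_EVENT(foo_bar,
 *
 *                 TP_PROTO(char *foo, int bar),
 *
 *                 TP_ARGS(foo, bar),
 *
 *                 TP_STRUCT__entry(
 *                         __array(char, foo, 10)
 *                         __field(int, bar)
 *                 ),
 *
 *                 TP_fast_assign(
 *                         strncpy(__entry->foo, foo, 10);
 *                         __entry->bar = bar;
 *                 ),
 *
 *                 TP_printk("foo %s %d", __entry->foo, __entry->bar)
 *         );
 */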

/*
 * Define the insertion callback to profile events
 *
 * The job is very similar to ftrace_raw_event_<call> except that we don't
 * insert into the ring buffer but into a perf counter.
 *
 * static void ftrace_profile_<call>(proto)
 * {
 *         struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *         struct ftrace_event_call *event_call = &event_<call>;
 *         extern void perf_tpcounter_event(int, u64, u64, void *, int);
 *         struct ftrace_raw_<call> *entry;
 *         u64 __addr = 0, __count = 1;
 *         unsigned long irq_flags;
 *         int __entry_size;
 *         int __data_size;
 *         int pc;
 *
 *         local_save_flags(irq_flags);
 *         pc = preempt_count();
 *
 *         __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *         // Below we want to get the aligned size by taking into account
 *         // the u32 field that will later store the buffer size
 *         __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
 *                              sizeof(u64));
 *         __entry_size -= sizeof(u32);
 *
 *         do {
 *                 char raw_data[__entry_size]; <- allocate our sample on the stack
 *                 struct trace_entry *ent;
 *
 *                 zero dead bytes from alignment to avoid stack leak to userspace:
 *
 *                 *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
 *                 entry = (struct ftrace_raw_<call> *)raw_data;
 *                 ent = &entry->ent;
 *                 tracing_generic_entry_update(ent, irq_flags, pc);
 *                 ent->type = event_call->id;
 *
 *                 <tstruct> <- do some jobs with dynamic arrays
 *
 *                 <assign>  <- assign our values
 *
 *                 perf_tpcounter_event(event_call->id, __addr, __count, entry,
 *                                      __entry_size); <- submit them to perf counter
 *         } while (0);
 *
 * }
 */

#ifdef CONFIG_EVENT_PROFILE

#undef __perf_addr
#define __perf_addr(a) __addr = (a)

#undef __perf_count
#define __perf_count(c) __count = (c)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
static void ftrace_profile_##call(proto) \
{ \
        struct ftrace_data_offsets_##call __maybe_unused __data_offsets; \
        struct ftrace_event_call *event_call = &event_##call; \
        extern void perf_tpcounter_event(int, u64, u64, void *, int); \
        struct ftrace_raw_##call *entry; \
        u64 __addr = 0, __count = 1; \
        unsigned long irq_flags; \
        int __entry_size; \
        int __data_size; \
        int pc; \
\
        local_save_flags(irq_flags); \
        pc = preempt_count(); \
\
        __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
        __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32), \
                             sizeof(u64)); \
        __entry_size -= sizeof(u32); \
\
        do { \
                char raw_data[__entry_size]; \
                struct trace_entry *ent; \
\
                *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; \
                entry = (struct ftrace_raw_##call *)raw_data; \
                ent = &entry->ent; \
                tracing_generic_entry_update(ent, irq_flags, pc); \
                ent->type = event_call->id; \
\
                tstruct \
\
                { assign; } \
\
                perf_tpcounter_event(event_call->id, __addr, __count, entry, \
                                     __entry_size); \
        } while (0); \
\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_EVENT_PROFILE */

#undef _TRACE_PROFILE_INIT