ftrace.h
/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct ftrace_raw_<call> {
 *         struct trace_entry  ent;
 *         <type>              <item>;
 *         <type2>             <item2>[<len>];
 *         [...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */
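/*
 * As a concrete sketch (a hypothetical "foo_bar" event, invented here
 * purely for illustration), a definition such as:
 *
 *         TRACE_EVENT(foo_bar,
 *                 TP_PROTO(const char *name, int value),
 *                 TP_ARGS(name, value),
 *                 TP_STRUCT__entry(
 *                         __string(name, name)
 *                         __field(int, value)
 *                 ),
 *                 TP_fast_assign(
 *                         __assign_str(name, name);
 *                         __entry->value = value;
 *                 ),
 *                 TP_printk("name=%s value=%d", __get_str(name), __entry->value)
 *         );
 *
 * would have this stage emit roughly:
 *
 *         struct ftrace_raw_foo_bar {
 *                 struct trace_entry  ent;
 *                 u32                 __data_loc_name;
 *                 int                 value;
 *                 char                __data[0];
 *         };
 */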
#include <linux/ftrace_event.h>

/*
 * DECLARE_EVENT_CLASS can be used to add generic function handlers
 * for events. That is, it is useful when all events have the same
 * parameters and just have distinct trace points.
 * Each tracepoint can be defined with DEFINE_EVENT, and that
 * will map the DECLARE_EVENT_CLASS to the tracepoint.
 *
 * TRACE_EVENT is a one to one mapping between tracepoint and template.
 */
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
        DECLARE_EVENT_CLASS(name, \
                            PARAMS(proto), \
                            PARAMS(args), \
                            PARAMS(tstruct), \
                            PARAMS(assign), \
                            PARAMS(print)); \
        DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));

#undef __field
#define __field(type, item) type item;

#undef __field_ext
#define __field_ext(type, item, filter_type) type item;

#undef __array
#define __array(type, item, len) type item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \
        struct ftrace_raw_##name { \
                struct trace_entry ent; \
                tstruct \
                char __data[0]; \
        };

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args) \
        static struct ftrace_event_call event_##name

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#undef __cpparg
#define __cpparg(arg...) arg

/* Callbacks are meaningless to ftrace. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct, \
                assign, print, reg, unreg) \
        TRACE_EVENT(name, __cpparg(proto), __cpparg(args), \
                __cpparg(tstruct), __cpparg(assign), __cpparg(print))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct ftrace_data_offsets_<call> {
 *         u32 <item1>;
 *         u32 <item2>;
 *         [...]
 * };
 *
 * The __dynamic_array() macro creates each u32 <item>, which holds the
 * offset of that array from the beginning of the event.
 * The size of the array is also encoded, in the upper 16 bits of <item>.
 */
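/*
 * Continuing the hypothetical "foo_bar" example sketched in stage 1,
 * this stage would emit roughly:
 *
 *         struct ftrace_data_offsets_foo_bar {
 *                 u32 name;
 *         };
 *
 * because only __string()/__dynamic_array() fields need an offset slot;
 * __field() and __array() expand to nothing here.
 */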
#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
        struct ftrace_data_offsets_##call { \
                tstruct; \
        };

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *         struct trace_seq *s = &iter->seq;
 *         struct ftrace_raw_<call> *field; <-- defined in stage 1
 *         struct trace_entry *entry;
 *         struct trace_seq *p;
 *         int ret;
 *
 *         entry = iter->ent;
 *
 *         if (entry->type != event_<call>.id) {
 *                 WARN_ON_ONCE(1);
 *                 return TRACE_TYPE_UNHANDLED;
 *         }
 *
 *         field = (typeof(field))entry;
 *
 *         p = &get_cpu_var(ftrace_event_seq);
 *         trace_seq_init(p);
 *         ret = trace_seq_printf(s, <TP_printk> "\n");
 *         put_cpu();
 *         if (!ret)
 *                 return TRACE_TYPE_PARTIAL_LINE;
 *
 *         return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the human-readable
 * trace output. Note that it is not needed if the data is read out in
 * binary form.
 */
#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field) \
        ((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __print_flags
#define __print_flags(flag, delim, flag_array...) \
        ({ \
                static const struct trace_print_flags __flags[] = \
                        { flag_array, { -1, NULL }}; \
                ftrace_print_flags_seq(p, delim, flag, __flags); \
        })

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...) \
        ({ \
                static const struct trace_print_flags symbols[] = \
                        { symbol_array, { -1, NULL }}; \
                ftrace_print_symbols_seq(p, value, symbols); \
        })
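/*
 * A usage sketch (flag names invented for illustration): inside a
 * TP_printk() one can write
 *
 *         __print_flags(__entry->flags, "|", { 1, "READ" }, { 2, "WRITE" })
 *
 * which expands to an ftrace_print_flags_seq() call over a static
 * trace_print_flags table terminated by { -1, NULL }, using the per-cpu
 * trace_seq 'p' set up by the output functions below as scratch space.
 */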
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static notrace enum print_line_t \
ftrace_raw_output_id_##call(int event_id, const char *name, \
                            struct trace_iterator *iter, int flags) \
{ \
        struct trace_seq *s = &iter->seq; \
        struct ftrace_raw_##call *field; \
        struct trace_entry *entry; \
        struct trace_seq *p; \
        int ret; \
\
        entry = iter->ent; \
\
        if (entry->type != event_id) { \
                WARN_ON_ONCE(1); \
                return TRACE_TYPE_UNHANDLED; \
        } \
\
        field = (typeof(field))entry; \
\
        p = &get_cpu_var(ftrace_event_seq); \
        trace_seq_init(p); \
        ret = trace_seq_printf(s, "%s: ", name); \
        if (ret) \
                ret = trace_seq_printf(s, print); \
        put_cpu(); \
        if (!ret) \
                return TRACE_TYPE_PARTIAL_LINE; \
\
        return TRACE_TYPE_HANDLED; \
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args) \
static notrace enum print_line_t \
ftrace_raw_output_##name(struct trace_iterator *iter, int flags) \
{ \
        return ftrace_raw_output_id_##template(event_##name.id, \
                                               #name, iter, flags); \
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
static notrace enum print_line_t \
ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
{ \
        struct trace_seq *s = &iter->seq; \
        struct ftrace_raw_##template *field; \
        struct trace_entry *entry; \
        struct trace_seq *p; \
        int ret; \
\
        entry = iter->ent; \
\
        if (entry->type != event_##call.id) { \
                WARN_ON_ONCE(1); \
                return TRACE_TYPE_UNHANDLED; \
        } \
\
        field = (typeof(field))entry; \
\
        p = &get_cpu_var(ftrace_event_seq); \
        trace_seq_init(p); \
        ret = trace_seq_printf(s, "%s: ", #call); \
        if (ret) \
                ret = trace_seq_printf(s, print); \
        put_cpu(); \
        if (!ret) \
                return TRACE_TYPE_PARTIAL_LINE; \
\
        return TRACE_TYPE_HANDLED; \
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#undef __field_ext
#define __field_ext(type, item, filter_type) \
        ret = trace_define_field(event_call, #type, #item, \
                                 offsetof(typeof(field), item), \
                                 sizeof(field.item), \
                                 is_signed_type(type), filter_type); \
        if (ret) \
                return ret;

#undef __field
#define __field(type, item) __field_ext(type, item, FILTER_OTHER)

#undef __array
#define __array(type, item, len) \
        BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
        ret = trace_define_field(event_call, #type "[" #len "]", #item, \
                                 offsetof(typeof(field), item), \
                                 sizeof(field.item), \
                                 is_signed_type(type), FILTER_OTHER); \
        if (ret) \
                return ret;

#undef __dynamic_array
#define __dynamic_array(type, item, len) \
        ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
                                 offsetof(typeof(field), __data_loc_##item), \
                                 sizeof(field.__data_loc_##item), \
                                 is_signed_type(type), FILTER_OTHER);

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \
static int notrace \
ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
{ \
        struct ftrace_raw_##call field; \
        int ret; \
\
        tstruct; \
\
        return ret; \
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
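/*
 * For the hypothetical foo_bar event this stage emits, roughly:
 *
 *         static int ftrace_define_fields_foo_bar(struct ftrace_event_call *event_call)
 *         {
 *                 struct ftrace_raw_foo_bar field;
 *                 int ret;
 *
 *                 ret = trace_define_field(event_call, "__data_loc char[]",
 *                                          "name",
 *                                          offsetof(typeof(field), __data_loc_name),
 *                                          sizeof(field.__data_loc_name),
 *                                          is_signed_type(char), FILTER_OTHER);
 *                 ret = trace_define_field(event_call, "int", "value",
 *                                          offsetof(typeof(field), value),
 *                                          sizeof(field.value),
 *                                          is_signed_type(int), FILTER_OTHER);
 *                 if (ret)
 *                         return ret;
 *
 *                 return ret;
 *         }
 *
 * which registers each field with the event filter code.
 */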
/*
 * Remember the offset of each array from the beginning of the event.
 */
#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len) \
        __data_offsets->item = __data_size + \
                               offsetof(typeof(*entry), __data); \
        __data_offsets->item |= (len * sizeof(type)) << 16; \
        __data_size += (len) * sizeof(type);

#undef __string
#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static inline notrace int ftrace_get_offsets_##call( \
        struct ftrace_data_offsets_##call *__data_offsets, proto) \
{ \
        int __data_size = 0; \
        struct ftrace_raw_##call __maybe_unused *entry; \
\
        tstruct; \
\
        return __data_size; \
}
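/*
 * Worked sketch for the hypothetical foo_bar event with name = "bar":
 * __string(name, name) becomes __dynamic_array(char, name, 4) (strlen + 1),
 * so __data_offsets.name holds offsetof(struct ftrace_raw_foo_bar, __data)
 * in its low 16 bits and the byte length (4 * sizeof(char)) in its upper
 * 16 bits, and ftrace_get_offsets_foo_bar() returns 4.
 */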
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#ifdef CONFIG_EVENT_PROFILE

/*
 * Generate the functions needed for tracepoint perf_event support.
 *
 * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later.
 *
 * static int ftrace_profile_enable_<call>(void)
 * {
 *         return register_trace_<call>(ftrace_profile_<call>);
 * }
 *
 * static void ftrace_profile_disable_<call>(void)
 * {
 *         unregister_trace_<call>(ftrace_profile_<call>);
 * }
 */

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args) \
\
static void ftrace_profile_##name(proto); \
\
static notrace int \
ftrace_profile_enable_##name(struct ftrace_event_call *unused) \
{ \
        return register_trace_##name(ftrace_profile_##name); \
} \
\
static notrace void \
ftrace_profile_disable_##name(struct ftrace_event_call *unused) \
{ \
        unregister_trace_##name(ftrace_profile_##name); \
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#endif /* CONFIG_EVENT_PROFILE */
/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * static void ftrace_event_<call>(proto)
 * {
 *         event_trace_printk(_RET_IP_, "<call>: " <fmt>);
 * }
 *
 * static int ftrace_reg_event_<call>(struct ftrace_event_call *unused)
 * {
 *         return register_trace_<call>(ftrace_event_<call>);
 * }
 *
 * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
 * {
 *         unregister_trace_<call>(ftrace_event_<call>);
 * }
 *
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(proto)
 * {
 *         struct ring_buffer_event *event;
 *         struct ftrace_raw_<call> *entry; <-- defined in stage 1
 *         struct ring_buffer *buffer;
 *         unsigned long irq_flags;
 *         int pc;
 *
 *         local_save_flags(irq_flags);
 *         pc = preempt_count();
 *
 *         event = trace_current_buffer_lock_reserve(&buffer,
 *                                   event_<call>.id,
 *                                   sizeof(struct ftrace_raw_<call>),
 *                                   irq_flags, pc);
 *         if (!event)
 *                 return;
 *         entry = ring_buffer_event_data(event);
 *
 *         <assign>; <-- Here we assign the entries by the __field and
 *                       __array macros.
 *
 *         trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
 * }
 *
 * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused)
 * {
 *         int ret;
 *
 *         ret = register_trace_<call>(ftrace_raw_event_<call>);
 *         if (ret)
 *                 pr_info("event trace: Could not activate trace point "
 *                         "probe to <call>");
 *         return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
 * {
 *         unregister_trace_<call>(ftrace_raw_event_<call>);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *         .trace = ftrace_raw_output_<call>, <-- stage 3
 * };
 *
 * static struct ftrace_event_call __used
 * __attribute__((__aligned__(4)))
 * __attribute__((section("_ftrace_events"))) event_<call> = {
 *         .name       = "<call>",
 *         .system     = "<system>",
 *         .raw_init   = trace_event_raw_init,
 *         .regfunc    = ftrace_reg_event_<call>,
 *         .unregfunc  = ftrace_unreg_event_<call>,
 * }
 *
 */
#ifdef CONFIG_EVENT_PROFILE

#define _TRACE_PROFILE_INIT(call) \
        .profile_enable = ftrace_profile_enable_##call, \
        .profile_disable = ftrace_profile_disable_##call,

#else
#define _TRACE_PROFILE_INIT(call)
#endif
#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len) \
        __entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __assign_str
#define __assign_str(dst, src) \
        strcpy(__get_str(dst), src);

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef TP_perf_assign
#define TP_perf_assign(args...)
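/*
 * To make the two halves concrete (hypothetical foo_bar again): inside
 * ftrace_raw_event_id_<call> below, the tstruct pass stores the offset
 * computed by ftrace_get_offsets_foo_bar() into __entry->__data_loc_name,
 * and then __assign_str(name, name) in the { assign; } block strcpy()s
 * the string into that dynamic area via __get_str()/__get_dynamic_array().
 */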
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
\
static notrace void \
ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
                           proto) \
{ \
        struct ftrace_data_offsets_##call __maybe_unused __data_offsets; \
        struct ring_buffer_event *event; \
        struct ftrace_raw_##call *entry; \
        struct ring_buffer *buffer; \
        unsigned long irq_flags; \
        int __data_size; \
        int pc; \
\
        local_save_flags(irq_flags); \
        pc = preempt_count(); \
\
        __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
\
        event = trace_current_buffer_lock_reserve(&buffer, \
                                 event_call->id, \
                                 sizeof(*entry) + __data_size, \
                                 irq_flags, pc); \
        if (!event) \
                return; \
        entry = ring_buffer_event_data(event); \
\
        tstruct \
\
        { assign; } \
\
        if (!filter_current_check_discard(buffer, event_call, entry, event)) \
                trace_nowake_buffer_unlock_commit(buffer, \
                                                  event, irq_flags, pc); \
}
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args) \
\
static notrace void ftrace_raw_event_##call(proto) \
{ \
        ftrace_raw_event_id_##template(&event_##call, args); \
} \
\
static notrace int \
ftrace_raw_reg_event_##call(struct ftrace_event_call *unused) \
{ \
        return register_trace_##call(ftrace_raw_event_##call); \
} \
\
static notrace void \
ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused) \
{ \
        unregister_trace_##call(ftrace_raw_event_##call); \
} \
\
static struct trace_event ftrace_event_type_##call = { \
        .trace = ftrace_raw_output_##call, \
};
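/*
 * For DEFINE_EVENT(foo_bar, foo_bar, ...) (the hypothetical event used
 * throughout these notes), this emits roughly:
 *
 *         static notrace void ftrace_raw_event_foo_bar(const char *name, int value)
 *         {
 *                 ftrace_raw_event_id_foo_bar(&event_foo_bar, name, value);
 *         }
 *
 * plus the register/unregister helpers and a trace_event that wires the
 * stage 3 output function in as .trace.
 */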
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#undef __entry
#define __entry REC

#undef __print_flags
#undef __print_symbolic
#undef __get_dynamic_array
#undef __get_str

#undef TP_printk
#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)
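/*
 * E.g. (hypothetical foo_bar): TP_printk("value=%d", __entry->value) now
 * stringifies to the literal
 *
 *         "\"value=%d\", REC->value"
 *
 * since __entry is REC here; this is the format string exposed to
 * userspace so it knows how to print the binary record.
 */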
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static const char print_fmt_##call[] = print;

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args) \
\
static struct ftrace_event_call __used \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_events"))) event_##call = { \
        .name          = #call, \
        .system        = __stringify(TRACE_SYSTEM), \
        .event         = &ftrace_event_type_##call, \
        .raw_init      = trace_event_raw_init, \
        .regfunc       = ftrace_raw_reg_event_##call, \
        .unregfunc     = ftrace_raw_unreg_event_##call, \
        .print_fmt     = print_fmt_##template, \
        .define_fields = ftrace_define_fields_##template, \
        _TRACE_PROFILE_INIT(call) \
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
\
static const char print_fmt_##call[] = print; \
\
static struct ftrace_event_call __used \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_events"))) event_##call = { \
        .name          = #call, \
        .system        = __stringify(TRACE_SYSTEM), \
        .event         = &ftrace_event_type_##call, \
        .raw_init      = trace_event_raw_init, \
        .regfunc       = ftrace_raw_reg_event_##call, \
        .unregfunc     = ftrace_raw_unreg_event_##call, \
        .print_fmt     = print_fmt_##call, \
        .define_fields = ftrace_define_fields_##template, \
        _TRACE_PROFILE_INIT(call) \
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
 * Define the insertion callback to profile events
 *
 * The job is very similar to ftrace_raw_event_<call> except that we don't
 * insert into the ring buffer but into a perf buffer.
 *
 * static void ftrace_profile_<call>(proto)
 * {
 *         struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *         struct ftrace_event_call *event_call = &event_<call>;
 *         extern void perf_tp_event(int, u64, u64, void *, int);
 *         struct ftrace_raw_<call> *entry;
 *         u64 __addr = 0, __count = 1;
 *         unsigned long irq_flags;
 *         struct trace_entry *ent;
 *         int __entry_size;
 *         int __data_size;
 *         char *trace_buf;
 *         char *raw_data;
 *         int __cpu;
 *         int rctx;
 *         int pc;
 *
 *         pc = preempt_count();
 *
 *         __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *         // Below we want to get the aligned size by taking into account
 *         // the u32 field that will later store the buffer size
 *         __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
 *                              sizeof(u64));
 *         __entry_size -= sizeof(u32);
 *
 *         // Protect the non nmi buffer
 *         // This also protects the rcu read side
 *         local_irq_save(irq_flags);
 *
 *         // Avoid recursion from perf that could mess up the buffer
 *         rctx = perf_swevent_get_recursion_context();
 *         if (rctx < 0)
 *                 goto end_recursion;
 *
 *         __cpu = smp_processor_id();
 *
 *         if (in_nmi())
 *                 trace_buf = rcu_dereference(perf_trace_buf_nmi);
 *         else
 *                 trace_buf = rcu_dereference(perf_trace_buf);
 *
 *         if (!trace_buf)
 *                 goto end;
 *
 *         raw_data = per_cpu_ptr(trace_buf, __cpu);
 *
 *         // zero dead bytes from alignment to avoid leaking them to userspace:
 *         *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
 *         entry = (struct ftrace_raw_<call> *)raw_data;
 *         ent = &entry->ent;
 *         tracing_generic_entry_update(ent, irq_flags, pc);
 *         ent->type = event_call->id;
 *
 *         <tstruct> <- do some jobs with dynamic arrays
 *
 *         <assign>  <- assign our values
 *
 *         perf_tp_event(event_call->id, __addr, __count, entry,
 *                       __entry_size); <- submit them to the perf counter
 *
 * }
 */
#ifdef CONFIG_EVENT_PROFILE

#undef __entry
#define __entry entry

#undef __get_dynamic_array
#define __get_dynamic_array(field) \
        ((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __perf_addr
#define __perf_addr(a) __addr = (a)

#undef __perf_count
#define __perf_count(c) __count = (c)
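/*
 * Sketch of intent: __perf_addr()/__perf_count() let an event override
 * the defaults (__addr = 0, __count = 1) that the template below hands
 * to perf_tp_event(); e.g. a scheduler stat event could report a delay
 * as the perf sample count via __perf_count(delay). (Example invented
 * for illustration.)
 */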
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static notrace void \
ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \
                            proto) \
{ \
        struct ftrace_data_offsets_##call __maybe_unused __data_offsets; \
        extern int perf_swevent_get_recursion_context(void); \
        extern void perf_swevent_put_recursion_context(int rctx); \
        extern void perf_tp_event(int, u64, u64, void *, int); \
        struct ftrace_raw_##call *entry; \
        u64 __addr = 0, __count = 1; \
        unsigned long irq_flags; \
        struct trace_entry *ent; \
        int __entry_size; \
        int __data_size; \
        char *trace_buf; \
        char *raw_data; \
        int __cpu; \
        int rctx; \
        int pc; \
\
        pc = preempt_count(); \
\
        __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
        __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32), \
                             sizeof(u64)); \
        __entry_size -= sizeof(u32); \
\
        if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE, \
                      "profile buffer not large enough")) \
                return; \
\
        local_irq_save(irq_flags); \
\
        rctx = perf_swevent_get_recursion_context(); \
        if (rctx < 0) \
                goto end_recursion; \
\
        __cpu = smp_processor_id(); \
\
        if (in_nmi()) \
                trace_buf = rcu_dereference(perf_trace_buf_nmi); \
        else \
                trace_buf = rcu_dereference(perf_trace_buf); \
\
        if (!trace_buf) \
                goto end; \
\
        raw_data = per_cpu_ptr(trace_buf, __cpu); \
\
        *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; \
        entry = (struct ftrace_raw_##call *)raw_data; \
        ent = &entry->ent; \
        tracing_generic_entry_update(ent, irq_flags, pc); \
        ent->type = event_call->id; \
\
        tstruct \
\
        { assign; } \
\
        perf_tp_event(event_call->id, __addr, __count, entry, \
                      __entry_size); \
\
end: \
        perf_swevent_put_recursion_context(rctx); \
end_recursion: \
        local_irq_restore(irq_flags); \
}
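/*
 * Worked sketch of the size math above (numbers invented): with
 * sizeof(*entry) == 12 and __data_size == 4, ALIGN(12 + 4 + 4, 8) == 24,
 * so __entry_size == 20: the record plus the trailing u32 that perf uses
 * for the buffer size, rounded so the total stays u64-aligned. That is
 * also why the last u64 of raw_data is zeroed first: the alignment
 * padding must not leak uninitialized bytes to userspace.
 */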
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args) \
static notrace void ftrace_profile_##call(proto) \
{ \
        struct ftrace_event_call *event_call = &event_##call; \
\
        ftrace_profile_templ_##template(event_call, args); \
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_EVENT_PROFILE */

#undef _TRACE_PROFILE_INIT