ftrace.h
/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct ftrace_raw_<call> {
 *        struct trace_entry ent;
 *        <type> <item>;
 *        <type2> <item2>[<len>];
 *        [...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */

#include <linux/ftrace_event.h>

/*
 * DECLARE_EVENT_CLASS can be used to add generic function handlers
 * for events. That is, if all events have the same parameters and
 * differ only in their tracepoints. Each tracepoint can then be
 * defined with DEFINE_EVENT, which maps the DECLARE_EVENT_CLASS to
 * the tracepoint.
 *
 * TRACE_EVENT is a one-to-one mapping between tracepoint and template.
 */
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
        DECLARE_EVENT_CLASS(name, \
                            PARAMS(proto), \
                            PARAMS(args), \
                            PARAMS(tstruct), \
                            PARAMS(assign), \
                            PARAMS(print)); \
        DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));

#undef __field
#define __field(type, item) type item;

#undef __field_ext
#define __field_ext(type, item, filter_type) type item;

#undef __array
#define __array(type, item, len) type item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \
struct ftrace_raw_##name { \
        struct trace_entry ent; \
        tstruct \
        char __data[0]; \
};

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args) \
static struct ftrace_event_call event_##name

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#undef __cpparg
#define __cpparg(arg...) arg

/* Callbacks are meaningless to ftrace. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct, \
                assign, print, reg, unreg) \
        TRACE_EVENT(name, __cpparg(proto), __cpparg(args), \
                __cpparg(tstruct), __cpparg(assign), __cpparg(print))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
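
/*
 * Editor's illustration (not part of the original header): given a
 * hypothetical event such as
 *
 *        TRACE_EVENT(foo_bar,
 *                TP_PROTO(int bar, const char *str),
 *                TP_ARGS(bar, str),
 *                TP_STRUCT__entry(
 *                        __field(int, bar)
 *                        __string(msg, str)
 *                ),
 *                TP_fast_assign(
 *                        __entry->bar = bar;
 *                        __assign_str(msg, str);
 *                ),
 *                TP_printk("bar=%d msg=%s", __entry->bar, __get_str(msg))
 *        );
 *
 * stage 1 would expand TP_STRUCT__entry into:
 *
 *        struct ftrace_raw_foo_bar {
 *                struct trace_entry ent;
 *                int bar;                // __field(int, bar)
 *                u32 __data_loc_msg;     // __string(msg, str)
 *                char __data[0];
 *        };
 *
 * The same hypothetical foo_bar event is reused by the illustrations below.
 */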

/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct ftrace_data_offsets_<call> {
 *        u32 <item1>;
 *        u32 <item2>;
 *        [...]
 * };
 *
 * The __dynamic_array() macro creates each u32 <item>; it records
 * the offset of each array from the beginning of the event.
 * The size of an array is also encoded, in the upper 16 bits of <item>.
 */

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
struct ftrace_data_offsets_##call { \
        tstruct; \
};

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
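
/*
 * Continuing the hypothetical foo_bar event from the stage 1 note:
 * only dynamic arrays and strings produce members here (__field and
 * __array expand to nothing at this stage), so stage 2 would generate:
 *
 *        struct ftrace_data_offsets_foo_bar {
 *                u32 msg;  // offset in low 16 bits, length in high 16 bits
 *        };
 */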

/*
 * Set up the format used to show the trace point:
 *
 * int
 * ftrace_format_##call(struct trace_seq *s)
 * {
 *        struct ftrace_raw_##call field;
 *        int ret;
 *
 *        ret = trace_seq_printf(s, #type " " #item ";"
 *                               " offset:%u; size:%u;\n",
 *                               offsetof(struct ftrace_raw_##call, item),
 *                               sizeof(field.type));
 *
 * }
 */

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef __field
#define __field(type, item) \
        ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \
                               "offset:%u;\tsize:%u;\tsigned:%u;\n", \
                               (unsigned int)offsetof(typeof(field), item), \
                               (unsigned int)sizeof(field.item), \
                               (unsigned int)is_signed_type(type)); \
        if (!ret) \
                return 0;

#undef __field_ext
#define __field_ext(type, item, filter_type) __field(type, item)

#undef __array
#define __array(type, item, len) \
        ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
                               "offset:%u;\tsize:%u;\tsigned:%u;\n", \
                               (unsigned int)offsetof(typeof(field), item), \
                               (unsigned int)sizeof(field.item), \
                               (unsigned int)is_signed_type(type)); \
        if (!ret) \
                return 0;

#undef __dynamic_array
#define __dynamic_array(type, item, len) \
        ret = trace_seq_printf(s, "\tfield:__data_loc " #type "[] " #item ";\t" \
                               "offset:%u;\tsize:%u;\tsigned:%u;\n", \
                               (unsigned int)offsetof(typeof(field), \
                                                      __data_loc_##item), \
                               (unsigned int)sizeof(field.__data_loc_##item), \
                               (unsigned int)is_signed_type(type)); \
        if (!ret) \
                return 0;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __entry
#define __entry REC

#undef __print_symbolic
#undef __get_dynamic_array
#undef __get_str

#undef TP_printk
#define TP_printk(fmt, args...) "\"%s\", %s\n", fmt, __stringify(args)

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef TP_perf_assign
#define TP_perf_assign(args...)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \
static int \
ftrace_format_setup_##call(struct ftrace_event_call *unused, \
                           struct trace_seq *s) \
{ \
        struct ftrace_raw_##call field __attribute__((unused)); \
        int ret = 0; \
\
        tstruct; \
\
        return ret; \
} \
\
static int \
ftrace_format_##call(struct ftrace_event_call *unused, \
                     struct trace_seq *s) \
{ \
        int ret = 0; \
\
        ret = ftrace_format_setup_##call(unused, s); \
        if (!ret) \
                return ret; \
\
        ret = trace_seq_printf(s, "\nprint fmt: " print); \
\
        return ret; \
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
static int \
ftrace_format_##name(struct ftrace_event_call *unused, \
                     struct trace_seq *s) \
{ \
        int ret = 0; \
\
        ret = ftrace_format_setup_##template(unused, s); \
        if (!ret) \
                return ret; \
\
        trace_seq_printf(s, "\nprint fmt: " print); \
\
        return ret; \
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
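
/*
 * Editor's illustration: for the hypothetical foo_bar event,
 * ftrace_format_foo_bar() would emit text along these lines into the
 * event's "format" file (the offset/size/signed values depend on the
 * architecture and on sizeof(struct trace_entry); the numbers below
 * are illustrative only):
 *
 *        field:int bar;  offset:12;      size:4; signed:1;
 *        field:__data_loc char[] msg;    offset:16;      size:4; signed:1;
 *
 *        print fmt: "bar=%d msg=%s", REC->bar, __get_str(msg)
 *
 * The print fmt line comes from TP_printk() above: __entry is rewritten
 * to REC, and the argument list is stringified verbatim (which is why
 * __get_str and friends are #undef'd here, so they survive as text).
 */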

/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *        struct trace_seq *s = &iter->seq;
 *        struct ftrace_raw_<call> *field;        <-- defined in stage 1
 *        struct trace_entry *entry;
 *        struct trace_seq *p;
 *        int ret;
 *
 *        entry = iter->ent;
 *
 *        if (entry->type != event_<call>.id) {
 *                WARN_ON_ONCE(1);
 *                return TRACE_TYPE_UNHANDLED;
 *        }
 *
 *        field = (typeof(field))entry;
 *
 *        p = &get_cpu_var(ftrace_event_seq);
 *        trace_seq_init(p);
 *        ret = trace_seq_printf(s, <TP_printk> "\n");
 *        put_cpu();
 *        if (!ret)
 *                return TRACE_TYPE_PARTIAL_LINE;
 *
 *        return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */

#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field) \
                ((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __print_flags
#define __print_flags(flag, delim, flag_array...) \
        ({ \
                static const struct trace_print_flags __flags[] = \
                        { flag_array, { -1, NULL }}; \
                ftrace_print_flags_seq(p, delim, flag, __flags); \
        })

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...) \
        ({ \
                static const struct trace_print_flags symbols[] = \
                        { symbol_array, { -1, NULL }}; \
                ftrace_print_symbols_seq(p, value, symbols); \
        })

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static enum print_line_t \
ftrace_raw_output_id_##call(int event_id, const char *name, \
                            struct trace_iterator *iter, int flags) \
{ \
        struct trace_seq *s = &iter->seq; \
        struct ftrace_raw_##call *field; \
        struct trace_entry *entry; \
        struct trace_seq *p; \
        int ret; \
\
        entry = iter->ent; \
\
        if (entry->type != event_id) { \
                WARN_ON_ONCE(1); \
                return TRACE_TYPE_UNHANDLED; \
        } \
\
        field = (typeof(field))entry; \
\
        p = &get_cpu_var(ftrace_event_seq); \
        trace_seq_init(p); \
        ret = trace_seq_printf(s, "%s: ", name); \
        if (ret) \
                ret = trace_seq_printf(s, print); \
        put_cpu(); \
        if (!ret) \
                return TRACE_TYPE_PARTIAL_LINE; \
\
        return TRACE_TYPE_HANDLED; \
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args) \
static enum print_line_t \
ftrace_raw_output_##name(struct trace_iterator *iter, int flags) \
{ \
        return ftrace_raw_output_id_##template(event_##name.id, \
                                               #name, iter, flags); \
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
static enum print_line_t \
ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
{ \
        struct trace_seq *s = &iter->seq; \
        struct ftrace_raw_##template *field; \
        struct trace_entry *entry; \
        struct trace_seq *p; \
        int ret; \
\
        entry = iter->ent; \
\
        if (entry->type != event_##call.id) { \
                WARN_ON_ONCE(1); \
                return TRACE_TYPE_UNHANDLED; \
        } \
\
        field = (typeof(field))entry; \
\
        p = &get_cpu_var(ftrace_event_seq); \
        trace_seq_init(p); \
        ret = trace_seq_printf(s, "%s: ", #call); \
        if (ret) \
                ret = trace_seq_printf(s, print); \
        put_cpu(); \
        if (!ret) \
                return TRACE_TYPE_PARTIAL_LINE; \
\
        return TRACE_TYPE_HANDLED; \
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
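
/*
 * Editor's illustration: with __entry mapped to "field" above, the
 * TP_printk() of the hypothetical foo_bar event expands inside
 * ftrace_raw_output_id_foo_bar to roughly:
 *
 *        ret = trace_seq_printf(s, "bar=%d msg=%s\n", field->bar,
 *                        (char *)((void *)field +
 *                                 (field->__data_loc_msg & 0xffff)));
 *
 * i.e. __get_str(msg) resolves the string through the offset stored in
 * the low 16 bits of __data_loc_msg.
 */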

#undef __field_ext
#define __field_ext(type, item, filter_type) \
        ret = trace_define_field(event_call, #type, #item, \
                                 offsetof(typeof(field), item), \
                                 sizeof(field.item), \
                                 is_signed_type(type), filter_type); \
        if (ret) \
                return ret;

#undef __field
#define __field(type, item) __field_ext(type, item, FILTER_OTHER)

#undef __array
#define __array(type, item, len) \
        BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
        ret = trace_define_field(event_call, #type "[" #len "]", #item, \
                                 offsetof(typeof(field), item), \
                                 sizeof(field.item), \
                                 is_signed_type(type), FILTER_OTHER); \
        if (ret) \
                return ret;

#undef __dynamic_array
#define __dynamic_array(type, item, len) \
        ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
                                 offsetof(typeof(field), __data_loc_##item), \
                                 sizeof(field.__data_loc_##item), \
                                 is_signed_type(type), FILTER_OTHER);

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \
static int \
ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
{ \
        struct ftrace_raw_##call field; \
        int ret; \
\
        tstruct; \
\
        return ret; \
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
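
/*
 * Editor's illustration: inside ftrace_define_fields_foo_bar() for the
 * hypothetical foo_bar event, __field(int, bar) expands (via __field_ext)
 * to:
 *
 *        ret = trace_define_field(event_call, "int", "bar",
 *                                 offsetof(typeof(field), bar),
 *                                 sizeof(field.bar),
 *                                 is_signed_type(int), FILTER_OTHER);
 *        if (ret)
 *                return ret;
 *
 * which is what registers the field with the event filter code.
 */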

/*
 * Remember the offset of each array from the beginning of the event.
 */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len) \
        __data_offsets->item = __data_size + \
                               offsetof(typeof(*entry), __data); \
        __data_offsets->item |= (len * sizeof(type)) << 16; \
        __data_size += (len) * sizeof(type);

#undef __string
#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static inline int ftrace_get_offsets_##call( \
        struct ftrace_data_offsets_##call *__data_offsets, proto) \
{ \
        int __data_size = 0; \
        struct ftrace_raw_##call __maybe_unused *entry; \
\
        tstruct; \
\
        return __data_size; \
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
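
/*
 * Worked example (editor's note): for the hypothetical foo_bar event with
 * str == "hello", __string(msg, str) becomes __dynamic_array(char, msg, 6)
 * (strlen + 1). Assuming offsetof(struct ftrace_raw_foo_bar, __data) == 20,
 * ftrace_get_offsets_foo_bar() computes:
 *
 *        __data_offsets->msg  = 0 + 20;          // offset of the string
 *        __data_offsets->msg |= (6 * 1) << 16;   // length in the high 16 bits
 *        __data_size += 6;                       // total dynamic payload
 *
 * so __data_loc_msg ends up as (6 << 16) | 20 == 0x60014.
 */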

#ifdef CONFIG_EVENT_PROFILE

/*
 * Generate the functions needed for tracepoint perf_event support.
 *
 * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later.
 *
 * static int ftrace_profile_enable_<call>(void)
 * {
 *        return register_trace_<call>(ftrace_profile_<call>);
 * }
 *
 * static void ftrace_profile_disable_<call>(void)
 * {
 *        unregister_trace_<call>(ftrace_profile_<call>);
 * }
 */

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args) \
\
static void ftrace_profile_##name(proto); \
\
static int ftrace_profile_enable_##name(struct ftrace_event_call *unused) \
{ \
        return register_trace_##name(ftrace_profile_##name); \
} \
\
static void ftrace_profile_disable_##name(struct ftrace_event_call *unused) \
{ \
        unregister_trace_##name(ftrace_profile_##name); \
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#endif /* CONFIG_EVENT_PROFILE */

/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * static void ftrace_event_<call>(proto)
 * {
 *        event_trace_printk(_RET_IP_, "<call>: " <fmt>);
 * }
 *
 * static int ftrace_reg_event_<call>(struct ftrace_event_call *unused)
 * {
 *        return register_trace_<call>(ftrace_event_<call>);
 * }
 *
 * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
 * {
 *        unregister_trace_<call>(ftrace_event_<call>);
 * }
 *
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(proto)
 * {
 *        struct ring_buffer_event *event;
 *        struct ftrace_raw_<call> *entry;        <-- defined in stage 1
 *        struct ring_buffer *buffer;
 *        unsigned long irq_flags;
 *        int pc;
 *
 *        local_save_flags(irq_flags);
 *        pc = preempt_count();
 *
 *        event = trace_current_buffer_lock_reserve(&buffer,
 *                                event_<call>.id,
 *                                sizeof(struct ftrace_raw_<call>),
 *                                irq_flags, pc);
 *        if (!event)
 *                return;
 *        entry = ring_buffer_event_data(event);
 *
 *        <assign>;  <-- Here we assign the entries by the __field and
 *                       __array macros.
 *
 *        trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
 * }
 *
 * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused)
 * {
 *        int ret;
 *
 *        ret = register_trace_<call>(ftrace_raw_event_<call>);
 *        if (ret)
 *                pr_info("event trace: Could not activate trace point "
 *                        "probe to <call>");
 *        return ret;
 * }
 *
 * static void ftrace_raw_unreg_event_<call>(struct ftrace_event_call *unused)
 * {
 *        unregister_trace_<call>(ftrace_raw_event_<call>);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *        .trace = ftrace_raw_output_<call>,        <-- stage 3
 * };
 *
 * static struct ftrace_event_call __used
 * __attribute__((__aligned__(4)))
 * __attribute__((section("_ftrace_events"))) event_<call> = {
 *        .name       = "<call>",
 *        .system     = "<system>",
 *        .raw_init   = trace_event_raw_init,
 *        .regfunc    = ftrace_reg_event_<call>,
 *        .unregfunc  = ftrace_unreg_event_<call>,
 *        .show_format = ftrace_format_<call>,
 * }
 */

#ifdef CONFIG_EVENT_PROFILE

#define _TRACE_PROFILE_INIT(call) \
        .profile_enable = ftrace_profile_enable_##call, \
        .profile_disable = ftrace_profile_disable_##call,

#else
#define _TRACE_PROFILE_INIT(call)
#endif /* CONFIG_EVENT_PROFILE */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len) \
        __entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __assign_str
#define __assign_str(dst, src) \
        strcpy(__get_str(dst), src);

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
\
static void ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
                                       proto) \
{ \
        struct ftrace_data_offsets_##call __maybe_unused __data_offsets; \
        struct ring_buffer_event *event; \
        struct ftrace_raw_##call *entry; \
        struct ring_buffer *buffer; \
        unsigned long irq_flags; \
        int __data_size; \
        int pc; \
\
        local_save_flags(irq_flags); \
        pc = preempt_count(); \
\
        __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
\
        event = trace_current_buffer_lock_reserve(&buffer, \
                                                  event_call->id, \
                                                  sizeof(*entry) + __data_size, \
                                                  irq_flags, pc); \
        if (!event) \
                return; \
        entry = ring_buffer_event_data(event); \
\
        tstruct \
\
        { assign; } \
\
        if (!filter_current_check_discard(buffer, event_call, entry, event)) \
                trace_nowake_buffer_unlock_commit(buffer, \
                                                  event, irq_flags, pc); \
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args) \
\
static void ftrace_raw_event_##call(proto) \
{ \
        ftrace_raw_event_id_##template(&event_##call, args); \
} \
\
static int ftrace_raw_reg_event_##call(struct ftrace_event_call *unused) \
{ \
        return register_trace_##call(ftrace_raw_event_##call); \
} \
\
static void ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused) \
{ \
        unregister_trace_##call(ftrace_raw_event_##call); \
} \
\
static struct trace_event ftrace_event_type_##call = { \
        .trace = ftrace_raw_output_##call, \
};

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
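
/*
 * Editor's illustration of the resulting call chain for the hypothetical
 * foo_bar event once the probe is registered:
 *
 *        trace_foo_bar(bar, str)                  // tracepoint fires
 *          -> ftrace_raw_event_foo_bar(bar, str)  // probe from DEFINE_EVENT
 *            -> ftrace_raw_event_id_foo_bar(&event_foo_bar, bar, str)
 *
 * which reserves sizeof(*entry) + __data_size bytes in the ring buffer,
 * runs the tstruct and assign blocks, and commits the event unless the
 * filter discards it.
 */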

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args) \
\
static struct ftrace_event_call __used \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_events"))) event_##call = { \
        .name = #call, \
        .system = __stringify(TRACE_SYSTEM), \
        .event = &ftrace_event_type_##call, \
        .raw_init = trace_event_raw_init, \
        .regfunc = ftrace_raw_reg_event_##call, \
        .unregfunc = ftrace_raw_unreg_event_##call, \
        .show_format = ftrace_format_##template, \
        .define_fields = ftrace_define_fields_##template, \
        _TRACE_PROFILE_INIT(call) \
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
\
static struct ftrace_event_call __used \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_events"))) event_##call = { \
        .name = #call, \
        .system = __stringify(TRACE_SYSTEM), \
        .event = &ftrace_event_type_##call, \
        .raw_init = trace_event_raw_init, \
        .regfunc = ftrace_raw_reg_event_##call, \
        .unregfunc = ftrace_raw_unreg_event_##call, \
        .show_format = ftrace_format_##call, \
        .define_fields = ftrace_define_fields_##template, \
        _TRACE_PROFILE_INIT(call) \
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
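
/*
 * Editor's illustration: a TRACE_EVENT()-defined event passes the same
 * name for template and call (see stage 1), so for the hypothetical
 * foo_bar event the DEFINE_EVENT case above would emit:
 *
 *        static struct ftrace_event_call __used
 *        __attribute__((__aligned__(4)))
 *        __attribute__((section("_ftrace_events"))) event_foo_bar = {
 *                .name = "foo_bar",
 *                .system = __stringify(TRACE_SYSTEM),
 *                .event = &ftrace_event_type_foo_bar,
 *                .raw_init = trace_event_raw_init,
 *                .regfunc = ftrace_raw_reg_event_foo_bar,
 *                .unregfunc = ftrace_raw_unreg_event_foo_bar,
 *                .show_format = ftrace_format_foo_bar,
 *                .define_fields = ftrace_define_fields_foo_bar,
 *        }
 *
 * The section("_ftrace_events") attribute is what lets the core iterate
 * over all events at boot.
 */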

/*
 * Define the insertion callback to profile events
 *
 * The job is very similar to ftrace_raw_event_<call> except that we don't
 * insert into the ring buffer but into a perf counter.
 *
 * static void ftrace_profile_<call>(proto)
 * {
 *        struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *        struct ftrace_event_call *event_call = &event_<call>;
 *        extern void perf_tp_event(int, u64, u64, void *, int);
 *        struct ftrace_raw_<call> *entry;
 *        struct perf_trace_buf *trace_buf;
 *        u64 __addr = 0, __count = 1;
 *        unsigned long irq_flags;
 *        struct trace_entry *ent;
 *        int __entry_size;
 *        int __data_size;
 *        int __cpu;
 *        int pc;
 *
 *        pc = preempt_count();
 *
 *        __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *        // Below we want to get the aligned size by taking into account
 *        // the u32 field that will later store the buffer size
 *        __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
 *                             sizeof(u64));
 *        __entry_size -= sizeof(u32);
 *
 *        // Protect the non-NMI buffer
 *        // This also protects the rcu read side
 *        local_irq_save(irq_flags);
 *        __cpu = smp_processor_id();
 *
 *        if (in_nmi())
 *                trace_buf = rcu_dereference(perf_trace_buf_nmi);
 *        else
 *                trace_buf = rcu_dereference(perf_trace_buf);
 *
 *        if (!trace_buf)
 *                goto end;
 *
 *        trace_buf = per_cpu_ptr(trace_buf, __cpu);
 *
 *        // Avoid recursion from perf that could mess up the buffer
 *        if (trace_buf->recursion++)
 *                goto end_recursion;
 *
 *        raw_data = trace_buf->buf;
 *
 *        // Make recursion update visible before entering perf_tp_event
 *        // so that we protect from perf recursions.
 *
 *        barrier();
 *
 *        // Zero dead bytes from alignment to avoid stack leak to userspace:
 *        *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
 *        entry = (struct ftrace_raw_<call> *)raw_data;
 *        ent = &entry->ent;
 *        tracing_generic_entry_update(ent, irq_flags, pc);
 *        ent->type = event_call->id;
 *
 *        <tstruct>  <- do some jobs with dynamic arrays
 *
 *        <assign>   <- assign our values
 *
 *        perf_tp_event(event_call->id, __addr, __count, entry,
 *                      __entry_size);  <- submit them to perf counter
 * }
 */

#ifdef CONFIG_EVENT_PROFILE

#undef __perf_addr
#define __perf_addr(a) __addr = (a)

#undef __perf_count
#define __perf_count(c) __count = (c)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static void \
ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \
                            proto) \
{ \
        struct ftrace_data_offsets_##call __maybe_unused __data_offsets; \
        extern int perf_swevent_get_recursion_context(void); \
        extern void perf_swevent_put_recursion_context(int rctx); \
        extern void perf_tp_event(int, u64, u64, void *, int); \
        struct ftrace_raw_##call *entry; \
        u64 __addr = 0, __count = 1; \
        unsigned long irq_flags; \
        struct trace_entry *ent; \
        int __entry_size; \
        int __data_size; \
        char *trace_buf; \
        char *raw_data; \
        int __cpu; \
        int rctx; \
        int pc; \
\
        pc = preempt_count(); \
\
        __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
        __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32), \
                             sizeof(u64)); \
        __entry_size -= sizeof(u32); \
\
        if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE, \
                      "profile buffer not large enough")) \
                return; \
\
        local_irq_save(irq_flags); \
\
        rctx = perf_swevent_get_recursion_context(); \
        if (rctx < 0) \
                goto end_recursion; \
\
        __cpu = smp_processor_id(); \
\
        if (in_nmi()) \
                trace_buf = rcu_dereference(perf_trace_buf_nmi); \
        else \
                trace_buf = rcu_dereference(perf_trace_buf); \
\
        if (!trace_buf) \
                goto end; \
\
        raw_data = per_cpu_ptr(trace_buf, __cpu); \
\
        *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; \
        entry = (struct ftrace_raw_##call *)raw_data; \
        ent = &entry->ent; \
        tracing_generic_entry_update(ent, irq_flags, pc); \
        ent->type = event_call->id; \
\
        tstruct \
\
        { assign; } \
\
        perf_tp_event(event_call->id, __addr, __count, entry, \
                      __entry_size); \
\
end: \
        perf_swevent_put_recursion_context(rctx); \
end_recursion: \
        local_irq_restore(irq_flags); \
}
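
/*
 * Worked example (editor's note) of the size computation above: for the
 * hypothetical foo_bar event, assume sizeof(*entry) == 20 and
 * __data_size == 6. Then:
 *
 *        __entry_size = ALIGN(6 + 20 + 4, 8) - 4
 *                     = ALIGN(30, 8) - 4
 *                     = 32 - 4 = 28
 *
 * The final u64 write at raw_data[__entry_size - 8] zeroes the alignment
 * padding so no uninitialized bytes reach userspace.
 */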

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args) \
static void ftrace_profile_##call(proto) \
{ \
        struct ftrace_event_call *event_call = &event_##call; \
\
        ftrace_profile_templ_##template(event_call, args); \
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_EVENT_PROFILE */

#undef _TRACE_PROFILE_INIT