ftrace.h

/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct ftrace_raw_<call> {
 *        struct trace_entry ent;
 *        <type> <item>;
 *        <type2> <item2>[<len>];
 *        [...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */
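/*
 * For illustration, a hypothetical event (the name "foo_bar" and its fields
 * are made up for this example) defined as
 *
 *        TRACE_EVENT(foo_bar,
 *                TP_PROTO(int cpu, const char *name),
 *                TP_ARGS(cpu, name),
 *                TP_STRUCT__entry(
 *                        __field(int, cpu)
 *                        __string(name, name)
 *                ),
 *                TP_fast_assign(
 *                        __entry->cpu = cpu;
 *                        __assign_str(name, name);
 *                ),
 *                TP_printk("cpu=%d name=%s", __entry->cpu, __get_str(name)));
 *
 * would pass through this stage and produce, roughly:
 *
 *        struct ftrace_raw_foo_bar {
 *                struct trace_entry ent;
 *                int cpu;
 *                u32 __data_loc_name;
 *                char __data[0];
 *        };
 */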
#include <linux/ftrace_event.h>

/*
 * DECLARE_EVENT_CLASS can be used to add generic function
 * handlers for events. That is, if all events have the same
 * parameters and just have distinct trace points.
 * Each tracepoint can be defined with DEFINE_EVENT and that
 * will map the DECLARE_EVENT_CLASS to the tracepoint.
 *
 * TRACE_EVENT is a one to one mapping between tracepoint and template.
 */
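/*
 * For illustration, two hypothetical events sharing one class (the names are
 * made up) such as
 *
 *        DECLARE_EVENT_CLASS(foo_template,
 *                TP_PROTO(int cpu), TP_ARGS(cpu), ...);
 *        DEFINE_EVENT(foo_template, foo_start, TP_PROTO(int cpu), TP_ARGS(cpu));
 *        DEFINE_EVENT(foo_template, foo_end, TP_PROTO(int cpu), TP_ARGS(cpu));
 *
 * generate one set of class functions (for foo_template) and two tracepoint
 * instances (foo_start, foo_end) that reuse them, while TRACE_EVENT(name, ...)
 * below is simply DECLARE_EVENT_CLASS(name, ...) followed by
 * DEFINE_EVENT(name, name, ...).
 */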
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
        DECLARE_EVENT_CLASS(name, \
                            PARAMS(proto), \
                            PARAMS(args), \
                            PARAMS(tstruct), \
                            PARAMS(assign), \
                            PARAMS(print)); \
        DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));

#undef __field
#define __field(type, item) type item;

#undef __field_ext
#define __field_ext(type, item, filter_type) type item;

#undef __array
#define __array(type, item, len) type item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \
        struct ftrace_raw_##name { \
                struct trace_entry ent; \
                tstruct \
                char __data[0]; \
        };

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args) \
        static struct ftrace_event_call event_##name

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#undef __cpparg
#define __cpparg(arg...) arg
/* Callbacks are meaningless to ftrace. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct, \
                assign, print, reg, unreg) \
        TRACE_EVENT(name, __cpparg(proto), __cpparg(args), \
                __cpparg(tstruct), __cpparg(assign), __cpparg(print))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct ftrace_data_offsets_<call> {
 *        u32 <item1>;
 *        u32 <item2>;
 *        [...]
 * };
 *
 * The __dynamic_array() macro creates each u32 <item>; this is used to
 * keep the offset of each array from the beginning of the event.
 * The size of an array is also encoded, in the upper 16 bits of <item>.
 */
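/*
 * For illustration, with the hypothetical foo_bar event from stage 1: "name"
 * is its only dynamic field, so if the copied string (including its '\0') is
 * 4 bytes long, the offset helpers generated further down compute
 *
 *        (4 << 16) | offsetof(struct ftrace_raw_foo_bar, __data)
 *
 * for the "name" entry, which later ends up in __data_loc_name: the offset
 * from the start of the entry in the low 16 bits and the length in the
 * upper 16 bits.
 */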
#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
        struct ftrace_data_offsets_##call { \
                tstruct; \
        };

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
 * Set up the format used to show the trace point.
 *
 * int
 * ftrace_format_##call(struct trace_seq *s)
 * {
 *        struct ftrace_raw_##call field;
 *        int ret;
 *
 *        ret = trace_seq_printf(s, #type " " #item ";"
 *                               " offset:%u; size:%u;\n",
 *                               offsetof(struct ftrace_raw_##call, item),
 *                               sizeof(field.item));
 *
 * }
 */
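/*
 * For illustration, for the hypothetical foo_bar event the generated
 * ftrace_format_foo_bar() emits text along these lines (the offset, size and
 * signed values depend on struct trace_entry and the architecture, so they
 * are elided here):
 *
 *        field:int cpu;  offset:...;  size:...;  signed:...;
 *        field:__data_loc char[] name;  offset:...;  size:...;  signed:...;
 *
 *        print fmt: "cpu=%d name=%s", REC->cpu, __get_str(name)
 *
 * This is the text exposed through the event's "format" file, with __entry
 * stringified as REC by the definitions below.
 */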
#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef __field
#define __field(type, item) \
        ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \
                               "offset:%u;\tsize:%u;\tsigned:%u;\n", \
                               (unsigned int)offsetof(typeof(field), item), \
                               (unsigned int)sizeof(field.item), \
                               (unsigned int)is_signed_type(type)); \
        if (!ret) \
                return 0;

#undef __field_ext
#define __field_ext(type, item, filter_type) __field(type, item)

#undef __array
#define __array(type, item, len) \
        ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
                               "offset:%u;\tsize:%u;\tsigned:%u;\n", \
                               (unsigned int)offsetof(typeof(field), item), \
                               (unsigned int)sizeof(field.item), \
                               (unsigned int)is_signed_type(type)); \
        if (!ret) \
                return 0;

#undef __dynamic_array
#define __dynamic_array(type, item, len) \
        ret = trace_seq_printf(s, "\tfield:__data_loc " #type "[] " #item ";\t" \
                               "offset:%u;\tsize:%u;\tsigned:%u;\n", \
                               (unsigned int)offsetof(typeof(field), \
                                        __data_loc_##item), \
                               (unsigned int)sizeof(field.__data_loc_##item), \
                               (unsigned int)is_signed_type(type)); \
        if (!ret) \
                return 0;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __entry
#define __entry REC

#undef __print_symbolic
#undef __get_dynamic_array
#undef __get_str

#undef TP_printk
#define TP_printk(fmt, args...) "\"%s\", %s\n", fmt, __stringify(args)

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef TP_perf_assign
#define TP_perf_assign(args...)
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \
static int \
ftrace_format_setup_##call(struct ftrace_event_call *unused, \
                           struct trace_seq *s) \
{ \
        struct ftrace_raw_##call field __attribute__((unused)); \
        int ret = 0; \
\
        tstruct; \
\
        return ret; \
} \
\
static int \
ftrace_format_##call(struct ftrace_event_call *unused, \
                     struct trace_seq *s) \
{ \
        int ret = 0; \
\
        ret = ftrace_format_setup_##call(unused, s); \
        if (!ret) \
                return ret; \
\
        ret = trace_seq_printf(s, "\nprint fmt: " print); \
\
        return ret; \
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
static int \
ftrace_format_##name(struct ftrace_event_call *unused, \
                     struct trace_seq *s) \
{ \
        int ret = 0; \
\
        ret = ftrace_format_setup_##template(unused, s); \
        if (!ret) \
                return ret; \
\
        trace_seq_printf(s, "\nprint fmt: " print); \
\
        return ret; \
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *        struct trace_seq *s = &iter->seq;
 *        struct ftrace_raw_<call> *field; <-- defined in stage 1
 *        struct trace_entry *entry;
 *        struct trace_seq *p;
 *        int ret;
 *
 *        entry = iter->ent;
 *
 *        if (entry->type != event_<call>.id) {
 *                WARN_ON_ONCE(1);
 *                return TRACE_TYPE_UNHANDLED;
 *        }
 *
 *        field = (typeof(field))entry;
 *
 *        p = &get_cpu_var(ftrace_event_seq);
 *        trace_seq_init(p);
 *        ret = trace_seq_printf(s, <TP_printk> "\n");
 *        put_cpu();
 *        if (!ret)
 *                return TRACE_TYPE_PARTIAL_LINE;
 *
 *        return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */
#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field) \
                ((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __print_flags
#define __print_flags(flag, delim, flag_array...) \
        ({ \
                static const struct trace_print_flags __flags[] = \
                        { flag_array, { -1, NULL }}; \
                ftrace_print_flags_seq(p, delim, flag, __flags); \
        })

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...) \
        ({ \
                static const struct trace_print_flags symbols[] = \
                        { symbol_array, { -1, NULL }}; \
                ftrace_print_symbols_seq(p, value, symbols); \
        })
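/*
 * For illustration, a hypothetical TP_printk() (field names made up) could
 * use these helpers as
 *
 *        TP_printk("state=%s flags=%s",
 *                  __print_symbolic(__entry->state, { 0, "IDLE" }, { 1, "BUSY" }),
 *                  __print_flags(__entry->flags, "|", { 1, "RD" }, { 2, "WR" }))
 *
 * which the definitions above turn into ftrace_print_symbols_seq() and
 * ftrace_print_flags_seq() calls against the per-cpu trace_seq "p".
 */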
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static enum print_line_t \
ftrace_raw_output_id_##call(int event_id, const char *name, \
                            struct trace_iterator *iter, int flags) \
{ \
        struct trace_seq *s = &iter->seq; \
        struct ftrace_raw_##call *field; \
        struct trace_entry *entry; \
        struct trace_seq *p; \
        int ret; \
\
        entry = iter->ent; \
\
        if (entry->type != event_id) { \
                WARN_ON_ONCE(1); \
                return TRACE_TYPE_UNHANDLED; \
        } \
\
        field = (typeof(field))entry; \
\
        p = &get_cpu_var(ftrace_event_seq); \
        trace_seq_init(p); \
        ret = trace_seq_printf(s, "%s: ", name); \
        if (ret) \
                ret = trace_seq_printf(s, print); \
        put_cpu(); \
        if (!ret) \
                return TRACE_TYPE_PARTIAL_LINE; \
\
        return TRACE_TYPE_HANDLED; \
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args) \
static enum print_line_t \
ftrace_raw_output_##name(struct trace_iterator *iter, int flags) \
{ \
        return ftrace_raw_output_id_##template(event_##name.id, \
                                               #name, iter, flags); \
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
static enum print_line_t \
ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
{ \
        struct trace_seq *s = &iter->seq; \
        struct ftrace_raw_##template *field; \
        struct trace_entry *entry; \
        struct trace_seq *p; \
        int ret; \
\
        entry = iter->ent; \
\
        if (entry->type != event_##call.id) { \
                WARN_ON_ONCE(1); \
                return TRACE_TYPE_UNHANDLED; \
        } \
\
        field = (typeof(field))entry; \
\
        p = &get_cpu_var(ftrace_event_seq); \
        trace_seq_init(p); \
        ret = trace_seq_printf(s, "%s: ", #call); \
        if (ret) \
                ret = trace_seq_printf(s, print); \
        put_cpu(); \
        if (!ret) \
                return TRACE_TYPE_PARTIAL_LINE; \
\
        return TRACE_TYPE_HANDLED; \
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#undef __field_ext
#define __field_ext(type, item, filter_type) \
        ret = trace_define_field(event_call, #type, #item, \
                                 offsetof(typeof(field), item), \
                                 sizeof(field.item), \
                                 is_signed_type(type), filter_type); \
        if (ret) \
                return ret;

#undef __field
#define __field(type, item) __field_ext(type, item, FILTER_OTHER)

#undef __array
#define __array(type, item, len) \
        BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
        ret = trace_define_field(event_call, #type "[" #len "]", #item, \
                                 offsetof(typeof(field), item), \
                                 sizeof(field.item), 0, FILTER_OTHER); \
        if (ret) \
                return ret;

#undef __dynamic_array
#define __dynamic_array(type, item, len) \
        ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
                                 offsetof(typeof(field), __data_loc_##item), \
                                 sizeof(field.__data_loc_##item), 0, \
                                 FILTER_OTHER);

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \
static int \
ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
{ \
        struct ftrace_raw_##call field; \
        int ret; \
\
        tstruct; \
\
        return ret; \
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
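/*
 * For illustration, for the hypothetical foo_bar event the stage above
 * produces an ftrace_define_fields_foo_bar() that calls, roughly:
 *
 *        trace_define_field(event_call, "int", "cpu",
 *                           offsetof(struct ftrace_raw_foo_bar, cpu),
 *                           sizeof(int), 1, FILTER_OTHER);
 *        trace_define_field(event_call, "__data_loc char[]", "name",
 *                           offsetof(struct ftrace_raw_foo_bar, __data_loc_name),
 *                           sizeof(u32), 0, FILTER_OTHER);
 *
 * which is what makes each field known to the event filtering code.
 */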
/*
 * remember the offset of each array from the beginning of the event.
 */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len) \
        __data_offsets->item = __data_size + \
                               offsetof(typeof(*entry), __data); \
        __data_offsets->item |= (len * sizeof(type)) << 16; \
        __data_size += (len) * sizeof(type);

#undef __string
#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static inline int ftrace_get_offsets_##call( \
        struct ftrace_data_offsets_##call *__data_offsets, proto) \
{ \
        int __data_size = 0; \
        struct ftrace_raw_##call __maybe_unused *entry; \
\
        tstruct; \
\
        return __data_size; \
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#ifdef CONFIG_EVENT_PROFILE

/*
 * Generate the functions needed for tracepoint perf_event support.
 *
 * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
 *
 * static int ftrace_profile_enable_<call>(void)
 * {
 *        return register_trace_<call>(ftrace_profile_<call>);
 * }
 *
 * static void ftrace_profile_disable_<call>(void)
 * {
 *        unregister_trace_<call>(ftrace_profile_<call>);
 * }
 *
 */

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args) \
\
static void ftrace_profile_##name(proto); \
\
static int ftrace_profile_enable_##name(struct ftrace_event_call *unused) \
{ \
        return register_trace_##name(ftrace_profile_##name); \
} \
\
static void ftrace_profile_disable_##name(struct ftrace_event_call *unused) \
{ \
        unregister_trace_##name(ftrace_profile_##name); \
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#endif
/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * static void ftrace_event_<call>(proto)
 * {
 *        event_trace_printk(_RET_IP_, "<call>: " <fmt>);
 * }
 *
 * static int ftrace_reg_event_<call>(struct ftrace_event_call *unused)
 * {
 *        return register_trace_<call>(ftrace_event_<call>);
 * }
 *
 * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
 * {
 *        unregister_trace_<call>(ftrace_event_<call>);
 * }
 *
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(proto)
 * {
 *        struct ring_buffer_event *event;
 *        struct ftrace_raw_<call> *entry; <-- defined in stage 1
 *        struct ring_buffer *buffer;
 *        unsigned long irq_flags;
 *        int pc;
 *
 *        local_save_flags(irq_flags);
 *        pc = preempt_count();
 *
 *        event = trace_current_buffer_lock_reserve(&buffer,
 *                                  event_<call>.id,
 *                                  sizeof(struct ftrace_raw_<call>),
 *                                  irq_flags, pc);
 *        if (!event)
 *                return;
 *        entry = ring_buffer_event_data(event);
 *
 *        <assign>;  <-- Here we assign the entries by the __field and
 *                       __array macros.
 *
 *        trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
 * }
 *
 * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused)
 * {
 *        int ret;
 *
 *        ret = register_trace_<call>(ftrace_raw_event_<call>);
 *        if (ret)
 *                pr_info("event trace: Could not activate trace point "
 *                        "probe to <call>");
 *        return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
 * {
 *        unregister_trace_<call>(ftrace_raw_event_<call>);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *        .trace = ftrace_raw_output_<call>, <-- stage 3
 * };
 *
 * static struct ftrace_event_call __used
 * __attribute__((__aligned__(4)))
 * __attribute__((section("_ftrace_events"))) event_<call> = {
 *        .name        = "<call>",
 *        .system      = "<system>",
 *        .raw_init    = trace_event_raw_init,
 *        .regfunc     = ftrace_reg_event_<call>,
 *        .unregfunc   = ftrace_unreg_event_<call>,
 *        .show_format = ftrace_format_<call>,
 * }
 *
 */
#ifdef CONFIG_EVENT_PROFILE

#define _TRACE_PROFILE_INIT(call) \
        .profile_enable = ftrace_profile_enable_##call, \
        .profile_disable = ftrace_profile_disable_##call,

#else
#define _TRACE_PROFILE_INIT(call)
#endif

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len) \
        __entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __assign_str
#define __assign_str(dst, src) \
        strcpy(__get_str(dst), src);
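/*
 * For illustration, in the hypothetical foo_bar event the TP_fast_assign()
 * body
 *
 *        __entry->cpu = cpu;
 *        __assign_str(name, name);
 *
 * runs inside ftrace_raw_event_id_foo_bar() below: the tstruct pass first
 * stores __data_offsets.name into __entry->__data_loc_name, and
 * __assign_str() then copies the string into the dynamic area that
 * __get_str(name) resolves from that stored offset.
 */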
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
\
static void ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
                                       proto) \
{ \
        struct ftrace_data_offsets_##call __maybe_unused __data_offsets; \
        struct ring_buffer_event *event; \
        struct ftrace_raw_##call *entry; \
        struct ring_buffer *buffer; \
        unsigned long irq_flags; \
        int __data_size; \
        int pc; \
\
        local_save_flags(irq_flags); \
        pc = preempt_count(); \
\
        __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
\
        event = trace_current_buffer_lock_reserve(&buffer, \
                                 event_call->id, \
                                 sizeof(*entry) + __data_size, \
                                 irq_flags, pc); \
        if (!event) \
                return; \
        entry = ring_buffer_event_data(event); \
\
\
        tstruct \
\
        { assign; } \
\
        if (!filter_current_check_discard(buffer, event_call, entry, event)) \
                trace_nowake_buffer_unlock_commit(buffer, \
                                                  event, irq_flags, pc); \
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args) \
\
static void ftrace_raw_event_##call(proto) \
{ \
        ftrace_raw_event_id_##template(&event_##call, args); \
} \
\
static int ftrace_raw_reg_event_##call(struct ftrace_event_call *unused) \
{ \
        return register_trace_##call(ftrace_raw_event_##call); \
} \
\
static void ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused) \
{ \
        unregister_trace_##call(ftrace_raw_event_##call); \
} \
\
static struct trace_event ftrace_event_type_##call = { \
        .trace = ftrace_raw_output_##call, \
};

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args) \
\
static struct ftrace_event_call __used \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_events"))) event_##call = { \
        .name = #call, \
        .system = __stringify(TRACE_SYSTEM), \
        .event = &ftrace_event_type_##call, \
        .raw_init = trace_event_raw_init, \
        .regfunc = ftrace_raw_reg_event_##call, \
        .unregfunc = ftrace_raw_unreg_event_##call, \
        .show_format = ftrace_format_##template, \
        .define_fields = ftrace_define_fields_##template, \
        _TRACE_PROFILE_INIT(call) \
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
\
static struct ftrace_event_call __used \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_events"))) event_##call = { \
        .name = #call, \
        .system = __stringify(TRACE_SYSTEM), \
        .event = &ftrace_event_type_##call, \
        .raw_init = trace_event_raw_init, \
        .regfunc = ftrace_raw_reg_event_##call, \
        .unregfunc = ftrace_raw_unreg_event_##call, \
        .show_format = ftrace_format_##call, \
        .define_fields = ftrace_define_fields_##template, \
        _TRACE_PROFILE_INIT(call) \
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
 * Define the insertion callback to profile events
 *
 * The job is very similar to ftrace_raw_event_<call> except that we don't
 * insert in the ring buffer but in a perf counter.
 *
 * static void ftrace_profile_<call>(proto)
 * {
 *        struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *        struct ftrace_event_call *event_call = &event_<call>;
 *        extern void perf_tp_event(int, u64, u64, void *, int);
 *        struct ftrace_raw_<call> *entry;
 *        struct perf_trace_buf *trace_buf;
 *        u64 __addr = 0, __count = 1;
 *        unsigned long irq_flags;
 *        struct trace_entry *ent;
 *        int __entry_size;
 *        int __data_size;
 *        int __cpu;
 *        int pc;
 *
 *        pc = preempt_count();
 *
 *        __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *        // Below we want to get the aligned size by taking into account
 *        // the u32 field that will later store the buffer size
 *        __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
 *                             sizeof(u64));
 *        __entry_size -= sizeof(u32);
 *
 *        // Protect the non-NMI buffer
 *        // This also protects the rcu read side
 *        local_irq_save(irq_flags);
 *        __cpu = smp_processor_id();
 *
 *        if (in_nmi())
 *                trace_buf = rcu_dereference(perf_trace_buf_nmi);
 *        else
 *                trace_buf = rcu_dereference(perf_trace_buf);
 *
 *        if (!trace_buf)
 *                goto end;
 *
 *        trace_buf = per_cpu_ptr(trace_buf, __cpu);
 *
 *        // Avoid recursion from perf that could mess up the buffer
 *        if (trace_buf->recursion++)
 *                goto end_recursion;
 *
 *        raw_data = trace_buf->buf;
 *
 *        // Make the recursion update visible before entering perf_tp_event
 *        // so that we protect from perf recursions.
 *
 *        barrier();
 *
 *        // Zero dead bytes from the alignment to avoid a stack leak to userspace:
 *        *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
 *        entry = (struct ftrace_raw_<call> *)raw_data;
 *        ent = &entry->ent;
 *        tracing_generic_entry_update(ent, irq_flags, pc);
 *        ent->type = event_call->id;
 *
 *        <tstruct>  <- set up the dynamic arrays
 *
 *        <assign>   <- assign our values
 *
 *        perf_tp_event(event_call->id, __addr, __count, entry,
 *                      __entry_size);  <- submit them to the perf counter
 *
 * }
 */
#ifdef CONFIG_EVENT_PROFILE

#undef __perf_addr
#define __perf_addr(a) __addr = (a)

#undef __perf_count
#define __perf_count(c) __count = (c)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static void \
ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \
                            proto) \
{ \
        struct ftrace_data_offsets_##call __maybe_unused __data_offsets; \
        extern int perf_swevent_get_recursion_context(void); \
        extern void perf_swevent_put_recursion_context(int rctx); \
        extern void perf_tp_event(int, u64, u64, void *, int); \
        struct ftrace_raw_##call *entry; \
        u64 __addr = 0, __count = 1; \
        unsigned long irq_flags; \
        struct trace_entry *ent; \
        int __entry_size; \
        int __data_size; \
        char *trace_buf; \
        char *raw_data; \
        int __cpu; \
        int rctx; \
        int pc; \
\
        pc = preempt_count(); \
\
        __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
        __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32), \
                             sizeof(u64)); \
        __entry_size -= sizeof(u32); \
\
        if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE, \
                      "profile buffer not large enough")) \
                return; \
\
        local_irq_save(irq_flags); \
\
        rctx = perf_swevent_get_recursion_context(); \
        if (rctx < 0) \
                goto end_recursion; \
\
        __cpu = smp_processor_id(); \
\
        if (in_nmi()) \
                trace_buf = rcu_dereference(perf_trace_buf_nmi); \
        else \
                trace_buf = rcu_dereference(perf_trace_buf); \
\
        if (!trace_buf) \
                goto end; \
\
        raw_data = per_cpu_ptr(trace_buf, __cpu); \
\
        *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; \
        entry = (struct ftrace_raw_##call *)raw_data; \
        ent = &entry->ent; \
        tracing_generic_entry_update(ent, irq_flags, pc); \
        ent->type = event_call->id; \
\
        tstruct \
\
        { assign; } \
\
        perf_tp_event(event_call->id, __addr, __count, entry, \
                      __entry_size); \
\
end: \
        perf_swevent_put_recursion_context(rctx); \
end_recursion: \
        local_irq_restore(irq_flags); \
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args) \
static void ftrace_profile_##call(proto) \
{ \
        struct ftrace_event_call *event_call = &event_##call; \
\
        ftrace_profile_templ_##template(event_call, args); \
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_EVENT_PROFILE */

#undef _TRACE_PROFILE_INIT