ftrace.c

/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"

#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* set when tracing only a pid */
struct pid *ftrace_pid_trace;
static struct pid * const ftrace_swapper_pid = &init_struct_pid;

/* Quick disabling of function tracer. */
int function_trace_stop;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);
static DEFINE_MUTEX(ftrace_start_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
	if (!test_tsk_trace_trace(current))
		return;

	ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a lag before other CPUs see the
 * change and actually stop tracing.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}

#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test function_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		ftrace_func_t func;

		if (ops->next == &ftrace_list_end)
			func = ops->func;
		else
			func = ftrace_list_func;

		if (ftrace_pid_trace) {
			set_ftrace_pid_function(func);
			func = ftrace_pid_func;
		}

		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
		ftrace_trace_function = func;
#else
		__ftrace_trace_function = func;
		ftrace_trace_function = ftrace_test_stop_func;
#endif
	}

	spin_unlock(&ftrace_lock);

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list->next == &ftrace_list_end) {
			ftrace_func_t func = ftrace_list->func;

			if (ftrace_pid_trace) {
				set_ftrace_pid_function(func);
				func = ftrace_pid_func;
			}
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
			ftrace_trace_function = func;
#else
			__ftrace_trace_function = func;
#endif
		}
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}

static void ftrace_update_pid_func(void)
{
	ftrace_func_t func;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	if (ftrace_trace_function == ftrace_stub)
		goto out;

	func = ftrace_trace_function;

	if (ftrace_pid_trace) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	} else {
		if (func == ftrace_pid_func)
			func = ftrace_pid_function;
	}

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	ftrace_trace_function = func;
#else
	__ftrace_trace_function = func;
#endif

 out:
	spin_unlock(&ftrace_lock);
}

#ifdef CONFIG_DYNAMIC_FTRACE
#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
	FTRACE_START_FUNC_RET		= (1 << 5),
	FTRACE_STOP_FUNC_RET		= (1 << 6),
};

static int ftrace_filtered;

static LIST_HEAD(ftrace_new_addrs);

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	int			index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT	10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;

/*
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}
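
/*
 * Illustrative sketch (not part of the original file): since the macros
 * above expand to two nested for loops, a 'break' only leaves the inner
 * loop. Stop the walk early with a goto instead; 'target_ip' below is a
 * hypothetical address.
 *
 *	struct ftrace_page *pg;
 *	struct dyn_ftrace *rec;
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->ip == target_ip)
 *			goto found;
 *	} while_for_each_ftrace_rec();
 * found:
 *	...
 */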

#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)	({ 0; })
# define unfreeze_record(rec)	({ 0; })
# define record_frozen(rec)	({ 0; })
#endif /* CONFIG_KPROBES */

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;

	if (ftrace_disabled || !start)
		return;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {
		if ((rec->ip >= s) && (rec->ip < e))
			ftrace_free_rec(rec);
	} while_for_each_ftrace_rec();
	spin_unlock(&ftrace_lock);
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;

	list_add(&rec->list, &ftrace_new_addrs);

	return rec;
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}

static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ip, fl;
	unsigned long ftrace_addr;

	ftrace_addr = (unsigned long)FTRACE_ADDR;

	ip = rec->ip;

	/*
	 * If this record is not to be traced and
	 * it is not enabled then do nothing.
	 *
	 * If this record is not to be traced and
	 * it is enabled then disable it.
	 */
	if (rec->flags & FTRACE_FL_NOTRACE) {
		if (rec->flags & FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			return 0;

	} else if (ftrace_filtered && enable) {
		/*
		 * Filtering is on:
		 */

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		/* Record is filtered and enabled, do nothing */
		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
			return 0;

		/* Record is not filtered or enabled, do nothing */
		if (!fl)
			return 0;

		/* Record is not filtered but enabled, disable it */
		if (fl == FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			/* Otherwise record is filtered but not enabled, enable it */
			rec->flags |= FTRACE_FL_ENABLED;
	} else {
		/* Disable or not filtered */

		if (enable) {
			/* if record is enabled, do nothing */
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;

			rec->flags |= FTRACE_FL_ENABLED;

		} else {
			/* if record is not enabled, do nothing */
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;

			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	if (rec->flags & FTRACE_FL_ENABLED)
		return ftrace_make_call(rec, ftrace_addr);
	else
		return ftrace_make_nop(NULL, rec, ftrace_addr);
}
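
/*
 * Summary of the decision table above, derived from the code for
 * clarity (not part of the original file):
 *
 *	NOTRACE set, record enabled	-> disable it
 *	NOTRACE set, record disabled	-> do nothing
 *
 *	filtering on and enable requested:
 *	  FILTER and ENABLED		-> do nothing
 *	  neither flag			-> do nothing
 *	  ENABLED only			-> disable it
 *	  FILTER only			-> enable it
 *
 *	otherwise: copy 'enable' into the ENABLED flag, skipping
 *	records already in the requested state.
 */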

static void ftrace_replace_code(int enable)
{
	int failed;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	do_for_each_ftrace_rec(pg, rec) {
		/*
		 * Skip over free records and records that have
		 * failed.
		 */
		if (rec->flags & FTRACE_FL_FREE ||
		    rec->flags & FTRACE_FL_FAILED)
			continue;

		/* ignore updates to this record's mcount site */
		if (get_kprobe((void *)rec->ip)) {
			freeze_record(rec);
			continue;
		} else {
			unfreeze_record(rec);
		}

		failed = __ftrace_replace_code(rec, enable);
		if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
			rec->flags |= FTRACE_FL_FAILED;
			if ((system_state == SYSTEM_BOOTING) ||
			    !core_kernel_text(rec->ip)) {
				ftrace_free_rec(rec);
			} else
				ftrace_bug(failed, rec->ip);
		}
	} while_for_each_ftrace_rec();
}

static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long ip;
	int ret;

	ip = rec->ip;

	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
	if (ret) {
		ftrace_bug(ret, ip);
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (*command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();

	return 0;
}

static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}
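
/*
 * Note (added for clarity): the helper above hands __ftrace_modify_code()
 * to stop_machine(), which runs it on one CPU while all other CPUs spin
 * with interrupts disabled, so the mcount call sites can be patched
 * without another CPU executing them mid-update.
 */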

static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;

static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static void ftrace_startup(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up++;
	command |= FTRACE_ENABLE_CALLS;

	ftrace_startup_enable(command);

	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up--;
	if (!ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int ftrace_update_code(struct module *mod)
{
	struct dyn_ftrace *p, *t;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		list_del_init(&p->list);

		/* convert record (i.e, patch mcount-call with NOP) */
		if (ftrace_code_disable(mod, p)) {
			p->flags |= FTRACE_FL_CONVERTED;
			ftrace_update_cnt++;
		} else
			ftrace_free_rec(p);
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt + 1);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
	FTRACE_ITER_PRINTALL	= (1 << 4),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	struct ftrace_page	*pg;
	int			idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

	if (iter->flags & FTRACE_ITER_PRINTALL)
		return NULL;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		} else {
			iter->idx = -1;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||
		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||
		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||
		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||
		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}
	spin_unlock(&ftrace_lock);

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;

	/*
	 * For set_ftrace_filter reading, if we have the filter
	 * off, we can short cut and just print out that all
	 * functions are enabled.
	 */
	if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
		if (*pos > 0)
			return NULL;
		iter->flags |= FTRACE_ITER_PRINTALL;
		(*pos)++;
		return iter;
	}

	if (*pos > 0) {
		if (iter->idx < 0)
			return p;
		(*pos)--;
		iter->idx--;
	}

	p = t_next(m, p, pos);

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (iter->flags & FTRACE_ITER_PRINTALL) {
		seq_printf(m, "#### all functions enabled ####\n");
		return 0;
	}

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}

static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	do_for_each_ftrace_rec(pg, rec) {
		if (rec->flags & FTRACE_FL_FAILED)
			continue;
		rec->flags &= ~type;
	} while_for_each_ftrace_rec();

	spin_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};
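
/*
 * Summary of the pattern syntax parsed by ftrace_match_records() below
 * (added for clarity, not part of the original file):
 *
 *	"func"		MATCH_FULL		exact symbol match
 *	"func*"		MATCH_FRONT_ONLY	symbols starting with "func"
 *	"*func"		MATCH_END_ONLY		symbols ending with "func"
 *	"*func*"	MATCH_MIDDLE_ONLY	symbols containing "func"
 *
 * A leading '!' clears the flag on matching records instead of
 * setting it.
 */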

static void
ftrace_match_records(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;
	int not = 0;

	if (buff[0] == '!') {
		not = 1;
		buff++;
		len--;
	}

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 1;
	do_for_each_ftrace_rec(pg, rec) {
		int matched = 0;
		char *ptr;

		if (rec->flags & FTRACE_FL_FAILED)
			continue;

		kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
		switch (type) {
		case MATCH_FULL:
			if (strcmp(str, buff) == 0)
				matched = 1;
			break;
		case MATCH_FRONT_ONLY:
			if (memcmp(str, buff, match) == 0)
				matched = 1;
			break;
		case MATCH_MIDDLE_ONLY:
			if (strstr(str, search))
				matched = 1;
			break;
		case MATCH_END_ONLY:
			ptr = strstr(str, search);
			if (ptr && (ptr[search_len] == 0))
				matched = 1;
			break;
		}
		if (matched) {
			if (not)
				rec->flags &= ~flag;
			else
				rec->flags |= flag;
		}
	} while_for_each_ftrace_rec();
	spin_unlock(&ftrace_lock);
}

static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	/* cnt is a size_t and can never be negative */
	if (!cnt)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match_records(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match_records(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}
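
/*
 * Illustrative usage sketch (not part of the original file); the
 * pattern strings are hypothetical examples. Writable arrays are used
 * because ftrace_match_records() modifies its input in place.
 *
 *	unsigned char filter[] = "sched*";
 *	unsigned char skip[] = "schedule_tail";
 *
 *	ftrace_set_filter(filter, sizeof(filter) - 1, 1);
 *	ftrace_set_notrace(skip, sizeof(skip) - 1, 1);
 */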

static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match_records(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftrace_start_lock);
	if (ftrace_start_up && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_start_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}

static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static DEFINE_MUTEX(graph_lock);

int ftrace_graph_count;
unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;

static void *
g_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned long *array = m->private;
	int index = *pos;

	(*pos)++;

	if (index >= ftrace_graph_count)
		return NULL;

	return &array[index];
}

static void *g_start(struct seq_file *m, loff_t *pos)
{
	void *p = NULL;

	mutex_lock(&graph_lock);

	p = g_next(m, p, pos);

	return p;
}

static void g_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&graph_lock);
}

static int g_show(struct seq_file *m, void *v)
{
	unsigned long *ptr = v;
	char str[KSYM_SYMBOL_LEN];

	if (!ptr)
		return 0;

	kallsyms_lookup(*ptr, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations ftrace_graph_seq_ops = {
	.start = g_start,
	.next = g_next,
	.stop = g_stop,
	.show = g_show,
};

static int
ftrace_graph_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&graph_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND)) {
		ftrace_graph_count = 0;
		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &ftrace_graph_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = ftrace_graph_funcs;
		}
	} else
		file->private_data = ftrace_graph_funcs;
	mutex_unlock(&graph_lock);

	return ret;
}

static ssize_t
ftrace_graph_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static int
ftrace_set_func(unsigned long *array, int idx, char *buffer)
{
	char str[KSYM_SYMBOL_LEN];
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int found = 0;
	int j;

	if (ftrace_disabled)
		return -ENODEV;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	do_for_each_ftrace_rec(pg, rec) {
		if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
			continue;

		kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
		if (strcmp(str, buffer) == 0) {
			/* Return 1 if we add it to the array */
			found = 1;
			for (j = 0; j < idx; j++)
				if (array[j] == rec->ip) {
					found = 0;
					break;
				}
			if (found)
				array[idx] = rec->ip;
			goto out;
		}
	} while_for_each_ftrace_rec();
 out:
	spin_unlock(&ftrace_lock);

	return found ? 0 : -EINVAL;
}

static ssize_t
ftrace_graph_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	unsigned char buffer[FTRACE_BUFF_MAX+1];
	unsigned long *array;
	size_t read = 0;
	ssize_t ret;
	int index = 0;
	char ch;

	/* cnt is a size_t and can never be negative */
	if (!cnt)
		return 0;

	mutex_lock(&graph_lock);

	if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
		ret = -EBUSY;
		goto out;
	}

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		array = m->private;
	} else
		array = file->private_data;

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		*ppos += read;
		ret = read;
		goto out;
	}

	while (cnt && !isspace(ch)) {
		if (index < FTRACE_BUFF_MAX)
			buffer[index++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}
	buffer[index] = 0;

	/* we allow only one at a time */
	ret = ftrace_set_func(array, ftrace_graph_count, buffer);
	if (ret)
		goto out;

	ftrace_graph_count++;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&graph_lock);

	return ret;
}

static const struct file_operations ftrace_graph_fops = {
	.open = ftrace_graph_open,
	.read = ftrace_graph_read,
	.write = ftrace_graph_write,
};
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
{
	struct dentry *entry;

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
				    NULL,
				    &ftrace_graph_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_graph_function' entry\n");
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	return 0;
}

static int ftrace_convert_nops(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_start_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code(mod);
	local_irq_restore(flags);
	mutex_unlock(&ftrace_start_lock);

	return 0;
}

void ftrace_init_module(struct module *mod,
			unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(mod, start, end);
}

extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}

#else

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(command)	do { } while (0)
# define ftrace_shutdown(command)	do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

static ssize_t
ftrace_pid_read(struct file *file, char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	if (ftrace_pid_trace == ftrace_swapper_pid)
		r = sprintf(buf, "swapper tasks\n");
	else if (ftrace_pid_trace)
		r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
	else
		r = sprintf(buf, "no pid\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static void clear_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		clear_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void set_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		set_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void clear_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		clear_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();

	put_pid(pid);
}

static void set_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		set_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();
}

static void clear_ftrace_pid_task(struct pid **pid)
{
	if (*pid == ftrace_swapper_pid)
		clear_ftrace_swapper();
	else
		clear_ftrace_pid(*pid);

	*pid = NULL;
}

static void set_ftrace_pid_task(struct pid *pid)
{
	if (pid == ftrace_swapper_pid)
		set_ftrace_swapper();
	else
		set_ftrace_pid(pid);
}

static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		 size_t cnt, loff_t *ppos)
{
	struct pid *pid;
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	mutex_lock(&ftrace_start_lock);
	if (val < 0) {
		/* disable pid tracing */
		if (!ftrace_pid_trace)
			goto out;

		clear_ftrace_pid_task(&ftrace_pid_trace);

	} else {
		/* swapper task is special */
		if (!val) {
			pid = ftrace_swapper_pid;
			if (pid == ftrace_pid_trace)
				goto out;
		} else {
			pid = find_get_pid(val);

			if (pid == ftrace_pid_trace) {
				put_pid(pid);
				goto out;
			}
		}

		if (ftrace_pid_trace)
			clear_ftrace_pid_task(&ftrace_pid_trace);

		if (!pid)
			goto out;

		ftrace_pid_trace = pid;

		set_ftrace_pid_task(ftrace_pid_trace);
	}

	/* update the function call */
	ftrace_update_pid_func();
	ftrace_startup_enable(0);

 out:
	mutex_unlock(&ftrace_start_lock);

	return cnt;
}
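
/*
 * Writes to the "set_ftrace_pid" debugfs file land here: a positive
 * value traces only that pid, 0 selects the per-cpu swapper (idle)
 * tasks, and a negative value disables pid filtering. Illustrative
 * shell usage, assuming debugfs is mounted at /sys/kernel/debug:
 *
 *	echo 1234 > /sys/kernel/debug/tracing/set_ftrace_pid
 *	echo -1 > /sys/kernel/debug/tracing/set_ftrace_pid
 */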

static struct file_operations ftrace_pid_fops = {
	.read = ftrace_pid_read,
	.write = ftrace_pid_write,
};

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	ftrace_init_dyn_debugfs(d_tracer);

	entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
				    NULL, &ftrace_pid_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_pid' entry\n");
	return 0;
}
fs_initcall(ftrace_init_debugfs);

/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way: no locking or synchronization is done,
 * so it is safe to call even from atomic context.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);

	ret = __register_ftrace_function(ops);
	ftrace_startup(0);

	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
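
/*
 * Minimal registration sketch (illustrative, not part of the original
 * file); the callback name is hypothetical. As noted above, the
 * callback and everything it calls must be notrace, or tracing will
 * recurse.
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *		...
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */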

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown(0);
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static atomic_t ftrace_graph_active;
static struct notifier_block ftrace_suspend_notifier;

int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;

/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					* sizeof(struct ftrace_ret_stack),
					GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			t->curr_ret_stack = -1;
			/* Make sure IRQs see the -1 first: */
			barrier();
			t->ret_stack = ret_stack_list[start++];
			atomic_set(&t->tracing_graph_pause, 0);
			atomic_set(&t->trace_overrun, 0);
		}
	} while_each_thread(g, t);

 unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
 free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}

/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret;

	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
				 sizeof(struct ftrace_ret_stack *),
				 GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	kfree(ret_stack_list);
	return ret;
}

/*
 * Hibernation protection.
 * The state of the current task is too unstable during
 * suspend/restore to disk. We want to protect against that.
 */
static int
ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
			     void *unused)
{
	switch (state) {
	case PM_HIBERNATION_PREPARE:
		pause_graph_tracing();
		break;

	case PM_POST_HIBERNATION:
		unpause_graph_tracing();
		break;
	}
	return NOTIFY_DONE;
}

int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			  trace_func_graph_ent_t entryfunc)
{
	int ret = 0;

	mutex_lock(&ftrace_sysctl_lock);

	ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
	register_pm_notifier(&ftrace_suspend_notifier);

	atomic_inc(&ftrace_graph_active);
	ret = start_graph_tracing();
	if (ret) {
		atomic_dec(&ftrace_graph_active);
		goto out;
	}

	ftrace_graph_return = retfunc;
	ftrace_graph_entry = entryfunc;

	ftrace_startup(FTRACE_START_FUNC_RET);

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}

void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_sysctl_lock);

	atomic_dec(&ftrace_graph_active);
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(FTRACE_STOP_FUNC_RET);
	unregister_pm_notifier(&ftrace_suspend_notifier);

	mutex_unlock(&ftrace_sysctl_lock);
}
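
/*
 * Illustrative registration sketch (not part of the original file);
 * callback names are hypothetical, and the entry callback's return
 * value is assumed to mirror the stub above (nonzero to record the
 * function, 0 to skip it).
 *
 *	static int my_graph_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;
 *	}
 *
 *	static void my_graph_return(struct ftrace_graph_ret *trace)
 *	{
 *	}
 *
 *	register_ftrace_graph(my_graph_return, my_graph_entry);
 *	...
 *	unregister_ftrace_graph();
 */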

/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	if (atomic_read(&ftrace_graph_active)) {
		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
		if (!t->ret_stack)
			return;
		t->curr_ret_stack = -1;
		atomic_set(&t->tracing_graph_pause, 0);
		atomic_set(&t->trace_overrun, 0);
	} else
		t->ret_stack = NULL;
}

void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack *ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}

void ftrace_graph_stop(void)
{
	ftrace_stop();
}
#endif