ftrace.c
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"

#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* set when tracing only a pid */
struct pid *ftrace_pid_trace;
static struct pid * const ftrace_swapper_pid = &init_struct_pid;

/* Quick disabling of function tracer. */
int function_trace_stop;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);
static DEFINE_MUTEX(ftrace_start_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
	if (!test_tsk_trace_trace(current))
		return;

	ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a brief lag before all CPUs stop
 * calling the old function.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}

#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test ftrace_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		ftrace_func_t func;

		if (ops->next == &ftrace_list_end)
			func = ops->func;
		else
			func = ftrace_list_func;

		if (ftrace_pid_trace) {
			set_ftrace_pid_function(func);
			func = ftrace_pid_func;
		}

		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
		ftrace_trace_function = func;
#else
		__ftrace_trace_function = func;
		ftrace_trace_function = ftrace_test_stop_func;
#endif
	}

	spin_unlock(&ftrace_lock);

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list->next == &ftrace_list_end) {
			ftrace_func_t func = ftrace_list->func;

			if (ftrace_pid_trace) {
				set_ftrace_pid_function(func);
				func = ftrace_pid_func;
			}
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
			ftrace_trace_function = func;
#else
			__ftrace_trace_function = func;
#endif
		}
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}

static void ftrace_update_pid_func(void)
{
	ftrace_func_t func;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	if (ftrace_trace_function == ftrace_stub)
		goto out;

	func = ftrace_trace_function;

	if (ftrace_pid_trace) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	} else {
		if (func == ftrace_pid_func)
			func = ftrace_pid_function;
	}

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	ftrace_trace_function = func;
#else
	__ftrace_trace_function = func;
#endif

 out:
	spin_unlock(&ftrace_lock);
}

#ifdef CONFIG_DYNAMIC_FTRACE
#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
	FTRACE_START_FUNC_RET		= (1 << 5),
	FTRACE_STOP_FUNC_RET		= (1 << 6),
};

static int ftrace_filtered;

static LIST_HEAD(ftrace_new_addrs);

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	int			index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
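
/*
 * A rough worked example of the arithmetic above (the sizes are
 * illustrative assumptions; they depend on the architecture and
 * config): with 4 KiB pages, a 16-byte ftrace_page header and a
 * ~32-byte struct dyn_ftrace, ENTRIES_PER_PAGE comes out to about
 * (4096 - 16) / 32 = 127 records per page.
 */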
/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;

/*
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}
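
/*
 * A minimal usage sketch of the iterator pair above. Because the
 * macros expand to two nested for loops, a 'break' would only leave
 * the inner loop; early exit must jump past
 * while_for_each_ftrace_rec():
 *
 *	struct ftrace_page *pg;
 *	struct dyn_ftrace *rec;
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->ip == target_ip)	// hypothetical search
 *			goto found;
 *	} while_for_each_ftrace_rec();
 * found:
 *	...
 */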
#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;

	if (ftrace_disabled || !start)
		return;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {
		if ((rec->ip >= s) && (rec->ip < e))
			ftrace_free_rec(rec);
	} while_for_each_ftrace_rec();
	spin_unlock(&ftrace_lock);
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));

		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;

	list_add(&rec->list, &ftrace_new_addrs);

	return rec;
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}

static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ip, fl;
	unsigned long ftrace_addr;

	ftrace_addr = (unsigned long)FTRACE_ADDR;

	ip = rec->ip;

	/*
	 * If this record is not to be traced and
	 * it is not enabled then do nothing.
	 *
	 * If this record is not to be traced and
	 * it is enabled then disable it.
	 */
	if (rec->flags & FTRACE_FL_NOTRACE) {
		if (rec->flags & FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			return 0;

	} else if (ftrace_filtered && enable) {
		/*
		 * Filtering is on:
		 */

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		/* Record is filtered and enabled, do nothing */
		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
			return 0;

		/* Record is not filtered or enabled, do nothing */
		if (!fl)
			return 0;

		/* Record is not filtered but enabled, disable it */
		if (fl == FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
		/* Otherwise record is filtered but not enabled, enable it */
			rec->flags |= FTRACE_FL_ENABLED;
	} else {
		/* Disable or not filtered */

		if (enable) {
			/* if record is enabled, do nothing */
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;

			rec->flags |= FTRACE_FL_ENABLED;

		} else {

			/* if record is not enabled, do nothing */
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;

			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	if (rec->flags & FTRACE_FL_ENABLED)
		return ftrace_make_call(rec, ftrace_addr);
	else
		return ftrace_make_nop(NULL, rec, ftrace_addr);
}

static void ftrace_replace_code(int enable)
{
	int failed;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	do_for_each_ftrace_rec(pg, rec) {
		/*
		 * Skip over free records and records that have
		 * failed.
		 */
		if (rec->flags & FTRACE_FL_FREE ||
		    rec->flags & FTRACE_FL_FAILED)
			continue;

		/* ignore updates to this record's mcount site */
		if (get_kprobe((void *)rec->ip)) {
			freeze_record(rec);
			continue;
		} else {
			unfreeze_record(rec);
		}

		failed = __ftrace_replace_code(rec, enable);
		if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
			rec->flags |= FTRACE_FL_FAILED;
			if ((system_state == SYSTEM_BOOTING) ||
			    !core_kernel_text(rec->ip)) {
				ftrace_free_rec(rec);
			} else
				ftrace_bug(failed, rec->ip);
		}
	} while_for_each_ftrace_rec();
}

static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long ip;
	int ret;

	ip = rec->ip;

	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
	if (ret) {
		ftrace_bug(ret, ip);
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (*command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();

	return 0;
}

static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}
static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;

static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static void ftrace_startup(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up++;
	command |= FTRACE_ENABLE_CALLS;

	ftrace_startup_enable(command);

	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up--;
	if (!ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int ftrace_update_code(struct module *mod)
{
	struct dyn_ftrace *p, *t;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		list_del_init(&p->list);

		/* convert record (i.e, patch mcount-call with NOP) */
		if (ftrace_code_disable(mod, p)) {
			p->flags |= FTRACE_FL_CONVERTED;
			ftrace_update_cnt++;
		} else
			ftrace_free_rec(p);
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt + 1);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
	FTRACE_ITER_PRINTALL	= (1 << 4),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
struct ftrace_iterator {
	struct ftrace_page	*pg;
	int			idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

	if (iter->flags & FTRACE_ITER_PRINTALL)
		return NULL;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		} else {
			iter->idx = -1;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}
	spin_unlock(&ftrace_lock);

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;

	/*
	 * For set_ftrace_filter reading, if we have the filter
	 * off, we can short cut and just print out that all
	 * functions are enabled.
	 */
	if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
		if (*pos > 0)
			return NULL;
		iter->flags |= FTRACE_ITER_PRINTALL;
		(*pos)++;
		return iter;
	}

	if (*pos > 0) {
		if (iter->idx < 0)
			return p;
		(*pos)--;
		iter->idx--;
	}

	p = t_next(m, p, pos);

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (iter->flags & FTRACE_ITER_PRINTALL) {
		seq_printf(m, "#### all functions enabled ####\n");
		return 0;
	}

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}

static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	do_for_each_ftrace_rec(pg, rec) {
		if (rec->flags & FTRACE_FL_FAILED)
			continue;
		rec->flags &= ~type;
	} while_for_each_ftrace_rec();

	spin_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};
/*
 * (static function - no need for kernel doc)
 *
 * Pass in a buffer containing a glob and this function will
 * set search to point to the search part of the buffer and
 * return the type of search it is (see enum above).
 * This does modify buff.
 *
 * Returns the enum type.
 * *search is set to the pointer to use for comparison.
 * *not is set to 1 if buff started with a '!', 0 otherwise.
 */
static int
ftrace_setup_glob(unsigned char *buff, int len, char **search, int *not)
{
	int type = MATCH_FULL;
	int i;

	if (buff[0] == '!') {
		*not = 1;
		buff++;
		len--;
	} else
		*not = 0;

	*search = buff;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				*search = buff + 1;
				type = MATCH_END_ONLY;
			} else {
				if (type == MATCH_END_ONLY)
					type = MATCH_MIDDLE_ONLY;
				else
					type = MATCH_FRONT_ONLY;
				buff[i] = 0;
				break;
			}
		}
	}

	return type;
}
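
/*
 * Worked examples of the glob parsing above:
 *
 *	"foo"   -> MATCH_FULL,        search = "foo", not = 0
 *	"foo*"  -> MATCH_FRONT_ONLY,  search = "foo" (the '*' is cut off)
 *	"*foo"  -> MATCH_END_ONLY,    search = "foo"
 *	"*foo*" -> MATCH_MIDDLE_ONLY, search = "foo"
 *	"!foo*" -> MATCH_FRONT_ONLY,  search = "foo", not = 1
 */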
static int
ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
{
	char str[KSYM_SYMBOL_LEN];
	int matched = 0;
	char *ptr;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	switch (type) {
	case MATCH_FULL:
		if (strcmp(str, regex) == 0)
			matched = 1;
		break;
	case MATCH_FRONT_ONLY:
		if (strncmp(str, regex, len) == 0)
			matched = 1;
		break;
	case MATCH_MIDDLE_ONLY:
		if (strstr(str, regex))
			matched = 1;
		break;
	case MATCH_END_ONLY:
		ptr = strstr(str, regex);
		if (ptr && (ptr[len] == 0))
			matched = 1;
		break;
	}

	return matched;
}

static void ftrace_match_records(char *buff, int len, int enable)
{
	char *search;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned search_len;
	int not;

	type = ftrace_setup_glob(buff, len, &search, &not);

	search_len = strlen(search);

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 1;
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_FAILED)
			continue;

		if (ftrace_match_record(rec, search, search_len, type)) {
			if (not)
				rec->flags &= ~flag;
			else
				rec->flags |= flag;
		}
	} while_for_each_ftrace_rec();
	spin_unlock(&ftrace_lock);
}

static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;
	if (!cnt)	/* cnt is a size_t, so it can never be negative */
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match_records(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match_records(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}
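
/*
 * A minimal usage sketch. Note that @buf is modified in place by the
 * glob parsing, so it must be writable (a string literal would fault):
 *
 *	char buf[] = "sched*";
 *
 *	// trace only functions starting with "sched", resetting old filters
 *	ftrace_set_filter(buf, strlen(buf), 1);
 */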
/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}

static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match_records(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftrace_start_lock);
	if (ftrace_start_up && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_start_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}

static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static DEFINE_MUTEX(graph_lock);

int ftrace_graph_count;
unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;

static void *
g_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned long *array = m->private;
	int index = *pos;

	(*pos)++;

	if (index >= ftrace_graph_count)
		return NULL;

	return &array[index];
}

static void *g_start(struct seq_file *m, loff_t *pos)
{
	void *p = NULL;

	mutex_lock(&graph_lock);

	p = g_next(m, p, pos);

	return p;
}

static void g_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&graph_lock);
}

static int g_show(struct seq_file *m, void *v)
{
	unsigned long *ptr = v;
	char str[KSYM_SYMBOL_LEN];

	if (!ptr)
		return 0;

	kallsyms_lookup(*ptr, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations ftrace_graph_seq_ops = {
	.start = g_start,
	.next = g_next,
	.stop = g_stop,
	.show = g_show,
};

static int
ftrace_graph_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&graph_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND)) {
		ftrace_graph_count = 0;
		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &ftrace_graph_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = ftrace_graph_funcs;
		}
	} else
		file->private_data = ftrace_graph_funcs;
	mutex_unlock(&graph_lock);

	return ret;
}

static ssize_t
ftrace_graph_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static int
ftrace_set_func(unsigned long *array, int idx, char *buffer)
{
	char str[KSYM_SYMBOL_LEN];
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int found = 0;
	int j;

	if (ftrace_disabled)
		return -ENODEV;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
			continue;

		kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
		if (strcmp(str, buffer) == 0) {
			/* Return 1 if we add it to the array */
			found = 1;
			for (j = 0; j < idx; j++)
				if (array[j] == rec->ip) {
					found = 0;
					break;
				}
			if (found)
				array[idx] = rec->ip;
			goto out;
		}
	} while_for_each_ftrace_rec();
 out:
	spin_unlock(&ftrace_lock);

	return found ? 0 : -EINVAL;
}

static ssize_t
ftrace_graph_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	unsigned char buffer[FTRACE_BUFF_MAX+1];
	unsigned long *array;
	size_t read = 0;
	ssize_t ret;
	int index = 0;
	char ch;
	if (!cnt)	/* cnt is a size_t, so it can never be negative */
		return 0;

	mutex_lock(&graph_lock);

	if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
		ret = -EBUSY;
		goto out;
	}

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		array = m->private;
	} else
		array = file->private_data;

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		*ppos += read;
		ret = read;
		goto out;
	}

	while (cnt && !isspace(ch)) {
		if (index < FTRACE_BUFF_MAX)
			buffer[index++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}
	buffer[index] = 0;

	/* we allow only one at a time */
	ret = ftrace_set_func(array, ftrace_graph_count, buffer);
	if (ret)
		goto out;

	ftrace_graph_count++;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&graph_lock);

	return ret;
}

static const struct file_operations ftrace_graph_fops = {
	.open = ftrace_graph_open,
	.read = ftrace_graph_read,
	.write = ftrace_graph_write,
};
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
{
	struct dentry *entry;

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
				    NULL,
				    &ftrace_graph_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_graph_function' entry\n");
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	return 0;
}
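
/*
 * Typical use of the files created above, from a shell (the paths
 * assume debugfs is mounted at /sys/kernel/debug):
 *
 *	# list every function that can be traced
 *	cat /sys/kernel/debug/tracing/available_filter_functions
 *
 *	# trace only functions matching a glob
 *	echo 'sched*' > /sys/kernel/debug/tracing/set_ftrace_filter
 *
 *	# never trace this function
 *	echo do_IRQ > /sys/kernel/debug/tracing/set_ftrace_notrace
 */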
static int ftrace_convert_nops(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_start_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code(mod);
	local_irq_restore(flags);
	mutex_unlock(&ftrace_start_lock);

	return 0;
}

void ftrace_init_module(struct module *mod,
			unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(mod, start, end);
}

extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}

#else

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(command)	do { } while (0)
# define ftrace_shutdown(command)	do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

static ssize_t
ftrace_pid_read(struct file *file, char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	if (ftrace_pid_trace == ftrace_swapper_pid)
		r = sprintf(buf, "swapper tasks\n");
	else if (ftrace_pid_trace)
		r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
	else
		r = sprintf(buf, "no pid\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static void clear_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		clear_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void set_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		set_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void clear_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		clear_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();

	put_pid(pid);
}

static void set_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		set_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();
}

static void clear_ftrace_pid_task(struct pid **pid)
{
	if (*pid == ftrace_swapper_pid)
		clear_ftrace_swapper();
	else
		clear_ftrace_pid(*pid);

	*pid = NULL;
}

static void set_ftrace_pid_task(struct pid *pid)
{
	if (pid == ftrace_swapper_pid)
		set_ftrace_swapper();
	else
		set_ftrace_pid(pid);
}

static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		 size_t cnt, loff_t *ppos)
{
	struct pid *pid;
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	mutex_lock(&ftrace_start_lock);
	if (val < 0) {
		/* disable pid tracing */
		if (!ftrace_pid_trace)
			goto out;

		clear_ftrace_pid_task(&ftrace_pid_trace);

	} else {
		/* swapper task is special */
		if (!val) {
			pid = ftrace_swapper_pid;
			if (pid == ftrace_pid_trace)
				goto out;
		} else {
			pid = find_get_pid(val);

			if (pid == ftrace_pid_trace) {
				put_pid(pid);
				goto out;
			}
		}

		if (ftrace_pid_trace)
			clear_ftrace_pid_task(&ftrace_pid_trace);

		if (!pid)
			goto out;

		ftrace_pid_trace = pid;

		set_ftrace_pid_task(ftrace_pid_trace);
	}

	/* update the function call */
	ftrace_update_pid_func();
	ftrace_startup_enable(0);

 out:
	mutex_unlock(&ftrace_start_lock);

	return cnt;
}
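
/*
 * From user space (again assuming debugfs at /sys/kernel/debug), the
 * semantics implemented above are:
 *
 *	echo 1234 > set_ftrace_pid	# trace only pid 1234
 *	echo 0    > set_ftrace_pid	# trace the per-cpu idle (swapper) tasks
 *	echo -1   > set_ftrace_pid	# disable pid filtering again
 */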
static struct file_operations ftrace_pid_fops = {
	.read = ftrace_pid_read,
	.write = ftrace_pid_write,
};

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	ftrace_init_dyn_debugfs(d_tracer);

	entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
				    NULL, &ftrace_pid_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_pid' entry\n");
	return 0;
}
fs_initcall(ftrace_init_debugfs);
/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not-so-nice way: everything is disabled immediately,
 * with no locking and no cleanup, which makes it safe to call
 * from atomic context.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __register_ftrace_function(ops);
	ftrace_startup(0);
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
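
/*
 * A minimal caller sketch (the names here are hypothetical, not part
 * of this file). The callback must be notrace, as the comment above
 * warns:
 *
 *	static notrace void my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *		// called on every traced function entry
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */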
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown(0);
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static atomic_t ftrace_graph_active;
static struct notifier_block ftrace_suspend_notifier;

int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;

/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					* sizeof(struct ftrace_ret_stack),
					GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			t->curr_ret_stack = -1;
			/* Make sure IRQs see the -1 first: */
			barrier();
			t->ret_stack = ret_stack_list[start++];
			atomic_set(&t->tracing_graph_pause, 0);
			atomic_set(&t->trace_overrun, 0);
		}
	} while_each_thread(g, t);

unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}

/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret;

	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
				 sizeof(struct ftrace_ret_stack *),
				 GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	kfree(ret_stack_list);
	return ret;
}
/*
 * Hibernation protection.
 * The state of the current task is too unstable during
 * suspend/restore to disk. We want to protect against that.
 */
static int
ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
							void *unused)
{
	switch (state) {
	case PM_HIBERNATION_PREPARE:
		pause_graph_tracing();
		break;

	case PM_POST_HIBERNATION:
		unpause_graph_tracing();
		break;
	}
	return NOTIFY_DONE;
}

int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			trace_func_graph_ent_t entryfunc)
{
	int ret = 0;

	mutex_lock(&ftrace_sysctl_lock);

	ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
	register_pm_notifier(&ftrace_suspend_notifier);

	atomic_inc(&ftrace_graph_active);
	ret = start_graph_tracing();
	if (ret) {
		atomic_dec(&ftrace_graph_active);
		goto out;
	}

	ftrace_graph_return = retfunc;
	ftrace_graph_entry = entryfunc;

	ftrace_startup(FTRACE_START_FUNC_RET);

out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
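
/*
 * A minimal caller sketch (hypothetical names): the entry handler
 * returns nonzero when a call should be traced, and the return
 * handler fires when the traced function returns.
 *
 *	static int my_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;	// trace this call
 *	}
 *
 *	static void my_return(struct ftrace_graph_ret *trace)
 *	{
 *		// trace->rettime - trace->calltime approximates
 *		// the time spent in the function
 *	}
 *
 *	register_ftrace_graph(my_return, my_entry);
 *	...
 *	unregister_ftrace_graph();
 */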
void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_sysctl_lock);

	atomic_dec(&ftrace_graph_active);
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(FTRACE_STOP_FUNC_RET);
	unregister_pm_notifier(&ftrace_suspend_notifier);

	mutex_unlock(&ftrace_sysctl_lock);
}

/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	if (atomic_read(&ftrace_graph_active)) {
		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
		if (!t->ret_stack)
			return;
		t->curr_ret_stack = -1;
		atomic_set(&t->tracing_graph_pause, 0);
		atomic_set(&t->trace_overrun, 0);
	} else
		t->ret_stack = NULL;
}

void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack	*ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}

void ftrace_graph_stop(void)
{
	ftrace_stop();
}
#endif