ftrace.c

/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"
#define FTRACE_WARN_ON(cond)		\
	do {				\
		if (WARN_ON(cond))	\
			ftrace_kill();	\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)	\
	do {				\
		if (WARN_ON_ONCE(cond))	\
			ftrace_kill();	\
	} while (0)
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Quick disabling of function tracer. */
int function_trace_stop;

/* By default, current tracing type is normal tracing. */
enum ftrace_tracing_type_t ftrace_tracing_type = FTRACE_TYPE_ENTER;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag before all callers stop
 * calling into the old function.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
}
#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test function_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included in the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For a single registered func, call it directly.
		 * For more than one func, call the chain.
		 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
#else
		if (ops->next == &ftrace_list_end)
			__ftrace_trace_function = ops->func;
		else
			__ftrace_trace_function = ftrace_list_func;
		ftrace_trace_function = ftrace_test_stop_func;
#endif
	}

	spin_unlock(&ftrace_lock);

	return 0;
}
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}
#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

/*
 * Since MCOUNT_ADDR may point to mcount itself, we do not want
 * to get it confused by reading a reference in the code as we
 * parse the objcopy output of the text section. Use a variable
 * for it instead.
 */
static unsigned long mcount_addr = MCOUNT_ADDR;

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
};
static int ftrace_filtered;

static LIST_HEAD(ftrace_new_addrs);

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
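/*
 * Illustrative arithmetic (not from the original source): on a 64-bit
 * build with 4 KB pages, struct ftrace_page's header is two words
 * (16 bytes), and if struct dyn_ftrace is 32 bytes (ip, flags and a
 * list_head), then roughly (4096 - 16) / 32 = 127 records fit per
 * page. Exact numbers depend on the architecture and config.
 */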
/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;

#ifdef CONFIG_KPROBES
static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)	({ 0; })
# define unfreeze_record(rec)	({ 0; })
# define record_frozen(rec)	({ 0; })
#endif /* CONFIG_KPROBES */
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}
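/*
 * Note on the free list (comment added for clarity): a freed record's
 * ip field is reused as the "next" pointer of a singly linked free
 * list headed by ftrace_free_records; ftrace_alloc_dyn_node() below
 * pops from this list before carving a new record out of a page.
 */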
void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;
	int i;

	if (ftrace_disabled || !start)
		return;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if ((rec->ip >= s) && (rec->ip < e))
				ftrace_free_rec(rec);
		}
	}
	spin_unlock(&ftrace_lock);
}
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}
static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;

	list_add(&rec->list, &ftrace_new_addrs);

	return rec;
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}
static void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}
static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ip, fl;
	unsigned long ftrace_addr;

#ifdef CONFIG_FUNCTION_RET_TRACER
	if (ftrace_tracing_type == FTRACE_TYPE_ENTER)
		ftrace_addr = (unsigned long)ftrace_caller;
	else
		ftrace_addr = (unsigned long)ftrace_return_caller;
#else
	ftrace_addr = (unsigned long)ftrace_caller;
#endif

	ip = rec->ip;

	/*
	 * If this record is not to be traced and
	 * it is not enabled then do nothing.
	 *
	 * If this record is not to be traced and
	 * it is enabled then disable it.
	 */
	if (rec->flags & FTRACE_FL_NOTRACE) {
		if (rec->flags & FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			return 0;

	} else if (ftrace_filtered && enable) {
		/*
		 * Filtering is on:
		 */
		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		/* Record is filtered and enabled, do nothing */
		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
			return 0;

		/* Record is not filtered and is not enabled, do nothing */
		if (!fl)
			return 0;

		/* Record is not filtered but enabled, disable it */
		if (fl == FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			/* Otherwise record is filtered but not enabled, enable it */
			rec->flags |= FTRACE_FL_ENABLED;
	} else {
		/* Disable or not filtered */
		if (enable) {
			/* if record is enabled, do nothing */
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;

			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			/* if record is not enabled, do nothing */
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;

			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	if (rec->flags & FTRACE_FL_ENABLED)
		return ftrace_make_call(rec, ftrace_addr);
	else
		return ftrace_make_nop(NULL, rec, ftrace_addr);
}
static void ftrace_replace_code(int enable)
{
	int i, failed;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/*
			 * Skip over free records and records that have
			 * failed.
			 */
			if (rec->flags & FTRACE_FL_FREE ||
			    rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_free_rec(rec);
				} else
					ftrace_bug(failed, rec->ip);
			}
		}
	}
}
static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long ip;
	int ret;

	ip = rec->ip;

	ret = ftrace_make_nop(mod, rec, mcount_addr);
	if (ret) {
		ftrace_bug(ret, ip);
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}
static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	return 0;
}

static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}
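/*
 * Note added for clarity: stop_machine() runs __ftrace_modify_code()
 * while every other CPU spins with interrupts disabled, so no CPU can
 * be executing a call site while its instruction is being rewritten.
 */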
static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;
static DEFINE_MUTEX(ftrace_start_lock);
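/*
 * Comment added for clarity: ftrace_start_up acts as a reference
 * count of ftrace_startup() calls; call sites are only patched back
 * to nops once it drops to zero in ftrace_shutdown().
 */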
static void ftrace_startup(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up++;
	command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up--;
	if (!ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}
static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}
static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int ftrace_update_code(struct module *mod)
{
	struct dyn_ftrace *p, *t;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		list_del_init(&p->list);

		/* convert record (i.e., patch the mcount call with a NOP) */
		if (ftrace_code_disable(mod, p)) {
			p->flags |= FTRACE_FL_CONVERTED;
			ftrace_update_cnt++;
		} else
			ftrace_free_rec(p);
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}
static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt + 1);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}
enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t			pos;
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||
		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||
		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||
		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||
		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}
	spin_unlock(&ftrace_lock);

	iter->pos = *pos;

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos > iter->pos)
		*pos = iter->pos;

	l = *pos;
	p = t_next(m, p, &l);

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}
static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret = 0;

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	ret = seq_printf(m, "%s\n", str);
	if (ret < 0) {
		iter->pos--;
		iter->idx--;
	}

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};
static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = 0;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}
static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}
static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = 0;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}
static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}
enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};
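/*
 * Examples added for clarity, derived from the parsing loop in
 * ftrace_match() below:
 *
 *	"schedule"	-> MATCH_FULL		(exact symbol name)
 *	"sched_*"	-> MATCH_FRONT_ONLY	(starts with "sched_")
 *	"*_lock"	-> MATCH_END_ONLY	(ends with "_lock")
 *	"*lock*"	-> MATCH_MIDDLE_ONLY	(contains "lock")
 */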
static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= flag;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
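/*
 * Behavioral note added for clarity: each call consumes at most one
 * whitespace-terminated word, hands it to ftrace_match(), and returns
 * the number of bytes consumed so callers resume at the next word. A
 * write that ends mid-word sets FTRACE_ITER_CONT so the next write
 * (or the final release) finishes that pattern.
 */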
static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}
static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}
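/*
 * Illustrative call added for clarity (not part of this file): trace
 * only functions whose names start with "sched_", dropping any
 * previous filter. Note @buf must be writable, since the glob parser
 * in ftrace_match() truncates the string in place at the '*':
 *
 *	char pat[] = "sched_*";
 *	ftrace_set_filter(pat, strlen(pat), 1);
 */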
/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace filters denote which functions should not be enabled when
 * tracing is enabled. If @buf is NULL and reset is set, all functions
 * will be enabled for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}
static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftrace_start_lock);
	if (ftrace_start_up && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_start_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}
static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};
static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

	return 0;
}

fs_initcall(ftrace_init_debugfs);
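/*
 * Usage sketch added for clarity, assuming debugfs is mounted at
 * /sys/kernel/debug and the tracing directory lives there:
 *
 *	# grep sched /sys/kernel/debug/tracing/available_filter_functions
 *	# echo 'sched_*' > /sys/kernel/debug/tracing/set_ftrace_filter
 *	# echo schedule > /sys/kernel/debug/tracing/set_ftrace_notrace
 */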
static int ftrace_convert_nops(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_start_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code(mod);
	local_irq_restore(flags);
	mutex_unlock(&ftrace_start_lock);

	return 0;
}
void ftrace_init_module(struct module *mod,
			unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(mod, start, end);
}

extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];
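/*
 * Background note added for clarity: 'gcc -pg' emits a call to mcount
 * at the start of every function; the build records the address of
 * each such call site in the __mcount_loc section, and the linker
 * provides __start_mcount_loc/__stop_mcount_loc to delimit it. At
 * boot, ftrace_init() below walks that array and patches every call
 * site to a NOP until a tracer is registered.
 */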
void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}
#else

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

# define ftrace_startup()		do { } while (0)
# define ftrace_shutdown()		do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way: it takes no locks and does no
 * synchronization, which is what makes it safe to call from
 * atomic and panic context.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}
/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);

	if (ftrace_tracing_type == FTRACE_TYPE_RETURN) {
		ret = -EBUSY;
		goto out;
	}

	ret = __register_ftrace_function(ops);
	ftrace_startup();

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
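/*
 * Illustrative sketch added for clarity (not part of this file):
 * a minimal caller. The callback receives the traced function's
 * address and its call site, and must itself be notrace. The names
 * my_callback, my_ops and my_hits are hypothetical.
 *
 *	static atomic_t my_hits;
 *
 *	static void notrace my_callback(unsigned long ip,
 *					unsigned long parent_ip)
 *	{
 *		atomic_inc(&my_hits);
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_callback,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */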
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
#ifdef CONFIG_FUNCTION_RET_TRACER

/* The callback that hooks the return of a function */
trace_function_return_t ftrace_function_return =
			(trace_function_return_t)ftrace_stub;

int register_ftrace_return(trace_function_return_t func)
{
	int ret = 0;

	mutex_lock(&ftrace_sysctl_lock);

	/*
	 * Don't launch return tracing if normal function
	 * tracing is already running.
	 */
	if (ftrace_trace_function != ftrace_stub) {
		ret = -EBUSY;
		goto out;
	}

	ftrace_tracing_type = FTRACE_TYPE_RETURN;
	ftrace_function_return = func;
	ftrace_startup();

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}

void unregister_ftrace_return(void)
{
	mutex_lock(&ftrace_sysctl_lock);

	ftrace_function_return = (trace_function_return_t)ftrace_stub;
	ftrace_shutdown();
	/* Restore normal tracing type */
	ftrace_tracing_type = FTRACE_TYPE_ENTER;

	mutex_unlock(&ftrace_sysctl_lock);
}
#endif