/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"
#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
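
/*
 * Walk ftrace_list and call every registered ftrace_ops in turn.
 * This is what gets installed as ftrace_trace_function when more
 * than one ops is registered.
 */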
static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a lag before all CPUs stop
 * calling the previous function.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;

	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
	}

	spin_unlock(&ftrace_lock);

	return 0;
}
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list == &ftrace_list_end ||
		    ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}
#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

/*
 * Since MCOUNT_ADDR may point to mcount itself, we do not want
 * to get it confused by reading a reference in the code as we
 * are parsing on objcopy output of text. Use a variable for
 * it instead.
 */
static unsigned long mcount_addr = MCOUNT_ADDR;

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
};
static int ftrace_filtered;

static LIST_HEAD(ftrace_new_addrs);

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;
#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */
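
/*
 * Freed records are chained through their ip field: rec->ip is
 * overloaded to point at the next free record, and FTRACE_FL_FREE
 * marks the entry so it can be recognized and reused later.
 */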
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;
	int i;

	if (ftrace_disabled || !start)
		return;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if ((rec->ip >= s) && (rec->ip < e))
				ftrace_free_rec(rec);
		}
	}
	spin_unlock(&ftrace_lock);
}
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}
static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (!ftrace_enabled || ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;

	list_add(&rec->list, &ftrace_new_addrs);

	return rec;
}
#define FTRACE_ADDR ((long)(ftrace_caller))

static int
__ftrace_replace_code(struct dyn_ftrace *rec,
		      unsigned char *old, unsigned char *new, int enable)
{
	unsigned long ip, fl;

	ip = rec->ip;

	if (ftrace_filtered && enable) {
		/*
		 * If filtering is on:
		 *
		 * If this record is set to be filtered and
		 * is enabled then do nothing.
		 *
		 * If this record is set to be filtered and
		 * it is not enabled, enable it.
		 *
		 * If this record is not set to be filtered
		 * and it is not enabled do nothing.
		 *
		 * If this record is set not to trace then
		 * do nothing.
		 *
		 * If this record is set not to trace and
		 * it is enabled then disable it.
		 *
		 * If this record is not set to be filtered and
		 * it is enabled, disable it.
		 */

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
				   FTRACE_FL_ENABLED);

		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
		    (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
		    !fl || (fl == FTRACE_FL_NOTRACE))
			return 0;

		/*
		 * If it is enabled disable it,
		 * otherwise enable it!
		 */
		if (fl & FTRACE_FL_ENABLED) {
			/* swap new and old */
			new = old;
			old = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags &= ~FTRACE_FL_ENABLED;
		} else {
			new = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags |= FTRACE_FL_ENABLED;
		}
	} else {
		if (enable) {
			/*
			 * If this record is set not to trace and is
			 * not enabled, do nothing.
			 */
			fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
			if (fl == FTRACE_FL_NOTRACE)
				return 0;

			new = ftrace_call_replace(ip, FTRACE_ADDR);
		} else
			old = ftrace_call_replace(ip, FTRACE_ADDR);

		if (enable) {
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;
			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;
			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	return ftrace_modify_code(ip, old, new);
}
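
/*
 * Walk every record in every ftrace_page and enable or disable its
 * call site. Records that previously faulted are skipped, and sites
 * with a live kprobe are frozen rather than modified.
 */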
static void ftrace_replace_code(int enable)
{
	int i, failed;
	unsigned char *new = NULL, *old = NULL;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	if (enable)
		old = ftrace_nop_replace();
	else
		new = ftrace_nop_replace();

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/* don't modify code that has already faulted */
			if (rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, old, new, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_free_rec(rec);
				}
			}
		}
	}
}
static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}
static int
ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	unsigned char *nop, *call;
	int ret;

	ip = rec->ip;

	nop = ftrace_nop_replace();
	call = ftrace_call_replace(ip, mcount_addr);

	ret = ftrace_modify_code(ip, call, nop);
	if (ret) {
		switch (ret) {
		case -EFAULT:
			FTRACE_WARN_ON_ONCE(1);
			pr_info("ftrace faulted on modifying ");
			print_ip_sym(ip);
			break;
		case -EINVAL:
			FTRACE_WARN_ON_ONCE(1);
			pr_info("ftrace failed to modify ");
			print_ip_sym(ip);
			print_ip_ins(" expected: ", call);
			print_ip_ins(" actual: ", (unsigned char *)ip);
			print_ip_ins(" replace: ", nop);
			printk(KERN_CONT "\n");
			break;
		case -EPERM:
			FTRACE_WARN_ON_ONCE(1);
			pr_info("ftrace faulted on writing ");
			print_ip_sym(ip);
			break;
		default:
			FTRACE_WARN_ON_ONCE(1);
			pr_info("ftrace faulted on unknown error ");
			print_ip_sym(ip);
		}

		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}
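
/*
 * __ftrace_modify_code() runs via stop_machine(), so every other CPU
 * is quiescent while the kernel text is patched. The command bitmask
 * selects which updates to perform.
 */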
static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	return 0;
}

static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}
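
/*
 * ftrace_start counts how many users want the call sites enabled:
 * the first ftrace_startup() requests FTRACE_ENABLE_CALLS, the last
 * ftrace_shutdown() requests FTRACE_DISABLE_CALLS.
 */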
static ftrace_func_t saved_ftrace_func;
static int ftrace_start;
static DEFINE_MUTEX(ftrace_start_lock);

static void ftrace_startup(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start++;
	if (ftrace_start == 1)
		command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start--;
	if (!ftrace_start)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start is true if we want ftrace running */
	if (ftrace_start)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* ftrace_start is true if ftrace is running */
	if (ftrace_start)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}
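
/*
 * Convert each newly recorded mcount call site on ftrace_new_addrs
 * into a NOP. Records that cannot be converted are returned to the
 * free list.
 */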
static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int ftrace_update_code(void)
{
	struct dyn_ftrace *p, *t;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		list_del_init(&p->list);

		/* convert record (i.e, patch mcount-call with NOP) */
		if (ftrace_code_disable(p)) {
			p->flags |= FTRACE_FL_CONVERTED;
			ftrace_update_cnt++;
		} else
			ftrace_free_rec(p);
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}
static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}
enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t			pos;
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};
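
/*
 * seq_file iterator over all dyn_ftrace records. Free entries and
 * entries that do not match the iterator's FAILURES/NOTRACE flags
 * are skipped.
 */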
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}
	spin_unlock(&ftrace_lock);

	iter->pos = *pos;

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos != iter->pos) {
		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
			;
	} else {
		l = *pos;
		p = t_next(m, p, &l);
	}

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};
static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}
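
/*
 * Clear the FILTER (enable) or NOTRACE (!enable) flag from every
 * record, resetting the corresponding filter set.
 */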
static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}
static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}
static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}
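
/*
 * Simple glob matching for filter strings:
 *
 *   MATCH_FULL         "foo"    matches only "foo"
 *   MATCH_FRONT_ONLY   "foo*"   matches anything starting with "foo"
 *   MATCH_MIDDLE_ONLY  "*foo*"  matches anything containing "foo"
 *   MATCH_END_ONLY     "*foo"   matches anything ending with "foo"
 */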
enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= flag;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}
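
/*
 * Parse a whitespace-separated list of filter strings from user
 * space. A token may span multiple writes; FTRACE_ITER_CONT marks
 * a partially read token held in the iterator buffer.
 */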
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	/* cnt is a size_t and can never be negative */
	if (!cnt)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}
static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}
static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftrace_start_lock);
	if (iter->filtered && ftrace_start && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_start_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}
static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};
static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

	return 0;
}

fs_initcall(ftrace_init_debugfs);
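
/*
 * Record every mcount call-site address between start and end, then
 * patch the new sites to NOPs via ftrace_update_code().
 */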
static int ftrace_convert_nops(unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_start_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code();
	local_irq_restore(flags);
	mutex_unlock(&ftrace_start_lock);

	return 0;
}

void ftrace_init_module(unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(start, end);
}
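
/*
 * __start_mcount_loc/__stop_mcount_loc bound the table of mcount
 * call-site addresses recorded at build time (this is what
 * CONFIG_FTRACE_MCOUNT_RECORD provides); ftrace_init() walks it to
 * seed the dyn_ftrace table.
 */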
extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(__start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}
#else

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

# define ftrace_startup()		do { } while (0)
# define ftrace_shutdown()		do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way: tracing is disabled immediately,
 * with no locking or synchronization with other CPUs.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}
/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __register_ftrace_function(ops);
	ftrace_startup();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}