ftrace.c

/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"
#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* when ftrace_pid_trace >= 0, only the task with this pid is traced */
static int ftrace_pid_trace = -1;

/* Quick disabling of function tracer. */
int function_trace_stop;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);
static DEFINE_MUTEX(ftrace_start_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}
static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
	if (current->pid != ftrace_pid_trace)
		return;

	ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing.  There may be a lag before all CPUs stop
 * calling the old function.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}
#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test function_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		ftrace_func_t func;

		if (ops->next == &ftrace_list_end)
			func = ops->func;
		else
			func = ftrace_list_func;

		if (ftrace_pid_trace >= 0) {
			set_ftrace_pid_function(func);
			func = ftrace_pid_func;
		}

		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
		ftrace_trace_function = func;
#else
		__ftrace_trace_function = func;
		ftrace_trace_function = ftrace_test_stop_func;
#endif
	}

	spin_unlock(&ftrace_lock);

	return 0;
}
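/*
 * Note: the smp_wmb() in __register_ftrace_function() pairs with the
 * read_barrier_depends() calls in ftrace_list_func() above: a CPU that
 * observes the new head of ftrace_list is guaranteed to also observe
 * a valid ops->next pointer, so the list can be walked without locks.
 */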
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list->next == &ftrace_list_end) {
			ftrace_func_t func = ftrace_list->func;

			if (ftrace_pid_trace >= 0) {
				set_ftrace_pid_function(func);
				func = ftrace_pid_func;
			}
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
			ftrace_trace_function = func;
#else
			__ftrace_trace_function = func;
#endif
		}
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}
static void ftrace_update_pid_func(void)
{
	ftrace_func_t func;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	if (ftrace_trace_function == ftrace_stub)
		goto out;

	func = ftrace_trace_function;

	if (ftrace_pid_trace >= 0) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	} else {
		if (func == ftrace_pid_func)
			func = ftrace_pid_function;
	}

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	ftrace_trace_function = func;
#else
	__ftrace_trace_function = func;
#endif

 out:
	spin_unlock(&ftrace_lock);
}
#ifdef CONFIG_DYNAMIC_FTRACE
#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

/*
 * Since MCOUNT_ADDR may point to mcount itself, we do not want
 * to get confused by a reference to it in the code we are
 * patching, as we parse the objcopy output of the text section.
 * Use a variable for it instead.
 */
static unsigned long mcount_addr = MCOUNT_ADDR;

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
	FTRACE_START_FUNC_RET		= (1 << 5),
	FTRACE_STOP_FUNC_RET		= (1 << 6),
};
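/*
 * These bits are OR'd into a single command word and handed to
 * __ftrace_modify_code() under stop_machine().  For example,
 * ftrace_shutdown() may build a command such as
 *
 *	FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC
 *
 * to patch every call site back to a nop and swap in the new
 * trace function in a single pass.
 */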
static int ftrace_filtered;

static LIST_HEAD(ftrace_new_addrs);

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT		10000
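/*
 * Rough sizing (illustrative only; the real numbers depend on the
 * arch's PAGE_SIZE and sizeof(struct dyn_ftrace)): with 4096-byte
 * pages and a ~32-byte dyn_ftrace, ENTRIES_PER_PAGE is on the order
 * of 125, so the NR_TO_INIT estimate of 10000 records needs roughly
 * 80 pages.
 */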
static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;

#ifdef CONFIG_KPROBES
static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)		({ 0; })
# define unfreeze_record(rec)		({ 0; })
# define record_frozen(rec)		({ 0; })
#endif /* CONFIG_KPROBES */
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;
	int i;

	if (ftrace_disabled || !start)
		return;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if ((rec->ip >= s) && (rec->ip < e))
				ftrace_free_rec(rec);
		}
	}
	spin_unlock(&ftrace_lock);
}
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));

		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;

	list_add(&rec->list, &ftrace_new_addrs);

	return rec;
}
static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}
static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ip, fl;
	unsigned long ftrace_addr;

	ftrace_addr = (unsigned long)ftrace_caller;

	ip = rec->ip;

	/*
	 * If this record is not to be traced and
	 * it is not enabled then do nothing.
	 *
	 * If this record is not to be traced and
	 * it is enabled then disable it.
	 */
	if (rec->flags & FTRACE_FL_NOTRACE) {
		if (rec->flags & FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			return 0;

	} else if (ftrace_filtered && enable) {
		/*
		 * Filtering is on:
		 */

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		/* Record is filtered and enabled, do nothing */
		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
			return 0;

		/* Record is not filtered and is not enabled, do nothing */
		if (!fl)
			return 0;

		/* Record is not filtered but enabled, disable it */
		if (fl == FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
		/* Otherwise record is filtered but not enabled, enable it */
			rec->flags |= FTRACE_FL_ENABLED;
	} else {
		/* Disable or not filtered */

		if (enable) {
			/* if record is enabled, do nothing */
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;

			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			/* if record is not enabled, do nothing */
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;

			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	if (rec->flags & FTRACE_FL_ENABLED)
		return ftrace_make_call(rec, ftrace_addr);
	else
		return ftrace_make_nop(NULL, rec, ftrace_addr);
}
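/*
 * The filtered-enable branch above boils down to this table
 * (NOTRACE always wins and forces the record off):
 *
 *	FILTER	ENABLED		action
 *	  1	  1		nothing
 *	  0	  0		nothing
 *	  0	  1		disable
 *	  1	  0		enable
 */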
static void ftrace_replace_code(int enable)
{
	int i, failed;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/*
			 * Skip over free records and records that have
			 * failed.
			 */
			if (rec->flags & FTRACE_FL_FREE ||
			    rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_free_rec(rec);
				} else
					ftrace_bug(failed, rec->ip);
			}
		}
	}
}
static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long ip;
	int ret;

	ip = rec->ip;

	ret = ftrace_make_nop(mod, rec, mcount_addr);
	if (ret) {
		ftrace_bug(ret, ip);
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (*command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();

	return 0;
}
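/*
 * Patching live kernel text is only safe if no other CPU is executing
 * (or about to execute) the instructions being rewritten.
 * stop_machine() runs __ftrace_modify_code() with every other CPU
 * spinning with interrupts disabled, which gives us that guarantee.
 */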
static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}
static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;

static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static void ftrace_startup(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up++;
	command |= FTRACE_ENABLE_CALLS;

	ftrace_startup_enable(command);

	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up--;
	if (!ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}
static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}
static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int ftrace_update_code(struct module *mod)
{
	struct dyn_ftrace *p, *t;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		list_del_init(&p->list);

		/* convert record (i.e., patch the mcount call with a NOP) */
		if (ftrace_code_disable(mod, p)) {
			p->flags |= FTRACE_FL_CONVERTED;
			ftrace_update_cnt++;
		} else
			ftrace_free_rec(p);
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}
static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt + 1);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}
enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		} else {
			iter->idx = -1;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}
	spin_unlock(&ftrace_lock);

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;

	if (*pos > 0) {
		if (iter->idx < 0)
			return p;
		(*pos)--;
		iter->idx--;
	}

	p = t_next(m, p, pos);

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};
static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}
static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}
static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}
static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}
enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};
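/*
 * The match types map to the glob forms accepted by ftrace_match():
 *
 *	"func"		MATCH_FULL		exact symbol name
 *	"func*"		MATCH_FRONT_ONLY	symbols starting with "func"
 *	"*func"		MATCH_END_ONLY		symbols ending with "func"
 *	"*func*"	MATCH_MIDDLE_ONLY	symbols containing "func"
 */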
static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= flag;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}
static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf: the string that holds the function filter text.
 * @len: the length of the string.
 * @reset: non-zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and @reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf: the string that holds the function notrace text.
 * @len: the length of the string.
 * @reset: non-zero to reset all filters before applying this filter.
 *
 * Notrace filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and @reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}
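/*
 * Example usage (an illustrative sketch, not code from this file):
 *
 *	// trace only the scheduler functions ...
 *	ftrace_set_filter("sched_*", strlen("sched_*"), 1);
 *	// ... but never the ones ending in "_fair"
 *	ftrace_set_notrace("*_fair", strlen("*_fair"), 1);
 *
 * The same effect is available from user space by writing the
 * patterns to the set_ftrace_filter and set_ftrace_notrace files
 * created in ftrace_init_dyn_debugfs() below.
 */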
static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftrace_start_lock);
	if (ftrace_start_up && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_start_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}
static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};
static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
{
	struct dentry *entry;

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

	return 0;
}
static int ftrace_convert_nops(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_start_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code(mod);
	local_irq_restore(flags);
	mutex_unlock(&ftrace_start_lock);

	return 0;
}

void ftrace_init_module(struct module *mod,
			unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(mod, start, end);
}
extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}
#else

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(command)	do { } while (0)
# define ftrace_shutdown(command)	do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
static ssize_t
ftrace_pid_read(struct file *file, char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	if (ftrace_pid_trace >= 0)
		r = sprintf(buf, "%u\n", ftrace_pid_trace);
	else
		r = sprintf(buf, "no pid\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		 size_t cnt, loff_t *ppos)
{
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	mutex_lock(&ftrace_start_lock);
	if (val < 0) {
		/* disable pid tracing */
		if (ftrace_pid_trace < 0)
			goto out;
		ftrace_pid_trace = -1;

	} else {

		if (ftrace_pid_trace == val)
			goto out;

		ftrace_pid_trace = val;
	}

	/* update the function call */
	ftrace_update_pid_func();
	ftrace_startup_enable(0);

 out:
	mutex_unlock(&ftrace_start_lock);

	return cnt;
}
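/*
 * From user space (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	echo 1234 > /sys/kernel/debug/tracing/set_ftrace_pid  # trace pid 1234 only
 *	echo -1 > /sys/kernel/debug/tracing/set_ftrace_pid    # trace all tasks again
 *	cat /sys/kernel/debug/tracing/set_ftrace_pid          # prints "1234" or "no pid"
 */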
static struct file_operations ftrace_pid_fops = {
	.read = ftrace_pid_read,
	.write = ftrace_pid_write,
};

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	ftrace_init_dyn_debugfs(d_tracer);

	entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
				    NULL, &ftrace_pid_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_pid' entry\n");
	return 0;
}
fs_initcall(ftrace_init_debugfs);
/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way: it only flips the disable flags and
 * resets the trace functions, with no locking and no code
 * patching, so it is safe to call from atomic context.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}
/**
 * register_ftrace_function - register a function for profiling
 * @ops: ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);

	ret = __register_ftrace_function(ops);
	ftrace_startup(0);

	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
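/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *		// called on entry to every traced function
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */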
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops: ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown(0);
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static atomic_t ftrace_graph_active;

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry =
			(trace_func_graph_ent_t)ftrace_stub;

/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					* sizeof(struct ftrace_ret_stack),
					GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			t->curr_ret_stack = -1;
			/* Make sure IRQs see the -1 first: */
			barrier();
			t->ret_stack = ret_stack_list[start++];
			atomic_set(&t->trace_overrun, 0);
		}
	} while_each_thread(g, t);

 unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
 free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}
/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret;

	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
				 sizeof(struct ftrace_ret_stack *),
				 GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	kfree(ret_stack_list);
	return ret;
}
int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			  trace_func_graph_ent_t entryfunc)
{
	int ret = 0;

	mutex_lock(&ftrace_sysctl_lock);

	atomic_inc(&ftrace_graph_active);
	ret = start_graph_tracing();
	if (ret) {
		atomic_dec(&ftrace_graph_active);
		goto out;
	}

	ftrace_graph_return = retfunc;
	ftrace_graph_entry = entryfunc;

	ftrace_startup(FTRACE_START_FUNC_RET);

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
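/*
 * Illustrative pairing (hypothetical callbacks; the exact prototypes
 * are given by the trace_func_graph_ent_t and trace_func_graph_ret_t
 * typedefs in linux/ftrace.h):
 *
 *	register_ftrace_graph(my_return_hook, my_entry_hook);
 *	...
 *	unregister_ftrace_graph();
 *
 * Note the argument order: the return hook comes first, the entry
 * hook second.
 */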
void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_sysctl_lock);

	atomic_dec(&ftrace_graph_active);
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = (trace_func_graph_ent_t)ftrace_stub;
	ftrace_shutdown(FTRACE_STOP_FUNC_RET);

	mutex_unlock(&ftrace_sysctl_lock);
}

/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	if (atomic_read(&ftrace_graph_active)) {
		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
		if (!t->ret_stack)
			return;
		t->curr_ret_stack = -1;
		atomic_set(&t->trace_overrun, 0);
	} else
		t->ret_stack = NULL;
}

void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack *ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}
#endif