/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"

#define FTRACE_WARN_ON(cond)			\
do {						\
	if (WARN_ON(cond))			\
		ftrace_kill();			\
} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
do {						\
	if (WARN_ON_ONCE(cond))			\
		ftrace_kill();			\
} while (0)

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* set when tracing only a pid */
struct pid *ftrace_pid_trace;
static struct pid * const ftrace_swapper_pid = &init_struct_pid;

/* Quick disabling of function tracer. */
int function_trace_stop;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);
static DEFINE_MUTEX(ftrace_start_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
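
/*
 * Walk the list of registered ftrace_ops and invoke each callback in
 * turn.  This is installed as the trace function whenever more than
 * one ftrace_ops is registered.
 */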
static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}
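
/*
 * Stand-in trace function used while pid filtering is active: only
 * tasks with their trace flag set are passed on to
 * ftrace_pid_function.
 */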
static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
	if (!test_tsk_trace_trace(current))
		return;

	ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing.  There may be a short lag before all callers
 * actually stop being traced.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}

#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test function_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif
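
/*
 * Add ops to the head of the callback list and pick the fastest
 * dispatch path: the callback directly for a single ops, or the
 * list walker when more than one is registered.
 */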
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;

	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included in the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		ftrace_func_t func;

		if (ops->next == &ftrace_list_end)
			func = ops->func;
		else
			func = ftrace_list_func;

		if (ftrace_pid_trace) {
			set_ftrace_pid_function(func);
			func = ftrace_pid_func;
		}

		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
		ftrace_trace_function = func;
#else
		__ftrace_trace_function = func;
		ftrace_trace_function = ftrace_test_stop_func;
#endif
	}

	spin_unlock(&ftrace_lock);

	return 0;
}
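
/*
 * Remove ops from the callback list.  If only one callback remains
 * afterwards, switch back to calling it directly.
 */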
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list->next == &ftrace_list_end) {
			ftrace_func_t func = ftrace_list->func;

			if (ftrace_pid_trace) {
				set_ftrace_pid_function(func);
				func = ftrace_pid_func;
			}
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
			ftrace_trace_function = func;
#else
			__ftrace_trace_function = func;
#endif
		}
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}
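
/*
 * Re-select the trace function after the traced pid has changed,
 * wrapping or unwrapping it with the pid filter as needed.
 */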
static void ftrace_update_pid_func(void)
{
	ftrace_func_t func;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	if (ftrace_trace_function == ftrace_stub)
		goto out;

	func = ftrace_trace_function;

	if (ftrace_pid_trace) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	} else {
		if (func == ftrace_pid_func)
			func = ftrace_pid_function;
	}

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	ftrace_trace_function = func;
#else
	__ftrace_trace_function = func;
#endif

 out:
	spin_unlock(&ftrace_lock);
}

#ifdef CONFIG_DYNAMIC_FTRACE
#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
	FTRACE_START_FUNC_RET		= (1 << 5),
	FTRACE_STOP_FUNC_RET		= (1 << 6),
};

static int ftrace_filtered;

static LIST_HEAD(ftrace_new_addrs);

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	int			index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;

#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)		({ 0; })
# define unfreeze_record(rec)		({ 0; })
# define record_frozen(rec)		({ 0; })
#endif /* CONFIG_KPROBES */
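
/*
 * Return a record to the free list.  Freed records are chained
 * through their ip field and marked with FTRACE_FL_FREE.
 */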
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;
	int i;

	if (ftrace_disabled || !start)
		return;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if ((rec->ip >= s) && (rec->ip < e))
				ftrace_free_rec(rec);
		}
	}
	spin_unlock(&ftrace_lock);
}
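
/*
 * Grab a record for a new mcount call site, reusing a freed record
 * when one is available and allocating a fresh page otherwise.
 */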
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;

	list_add(&rec->list, &ftrace_new_addrs);

	return rec;
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}

static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ip, fl;
	unsigned long ftrace_addr;

	ftrace_addr = (unsigned long)FTRACE_ADDR;

	ip = rec->ip;

	/*
	 * If this record is not to be traced and
	 * it is not enabled then do nothing.
	 *
	 * If this record is not to be traced and
	 * it is enabled then disable it.
	 */
	if (rec->flags & FTRACE_FL_NOTRACE) {
		if (rec->flags & FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			return 0;

	} else if (ftrace_filtered && enable) {
		/*
		 * Filtering is on:
		 */
		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		/* Record is filtered and enabled, do nothing */
		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
			return 0;

		/* Record is not filtered and is not enabled, do nothing */
		if (!fl)
			return 0;

		/* Record is not filtered but enabled, disable it */
		if (fl == FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			/* Otherwise record is filtered but not enabled, enable it */
			rec->flags |= FTRACE_FL_ENABLED;
	} else {
		/* Disable or not filtered */
		if (enable) {
			/* if record is enabled, do nothing */
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;

			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			/* if record is not enabled, do nothing */
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;

			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	if (rec->flags & FTRACE_FL_ENABLED)
		return ftrace_make_call(rec, ftrace_addr);
	else
		return ftrace_make_nop(NULL, rec, ftrace_addr);
}
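
/*
 * Walk every known record and enable or disable its mcount call
 * site according to the current filter state.
 */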
static void ftrace_replace_code(int enable)
{
	int i, failed;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/*
			 * Skip over free records and records that have
			 * failed.
			 */
			if (rec->flags & FTRACE_FL_FREE ||
			    rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_free_rec(rec);
				} else
					ftrace_bug(failed, rec->ip);
			}
		}
	}
}
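
/*
 * Turn a record's mcount call site into a NOP.  Returns 1 on success
 * and 0 on failure, in which case the record is marked FAILED.
 */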
static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long ip;
	int ret;

	ip = rec->ip;

	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
	if (ret) {
		ftrace_bug(ret, ip);
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (*command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();

	return 0;
}
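
/*
 * Patching kernel text is only safe while every other CPU is held
 * off, so run the update via stop_machine().
 */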
static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;

static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static void ftrace_startup(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up++;
	command |= FTRACE_ENABLE_CALLS;

	ftrace_startup_enable(command);

	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up--;
	if (!ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;
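
/*
 * Convert every newly recorded mcount call site to a NOP, keeping
 * count of how many records were converted and how long it took.
 */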
static int ftrace_update_code(struct module *mod)
{
	struct dyn_ftrace *p, *t;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		list_del_init(&p->list);

		/* convert record (i.e., patch mcount-call with NOP) */
		if (ftrace_code_disable(mod, p)) {
			p->flags |= FTRACE_FL_CONVERTED;
			ftrace_update_cnt++;
		} else
			ftrace_free_rec(p);
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt + 1);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	struct ftrace_page	*pg;
	int			idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		} else {
			iter->idx = -1;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}
	spin_unlock(&ftrace_lock);

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;

	if (*pos > 0) {
		if (iter->idx < 0)
			return p;
		(*pos)--;
		iter->idx--;
	}

	p = t_next(m, p, pos);

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}
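
/*
 * Clear the filter (or notrace) flag from every record; used when a
 * filter file is opened for a truncating write.
 */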
static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};
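
/*
 * Set or clear the filter/notrace flag on every record whose symbol
 * matches @buff.  A leading '!' negates the match; a single '*'
 * turns the pattern into a front, middle, or end glob.
 */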
static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;
	int not = 0;

	if (buff[0] == '!') {
		not = 1;
		buff++;
		len--;
	}

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched) {
				if (not)
					rec->flags &= ~flag;
				else
					rec->flags |= flag;
			}
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}

static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	/* cnt is unsigned, so only the zero check is meaningful */
	if (!cnt)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}

static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftrace_start_lock);
	if (ftrace_start_up && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_start_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}

static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static DEFINE_MUTEX(graph_lock);

int ftrace_graph_count;
unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;

static void *
g_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned long *array = m->private;
	int index = *pos;

	(*pos)++;

	if (index >= ftrace_graph_count)
		return NULL;

	return &array[index];
}

static void *g_start(struct seq_file *m, loff_t *pos)
{
	void *p = NULL;

	mutex_lock(&graph_lock);

	p = g_next(m, p, pos);

	return p;
}

static void g_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&graph_lock);
}

static int g_show(struct seq_file *m, void *v)
{
	unsigned long *ptr = v;
	char str[KSYM_SYMBOL_LEN];

	if (!ptr)
		return 0;

	kallsyms_lookup(*ptr, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations ftrace_graph_seq_ops = {
	.start = g_start,
	.next = g_next,
	.stop = g_stop,
	.show = g_show,
};

static int
ftrace_graph_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&graph_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND)) {
		ftrace_graph_count = 0;
		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &ftrace_graph_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = ftrace_graph_funcs;
		}
	} else
		file->private_data = ftrace_graph_funcs;
	mutex_unlock(&graph_lock);

	return ret;
}

static ssize_t
ftrace_graph_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}
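
/*
 * Look up @buffer among the recorded functions and store its address
 * at @array[@idx], rejecting functions that are already present.
 */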
static int
ftrace_set_func(unsigned long *array, int idx, char *buffer)
{
	char str[KSYM_SYMBOL_LEN];
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int found = 0;
	int i, j;

	if (ftrace_disabled)
		return -ENODEV;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
				continue;

			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			if (strcmp(str, buffer) == 0) {
				found = 1;
				for (j = 0; j < idx; j++)
					if (array[j] == rec->ip) {
						found = 0;
						break;
					}
				if (found)
					array[idx] = rec->ip;
				break;
			}
		}
	}
	spin_unlock(&ftrace_lock);

	return found ? 0 : -EINVAL;
}

static ssize_t
ftrace_graph_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	unsigned char buffer[FTRACE_BUFF_MAX+1];
	unsigned long *array;
	size_t read = 0;
	ssize_t ret;
	int index = 0;
	char ch;

	/* cnt is unsigned, so only the zero check is meaningful */
	if (!cnt)
		return 0;

	mutex_lock(&graph_lock);

	if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
		ret = -EBUSY;
		goto out;
	}

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		array = m->private;
	} else
		array = file->private_data;

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		*ppos += read;
		ret = read;
		goto out;
	}

	while (cnt && !isspace(ch)) {
		if (index < FTRACE_BUFF_MAX)
			buffer[index++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}
	buffer[index] = 0;

	/* we allow only one at a time */
	ret = ftrace_set_func(array, ftrace_graph_count, buffer);
	if (ret)
		goto out;

	ftrace_graph_count++;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&graph_lock);

	return ret;
}

static const struct file_operations ftrace_graph_fops = {
	.open = ftrace_graph_open,
	.read = ftrace_graph_read,
	.write = ftrace_graph_write,
};
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
{
	struct dentry *entry;

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
				    NULL,
				    &ftrace_graph_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_graph_function' entry\n");
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	return 0;
}
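
/*
 * Record every mcount call site listed between @start and @end and
 * convert the recorded sites to NOPs.
 */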
static int ftrace_convert_nops(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_start_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code(mod);
	local_irq_restore(flags);
	mutex_unlock(&ftrace_start_lock);

	return 0;
}

void ftrace_init_module(struct module *mod,
			unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(mod, start, end);
}

extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}

#else

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(command)	do { } while (0)
# define ftrace_shutdown(command)	do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

static ssize_t
ftrace_pid_read(struct file *file, char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	if (ftrace_pid_trace == ftrace_swapper_pid)
		r = sprintf(buf, "swapper tasks\n");
	else if (ftrace_pid_trace)
		r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
	else
		r = sprintf(buf, "no pid\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static void clear_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		clear_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void set_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		set_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void clear_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	do_each_pid_task(pid, PIDTYPE_PID, p) {
		clear_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	put_pid(pid);
}

static void set_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	do_each_pid_task(pid, PIDTYPE_PID, p) {
		set_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
}

static void clear_ftrace_pid_task(struct pid **pid)
{
	if (*pid == ftrace_swapper_pid)
		clear_ftrace_swapper();
	else
		clear_ftrace_pid(*pid);

	*pid = NULL;
}

static void set_ftrace_pid_task(struct pid *pid)
{
	if (pid == ftrace_swapper_pid)
		set_ftrace_swapper();
	else
		set_ftrace_pid(pid);
}
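
/*
 * Parse a pid written by user space: a negative value disables pid
 * tracing, 0 selects the per-cpu idle (swapper) tasks, and any other
 * value selects that pid.
 */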
static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		 size_t cnt, loff_t *ppos)
{
	struct pid *pid;
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	mutex_lock(&ftrace_start_lock);
	if (val < 0) {
		/* disable pid tracing */
		if (!ftrace_pid_trace)
			goto out;

		clear_ftrace_pid_task(&ftrace_pid_trace);

	} else {
		/* swapper task is special */
		if (!val) {
			pid = ftrace_swapper_pid;
			if (pid == ftrace_pid_trace)
				goto out;
		} else {
			pid = find_get_pid(val);

			if (pid == ftrace_pid_trace) {
				put_pid(pid);
				goto out;
			}
		}

		if (ftrace_pid_trace)
			clear_ftrace_pid_task(&ftrace_pid_trace);

		if (!pid)
			goto out;

		ftrace_pid_trace = pid;

		set_ftrace_pid_task(ftrace_pid_trace);
	}

	/* update the function call */
	ftrace_update_pid_func();
	ftrace_startup_enable(0);

 out:
	mutex_unlock(&ftrace_start_lock);

	return cnt;
}

static struct file_operations ftrace_pid_fops = {
	.read = ftrace_pid_read,
	.write = ftrace_pid_write,
};

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	ftrace_init_dyn_debugfs(d_tracer);

	entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
				    NULL, &ftrace_pid_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_pid' entry\n");
	return 0;
}
fs_initcall(ftrace_init_debugfs);

/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. Because it is meant for the panic
 * path, it is safe to call even from atomic context.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);

	ret = __register_ftrace_function(ops);
	ftrace_startup(0);

	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown(0);
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static atomic_t ftrace_graph_active;

int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;

/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					* sizeof(struct ftrace_ret_stack),
					GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			t->curr_ret_stack = -1;
			/* Make sure IRQs see the -1 first: */
			barrier();
			t->ret_stack = ret_stack_list[start++];
			atomic_set(&t->tracing_graph_pause, 0);
			atomic_set(&t->trace_overrun, 0);
		}
	} while_each_thread(g, t);

 unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
 free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}

/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret;

	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
				 sizeof(struct ftrace_ret_stack *),
				 GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	kfree(ret_stack_list);
	return ret;
}
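
/*
 * Hook the entry and return callbacks into the function graph tracer
 * and start it; on failure the active count is rolled back.
 */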
int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			trace_func_graph_ent_t entryfunc)
{
	int ret = 0;

	mutex_lock(&ftrace_sysctl_lock);

	atomic_inc(&ftrace_graph_active);
	ret = start_graph_tracing();
	if (ret) {
		atomic_dec(&ftrace_graph_active);
		goto out;
	}

	ftrace_graph_return = retfunc;
	ftrace_graph_entry = entryfunc;

	ftrace_startup(FTRACE_START_FUNC_RET);

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}

void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_sysctl_lock);

	atomic_dec(&ftrace_graph_active);
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = ftrace_graph_entry_stub;

	ftrace_shutdown(FTRACE_STOP_FUNC_RET);

	mutex_unlock(&ftrace_sysctl_lock);
}

/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	if (atomic_read(&ftrace_graph_active)) {
		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
		if (!t->ret_stack)
			return;
		t->curr_ret_stack = -1;
		atomic_set(&t->tracing_graph_pause, 0);
		atomic_set(&t->trace_overrun, 0);
	} else
		t->ret_stack = NULL;
}

void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack	*ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}

void ftrace_graph_stop(void)
{
	ftrace_stop();
}
#endif