ftrace.c

/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"

#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* set when tracing only a pid */
struct pid *ftrace_pid_trace;
static struct pid * const ftrace_swapper_pid = (struct pid *)1;

/* Quick disabling of function tracer. */
int function_trace_stop;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);
static DEFINE_MUTEX(ftrace_start_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
	if (!test_tsk_trace_trace(current))
		return;

	ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a lag before every CPU stops
 * calling into the old function.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}

#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test ftrace_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		ftrace_func_t func;

		if (ops->next == &ftrace_list_end)
			func = ops->func;
		else
			func = ftrace_list_func;

		if (ftrace_pid_trace) {
			set_ftrace_pid_function(func);
			func = ftrace_pid_func;
		}

		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
		ftrace_trace_function = func;
#else
		__ftrace_trace_function = func;
		ftrace_trace_function = ftrace_test_stop_func;
#endif
	}

	spin_unlock(&ftrace_lock);

	return 0;
}
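
/*
 * Note on the ordering above (an explanatory sketch, not text from the
 * original file): the smp_wmb() in __register_ftrace_function() pairs
 * with the read_barrier_depends() calls in ftrace_list_func(). The
 * writer does:
 *
 *	ops->next = ftrace_list;	// (1) new node points at old head
 *	smp_wmb();			// order (1) before (2)
 *	ftrace_list = ops;		// (2) publish the new node
 *
 * so a lockless reader that observes (2) is guaranteed, after its
 * dependency barrier, to also see the initialized ->next from (1),
 * and can never walk off the end of the list.
 */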
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list->next == &ftrace_list_end) {
			ftrace_func_t func = ftrace_list->func;

			if (ftrace_pid_trace) {
				set_ftrace_pid_function(func);
				func = ftrace_pid_func;
			}
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
			ftrace_trace_function = func;
#else
			__ftrace_trace_function = func;
#endif
		}
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}

static void ftrace_update_pid_func(void)
{
	ftrace_func_t func;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	if (ftrace_trace_function == ftrace_stub)
		goto out;

	func = ftrace_trace_function;

	if (ftrace_pid_trace) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	} else {
		if (func == ftrace_pid_func)
			func = ftrace_pid_function;
	}

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	ftrace_trace_function = func;
#else
	__ftrace_trace_function = func;
#endif

 out:
	spin_unlock(&ftrace_lock);
}

#ifdef CONFIG_DYNAMIC_FTRACE
#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

/*
 * Since MCOUNT_ADDR may point to mcount itself, we do not want
 * to get it confused by reading a reference in the code as we
 * parse the objcopy output of the text section. Use a variable
 * for it instead.
 */
static unsigned long mcount_addr = MCOUNT_ADDR;

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
	FTRACE_START_FUNC_RET		= (1 << 5),
	FTRACE_STOP_FUNC_RET		= (1 << 6),
};

static int ftrace_filtered;

static LIST_HEAD(ftrace_new_addrs);

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT		10000
static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;

#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)		({ 0; })
# define unfreeze_record(rec)		({ 0; })
# define record_frozen(rec)		({ 0; })
#endif /* CONFIG_KPROBES */

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;
	int i;

	if (ftrace_disabled || !start)
		return;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if ((rec->ip >= s) && (rec->ip < e))
				ftrace_free_rec(rec);
		}
	}
	spin_unlock(&ftrace_lock);
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;

	list_add(&rec->list, &ftrace_new_addrs);

	return rec;
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}

static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ip, fl;
	unsigned long ftrace_addr;

	ftrace_addr = (unsigned long)ftrace_caller;

	ip = rec->ip;

	/*
	 * If this record is not to be traced and
	 * it is not enabled, then do nothing.
	 *
	 * If this record is not to be traced and
	 * it is enabled, then disable it.
	 */
	if (rec->flags & FTRACE_FL_NOTRACE) {
		if (rec->flags & FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			return 0;

	} else if (ftrace_filtered && enable) {
		/*
		 * Filtering is on:
		 */

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		/* Record is filtered and enabled, do nothing */
		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
			return 0;

		/* Record is not filtered and is not enabled, do nothing */
		if (!fl)
			return 0;

		/* Record is not filtered but enabled, disable it */
		if (fl == FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
		/* Otherwise record is filtered but not enabled, enable it */
			rec->flags |= FTRACE_FL_ENABLED;
	} else {
		/* Disable or not filtered */

		if (enable) {
			/* if record is enabled, do nothing */
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;

			rec->flags |= FTRACE_FL_ENABLED;

		} else {
			/* if record is not enabled, do nothing */
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;

			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	if (rec->flags & FTRACE_FL_ENABLED)
		return ftrace_make_call(rec, ftrace_addr);
	else
		return ftrace_make_nop(NULL, rec, ftrace_addr);
}

static void ftrace_replace_code(int enable)
{
	int i, failed;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/*
			 * Skip over free records and records that have
			 * failed.
			 */
			if (rec->flags & FTRACE_FL_FREE ||
			    rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_free_rec(rec);
				} else
					ftrace_bug(failed, rec->ip);
			}
		}
	}
}

static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long ip;
	int ret;

	ip = rec->ip;

	ret = ftrace_make_nop(mod, rec, mcount_addr);
	if (ret) {
		ftrace_bug(ret, ip);
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (*command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();

	return 0;
}

static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;

static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static void ftrace_startup(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up++;
	command |= FTRACE_ENABLE_CALLS;

	ftrace_startup_enable(command);

	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up--;
	if (!ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int ftrace_update_code(struct module *mod)
{
	struct dyn_ftrace *p, *t;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		list_del_init(&p->list);

		/* convert record (i.e, patch mcount-call with NOP) */
		if (ftrace_code_disable(mod, p)) {
			p->flags |= FTRACE_FL_CONVERTED;
			ftrace_update_cnt++;
		} else
			ftrace_free_rec(p);
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt + 1);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		} else {
			iter->idx = -1;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}
	spin_unlock(&ftrace_lock);

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;

	if (*pos > 0) {
		if (iter->idx < 0)
			return p;
		(*pos)--;
		iter->idx--;
	}

	p = t_next(m, p, pos);

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}

static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= flag;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}
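
/*
 * Examples of how the parser above classifies patterns (the function
 * names are made up for illustration):
 *
 *	"sys_open"	-> MATCH_FULL:        str must equal "sys_open"
 *	"sys_*"		-> MATCH_FRONT_ONLY:  str must start with "sys_"
 *	"*_read"	-> MATCH_END_ONLY:    str must end with "_read"
 *	"*spin*"	-> MATCH_MIDDLE_ONLY: str must contain "spin"
 *
 * Only a single leading and/or trailing '*' is understood; anything
 * fancier is treated as a literal part of the symbol name.
 */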
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}
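
/*
 * A minimal usage sketch (illustrative only; my_filter and
 * my_tracer_init are made-up names, and the #if 0 keeps the example
 * out of the build). Note the buffer must be writable, since
 * ftrace_match() chops glob patterns in place:
 */
#if 0
static unsigned char my_filter[] = "schedule*";

static int __init my_tracer_init(void)
{
	/* drop any old filters, then trace only functions matching schedule* */
	ftrace_set_filter(my_filter, sizeof(my_filter) - 1, 1);
	return 0;
}
#endif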
/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}

static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftrace_start_lock);
	if (ftrace_start_up && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_start_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}

static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static DEFINE_MUTEX(graph_lock);

int ftrace_graph_count;
unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;

static void *
g_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned long *array = m->private;
	int index = *pos;

	(*pos)++;

	if (index >= ftrace_graph_count)
		return NULL;

	return &array[index];
}

static void *g_start(struct seq_file *m, loff_t *pos)
{
	void *p = NULL;

	mutex_lock(&graph_lock);

	p = g_next(m, p, pos);

	return p;
}

static void g_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&graph_lock);
}

static int g_show(struct seq_file *m, void *v)
{
	unsigned long *ptr = v;
	char str[KSYM_SYMBOL_LEN];

	if (!ptr)
		return 0;

	kallsyms_lookup(*ptr, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations ftrace_graph_seq_ops = {
	.start = g_start,
	.next = g_next,
	.stop = g_stop,
	.show = g_show,
};

static int
ftrace_graph_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&graph_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND)) {
		ftrace_graph_count = 0;
		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &ftrace_graph_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = ftrace_graph_funcs;
		}
	} else
		file->private_data = ftrace_graph_funcs;
	mutex_unlock(&graph_lock);

	return ret;
}

static ssize_t
ftrace_graph_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static int
ftrace_set_func(unsigned long *array, int idx, char *buffer)
{
	char str[KSYM_SYMBOL_LEN];
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int found = 0;
	int i, j;

	if (ftrace_disabled)
		return -ENODEV;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
				continue;

			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			if (strcmp(str, buffer) == 0) {
				found = 1;
				for (j = 0; j < idx; j++)
					if (array[j] == rec->ip) {
						found = 0;
						break;
					}
				if (found)
					array[idx] = rec->ip;
				break;
			}
		}
	}
	spin_unlock(&ftrace_lock);

	return found ? 0 : -EINVAL;
}

static ssize_t
ftrace_graph_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	unsigned char buffer[FTRACE_BUFF_MAX+1];
	unsigned long *array;
	size_t read = 0;
	ssize_t ret;
	int index = 0;
	char ch;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&graph_lock);

	if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
		ret = -EBUSY;
		goto out;
	}

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		array = m->private;
	} else
		array = file->private_data;

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		*ppos += read;
		ret = read;
		goto out;
	}

	while (cnt && !isspace(ch)) {
		if (index < FTRACE_BUFF_MAX)
			buffer[index++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}
	buffer[index] = 0;

	/* we allow only one at a time */
	ret = ftrace_set_func(array, ftrace_graph_count, buffer);
	if (ret)
		goto out;

	ftrace_graph_count++;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&graph_lock);

	return ret;
}

static const struct file_operations ftrace_graph_fops = {
	.open = ftrace_graph_open,
	.read = ftrace_graph_read,
	.write = ftrace_graph_write,
};
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
{
	struct dentry *entry;

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
				    NULL,
				    &ftrace_graph_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_graph_function' entry\n");
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	return 0;
}
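
/*
 * The files created above are the user-space interface to dynamic
 * ftrace. A typical shell session might look like this (the paths
 * assume debugfs is mounted at /sys/kernel/debug):
 *
 *	# cd /sys/kernel/debug/tracing
 *	# grep '^vfs_' available_filter_functions
 *	# echo 'vfs_*' > set_ftrace_filter        # trace only vfs_* functions
 *	# echo vfs_fstat > set_ftrace_notrace     # ...but never vfs_fstat
 *	# cat failures                            # sites ftrace could not patch
 */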
static int ftrace_convert_nops(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_start_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code(mod);
	local_irq_restore(flags);
	mutex_unlock(&ftrace_start_lock);

	return 0;
}

void ftrace_init_module(struct module *mod,
			unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(mod, start, end);
}

extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}

#else

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(command)	do { } while (0)
# define ftrace_shutdown(command)	do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

static ssize_t
ftrace_pid_read(struct file *file, char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	if (ftrace_pid_trace == ftrace_swapper_pid)
		r = sprintf(buf, "swapper tasks\n");
	else if (ftrace_pid_trace)
		r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
	else
		r = sprintf(buf, "no pid\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static void clear_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		clear_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void set_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		set_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void clear_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	do_each_pid_task(pid, PIDTYPE_PID, p) {
		clear_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	put_pid(pid);
}

static void set_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	do_each_pid_task(pid, PIDTYPE_PID, p) {
		set_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
}

static void clear_ftrace_pid_task(struct pid **pid)
{
	if (*pid == ftrace_swapper_pid)
		clear_ftrace_swapper();
	else
		clear_ftrace_pid(*pid);

	*pid = NULL;
}

static void set_ftrace_pid_task(struct pid *pid)
{
	if (pid == ftrace_swapper_pid)
		set_ftrace_swapper();
	else
		set_ftrace_pid(pid);
}

static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		 size_t cnt, loff_t *ppos)
{
	struct pid *pid;
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	mutex_lock(&ftrace_start_lock);
	if (val < 0) {
		/* disable pid tracing */
		if (!ftrace_pid_trace)
			goto out;

		clear_ftrace_pid_task(&ftrace_pid_trace);

	} else {
		/* swapper task is special */
		if (!val) {
			pid = ftrace_swapper_pid;
			if (pid == ftrace_pid_trace)
				goto out;
		} else {
			pid = find_get_pid(val);

			if (pid == ftrace_pid_trace) {
				put_pid(pid);
				goto out;
			}
		}

		if (ftrace_pid_trace)
			clear_ftrace_pid_task(&ftrace_pid_trace);

		if (!pid)
			goto out;

		ftrace_pid_trace = pid;

		set_ftrace_pid_task(ftrace_pid_trace);
	}

	/* update the function call */
	ftrace_update_pid_func();
	ftrace_startup_enable(0);

 out:
	mutex_unlock(&ftrace_start_lock);

	return cnt;
}

static struct file_operations ftrace_pid_fops = {
	.read = ftrace_pid_read,
	.write = ftrace_pid_write,
};
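
/*
 * Usage sketch for the file backed by ftrace_pid_fops (shell commands,
 * assuming debugfs at /sys/kernel/debug), matching the val < 0,
 * val == 0 and val > 0 cases in ftrace_pid_write() above:
 *
 *	# echo 1234 > /sys/kernel/debug/tracing/set_ftrace_pid  # pid 1234 only
 *	# echo 0 > /sys/kernel/debug/tracing/set_ftrace_pid     # idle (swapper) tasks
 *	# echo -1 > /sys/kernel/debug/tracing/set_ftrace_pid    # trace everyone again
 */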
static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	ftrace_init_dyn_debugfs(d_tracer);

	entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
				    NULL, &ftrace_pid_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_pid' entry\n");
	return 0;
}
fs_initcall(ftrace_init_debugfs);

/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way: registered callbacks are left in
 * place and nothing is cleaned up, so use it only when the
 * machine is going down anyway.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);

	ret = __register_ftrace_function(ops);
	ftrace_startup(0);

	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
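
/*
 * A minimal registration sketch (illustrative only; my_calls,
 * my_trace_func and my_ops are made-up names, and the #if 0 keeps the
 * example out of the build). Per the kernel-doc above, the callback
 * must be notrace or it recurses through mcount:
 */
#if 0
static atomic_t my_calls;

static void notrace my_trace_func(unsigned long ip, unsigned long parent_ip)
{
	/* runs for (nearly) every function call in the kernel: keep it cheap */
	atomic_inc(&my_calls);
}

static struct ftrace_ops my_ops __read_mostly = {
	.func = my_trace_func,
};

static int __init my_tracer_init(void)
{
	return register_ftrace_function(&my_ops);
}

static void __exit my_tracer_exit(void)
{
	unregister_ftrace_function(&my_ops);
}
#endif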
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown(0);
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static atomic_t ftrace_graph_active;

int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;

/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					* sizeof(struct ftrace_ret_stack),
					GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			t->curr_ret_stack = -1;
			/* Make sure IRQs see the -1 first: */
			barrier();
			t->ret_stack = ret_stack_list[start++];
			atomic_set(&t->trace_overrun, 0);
		}
	} while_each_thread(g, t);

unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}

/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret;

	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
				sizeof(struct ftrace_ret_stack *),
				GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	kfree(ret_stack_list);
	return ret;
}

int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			trace_func_graph_ent_t entryfunc)
{
	int ret = 0;

	mutex_lock(&ftrace_sysctl_lock);

	atomic_inc(&ftrace_graph_active);
	ret = start_graph_tracing();
	if (ret) {
		atomic_dec(&ftrace_graph_active);
		goto out;
	}

	ftrace_graph_return = retfunc;
	ftrace_graph_entry = entryfunc;

	ftrace_startup(FTRACE_START_FUNC_RET);

out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
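
/*
 * Registration sketch (illustrative only; the callback names are made
 * up and the #if 0 keeps the example out of the build). The entry hook
 * returns nonzero to keep tracing into the function and 0 to skip it;
 * the return hook sees the matching exit:
 */
#if 0
static int notrace my_graph_entry(struct ftrace_graph_ent *trace)
{
	return 1;	/* record every function entry */
}

static void notrace my_graph_return(struct ftrace_graph_ret *trace)
{
	/* the ret record carries timestamps for computing the duration */
}

static int __init my_graph_init(void)
{
	return register_ftrace_graph(my_graph_return, my_graph_entry);
}
#endif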
void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_sysctl_lock);

	atomic_dec(&ftrace_graph_active);
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(FTRACE_STOP_FUNC_RET);

	mutex_unlock(&ftrace_sysctl_lock);
}

/* Allocate a return stack for a newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	if (atomic_read(&ftrace_graph_active)) {
		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
		if (!t->ret_stack)
			return;
		t->curr_ret_stack = -1;
		atomic_set(&t->trace_overrun, 0);
	} else
		t->ret_stack = NULL;
}

void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack *ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}

void ftrace_graph_stop(void)
{
	ftrace_stop();
}
#endif
  1701. #endif