ftrace.c

/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"
#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* set when tracing only a pid */
struct pid *ftrace_pid_trace;
static struct pid * const ftrace_swapper_pid = &init_struct_pid;

/* Quick disabling of function tracer. */
int function_trace_stop;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);
static DEFINE_MUTEX(ftrace_start_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}
static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
	if (!test_tsk_trace_trace(current))
		return;

	ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag between this call and the moment
 * the last CPU stops calling the old function.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}
#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test function_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		ftrace_func_t func;

		if (ops->next == &ftrace_list_end)
			func = ops->func;
		else
			func = ftrace_list_func;

		if (ftrace_pid_trace) {
			set_ftrace_pid_function(func);
			func = ftrace_pid_func;
		}

		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
		ftrace_trace_function = func;
#else
		__ftrace_trace_function = func;
		ftrace_trace_function = ftrace_test_stop_func;
#endif
	}

	spin_unlock(&ftrace_lock);

	return 0;
}
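
/*
 * Ordering sketch for the lockless list walk (an illustration only,
 * not a change in behaviour): the smp_wmb() in
 * __register_ftrace_function() pairs with the read_barrier_depends()
 * in ftrace_list_func():
 *
 *	writer (registration)		reader (tracing hot path)
 *	---------------------		-------------------------
 *	ops->next = ftrace_list;	op = ftrace_list;
 *	smp_wmb();			read_barrier_depends();
 *	ftrace_list = ops;		op->func(ip, parent_ip);
 *
 * Once a reader observes the new list head, it is guaranteed to also
 * observe a fully initialized ->next pointer behind it.
 */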
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list->next == &ftrace_list_end) {
			ftrace_func_t func = ftrace_list->func;

			if (ftrace_pid_trace) {
				set_ftrace_pid_function(func);
				func = ftrace_pid_func;
			}
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
			ftrace_trace_function = func;
#else
			__ftrace_trace_function = func;
#endif
		}
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}
static void ftrace_update_pid_func(void)
{
	ftrace_func_t func;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	if (ftrace_trace_function == ftrace_stub)
		goto out;

	func = ftrace_trace_function;

	if (ftrace_pid_trace) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	} else {
		if (func == ftrace_pid_func)
			func = ftrace_pid_function;
	}

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	ftrace_trace_function = func;
#else
	__ftrace_trace_function = func;
#endif

 out:
	spin_unlock(&ftrace_lock);
}
#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

/*
 * Since MCOUNT_ADDR may point to mcount itself, we do not want
 * to get it confused by reading a reference in the code as we
 * are parsing the objcopy output of the text section. Use a
 * variable for it instead.
 */
static unsigned long mcount_addr = MCOUNT_ADDR;

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
	FTRACE_START_FUNC_RET		= (1 << 5),
	FTRACE_STOP_FUNC_RET		= (1 << 6),
};
static int ftrace_filtered;

static LIST_HEAD(ftrace_new_addrs);

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT		10000
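
/*
 * Rough sizing example (illustrative only; the exact numbers depend
 * on the arch's struct dyn_ftrace layout): with 4 KB pages and a
 * record size of, say, 16-32 bytes, ENTRIES_PER_PAGE comes out to
 * roughly 125-250 records, so the NR_TO_INIT estimate of 10000
 * records needs on the order of 40-80 pages up front in
 * ftrace_dyn_table_alloc().
 */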
static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;

#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;
	int i;

	if (ftrace_disabled || !start)
		return;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if ((rec->ip >= s) && (rec->ip < e))
				ftrace_free_rec(rec);
		}
	}
	spin_unlock(&ftrace_lock);
}
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;

	list_add(&rec->list, &ftrace_new_addrs);

	return rec;
}
static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}
static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ip, fl;
	unsigned long ftrace_addr;

	ftrace_addr = (unsigned long)ftrace_caller;

	ip = rec->ip;

	/*
	 * If this record is not to be traced and
	 * it is not enabled then do nothing.
	 *
	 * If this record is not to be traced and
	 * it is enabled then disable it.
	 */
	if (rec->flags & FTRACE_FL_NOTRACE) {
		if (rec->flags & FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			return 0;

	} else if (ftrace_filtered && enable) {
		/*
		 * Filtering is on:
		 */

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		/* Record is filtered and enabled, do nothing */
		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
			return 0;

		/* Record is not filtered and is not enabled, do nothing */
		if (!fl)
			return 0;

		/* Record is not filtered but enabled, disable it */
		if (fl == FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
		/* Otherwise record is filtered but not enabled, enable it */
			rec->flags |= FTRACE_FL_ENABLED;
	} else {
		/* Disable or not filtered */

		if (enable) {
			/* if record is enabled, do nothing */
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;

			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			/* if record is not enabled, do nothing */
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;

			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	if (rec->flags & FTRACE_FL_ENABLED)
		return ftrace_make_call(rec, ftrace_addr);
	else
		return ftrace_make_nop(NULL, rec, ftrace_addr);
}
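
/*
 * Summary of the state transitions above when filtering is on
 * (enable && ftrace_filtered), derived directly from the code:
 *
 *	FILTER	ENABLED		action
 *	------	-------		------
 *	  set	  set		nothing (already tracing)
 *	  set	 clear		enable (patch in the call)
 *	 clear	  set		disable (patch in the nop)
 *	 clear	 clear		nothing
 *
 * NOTRACE takes precedence over all of the above: a notrace record
 * is only ever disabled.
 */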
static void ftrace_replace_code(int enable)
{
	int i, failed;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/*
			 * Skip over free records and records that have
			 * failed.
			 */
			if (rec->flags & FTRACE_FL_FREE ||
			    rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_free_rec(rec);
				} else
					ftrace_bug(failed, rec->ip);
			}
		}
	}
}
static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long ip;
	int ret;

	ip = rec->ip;

	ret = ftrace_make_nop(mod, rec, mcount_addr);
	if (ret) {
		ftrace_bug(ret, ip);
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (*command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();

	return 0;
}

static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}
static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;

static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static void ftrace_startup(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up++;
	command |= FTRACE_ENABLE_CALLS;

	ftrace_startup_enable(command);

	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up--;
	if (!ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}
static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int ftrace_update_code(struct module *mod)
{
	struct dyn_ftrace *p, *t;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		list_del_init(&p->list);

		/* convert record (i.e, patch mcount-call with NOP) */
		if (ftrace_code_disable(mod, p)) {
			p->flags |= FTRACE_FL_CONVERTED;
			ftrace_update_cnt++;
		} else
			ftrace_free_rec(p);
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}
static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt + 1);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}
enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		} else {
			iter->idx = -1;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}
	spin_unlock(&ftrace_lock);

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;

	if (*pos > 0) {
		if (iter->idx < 0)
			return p;
		(*pos)--;
		iter->idx--;
	}

	p = t_next(m, p, pos);

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};
static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}
static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}
static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}
static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};
static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;
	int not = 0;

	if (buff[0] == '!') {
		not = 1;
		buff++;
		len--;
	}

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched) {
				if (not)
					rec->flags &= ~flag;
				else
					rec->flags |= flag;
			}
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}
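
/*
 * Pattern examples, as parsed by the loop above (a quick reference,
 * not new behaviour; "sched" is just an arbitrary sample string):
 *
 *	"sched_switch"	MATCH_FULL		exact symbol name
 *	"sched_*"	MATCH_FRONT_ONLY	prefix match
 *	"*_switch"	MATCH_END_ONLY		suffix match
 *	"*sched*"	MATCH_MIDDLE_ONLY	substring match
 *	"!sched_*"	a leading '!' clears the flag instead of setting it
 */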
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt)	/* cnt is a size_t; it can never be negative */
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}
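
/*
 * Usage sketch (illustrative only; the caller and the function name
 * "schedule" are hypothetical, not part of this file). A tracer that
 * only wants the scheduler entry point traced could do:
 *
 *	ftrace_set_filter("schedule", strlen("schedule"), 1);
 *
 * The reset argument of 1 first clears any previously set filters;
 * wildcards such as "sched_*" work here too, via ftrace_match().
 */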
/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}
static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftrace_start_lock);
	if (ftrace_start_up && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_start_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}
static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static DEFINE_MUTEX(graph_lock);

int ftrace_graph_count;
unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;

static void *
g_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned long *array = m->private;
	int index = *pos;

	(*pos)++;

	if (index >= ftrace_graph_count)
		return NULL;

	return &array[index];
}

static void *g_start(struct seq_file *m, loff_t *pos)
{
	void *p = NULL;

	mutex_lock(&graph_lock);

	p = g_next(m, p, pos);

	return p;
}

static void g_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&graph_lock);
}

static int g_show(struct seq_file *m, void *v)
{
	unsigned long *ptr = v;
	char str[KSYM_SYMBOL_LEN];

	if (!ptr)
		return 0;

	kallsyms_lookup(*ptr, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations ftrace_graph_seq_ops = {
	.start = g_start,
	.next = g_next,
	.stop = g_stop,
	.show = g_show,
};
static int
ftrace_graph_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&graph_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND)) {
		ftrace_graph_count = 0;
		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &ftrace_graph_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = ftrace_graph_funcs;
		}
	} else
		file->private_data = ftrace_graph_funcs;
	mutex_unlock(&graph_lock);

	return ret;
}

static ssize_t
ftrace_graph_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}
static int
ftrace_set_func(unsigned long *array, int idx, char *buffer)
{
	char str[KSYM_SYMBOL_LEN];
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int found = 0;
	int i, j;

	if (ftrace_disabled)
		return -ENODEV;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
				continue;

			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			if (strcmp(str, buffer) == 0) {
				found = 1;
				for (j = 0; j < idx; j++)
					if (array[j] == rec->ip) {
						found = 0;
						break;
					}
				if (found)
					array[idx] = rec->ip;
				break;
			}
		}
	}
	spin_unlock(&ftrace_lock);

	return found ? 0 : -EINVAL;
}
static ssize_t
ftrace_graph_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	unsigned char buffer[FTRACE_BUFF_MAX+1];
	unsigned long *array;
	size_t read = 0;
	ssize_t ret;
	int index = 0;
	char ch;

	if (!cnt)	/* cnt is a size_t; it can never be negative */
		return 0;

	mutex_lock(&graph_lock);

	if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
		ret = -EBUSY;
		goto out;
	}

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		array = m->private;
	} else
		array = file->private_data;

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		*ppos += read;
		ret = read;
		goto out;
	}

	while (cnt && !isspace(ch)) {
		if (index < FTRACE_BUFF_MAX)
			buffer[index++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}
	buffer[index] = 0;

	/* we allow only one at a time */
	ret = ftrace_set_func(array, ftrace_graph_count, buffer);
	if (ret)
		goto out;

	ftrace_graph_count++;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&graph_lock);

	return ret;
}
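
/*
 * From user space, set_graph_function is driven one symbol per write,
 * matching the "one at a time" rule enforced above. A hypothetical
 * session (paths assume debugfs mounted at /sys/kernel/debug):
 *
 *	# echo sys_open > /sys/kernel/debug/tracing/set_graph_function
 *	# echo sys_read >> /sys/kernel/debug/tracing/set_graph_function
 *
 * Opening the file for writing without O_APPEND clears the list first
 * (see ftrace_graph_open()).
 */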
static const struct file_operations ftrace_graph_fops = {
	.open = ftrace_graph_open,
	.read = ftrace_graph_read,
	.write = ftrace_graph_write,
};
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
{
	struct dentry *entry;

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* 0644, not 0444: ftrace_graph_fops has a write method */
	entry = debugfs_create_file("set_graph_function", 0644, d_tracer,
				    NULL,
				    &ftrace_graph_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_graph_function' entry\n");
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	return 0;
}
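
/*
 * With debugfs mounted in the usual place (an assumption; the mount
 * point is up to the admin), the files created above show up as:
 *
 *	/sys/kernel/debug/tracing/available_filter_functions
 *	/sys/kernel/debug/tracing/failures
 *	/sys/kernel/debug/tracing/set_ftrace_filter
 *	/sys/kernel/debug/tracing/set_ftrace_notrace
 *	/sys/kernel/debug/tracing/set_graph_function
 */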
static int ftrace_convert_nops(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_start_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code(mod);
	local_irq_restore(flags);
	mutex_unlock(&ftrace_start_lock);

	return 0;
}

void ftrace_init_module(struct module *mod,
			unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(mod, start, end);
}
extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}
#else

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(command)	do { } while (0)
# define ftrace_shutdown(command)	do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
static ssize_t
ftrace_pid_read(struct file *file, char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	if (ftrace_pid_trace == ftrace_swapper_pid)
		r = sprintf(buf, "swapper tasks\n");
	else if (ftrace_pid_trace)
		r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
	else
		r = sprintf(buf, "no pid\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
static void clear_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		clear_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void set_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		set_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void clear_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		clear_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();

	put_pid(pid);
}

static void set_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		set_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();
}

static void clear_ftrace_pid_task(struct pid **pid)
{
	if (*pid == ftrace_swapper_pid)
		clear_ftrace_swapper();
	else
		clear_ftrace_pid(*pid);

	*pid = NULL;
}

static void set_ftrace_pid_task(struct pid *pid)
{
	if (pid == ftrace_swapper_pid)
		set_ftrace_swapper();
	else
		set_ftrace_pid(pid);
}
static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		 size_t cnt, loff_t *ppos)
{
	struct pid *pid;
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	mutex_lock(&ftrace_start_lock);
	if (val < 0) {
		/* disable pid tracing */
		if (!ftrace_pid_trace)
			goto out;

		clear_ftrace_pid_task(&ftrace_pid_trace);

	} else {
		/* swapper task is special */
		if (!val) {
			pid = ftrace_swapper_pid;
			if (pid == ftrace_pid_trace)
				goto out;
		} else {
			pid = find_get_pid(val);

			if (pid == ftrace_pid_trace) {
				put_pid(pid);
				goto out;
			}
		}

		if (ftrace_pid_trace)
			clear_ftrace_pid_task(&ftrace_pid_trace);

		if (!pid)
			goto out;

		ftrace_pid_trace = pid;

		set_ftrace_pid_task(ftrace_pid_trace);
	}

	/* update the function call */
	ftrace_update_pid_func();
	ftrace_startup_enable(0);

 out:
	mutex_unlock(&ftrace_start_lock);

	return cnt;
}
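
/*
 * The accepted values, as parsed above (the shell paths are
 * illustrative and assume the usual debugfs mount point):
 *
 *	echo 123 > /sys/kernel/debug/tracing/set_ftrace_pid	trace pid 123
 *	echo   0 > /sys/kernel/debug/tracing/set_ftrace_pid	trace the per-cpu swapper tasks
 *	echo  -1 > /sys/kernel/debug/tracing/set_ftrace_pid	disable pid tracing
 */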
static struct file_operations ftrace_pid_fops = {
	.read = ftrace_pid_read,
	.write = ftrace_pid_write,
};

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	ftrace_init_dyn_debugfs(d_tracer);

	entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
				    NULL, &ftrace_pid_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_pid' entry\n");
	return 0;
}
fs_initcall(ftrace_init_debugfs);
/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not-so-nice way: nothing is unpatched or freed, ftrace
 * is simply marked disabled and all callbacks are pointed at the stub.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}
/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __register_ftrace_function(ops);
	ftrace_startup(0);
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
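
/*
 * Minimal registration sketch (the callback and ops below are
 * hypothetical, shown only to illustrate the API; note the "notrace"
 * requirement from the comment above):
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *		// called on every traced function entry
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */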
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown(0);
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static atomic_t ftrace_graph_active;
static struct notifier_block ftrace_suspend_notifier;

int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					* sizeof(struct ftrace_ret_stack),
					GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			t->curr_ret_stack = -1;
			/* Make sure IRQs see the -1 first: */
			barrier();
			t->ret_stack = ret_stack_list[start++];
			atomic_set(&t->tracing_graph_pause, 0);
			atomic_set(&t->trace_overrun, 0);
		}
	} while_each_thread(g, t);

 unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
 free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}
/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret, cpu;

	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
				sizeof(struct ftrace_ret_stack *),
				GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	/* The cpu_boot init_task->ret_stack will never be freed */
	for_each_online_cpu(cpu)
		ftrace_graph_init_task(idle_task(cpu));

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	kfree(ret_stack_list);
	return ret;
}
/*
 * Hibernation protection.
 * The state of the current task is too unstable during
 * suspend/restore to disk. We want to protect against that.
 */
static int
ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
							void *unused)
{
	switch (state) {
	case PM_HIBERNATION_PREPARE:
		pause_graph_tracing();
		break;

	case PM_POST_HIBERNATION:
		unpause_graph_tracing();
		break;
	}
	return NOTIFY_DONE;
}
int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			trace_func_graph_ent_t entryfunc)
{
	int ret = 0;

	mutex_lock(&ftrace_sysctl_lock);

	ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
	register_pm_notifier(&ftrace_suspend_notifier);

	atomic_inc(&ftrace_graph_active);
	ret = start_graph_tracing();
	if (ret) {
		atomic_dec(&ftrace_graph_active);
		goto out;
	}

	ftrace_graph_return = retfunc;
	ftrace_graph_entry = entryfunc;

	ftrace_startup(FTRACE_START_FUNC_RET);

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
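
/*
 * Registration sketch for the graph tracer (both callbacks below are
 * hypothetical; their signatures follow the typedefs used above):
 *
 *	static int my_graph_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;	// nonzero: trace this function
 *	}
 *
 *	static void my_graph_return(struct ftrace_graph_ret *trace)
 *	{
 *		// called when the traced function returns
 *	}
 *
 *	register_ftrace_graph(my_graph_return, my_graph_entry);
 *	...
 *	unregister_ftrace_graph();
 */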
void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_sysctl_lock);

	atomic_dec(&ftrace_graph_active);
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(FTRACE_STOP_FUNC_RET);
	unregister_pm_notifier(&ftrace_suspend_notifier);

	mutex_unlock(&ftrace_sysctl_lock);
}

/* Allocate a return stack for a newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	if (atomic_read(&ftrace_graph_active)) {
		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
		if (!t->ret_stack)
			return;
		t->curr_ret_stack = -1;
		atomic_set(&t->tracing_graph_pause, 0);
		atomic_set(&t->trace_overrun, 0);
	} else
		t->ret_stack = NULL;
}

void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack	*ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}

void ftrace_graph_stop(void)
{
	ftrace_stop();
}
#endif