/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"

#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Quick disabling of function tracer. */
int function_trace_stop;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	};
}
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag before all CPUs stop calling
 * into the old trace function.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
}
#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test ftrace_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
#else
		if (ops->next == &ftrace_list_end)
			__ftrace_trace_function = ops->func;
		else
			__ftrace_trace_function = ftrace_list_func;
		ftrace_trace_function = ftrace_test_stop_func;
#endif
	}

	spin_unlock(&ftrace_lock);

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list == &ftrace_list_end ||
		    ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE
#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

/*
 * Since MCOUNT_ADDR may point to mcount itself, we do not want
 * to get it confused by reading a reference in the code as we
 * are parsing on objcopy output of text. Use a variable for
 * it instead.
 */
static unsigned long mcount_addr = MCOUNT_ADDR;

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
};

static int ftrace_filtered;
static int tracing_on;

static LIST_HEAD(ftrace_new_addrs);

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;

#ifdef CONFIG_KPROBES
static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */
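
/*
 * Freed records are kept on a singly linked free list: a freed record no
 * longer needs its instruction pointer, so the ip field is reused to hold
 * the pointer to the next free record (see ftrace_free_rec() below and the
 * matching reuse in ftrace_alloc_dyn_node()).
 */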
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;
	int i;

	if (ftrace_disabled || !start)
		return;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if ((rec->ip >= s) && (rec->ip < e))
				ftrace_free_rec(rec);
		}
	}
	spin_unlock(&ftrace_lock);
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (!ftrace_enabled || ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;

	list_add(&rec->list, &ftrace_new_addrs);

	return rec;
}
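
/* Address that enabled mcount call sites are patched to call */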
#define FTRACE_ADDR ((long)(ftrace_caller))

static int
__ftrace_replace_code(struct dyn_ftrace *rec,
		      unsigned char *old, unsigned char *new, int enable)
{
	unsigned long ip, fl;

	ip = rec->ip;

	if (ftrace_filtered && enable) {
		/*
		 * If filtering is on:
		 *
		 * If this record is set to be filtered and
		 * is enabled then do nothing.
		 *
		 * If this record is set to be filtered and
		 * it is not enabled, enable it.
		 *
		 * If this record is not set to be filtered
		 * and it is not enabled do nothing.
		 *
		 * If this record is set not to trace then
		 * do nothing.
		 *
		 * If this record is set not to trace and
		 * it is enabled then disable it.
		 *
		 * If this record is not set to be filtered and
		 * it is enabled, disable it.
		 */

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
				   FTRACE_FL_ENABLED);

		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
		    (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
		    !fl || (fl == FTRACE_FL_NOTRACE))
			return 0;

		/*
		 * If it is enabled disable it,
		 * otherwise enable it!
		 */
		if (fl & FTRACE_FL_ENABLED) {
			/* swap new and old */
			new = old;
			old = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags &= ~FTRACE_FL_ENABLED;
		} else {
			new = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags |= FTRACE_FL_ENABLED;
		}
	} else {
		if (enable) {
			/*
			 * If this record is set not to trace and is
			 * not enabled, do nothing.
			 */
			fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
			if (fl == FTRACE_FL_NOTRACE)
				return 0;

			new = ftrace_call_replace(ip, FTRACE_ADDR);
		} else
			old = ftrace_call_replace(ip, FTRACE_ADDR);

		if (enable) {
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;
			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;
			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	return ftrace_modify_code(ip, old, new);
}

static void ftrace_replace_code(int enable)
{
	int i, failed;
	unsigned char *new = NULL, *old = NULL;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	if (enable)
		old = ftrace_nop_replace();
	else
		new = ftrace_nop_replace();

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/* don't modify code that has already faulted */
			if (rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, old, new, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_free_rec(rec);
				}
			}
		}
	}
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static int
ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	unsigned char *nop, *call;
	int ret;

	ip = rec->ip;

	nop = ftrace_nop_replace();
	call = ftrace_call_replace(ip, mcount_addr);

	ret = ftrace_modify_code(ip, call, nop);
	if (ret) {
		switch (ret) {
		case -EFAULT:
			FTRACE_WARN_ON_ONCE(1);
			pr_info("ftrace faulted on modifying ");
			print_ip_sym(ip);
			break;
		case -EINVAL:
			FTRACE_WARN_ON_ONCE(1);
			pr_info("ftrace failed to modify ");
			print_ip_sym(ip);
			print_ip_ins(" expected: ", call);
			print_ip_ins(" actual: ", (unsigned char *)ip);
			print_ip_ins(" replace: ", nop);
			printk(KERN_CONT "\n");
			break;
		case -EPERM:
			FTRACE_WARN_ON_ONCE(1);
			pr_info("ftrace faulted on writing ");
			print_ip_sym(ip);
			break;
		default:
			FTRACE_WARN_ON_ONCE(1);
			pr_info("ftrace faulted on unknown error ");
			print_ip_sym(ip);
		}

		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS) {
		ftrace_replace_code(1);
		tracing_on = 1;
	} else if (*command & FTRACE_DISABLE_CALLS) {
		ftrace_replace_code(0);
		tracing_on = 0;
	}

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	return 0;
}
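
/*
 * Kernel text is patched under stop_machine() so that no other CPU is
 * executing (or fetching) the call sites while they are being rewritten.
 */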
static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;
static DEFINE_MUTEX(ftrace_start_lock);

static void ftrace_startup(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up++;
	if (ftrace_start_up == 1)
		command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up--;
	if (!ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int ftrace_update_code(void)
{
	struct dyn_ftrace *p, *t;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		list_del_init(&p->list);

		/* convert record (i.e, patch mcount-call with NOP) */
		if (ftrace_code_disable(p)) {
			p->flags |= FTRACE_FL_CONVERTED;
			ftrace_update_cnt++;
		} else
			ftrace_free_rec(p);
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}
enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t			pos;
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||
		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||
		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||
		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||
		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}
	spin_unlock(&ftrace_lock);

	iter->pos = *pos;

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos != iter->pos) {
		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
			;
	} else {
		l = *pos;
		p = t_next(m, p, &l);
	}

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}
static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}

static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}
enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};
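
/*
 * Wildcard handling for strings written to set_ftrace_filter and
 * set_ftrace_notrace, as parsed by ftrace_match() below:
 *
 *   "func"   - MATCH_FULL:        the symbol must match exactly
 *   "func*"  - MATCH_FRONT_ONLY:  the symbol must start with "func"
 *   "*func"  - MATCH_END_ONLY:    the symbol must end with "func"
 *   "*func*" - MATCH_MIDDLE_ONLY: the symbol must contain "func"
 */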
static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= flag;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}
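
/*
 * Usage sketch (hypothetical caller, not part of this file):
 *
 *	char buf[] = "sched*";
 *
 *	ftrace_set_filter(buf, strlen(buf), 1);
 *
 * resets any existing filter and then limits tracing to functions whose
 * names start with "sched". The buffer must be writable, since the
 * wildcard parsing in ftrace_match() modifies it in place.
 */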
/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}

static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftrace_start_lock);
	if (iter->filtered && ftrace_start_up && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_start_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}
static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}

static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

	return 0;
}

fs_initcall(ftrace_init_debugfs);
static int ftrace_convert_nops(unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_start_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code();
	local_irq_restore(flags);
	mutex_unlock(&ftrace_start_lock);

	return 0;
}

void ftrace_init_module(unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(start, end);
}

extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(__start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}

#else

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

# define ftrace_startup()		do { } while (0)
# define ftrace_shutdown()		do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)

#endif /* CONFIG_DYNAMIC_FTRACE */
/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way: it simply disables tracing and resets
 * the trace function to the stub. It takes no locks, so it may be
 * called from atomic context.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}
/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __register_ftrace_function(ops);
	ftrace_startup();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
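
/*
 * Usage sketch (hypothetical caller, not part of this file). The callback
 * and everything it calls must be notrace to avoid recursion:
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *		...record ip (the traced function) and parent_ip (its caller)...
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */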
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}

#ifdef CONFIG_FUNCTION_RET_TRACER
trace_function_return_t ftrace_function_return =
			(trace_function_return_t)ftrace_stub;

void register_ftrace_return(trace_function_return_t func)
{
	ftrace_function_return = func;
}

void unregister_ftrace_return(void)
{
	ftrace_function_return = (trace_function_return_t)ftrace_stub;
}
#endif