/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"

#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a lag before tracing fully stops,
 * since a CPU that has already loaded the old pointer will
 * still call through it one more time.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
	}

	spin_unlock(&ftrace_lock);

	return 0;
}
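
/*
 * A minimal sketch of the publication ordering used above, for a
 * hypothetical two-CPU interleaving (added for illustration, not part
 * of the original file):
 *
 *	CPU 0 (register)		CPU 1 (ftrace_list_func)
 *	------------------		------------------------
 *	ops->next = ftrace_list;
 *	smp_wmb();
 *	ftrace_list = ops;
 *					op = ftrace_list;
 *					read_barrier_depends();
 *					op->func(ip, parent_ip);
 *					op = op->next;	(valid pointer)
 *
 * Without the smp_wmb()/read_barrier_depends() pairing, CPU 1 could
 * observe the new ftrace_list before the store to ops->next and walk
 * off an uninitialized ->next pointer.
 */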

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list == &ftrace_list_end ||
		    ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

/*
 * Since MCOUNT_ADDR may point to mcount itself, we do not want
 * to get it confused by reading a reference in the code as we
 * are parsing the objcopy output of the text section. Use a
 * variable for it instead.
 */
static unsigned long mcount_addr = MCOUNT_ADDR;

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
};

static int ftrace_filtered;

static LIST_HEAD(ftrace_new_addrs);

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
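
/*
 * Worked example of the arithmetic above, with purely illustrative
 * sizes (the real ones are arch- and config-dependent): with 4096-byte
 * pages, a 16-byte struct ftrace_page header and a 32-byte
 * struct dyn_ftrace, ENTRIES_PER_PAGE is (4096 - 16) / 32 = 127
 * records per page.
 */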

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;

#ifdef CONFIG_KPROBES
static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}
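
/*
 * Note (added for clarity): freed records are chained through their
 * ->ip field, which doubles as a "next free" pointer while the record
 * carries FTRACE_FL_FREE. ftrace_alloc_dyn_node() below pops from this
 * free list before carving fresh entries out of ftrace_pages.
 */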

void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;
	int i;

	if (ftrace_disabled || !start)
		return;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if ((rec->ip >= s) && (rec->ip < e))
				ftrace_free_rec(rec);
		}
	}
	spin_unlock(&ftrace_lock);
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (!ftrace_enabled || ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;

	list_add(&rec->list, &ftrace_new_addrs);

	return rec;
}

#define FTRACE_ADDR ((long)(ftrace_caller))

static int
__ftrace_replace_code(struct dyn_ftrace *rec,
		      unsigned char *nop, int enable)
{
	unsigned long ip, fl;
	unsigned char *call, *old, *new;

	ip = rec->ip;

	/*
	 * If this record is not to be traced and
	 * it is not enabled then do nothing.
	 *
	 * If this record is not to be traced and
	 * it is enabled then disable it.
	 */
	if (rec->flags & FTRACE_FL_NOTRACE) {
		if (rec->flags & FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			return 0;

	} else if (ftrace_filtered && enable) {
		/*
		 * Filtering is on:
		 */

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		/* Record is filtered and enabled, do nothing */
		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
			return 0;

		/* Record is not filtered and is not enabled, do nothing */
		if (!fl)
			return 0;

		/* Record is not filtered but enabled, disable it */
		if (fl == FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
		/* Otherwise record is filtered but not enabled, enable it */
			rec->flags |= FTRACE_FL_ENABLED;
	} else {
		/* Disable or not filtered */

		if (enable) {
			/* if record is enabled, do nothing */
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;

			rec->flags |= FTRACE_FL_ENABLED;

		} else {
			/* if record is not enabled, do nothing */
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;

			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	call = ftrace_call_replace(ip, FTRACE_ADDR);

	if (rec->flags & FTRACE_FL_ENABLED) {
		old = nop;
		new = call;
	} else {
		old = call;
		new = nop;
	}

	return ftrace_modify_code(ip, old, new);
}
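
/*
 * Decision table for the "filtering is on" branch above (enable == 1),
 * added for reference; it restates the code, nothing more:
 *
 *	FILTER	ENABLED	action
 *	------	-------	------------------------------------
 *	  1	   1	nothing (already patched to a call)
 *	  0	   0	nothing (not selected, already a nop)
 *	  0	   1	clear ENABLED -> patch call back to nop
 *	  1	   0	set ENABLED   -> patch nop to call
 */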

static void ftrace_replace_code(int enable)
{
	int i, failed;
	unsigned char *nop = NULL;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	nop = ftrace_nop_replace();

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/* don't modify code that has already faulted */
			if (rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, nop, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_free_rec(rec);
				}
			}
		}
	}
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static int
ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	unsigned char *nop, *call;
	int ret;

	ip = rec->ip;

	nop = ftrace_nop_replace();
	call = ftrace_call_replace(ip, mcount_addr);

	ret = ftrace_modify_code(ip, call, nop);
	if (ret) {
		switch (ret) {
		case -EFAULT:
			FTRACE_WARN_ON_ONCE(1);
			pr_info("ftrace faulted on modifying ");
			print_ip_sym(ip);
			break;
		case -EINVAL:
			FTRACE_WARN_ON_ONCE(1);
			pr_info("ftrace failed to modify ");
			print_ip_sym(ip);
			print_ip_ins(" expected: ", call);
			print_ip_ins(" actual: ", (unsigned char *)ip);
			print_ip_ins(" replace: ", nop);
			printk(KERN_CONT "\n");
			break;
		case -EPERM:
			FTRACE_WARN_ON_ONCE(1);
			pr_info("ftrace faulted on writing ");
			print_ip_sym(ip);
			break;
		default:
			FTRACE_WARN_ON_ONCE(1);
			pr_info("ftrace faulted on unknown error ");
			print_ip_sym(ip);
		}

		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	return 0;
}

static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}
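
/*
 * Note (added for clarity): stop_machine() runs __ftrace_modify_code()
 * on one CPU while every other CPU spins with interrupts disabled, so
 * kernel text can be patched without another CPU executing an
 * instruction mid-rewrite.
 */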

static ftrace_func_t saved_ftrace_func;
static int ftrace_start;
static DEFINE_MUTEX(ftrace_start_lock);

static void ftrace_startup(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start++;
	command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start--;
	if (!ftrace_start)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start is true if we want ftrace running */
	if (ftrace_start)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* ftrace_start is true if ftrace is running */
	if (ftrace_start)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int ftrace_update_code(void)
{
	struct dyn_ftrace *p, *t;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		list_del_init(&p->list);

		/* convert record (i.e., patch the mcount call with a NOP) */
		if (ftrace_code_disable(p)) {
			p->flags |= FTRACE_FL_CONVERTED;
			ftrace_update_cnt++;
		} else
			ftrace_free_rec(p);
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt + 1);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t			pos;
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}
	spin_unlock(&ftrace_lock);

	iter->pos = *pos;

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos > iter->pos)
		*pos = iter->pos;

	l = *pos;
	p = t_next(m, p, &l);

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret = 0;

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	ret = seq_printf(m, "%s\n", str);
	if (ret < 0) {
		iter->pos--;
		iter->idx--;
	}

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = 0;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}

static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = 0;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= flag;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}
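
/*
 * Wildcard handling above, by example (hypothetical patterns, added
 * for illustration):
 *
 *	"sys_open"	MATCH_FULL		exact symbol name
 *	"sys_*"		MATCH_FRONT_ONLY	prefix match
 *	"*_read"	MATCH_END_ONLY		suffix match
 *	"*lock*"	MATCH_MIDDLE_ONLY	substring match
 *
 * Only one '*' is honored for prefix or suffix matches; a second '*'
 * following a leading one turns the pattern into a substring match.
 */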

static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	/* cnt is a size_t and thus can never be negative */
	if (!cnt)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
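
/*
 * Note (added for clarity): each call above consumes at most one
 * whitespace-separated pattern and returns a short count, so userspace
 * looping on write() feeds multiple patterns one token at a time. A
 * call that ends mid-token sets FTRACE_ITER_CONT so the next call
 * appends to the buffered token instead of starting a new one.
 */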

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non-zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}
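
/*
 * Example (hypothetical caller, not part of this file). The buffer
 * must be writable, because ftrace_match() may NUL-terminate it at a
 * wildcard. This traces only functions starting with "sys_" and drops
 * any previously set filters:
 *
 *	unsigned char buf[] = "sys_*";
 *
 *	ftrace_set_filter(buf, sizeof(buf) - 1, 1);
 */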

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non-zero to reset all notrace filters before applying this filter.
 *
 * Notrace filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}

static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftrace_start_lock);
	if (ftrace_start && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_start_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}

static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

	return 0;
}

fs_initcall(ftrace_init_debugfs);
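
/*
 * Typical use of these files from userspace (illustrative; assumes
 * debugfs is mounted at /sys/kernel/debug, and that these entries live
 * under the tracing directory created by tracing_init_dentry()):
 *
 *	cat /sys/kernel/debug/tracing/available_filter_functions
 *	echo 'sys_*'  > /sys/kernel/debug/tracing/set_ftrace_filter
 *	echo schedule > /sys/kernel/debug/tracing/set_ftrace_notrace
 */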

static int ftrace_convert_nops(unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_start_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code();
	local_irq_restore(flags);
	mutex_unlock(&ftrace_start_lock);

	return 0;
}

void ftrace_init_module(unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(start, end);
}

extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(__start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}

#else

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

# define ftrace_startup()		do { } while (0)
# define ftrace_shutdown()		do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way: no locks are taken and no code is
 * unpatched, so it is safe to call from any context, including
 * the panic path itself.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __register_ftrace_function(ops);
	ftrace_startup();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
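
/*
 * Example (hypothetical module code, not part of this file). The
 * callback and everything it calls must be notrace, or the traced
 * call back into the tracer would recurse:
 *
 *	static void notrace my_callback(unsigned long ip,
 *					unsigned long parent_ip)
 *	{
 *		atomic_inc(&my_hit_count);	(illustrative body)
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_callback,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */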

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}