/* kernel/trace/ftrace.c — web-extraction artifacts (size banner and line-number gutter) removed */
  1. /*
  2. * Infrastructure for profiling code inserted by 'gcc -pg'.
  3. *
  4. * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
  5. * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
  6. *
  7. * Originally ported from the -rt patch by:
  8. * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
  9. *
  10. * Based on code in the latency_tracer, that is:
  11. *
  12. * Copyright (C) 2004-2006 Ingo Molnar
  13. * Copyright (C) 2004 William Lee Irwin III
  14. */
  15. #include <linux/stop_machine.h>
  16. #include <linux/clocksource.h>
  17. #include <linux/kallsyms.h>
  18. #include <linux/seq_file.h>
  19. #include <linux/debugfs.h>
  20. #include <linux/kthread.h>
  21. #include <linux/hardirq.h>
  22. #include <linux/ftrace.h>
  23. #include <linux/uaccess.h>
  24. #include <linux/sysctl.h>
  25. #include <linux/hash.h>
  26. #include <linux/ctype.h>
  27. #include <linux/list.h>
  28. #include "trace.h"
/* Global enable switch, toggled via sysctl. */
int ftrace_enabled;
/* Value of ftrace_enabled at the previous sysctl write, to detect flips. */
static int last_ftrace_enabled;

/* Protects mutation of the ftrace_ops list below. */
static DEFINE_SPINLOCK(ftrace_lock);
/* Serializes the sysctl handler against tracer start/stop. */
static DEFINE_MUTEX(ftrace_sysctl_lock);

/* Sentinel terminating the singly linked tracer list; func is a no-op. */
static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

/* Head of the registered-tracer list, traversed locklessly. */
static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
/* The function the arch mcount hook actually calls. */
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

/* mcount is defined per arch in assembly */
EXPORT_SYMBOL(mcount);
/*
 * Entry function used when more than one ftrace_ops is registered:
 * walk the tracer list and hand each ops the traced location and its
 * caller.  The list is read without a lock, relying on the publish
 * order established in __register_ftrace_function().
 */
notrace void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	};
}
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag before all CPUs observe the
 * new (stub) function pointer.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}
/*
 * Insert @ops at the head of the tracer list and, if tracing is
 * enabled, retarget the mcount hook (direct call for a single
 * tracer, list walker for several).  Always returns 0.
 */
static int notrace __register_ftrace_function(struct ftrace_ops *ops)
{
	/* Should never be called by interrupts */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
	}

	spin_unlock(&ftrace_lock);

	return 0;
}
/*
 * Unlink @ops from the tracer list.  Returns 0 on success, -1 if
 * @ops was not registered.  When one (or no) tracer remains and
 * tracing is enabled, the mcount hook is pointed at it directly.
 */
static int notrace __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	/* find the link that points at @ops so we can splice it out */
	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list == &ftrace_list_end ||
		    ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}
  121. #ifdef CONFIG_DYNAMIC_FTRACE
/* The ftraced daemon thread (see ftraced() below). */
static struct task_struct *ftraced_task;
/* Waiters for one complete daemon pass (ftrace_force_update). */
static DECLARE_WAIT_QUEUE_HEAD(ftraced_waiters);
/* Incremented after every daemon pass; lets waiters detect progress. */
static unsigned long ftraced_iteration_counter;

/* Command bits handed to __ftrace_modify_code() via stop_machine. */
enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
};

/* Nonzero when a function filter is in effect. */
static int ftrace_filtered;

/* Hash of all mcount call sites recorded so far. */
static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

/* Per-cpu recursion guard used by ftrace_record_ip(). */
static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_SPINLOCK(ftrace_shutdown_lock);
static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_filter_lock);

/*
 * A page worth of dyn_ftrace records; pages chain through ->next.
 * Packed so records[] begins immediately after the header.
 */
struct ftrace_page {
	struct ftrace_page *next;
	int index;
	struct dyn_ftrace records[];
} __attribute__((packed));

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT 10000

static struct ftrace_page *ftrace_pages_start;
/* the page currently being filled by the allocator */
static struct ftrace_page *ftrace_pages;

/* Set when new call sites were recorded; cleared by the daemon. */
static int ftraced_trigger;
/* Start refcount: nonzero while some tracer wants tracing on. */
static int ftraced_suspend;
/* Nonzero while the daemon rewrites code; blocks new recording. */
static int ftrace_record_suspend;
  152. static inline int
  153. notrace ftrace_ip_in_hash(unsigned long ip, unsigned long key)
  154. {
  155. struct dyn_ftrace *p;
  156. struct hlist_node *t;
  157. int found = 0;
  158. hlist_for_each_entry(p, t, &ftrace_hash[key], node) {
  159. if (p->ip == ip) {
  160. found = 1;
  161. break;
  162. }
  163. }
  164. return found;
  165. }
/* Link an initialized record into the call-site hash bucket @key. */
static inline void notrace
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
	hlist_add_head(&node->node, &ftrace_hash[key]);
}
  171. static notrace struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
  172. {
  173. if (ftrace_pages->index == ENTRIES_PER_PAGE) {
  174. if (!ftrace_pages->next)
  175. return NULL;
  176. ftrace_pages = ftrace_pages->next;
  177. }
  178. return &ftrace_pages->records[ftrace_pages->index++];
  179. }
/*
 * Record a not-yet-seen mcount call site @ip into the hash so the
 * ftraced daemon can convert it to a nop later.  This runs from the
 * mcount handler itself, so it guards hard against recursion via a
 * per-cpu counter and avoids rescheduling on the way out.
 */
static void notrace
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *node;
	unsigned long flags;
	unsigned long key;
	int resched;
	int atomic;

	if (!ftrace_enabled)
		return;

	resched = need_resched();
	preempt_disable_notrace();

	/* We simply need to protect against recursion */
	__get_cpu_var(ftrace_shutdown_disable_cpu)++;
	if (__get_cpu_var(ftrace_shutdown_disable_cpu) != 1)
		goto out;

	if (unlikely(ftrace_record_suspend))
		goto out;

	key = hash_long(ip, FTRACE_HASHBITS);

	WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

	if (ftrace_ip_in_hash(ip, key))
		goto out;

	/* NOTE(review): 'atomic' is computed but never used below */
	atomic = irqs_disabled();

	spin_lock_irqsave(&ftrace_shutdown_lock, flags);

	/* This ip may have hit the hash before the lock */
	if (ftrace_ip_in_hash(ip, key))
		goto out_unlock;

	/*
	 * There's a slight race that the ftraced will update the
	 * hash and reset here. If it is already converted, skip it.
	 */
	if (ftrace_ip_converted(ip))
		goto out_unlock;

	node = ftrace_alloc_dyn_node(ip);
	if (!node)
		goto out_unlock;

	node->ip = ip;

	ftrace_add_hash(node, key);

	/* tell the daemon there is work to do */
	ftraced_trigger = 1;

 out_unlock:
	spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
 out:
	__get_cpu_var(ftrace_shutdown_disable_cpu)--;

	/* prevent recursion with scheduler */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}
#define FTRACE_ADDR ((long)(&ftrace_caller))
#define MCOUNT_ADDR ((long)(&mcount))

/*
 * Patch one call site according to @enable and the filter state.
 * @old and @new arrive preloaded with the nop pattern on one side
 * (see ftrace_replace_code()); the other side is filled in here
 * with the call to ftrace_caller.  On a patch fault the record is
 * flagged FAILED and never touched again.
 */
static void notrace
__ftrace_replace_code(struct dyn_ftrace *rec,
		      unsigned char *old, unsigned char *new, int enable)
{
	unsigned long ip;
	int failed;

	ip = rec->ip;

	if (ftrace_filtered && enable) {
		unsigned long fl;
		/*
		 * If filtering is on:
		 *
		 * If this record is set to be filtered and
		 * is enabled then do nothing.
		 *
		 * If this record is set to be filtered and
		 * it is not enabled, enable it.
		 *
		 * If this record is not set to be filtered
		 * and it is not enabled do nothing.
		 *
		 * If this record is not set to be filtered and
		 * it is enabled, disable it.
		 */
		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
		    (fl == 0))
			return;

		/*
		 * If it is enabled disable it,
		 * otherwise enable it!
		 */
		if (fl == FTRACE_FL_ENABLED) {
			/* swap new and old */
			new = old;
			old = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags &= ~FTRACE_FL_ENABLED;
		} else {
			new = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags |= FTRACE_FL_ENABLED;
		}
	} else {
		if (enable)
			new = ftrace_call_replace(ip, FTRACE_ADDR);
		else
			old = ftrace_call_replace(ip, FTRACE_ADDR);

		if (enable) {
			if (rec->flags & FTRACE_FL_ENABLED)
				return;
			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return;
			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	failed = ftrace_modify_code(ip, old, new);
	if (failed)
		rec->flags |= FTRACE_FL_FAILED;
}
  291. static void notrace ftrace_replace_code(int enable)
  292. {
  293. unsigned char *new = NULL, *old = NULL;
  294. struct dyn_ftrace *rec;
  295. struct ftrace_page *pg;
  296. int i;
  297. if (enable)
  298. old = ftrace_nop_replace();
  299. else
  300. new = ftrace_nop_replace();
  301. for (pg = ftrace_pages_start; pg; pg = pg->next) {
  302. for (i = 0; i < pg->index; i++) {
  303. rec = &pg->records[i];
  304. /* don't modify code that has already faulted */
  305. if (rec->flags & FTRACE_FL_FAILED)
  306. continue;
  307. __ftrace_replace_code(rec, old, new, enable);
  308. }
  309. }
  310. }
  311. static notrace void ftrace_shutdown_replenish(void)
  312. {
  313. if (ftrace_pages->next)
  314. return;
  315. /* allocate another page */
  316. ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
  317. }
/*
 * Convert the original mcount call at @rec->ip into a nop.  A
 * faulting patch marks the record FAILED so it is skipped forever.
 */
static notrace void
ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	unsigned char *nop, *call;
	int failed;

	ip = rec->ip;

	nop = ftrace_nop_replace();
	call = ftrace_call_replace(ip, MCOUNT_ADDR);

	failed = ftrace_modify_code(ip, call, nop);
	if (failed)
		rec->flags |= FTRACE_FL_FAILED;
}
/*
 * Executed via stop_machine with all other CPUs halted, making
 * code patching safe.  @data points at the command bitmask
 * (FTRACE_* enum above).
 */
static int notrace __ftrace_modify_code(void *data)
{
	unsigned long addr;
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_ENABLE_MCOUNT) {
		addr = (unsigned long)ftrace_record_ip;
		ftrace_mcount_set(&addr);
	} else if (*command & FTRACE_DISABLE_MCOUNT) {
		addr = (unsigned long)ftrace_stub;
		ftrace_mcount_set(&addr);
	}

	return 0;
}
/* Run @command through __ftrace_modify_code with the machine stopped. */
static void notrace ftrace_run_update_code(int command)
{
	stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
}

/* Trace function last patched in; used to detect changes. */
static ftrace_func_t saved_ftrace_func;
/*
 * Bump the start refcount; on the first start enable all call
 * sites, and retarget the trace function if it changed.  Caller
 * must hold ftrace_sysctl_lock.
 */
static void notrace ftrace_startup(void)
{
	int command = 0;

	mutex_lock(&ftraced_lock);
	ftraced_suspend++;
	if (ftraced_suspend == 1)
		command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}
/*
 * Drop the start refcount; when it hits zero disable all call
 * sites.  Mirror of ftrace_startup().
 */
static void notrace ftrace_shutdown(void)
{
	int command = 0;

	mutex_lock(&ftraced_lock);
	ftraced_suspend--;
	if (!ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}
/* sysctl enabled ftrace: rearm mcount and re-enable active call sites. */
static void notrace ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	mutex_lock(&ftraced_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftraced_suspend is true if we want ftrace running */
	if (ftraced_suspend)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}
/* sysctl disabled ftrace: disarm mcount and nop out active call sites. */
static void notrace ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	mutex_lock(&ftraced_lock);
	/* ftraced_suspend is true if ftrace is running */
	if (ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}
/* Duration of the last conversion pass, in nanoseconds. */
static cycle_t ftrace_update_time;
/* Records converted in the last pass. */
static unsigned long ftrace_update_cnt;
/* Records converted since the counter was last reset. */
unsigned long ftrace_update_tot_cnt;

/*
 * Drain the hash of freshly recorded call sites, patching each to a
 * nop.  Runs under stop_machine, hence no locking is needed.
 */
static int notrace __ftrace_update_code(void *ignore)
{
	struct dyn_ftrace *p;
	struct hlist_head head;
	struct hlist_node *t;
	int save_ftrace_enabled;
	cycle_t start, stop;
	int i;

	/* Don't be recording funcs now */
	save_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	/* No locks needed, the machine is stopped! */
	for (i = 0; i < FTRACE_HASHSIZE; i++) {
		if (hlist_empty(&ftrace_hash[i]))
			continue;

		head = ftrace_hash[i];
		INIT_HLIST_HEAD(&ftrace_hash[i]);

		/* all CPUS are stopped, we are safe to modify code */
		hlist_for_each_entry(p, t, &head, node) {
			ftrace_code_disable(p);
			ftrace_update_cnt++;
		}
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	ftrace_enabled = save_ftrace_enabled;

	return 0;
}
/* Convert all newly recorded call sites with the machine stopped. */
static void notrace ftrace_update_code(void)
{
	stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
}
/*
 * The dynamic-ftrace daemon.  Once a second: convert any newly
 * recorded call sites into nops, wake ftrace_force_update()
 * waiters, and replenish the record-page pool.
 */
static int notrace ftraced(void *ignore)
{
	unsigned long usecs;

	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		/* check once a second */
		schedule_timeout(HZ);

		mutex_lock(&ftrace_sysctl_lock);
		mutex_lock(&ftraced_lock);
		/* only convert while no tracer is actively running */
		if (ftrace_enabled && ftraced_trigger && !ftraced_suspend) {
			ftrace_record_suspend++;
			ftrace_update_code();
			usecs = nsecs_to_usecs(ftrace_update_time);
			if (ftrace_update_tot_cnt > 100000) {
				ftrace_update_tot_cnt = 0;
				pr_info("hm, dftrace overflow: %lu change%s"
					" (%lu total) in %lu usec%s\n",
					ftrace_update_cnt,
					ftrace_update_cnt != 1 ? "s" : "",
					ftrace_update_tot_cnt,
					usecs, usecs != 1 ? "s" : "");
				WARN_ON_ONCE(1);
			}
			ftraced_trigger = 0;
			ftrace_record_suspend--;
		}
		/* a full pass completed: let force-update waiters proceed */
		ftraced_iteration_counter++;
		mutex_unlock(&ftraced_lock);
		mutex_unlock(&ftrace_sysctl_lock);

		wake_up_interruptible(&ftraced_waiters);

		ftrace_shutdown_replenish();

		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
  485. static int __init ftrace_dyn_table_alloc(void)
  486. {
  487. struct ftrace_page *pg;
  488. int cnt;
  489. int i;
  490. /* allocate a few pages */
  491. ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
  492. if (!ftrace_pages_start)
  493. return -1;
  494. /*
  495. * Allocate a few more pages.
  496. *
  497. * TODO: have some parser search vmlinux before
  498. * final linking to find all calls to ftrace.
  499. * Then we can:
  500. * a) know how many pages to allocate.
  501. * and/or
  502. * b) set up the table then.
  503. *
  504. * The dynamic code is still necessary for
  505. * modules.
  506. */
  507. pg = ftrace_pages = ftrace_pages_start;
  508. cnt = NR_TO_INIT / ENTRIES_PER_PAGE;
  509. for (i = 0; i < cnt; i++) {
  510. pg->next = (void *)get_zeroed_page(GFP_KERNEL);
  511. /* If we fail, we'll try later anyway */
  512. if (!pg->next)
  513. break;
  514. pg = pg->next;
  515. }
  516. return 0;
  517. }
/* Flags for ftrace_iterator.flags */
enum {
	FTRACE_ITER_FILTER	= (1 << 0),	/* show only filtered records */
	FTRACE_ITER_CONT	= (1 << 1),	/* write continues a pattern */
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

/* Cursor state shared by the avail/filter debugfs interfaces. */
struct ftrace_iterator {
	loff_t pos;				/* last seq position emitted */
	struct ftrace_page *pg;			/* page being walked */
	unsigned idx;				/* index within pg */
	unsigned flags;				/* FTRACE_ITER_* */
	unsigned char buffer[FTRACE_BUFF_MAX+1];	/* partial filter pattern */
	unsigned buffer_idx;			/* bytes accumulated in buffer */
	unsigned filtered;			/* patterns applied so far */
};
/*
 * seq_file next: advance to the next record, skipping failed ones
 * and — when FTRACE_ITER_FILTER is set — records lacking the
 * filter flag.  Returns NULL at end of the record list.
 */
static void notrace *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FAILED) ||
		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER))) {
			rec = NULL;
			goto retry;
		}
	}

	iter->pos = *pos;

	return rec;
}
/*
 * seq_file start: if the requested position differs from where the
 * iterator stopped, scan forward from the iterator's current state
 * until *pos is reached; otherwise just take the next record.
 */
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos != iter->pos) {
		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
			;
	} else {
		l = *pos;
		p = t_next(m, p, &l);
	}

	return p;
}
/* seq_file stop: nothing to tear down. */
static void t_stop(struct seq_file *m, void *p)
{
}

/* seq_file show: print one record as its resolved symbol name. */
static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}
/* seq_file iterator over the recorded (optionally filtered) functions. */
static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};
/*
 * Open "available_filter_functions": attach an iterator over all
 * recorded functions to the seq_file.  Frees the iterator if
 * seq_open() fails.
 */
static int notrace
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}
/* Release "available_filter_functions": free the iterator. */
int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}
  617. static void notrace ftrace_filter_reset(void)
  618. {
  619. struct ftrace_page *pg;
  620. struct dyn_ftrace *rec;
  621. unsigned i;
  622. /* keep kstop machine from running */
  623. preempt_disable();
  624. ftrace_filtered = 0;
  625. pg = ftrace_pages_start;
  626. while (pg) {
  627. for (i = 0; i < pg->index; i++) {
  628. rec = &pg->records[i];
  629. if (rec->flags & FTRACE_FL_FAILED)
  630. continue;
  631. rec->flags &= ~FTRACE_FL_FILTER;
  632. }
  633. pg = pg->next;
  634. }
  635. preempt_enable();
  636. }
/*
 * Open "set_ftrace_filter".  A write open without O_APPEND resets
 * the current filter; a read open additionally attaches a seq_file
 * iterating only filtered records.
 */
static int notrace
ftrace_filter_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_filter_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset();

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = FTRACE_ITER_FILTER;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		/* write-only open: keep the iterator directly */
		file->private_data = iter;
	mutex_unlock(&ftrace_filter_lock);

	return ret;
}
  664. static ssize_t notrace
  665. ftrace_filter_read(struct file *file, char __user *ubuf,
  666. size_t cnt, loff_t *ppos)
  667. {
  668. if (file->f_mode & FMODE_READ)
  669. return seq_read(file, ubuf, cnt, ppos);
  670. else
  671. return -EPERM;
  672. }
/*
 * Seek on "set_ftrace_filter": delegate to seq_lseek for readers;
 * writers simply get a fixed position of 1.
 */
static loff_t notrace
ftrace_filter_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}
/* How the single supported '*' wildcard positions the match. */
enum {
	MATCH_FULL,		/* "foo"   : exact name */
	MATCH_FRONT_ONLY,	/* "foo*"  : prefix */
	MATCH_MIDDLE_ONLY,	/* "*foo*" : substring */
	MATCH_END_ONLY,		/* "*foo"  : suffix */
};

/*
 * Parse the glob in @buff (at most one leading and one trailing
 * '*') and set FTRACE_FL_FILTER on every record whose symbol name
 * matches.  May truncate @buff in place at the trailing '*'.
 * Caller holds ftrace_filter_lock.
 */
static void notrace
ftrace_match(unsigned char *buff, int len)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned i, match = 0, search_len = 0;

	/* classify the pattern by where the '*' characters sit */
	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* keep kstop machine from running */
	preempt_disable();
	ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= FTRACE_FL_FILTER;
		}
		pg = pg->next;
	}
	preempt_enable();
}
  754. static ssize_t notrace
  755. ftrace_filter_write(struct file *file, const char __user *ubuf,
  756. size_t cnt, loff_t *ppos)
  757. {
  758. struct ftrace_iterator *iter;
  759. char ch;
  760. size_t read = 0;
  761. ssize_t ret;
  762. if (!cnt || cnt < 0)
  763. return 0;
  764. mutex_lock(&ftrace_filter_lock);
  765. if (file->f_mode & FMODE_READ) {
  766. struct seq_file *m = file->private_data;
  767. iter = m->private;
  768. } else
  769. iter = file->private_data;
  770. if (!*ppos) {
  771. iter->flags &= ~FTRACE_ITER_CONT;
  772. iter->buffer_idx = 0;
  773. }
  774. ret = get_user(ch, ubuf++);
  775. if (ret)
  776. goto out;
  777. read++;
  778. cnt--;
  779. if (!(iter->flags & ~FTRACE_ITER_CONT)) {
  780. /* skip white space */
  781. while (cnt && isspace(ch)) {
  782. ret = get_user(ch, ubuf++);
  783. if (ret)
  784. goto out;
  785. read++;
  786. cnt--;
  787. }
  788. if (isspace(ch)) {
  789. file->f_pos += read;
  790. ret = read;
  791. goto out;
  792. }
  793. iter->buffer_idx = 0;
  794. }
  795. while (cnt && !isspace(ch)) {
  796. if (iter->buffer_idx < FTRACE_BUFF_MAX)
  797. iter->buffer[iter->buffer_idx++] = ch;
  798. else {
  799. ret = -EINVAL;
  800. goto out;
  801. }
  802. ret = get_user(ch, ubuf++);
  803. if (ret)
  804. goto out;
  805. read++;
  806. cnt--;
  807. }
  808. if (isspace(ch)) {
  809. iter->filtered++;
  810. iter->buffer[iter->buffer_idx] = 0;
  811. ftrace_match(iter->buffer, iter->buffer_idx);
  812. iter->buffer_idx = 0;
  813. } else
  814. iter->flags |= FTRACE_ITER_CONT;
  815. file->f_pos += read;
  816. ret = read;
  817. out:
  818. mutex_unlock(&ftrace_filter_lock);
  819. return ret;
  820. }
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
notrace void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	mutex_lock(&ftrace_filter_lock);
	if (reset)
		ftrace_filter_reset();
	if (buf)
		ftrace_match(buf, len);
	mutex_unlock(&ftrace_filter_lock);
}
/*
 * Release "set_ftrace_filter".  Flush any partially written
 * pattern, then re-enable the call sites if the filter changed
 * while tracing is active.
 */
static int notrace
ftrace_filter_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_filter_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	/* apply a pattern left unterminated by the last write */
	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);
	if (iter->filtered && ftraced_suspend && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_filter_lock);
	return 0;
}
/* debugfs ops for "available_filter_functions" (read-only listing). */
static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

/* debugfs ops for "set_ftrace_filter" (read current / write patterns). */
static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_filter_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_filter_lseek,
	.release = ftrace_filter_release,
};
/**
 * ftrace_force_update - force an update to all recording ftrace functions
 *
 * The ftrace dynamic update daemon only wakes up once a second.
 * There may be cases where an update needs to be done immediately
 * for tests or internal kernel tracing to begin. This function
 * wakes the daemon to do an update and will not return until the
 * update is complete.
 *
 * Returns 0 on success, -ENODEV if the daemon is not running, or
 * -EINTR if interrupted by a signal.
 */
int ftrace_force_update(void)
{
	unsigned long last_counter;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	if (!ftraced_task)
		return -ENODEV;

	mutex_lock(&ftraced_lock);
	last_counter = ftraced_iteration_counter;

	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&ftraced_waiters, &wait);

	/* loop until the daemon completes at least one full pass */
	do {
		mutex_unlock(&ftraced_lock);
		wake_up_process(ftraced_task);
		schedule();
		mutex_lock(&ftraced_lock);
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		set_current_state(TASK_INTERRUPTIBLE);
	} while (last_counter == ftraced_iteration_counter);

	mutex_unlock(&ftraced_lock);
	remove_wait_queue(&ftraced_waiters, &wait);
	set_current_state(TASK_RUNNING);

	return ret;
}
  914. static __init int ftrace_init_debugfs(void)
  915. {
  916. struct dentry *d_tracer;
  917. struct dentry *entry;
  918. d_tracer = tracing_init_dentry();
  919. entry = debugfs_create_file("available_filter_functions", 0444,
  920. d_tracer, NULL, &ftrace_avail_fops);
  921. if (!entry)
  922. pr_warning("Could not create debugfs "
  923. "'available_filter_functions' entry\n");
  924. entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
  925. NULL, &ftrace_filter_fops);
  926. if (!entry)
  927. pr_warning("Could not create debugfs "
  928. "'set_ftrace_filter' entry\n");
  929. return 0;
  930. }
  931. fs_initcall(ftrace_init_debugfs);
  932. static int __init notrace ftrace_dynamic_init(void)
  933. {
  934. struct task_struct *p;
  935. unsigned long addr;
  936. int ret;
  937. addr = (unsigned long)ftrace_record_ip;
  938. stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS);
  939. /* ftrace_dyn_arch_init places the return code in addr */
  940. if (addr)
  941. return addr;
  942. ret = ftrace_dyn_table_alloc();
  943. if (ret)
  944. return ret;
  945. p = kthread_run(ftraced, NULL, "ftraced");
  946. if (IS_ERR(p))
  947. return -1;
  948. last_ftrace_enabled = ftrace_enabled = 1;
  949. ftraced_task = p;
  950. return 0;
  951. }
  952. core_initcall(ftrace_dynamic_init);
#else
/* !CONFIG_DYNAMIC_FTRACE: the startup/shutdown hooks compile to nothing. */
# define ftrace_startup()		do { } while (0)
# define ftrace_shutdown()		do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
  959. /**
  960. * register_ftrace_function - register a function for profiling
  961. * @ops - ops structure that holds the function for profiling.
  962. *
  963. * Register a function to be called by all functions in the
  964. * kernel.
  965. *
  966. * Note: @ops->func and all the functions it calls must be labeled
  967. * with "notrace", otherwise it will go into a
  968. * recursive loop.
  969. */
  970. int register_ftrace_function(struct ftrace_ops *ops)
  971. {
  972. int ret;
  973. mutex_lock(&ftrace_sysctl_lock);
  974. ret = __register_ftrace_function(ops);
  975. ftrace_startup();
  976. mutex_unlock(&ftrace_sysctl_lock);
  977. return ret;
  978. }
  979. /**
  980. * unregister_ftrace_function - unresgister a function for profiling.
  981. * @ops - ops structure that holds the function to unregister
  982. *
  983. * Unregister a function that was added to be called by ftrace profiling.
  984. */
  985. int unregister_ftrace_function(struct ftrace_ops *ops)
  986. {
  987. int ret;
  988. mutex_lock(&ftrace_sysctl_lock);
  989. ret = __unregister_ftrace_function(ops);
  990. ftrace_shutdown();
  991. mutex_unlock(&ftrace_sysctl_lock);
  992. return ret;
  993. }
/*
 * Sysctl handler for the ftrace_enabled knob.  Parses the write via
 * proc_dointvec() and, when the value actually changed, switches the
 * global trace hook on or off under ftrace_sysctl_lock.
 */
notrace int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	/* Nothing to do on read, on parse error, or if the value is unchanged. */
	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {
		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			/*
			 * One registered callback: call it directly.
			 * Several: go through the list-walking dispatcher.
			 */
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}