ftrace.c

/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	};
}
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be some lag before the change takes effect.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
	}

	spin_unlock(&ftrace_lock);

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list == &ftrace_list_end ||
		    ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}
#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
/*
 * The hash lock is only needed when the recording of the mcount
 * callers is dynamic, that is, done by the callers themselves and
 * not recorded at compile time.
 */
static DEFINE_SPINLOCK(ftrace_hash_lock);
#define ftrace_hash_lock(flags)	  spin_lock_irqsave(&ftrace_hash_lock, flags)
#define ftrace_hash_unlock(flags) \
			spin_unlock_irqrestore(&ftrace_hash_lock, flags)
static void ftrace_release_hash(unsigned long start, unsigned long end);
#else
/* This is protected via the ftrace_lock with MCOUNT_RECORD. */
#define ftrace_hash_lock(flags)   do { (void)(flags); } while (0)
#define ftrace_hash_unlock(flags) do { } while(0)
static inline void ftrace_release_hash(unsigned long start, unsigned long end)
{
}
#endif
/*
 * Since MCOUNT_ADDR may point to mcount itself, we do not want
 * to get it confused by reading a reference in the code as we
 * are parsing the objcopy output of the text section. Use a
 * variable for it instead.
 */
static unsigned long mcount_addr = MCOUNT_ADDR;

static struct task_struct *ftraced_task;

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
};

static int ftrace_filtered;
static int tracing_on;
static int frozen_record_count;

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
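
/*
 * As a rough worked example (assuming a 64-bit build with 4 KB pages,
 * a 16-byte struct ftrace_page header and a 32-byte struct dyn_ftrace):
 * (4096 - 16) / 32 = 127 records fit in each page. The exact count
 * depends on the architecture and structure layout.
 */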
/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;
static int ftraced_stop;

static int ftrace_record_suspend;

static struct dyn_ftrace *ftrace_free_records;

#ifdef CONFIG_KPROBES
static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */

int skip_trace(unsigned long ip)
{
	unsigned long fl;
	struct dyn_ftrace *rec;
	struct hlist_node *t;
	struct hlist_head *head;

	if (frozen_record_count == 0)
		return 0;

	head = &ftrace_hash[hash_long(ip, FTRACE_HASHBITS)];
	hlist_for_each_entry_rcu(rec, t, head, node) {
		if (rec->ip == ip) {
			if (record_frozen(rec)) {
				if (rec->flags & FTRACE_FL_FAILED)
					return 1;

				if (!(rec->flags & FTRACE_FL_CONVERTED))
					return 1;

				if (!tracing_on || !ftrace_enabled)
					return 1;

				if (ftrace_filtered) {
					fl = rec->flags & (FTRACE_FL_FILTER |
							   FTRACE_FL_NOTRACE);
					if (!fl || (fl & FTRACE_FL_NOTRACE))
						return 1;
				}
			}
			break;
		}
	}

	return 0;
}

static inline int
ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
	struct dyn_ftrace *p;
	struct hlist_node *t;
	int found = 0;

	hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) {
		if (p->ip == ip) {
			found = 1;
			break;
		}
	}

	return found;
}

static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
	hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
}

/* called from kstop_machine */
static inline void ftrace_del_hash(struct dyn_ftrace *node)
{
	hlist_del(&node->node);
}

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;
	int i;

	if (ftrace_disabled || !start)
		return;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if ((rec->ip >= s) && (rec->ip < e))
				ftrace_free_rec(rec);
		}
	}
	spin_unlock(&ftrace_lock);

	ftrace_release_hash(s, e);
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			ftrace_disabled = 1;
			ftrace_enabled = 0;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next)
			return NULL;
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static void
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *node;
	unsigned long flags;
	unsigned long key;
	int resched;
	int cpu;

	if (!ftrace_enabled || ftrace_disabled)
		return;

	resched = need_resched();
	preempt_disable_notrace();
	/*
	 * We simply need to protect against recursion.
	 * Use the raw version of smp_processor_id and not
	 * __get_cpu_var which can call debug hooks that can
	 * cause a recursive crash here.
	 */
	cpu = raw_smp_processor_id();
	per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
	if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
		goto out;

	if (unlikely(ftrace_record_suspend))
		goto out;

	key = hash_long(ip, FTRACE_HASHBITS);

	WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

	if (ftrace_ip_in_hash(ip, key))
		goto out;

	ftrace_hash_lock(flags);

	/* This ip may have hit the hash before the lock */
	if (ftrace_ip_in_hash(ip, key))
		goto out_unlock;

	node = ftrace_alloc_dyn_node(ip);
	if (!node)
		goto out_unlock;

	node->ip = ip;

	ftrace_add_hash(node, key);

	ftraced_trigger = 1;

 out_unlock:
	ftrace_hash_unlock(flags);
 out:
	per_cpu(ftrace_shutdown_disable_cpu, cpu)--;

	/* prevent recursion with scheduler */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

#define FTRACE_ADDR ((long)(ftrace_caller))

static int
__ftrace_replace_code(struct dyn_ftrace *rec,
		      unsigned char *old, unsigned char *new, int enable)
{
	unsigned long ip, fl;

	ip = rec->ip;

	if (ftrace_filtered && enable) {
		/*
		 * If filtering is on:
		 *
		 * If this record is set to be filtered and
		 * is enabled then do nothing.
		 *
		 * If this record is set to be filtered and
		 * it is not enabled, enable it.
		 *
		 * If this record is not set to be filtered
		 * and it is not enabled do nothing.
		 *
		 * If this record is set not to trace then
		 * do nothing.
		 *
		 * If this record is set not to trace and
		 * it is enabled then disable it.
		 *
		 * If this record is not set to be filtered and
		 * it is enabled, disable it.
		 */
		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
				   FTRACE_FL_ENABLED);

		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
		    (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
		    !fl || (fl == FTRACE_FL_NOTRACE))
			return 0;

		/*
		 * If it is enabled disable it,
		 * otherwise enable it!
		 */
		if (fl & FTRACE_FL_ENABLED) {
			/* swap new and old */
			new = old;
			old = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags &= ~FTRACE_FL_ENABLED;
		} else {
			new = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags |= FTRACE_FL_ENABLED;
		}
	} else {
		if (enable) {
			/*
			 * If this record is set not to trace and is
			 * not enabled, do nothing.
			 */
			fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
			if (fl == FTRACE_FL_NOTRACE)
				return 0;

			new = ftrace_call_replace(ip, FTRACE_ADDR);
		} else
			old = ftrace_call_replace(ip, FTRACE_ADDR);

		if (enable) {
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;
			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;
			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	return ftrace_modify_code(ip, old, new);
}
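
/*
 * One way to read the filtering comment above as a truth table (this
 * restates the checks in __ftrace_replace_code() for the
 * "ftrace_filtered && enable" case, it does not add new behaviour):
 *
 *	FILTER	NOTRACE	ENABLED		action
 *	  0	  0	  0		nothing
 *	  0	  0	  1		disable (patch in the nop)
 *	  0	  1	  0		nothing
 *	  0	  1	  1		disable
 *	  1	  0	  0		enable (patch in the call)
 *	  1	  0	  1		nothing (already enabled)
 *	  1	  1	  0		nothing (notrace wins)
 *	  1	  1	  1		disable (notrace wins)
 */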
static void ftrace_replace_code(int enable)
{
	int i, failed;
	unsigned char *new = NULL, *old = NULL;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	if (enable)
		old = ftrace_nop_replace();
	else
		new = ftrace_nop_replace();

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/* don't modify code that has already faulted */
			if (rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, old, new, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_del_hash(rec);
					ftrace_free_rec(rec);
				}
			}
		}
	}
}

static void ftrace_shutdown_replenish(void)
{
	if (ftrace_pages->next)
		return;

	/* allocate another page */
	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static int
ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	unsigned char *nop, *call;
	int failed;

	ip = rec->ip;

	nop = ftrace_nop_replace();
	call = ftrace_call_replace(ip, mcount_addr);

	failed = ftrace_modify_code(ip, call, nop);
	if (failed) {
		switch (failed) {
		case 1:
			WARN_ON_ONCE(1);
			pr_info("ftrace faulted on modifying ");
			print_ip_sym(ip);
			break;
		case 2:
			WARN_ON_ONCE(1);
			pr_info("ftrace failed to modify ");
			print_ip_sym(ip);
			print_ip_ins(" expected: ", call);
			print_ip_ins(" actual: ", (unsigned char *)ip);
			print_ip_ins(" replace: ", nop);
			printk(KERN_CONT "\n");
			break;
		}

		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

static int __ftrace_update_code(void *ignore);

static int __ftrace_modify_code(void *data)
{
	unsigned long addr;
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS) {
		/*
		 * Update any recorded ips now that we have the
		 * machine stopped
		 */
		__ftrace_update_code(NULL);
		ftrace_replace_code(1);
		tracing_on = 1;
	} else if (*command & FTRACE_DISABLE_CALLS) {
		ftrace_replace_code(0);
		tracing_on = 0;
	}

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_ENABLE_MCOUNT) {
		addr = (unsigned long)ftrace_record_ip;
		ftrace_mcount_set(&addr);
	} else if (*command & FTRACE_DISABLE_MCOUNT) {
		addr = (unsigned long)ftrace_stub;
		ftrace_mcount_set(&addr);
	}

	return 0;
}

static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

void ftrace_disable_daemon(void)
{
	/* Stop the daemon from calling kstop_machine */
	mutex_lock(&ftraced_lock);
	ftraced_stop = 1;
	mutex_unlock(&ftraced_lock);

	ftrace_force_update();
}

void ftrace_enable_daemon(void)
{
	mutex_lock(&ftraced_lock);
	ftraced_stop = 0;
	mutex_unlock(&ftraced_lock);

	ftrace_force_update();
}

static ftrace_func_t saved_ftrace_func;

static void ftrace_startup(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend++;
	if (ftraced_suspend == 1)
		command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend--;
	if (!ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftraced_suspend is true if we want ftrace running */
	if (ftraced_suspend)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* ftraced_suspend is true if ftrace is running */
	if (ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int __ftrace_update_code(void *ignore)
{
	int i, save_ftrace_enabled;
	cycle_t start, stop;
	struct dyn_ftrace *p;
	struct hlist_node *t, *n;
	struct hlist_head *head, temp_list;

	/* Don't be recording funcs now */
	ftrace_record_suspend++;
	save_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	/* No locks needed, the machine is stopped! */
	for (i = 0; i < FTRACE_HASHSIZE; i++) {
		INIT_HLIST_HEAD(&temp_list);
		head = &ftrace_hash[i];

		/* all CPUS are stopped, we are safe to modify code */
		hlist_for_each_entry_safe(p, t, n, head, node) {
			/* Skip over failed records which have not been
			 * freed. */
			if (p->flags & FTRACE_FL_FAILED)
				continue;
			/* Unconverted records are always at the head of the
			 * hash bucket. Once we encounter a converted record,
			 * simply skip over to the next bucket. Saves ftraced
			 * some processor cycles (ftrace does its bit for
			 * global warming :-p ). */
			if (p->flags & (FTRACE_FL_CONVERTED))
				break;

			/* Ignore updates to this record's mcount site.
			 * Reintroduce this record at the head of this
			 * bucket to attempt to "convert" it again if
			 * the kprobe on it is unregistered before the
			 * next run. */
			if (get_kprobe((void *)p->ip)) {
				ftrace_del_hash(p);
				INIT_HLIST_NODE(&p->node);
				hlist_add_head(&p->node, &temp_list);
				freeze_record(p);
				continue;
			} else {
				unfreeze_record(p);
			}

			/* convert record (i.e, patch mcount-call with NOP) */
			if (ftrace_code_disable(p)) {
				p->flags |= FTRACE_FL_CONVERTED;
				ftrace_update_cnt++;
			} else {
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(p->ip)) {
					ftrace_del_hash(p);
					ftrace_free_rec(p);
				}
			}
		}

		hlist_for_each_entry_safe(p, t, n, &temp_list, node) {
			hlist_del(&p->node);
			INIT_HLIST_NODE(&p->node);
			hlist_add_head(&p->node, head);
		}
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;
	ftraced_trigger = 0;

	ftrace_enabled = save_ftrace_enabled;
	ftrace_record_suspend--;

	return 0;
}

static int ftrace_update_code(void)
{
	if (unlikely(ftrace_disabled) ||
	    !ftrace_enabled || !ftraced_trigger)
		return 0;

	stop_machine(__ftrace_update_code, NULL, NULL);

	return 1;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *  final linking to find all calls to ftrace.
	 *  Then we can:
	 *   a) know how many pages to allocate.
	 *   and/or
	 *   b) set up the table then.
	 *
	 * The dynamic code is still necessary for
	 * modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld hash entries in %d pages\n",
		num_to_init, cnt);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}
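
/*
 * Rough sizing sketch (using the illustrative 127-records-per-page figure
 * assumed earlier for a 64-bit build): NR_TO_INIT = 10000 entries gives
 * 10000 / 127 = 78 extra pages, so about 79 pages in total including the
 * first one, i.e. on the order of 300 KB reserved for call-site records.
 */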
enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t			pos;
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||
		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||
		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||
		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}
	spin_unlock(&ftrace_lock);

	iter->pos = *pos;

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos != iter->pos) {
		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
			;
	} else {
		l = *pos;
		p = t_next(m, p, &l);
	}

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}

static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};
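
/*
 * Reading ftrace_match() below, the supported wildcard forms map to
 * these match types (example patterns are purely illustrative):
 *
 *	"sys_open"	-> MATCH_FULL		exact symbol name
 *	"sys_*"		-> MATCH_FRONT_ONLY	symbols starting with "sys_"
 *	"*_init"	-> MATCH_END_ONLY	symbols ending with "_init"
 *	"*spin*"	-> MATCH_MIDDLE_ONLY	symbols containing "spin"
 */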
static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= flag;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}

static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}
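
/*
 * Hypothetical usage sketch (the caller and pattern below are illustrative,
 * not from this file): a tracer that only cares about wakeup paths might do
 *
 *	static char pattern[] = "*wake_up*";
 *	ftrace_set_filter(pattern, strlen(pattern), 1);
 *
 * Note that ftrace_match() writes a NUL into the buffer when it finds a
 * wildcard, so the pattern must live in writable memory, not a string
 * literal in read-only data.
 */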
static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);
	if (iter->filtered && ftraced_suspend && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}

static ssize_t
ftraced_read(struct file *filp, char __user *ubuf,
	     size_t cnt, loff_t *ppos)
{
	/* don't worry about races */
	char *buf = ftraced_stop ? "disabled\n" : "enabled\n";
	int r = strlen(buf);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
ftraced_write(struct file *filp, const char __user *ubuf,
	      size_t cnt, loff_t *ppos)
{
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	if (strncmp(buf, "enable", 6) == 0)
		val = 1;
	else if (strncmp(buf, "disable", 7) == 0)
		val = 0;
	else {
		buf[cnt] = 0;

		ret = strict_strtoul(buf, 10, &val);
		if (ret < 0)
			return ret;

		val = !!val;
	}

	if (val)
		ftrace_enable_daemon();
	else
		ftrace_disable_daemon();

	filp->f_pos += cnt;

	return cnt;
}

static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};

static struct file_operations ftraced_fops = {
	.open = tracing_open_generic,
	.read = ftraced_read,
	.write = ftraced_write,
};

/**
 * ftrace_force_update - force an update to all recording ftrace functions
 */
int ftrace_force_update(void)
{
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);

	/*
	 * If ftraced_trigger is not set, then there is nothing
	 * to update.
	 */
	if (ftraced_trigger && !ftrace_update_code())
		ret = -EBUSY;

	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

static void ftrace_force_shutdown(void)
{
	struct task_struct *task;
	int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;

	mutex_lock(&ftraced_lock);
	task = ftraced_task;
	ftraced_task = NULL;
	ftraced_suspend = -1;
	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);

	if (task)
		kthread_stop(task);
}

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

	entry = debugfs_create_file("ftraced_enabled", 0644, d_tracer,
				    NULL, &ftraced_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'ftraced_enabled' entry\n");

	return 0;
}

fs_initcall(ftrace_init_debugfs);
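
/*
 * Assuming debugfs is mounted at /sys/kernel/debug and tracing_init_dentry()
 * returns the usual "tracing" directory, the files created above can be
 * driven from userspace roughly like this (illustrative sketch only):
 *
 *	cd /sys/kernel/debug/tracing
 *	cat available_filter_functions		# list traceable functions
 *	echo 'sys_*' > set_ftrace_filter	# trace only sys_* functions
 *	echo '*_lock' > set_ftrace_notrace	# never trace *_lock functions
 *	echo disable > ftraced_enabled		# stop the ftraced daemon
 */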
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
static int ftrace_convert_nops(unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/* should not be called from interrupt context */
		spin_lock(&ftrace_lock);
		ftrace_record_ip(addr);
		spin_unlock(&ftrace_lock);
		ftrace_shutdown_replenish();
	}

	/* p is ignored */
	local_irq_save(flags);
	__ftrace_update_code(p);
	local_irq_restore(flags);

	return 0;
}

void ftrace_init_module(unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(start, end);
}

extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(__start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}
#else /* CONFIG_FTRACE_MCOUNT_RECORD */
static void ftrace_release_hash(unsigned long start, unsigned long end)
{
	struct dyn_ftrace *rec;
	struct hlist_node *t, *n;
	struct hlist_head *head, temp_list;
	unsigned long flags;
	int i, cpu;

	preempt_disable_notrace();
	/* disable in case we call something that calls mcount */
	cpu = raw_smp_processor_id();
	per_cpu(ftrace_shutdown_disable_cpu, cpu)++;

	ftrace_hash_lock(flags);

	for (i = 0; i < FTRACE_HASHSIZE; i++) {
		INIT_HLIST_HEAD(&temp_list);
		head = &ftrace_hash[i];

		/* all CPUS are stopped, we are safe to modify code */
		hlist_for_each_entry_safe(rec, t, n, head, node) {
			if (rec->flags & FTRACE_FL_FREE)
				continue;

			if ((rec->ip >= start) && (rec->ip < end))
				ftrace_free_rec(rec);
		}
	}

	ftrace_hash_unlock(flags);

	per_cpu(ftrace_shutdown_disable_cpu, cpu)--;
	preempt_enable_notrace();
}

static int ftraced(void *ignore)
{
	unsigned long usecs;

	while (!kthread_should_stop()) {

		set_current_state(TASK_INTERRUPTIBLE);

		/* check once a second */
		schedule_timeout(HZ);

		if (unlikely(ftrace_disabled))
			continue;

		mutex_lock(&ftrace_sysctl_lock);
		mutex_lock(&ftraced_lock);
		if (!ftraced_suspend && !ftraced_stop &&
		    ftrace_update_code()) {
			usecs = nsecs_to_usecs(ftrace_update_time);
			if (ftrace_update_tot_cnt > 100000) {
				ftrace_update_tot_cnt = 0;
				pr_info("hm, dftrace overflow: %lu change%s"
					" (%lu total) in %lu usec%s\n",
					ftrace_update_cnt,
					ftrace_update_cnt != 1 ? "s" : "",
					ftrace_update_tot_cnt,
					usecs, usecs != 1 ? "s" : "");
				ftrace_disabled = 1;
				WARN_ON_ONCE(1);
			}
		}
		mutex_unlock(&ftraced_lock);
		mutex_unlock(&ftrace_sysctl_lock);

		ftrace_shutdown_replenish();
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static int __init ftrace_dynamic_init(void)
{
	struct task_struct *p;
	unsigned long addr;
	int ret;

	addr = (unsigned long)ftrace_record_ip;

	stop_machine(ftrace_dyn_arch_init, &addr, NULL);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr) {
		ret = (int)addr;
		goto failed;
	}

	ret = ftrace_dyn_table_alloc(NR_TO_INIT);
	if (ret)
		goto failed;

	p = kthread_run(ftraced, NULL, "ftraced");
	if (IS_ERR(p)) {
		ret = -1;
		goto failed;
	}

	last_ftrace_enabled = ftrace_enabled = 1;
	ftraced_task = p;

	return 0;

 failed:
	ftrace_disabled = 1;
	return ret;
}

core_initcall(ftrace_dynamic_init);
#endif /* CONFIG_FTRACE_MCOUNT_RECORD */

#else
# define ftrace_startup()		do { } while (0)
# define ftrace_shutdown()		do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
# define ftrace_force_shutdown()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
/**
 * ftrace_kill_atomic - kill ftrace from critical sections
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. If you need to simply kill ftrace
 * from a non-atomic section, use ftrace_kill.
 */
void ftrace_kill_atomic(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
#ifdef CONFIG_DYNAMIC_FTRACE
	ftraced_suspend = -1;
#endif
	clear_ftrace_function();
}
/**
 * ftrace_kill - totally shutdown ftrace
 *
 * This is a safety measure. If something was detected that seems
 * wrong, calling this function will keep ftrace from doing
 * any more modifications or updates.
 * Used when something has gone wrong.
 */
void ftrace_kill(void)
{
	mutex_lock(&ftrace_sysctl_lock);
	ftrace_disabled = 1;
	ftrace_enabled = 0;

	clear_ftrace_function();
	mutex_unlock(&ftrace_sysctl_lock);

	/* Try to totally disable ftrace */
	ftrace_force_shutdown();
}
/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __register_ftrace_function(ops);
	ftrace_startup();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
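
/*
 * Minimal usage sketch (hypothetical caller, not part of this file): a
 * client defines a notrace callback and a struct ftrace_ops, registers it
 * from its init path, and unregisters it when done:
 *
 *	static atomic_long_t my_hit_count;
 *
 *	static void notrace my_trace_call(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *		atomic_long_inc(&my_hit_count);
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly =
 *	{
 *		.func = my_trace_call,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 *
 * Everything reachable from my_trace_call() must itself be notrace (or be
 * compiled without mcount), or the callback recurses as noted above.
 */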
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
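
/*
 * This handler sits behind the kernel.ftrace_enabled sysctl (assuming the
 * usual ctl_table wiring in kernel/sysctl.c), so tracing can be toggled
 * from userspace with, for example:
 *
 *	sysctl kernel.ftrace_enabled=0
 *	echo 1 > /proc/sys/kernel/ftrace_enabled
 */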