trace.c

/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *   Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/utsrelease.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/fs.h>

#include "trace.h"

unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX;
unsigned long __read_mostly tracing_thresh;

static int tracing_disabled = 1;

static long notrace
ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

static atomic_t tracer_counter;
static struct trace_array global_trace;

static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);

static struct trace_array max_tr;

static DEFINE_PER_CPU(struct trace_array_cpu, max_data);

static int tracer_enabled;
static unsigned long trace_nr_entries = 16384UL;

static struct tracer *trace_types __read_mostly;
static struct tracer *current_trace __read_mostly;
static int max_tracer_type_len;

static DEFINE_MUTEX(trace_types_lock);

#define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(struct trace_entry))

static int __init set_nr_entries(char *str)
{
	if (!str)
		return 0;
	trace_nr_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("trace_entries=", set_nr_entries);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,

	__TRACE_LAST_TYPE
};

enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF = 0x01,
	TRACE_FLAG_NEED_RESCHED = 0x02,
	TRACE_FLAG_HARDIRQ = 0x04,
	TRACE_FLAG_SOFTIRQ = 0x08,
};

enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT = 0x01,
	TRACE_ITER_SYM_OFFSET = 0x02,
	TRACE_ITER_SYM_ADDR = 0x04,
	TRACE_ITER_VERBOSE = 0x08,
};

#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

/* These must match the bit positions above */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	NULL
};

static unsigned trace_flags;

static DEFINE_SPINLOCK(ftrace_max_lock);

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /debugfs/tracing/latency_trace)
 */
static notrace void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data = tr->data[cpu];

	max_tr.cpu = cpu;
	max_tr.time_start = data->preempt_timestamp;

	data = max_tr.data[cpu];
	data->saved_latency = tracing_max_latency;

	memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
	data->pid = tsk->pid;
	data->uid = tsk->uid;
	data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	data->policy = tsk->policy;
	data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(current);
}

void check_pages(struct trace_array_cpu *data)
{
	struct page *page, *tmp;

	BUG_ON(data->trace_pages.next->prev != &data->trace_pages);
	BUG_ON(data->trace_pages.prev->next != &data->trace_pages);

	list_for_each_entry_safe(page, tmp, &data->trace_pages, lru) {
		BUG_ON(page->lru.next->prev != &page->lru);
		BUG_ON(page->lru.prev->next != &page->lru);
	}
}

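/*
 * head_page - return the address of the first data page of a per-cpu
 * trace buffer, or NULL if no pages have been allocated for it yet.
 */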
void *head_page(struct trace_array_cpu *data)
{
	struct page *page;

	check_pages(data);
	if (list_empty(&data->trace_pages))
		return NULL;

	page = list_entry(data->trace_pages.next, struct page, lru);
	BUG_ON(&page->lru == &data->trace_pages);

	return page_address(page);
}

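/*
 * flip_trace - exchange the page lists of two per-cpu buffers and copy
 * the ring buffer bookkeeping (everything from trace_head_idx onward)
 * from tr2 into tr1, so the max buffer can take over the live buffer's
 * pages without copying individual entries.
 */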
notrace static void
flip_trace(struct trace_array_cpu *tr1, struct trace_array_cpu *tr2)
{
	struct list_head flip_pages;

	INIT_LIST_HEAD(&flip_pages);

	memcpy(&tr1->trace_head_idx, &tr2->trace_head_idx,
		sizeof(struct trace_array_cpu) -
		offsetof(struct trace_array_cpu, trace_head_idx));

	check_pages(tr1);
	check_pages(tr2);
	list_splice_init(&tr1->trace_pages, &flip_pages);
	list_splice_init(&tr2->trace_pages, &tr1->trace_pages);
	list_splice_init(&flip_pages, &tr2->trace_pages);
	BUG_ON(!list_empty(&flip_pages));
	check_pages(tr1);
	check_pages(tr2);
}

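/**
 * update_max_tr - snapshot all per-cpu buffers into the max trace
 * @tr - tracer
 * @tsk - the task with the latency
 * @cpu - the cpu of the buffer that triggered the update
 *
 * Flips every per-cpu buffer into max_tr, resets the live buffers, and
 * records the task and latency details of the new maximum.
 */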
notrace void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data;
	int i;

	WARN_ON_ONCE(!irqs_disabled());
	spin_lock(&ftrace_max_lock);
	/* clear out all the previous traces */
	for_each_possible_cpu(i) {
		data = tr->data[i];
		flip_trace(max_tr.data[i], data);
		tracing_reset(data);
	}

	__update_max_tr(tr, tsk, cpu);
	spin_unlock(&ftrace_max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 */
notrace void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data = tr->data[cpu];
	int i;

	WARN_ON_ONCE(!irqs_disabled());
	spin_lock(&ftrace_max_lock);
	for_each_possible_cpu(i)
		tracing_reset(max_tr.data[i]);

	flip_trace(max_tr.data[cpu], data);
	tracing_reset(data);

	__update_max_tr(tr, tsk, cpu);
	spin_unlock(&ftrace_max_lock);
}

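/*
 * register_tracer - add a tracer plugin to the list of available
 * tracers. Rejects duplicate names, optionally runs the tracer's
 * selftest against the global buffer, and keeps max_tracer_type_len
 * up to date for the debugfs current_tracer interface.
 */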
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int len;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	mutex_lock(&trace_types_lock);
	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Trace %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

#ifdef CONFIG_FTRACE_STARTUP_TEST
	if (type->selftest) {
		struct tracer *saved_tracer = current_trace;
		struct trace_array_cpu *data;
		struct trace_array *tr = &global_trace;
		int saved_ctrl = tr->ctrl;
		int i;
		/*
		 * Run a selftest on this tracer.
		 * Here we reset the trace buffer, and set the current
		 * tracer to be this tracer. The tracer can then run some
		 * internal tracing to verify that everything is in order.
		 * If we fail, we do not register this tracer.
		 */
		for_each_possible_cpu(i) {
			data = tr->data[i];
			if (!head_page(data))
				continue;
			tracing_reset(data);
		}
		current_trace = type;
		tr->ctrl = 0;
		/* the test is responsible for initializing and enabling */
		pr_info("Testing tracer %s: ", type->name);
		ret = type->selftest(type, tr);
		/* the test is responsible for resetting too */
		current_trace = saved_tracer;
		tr->ctrl = saved_ctrl;
		if (ret) {
			printk(KERN_CONT "FAILED!\n");
			goto out;
		}
		/* Only reset on passing, to avoid touching corrupted buffers */
		for_each_possible_cpu(i) {
			data = tr->data[i];
			if (!head_page(data))
				continue;
			tracing_reset(data);
		}
		printk(KERN_CONT "PASSED\n");
	}
#endif

	type->next = trace_types;
	trace_types = type;
	len = strlen(type->name);
	if (len > max_tracer_type_len)
		max_tracer_type_len = len;

 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

void unregister_tracer(struct tracer *type)
{
	struct tracer **t;
	int len;

	mutex_lock(&trace_types_lock);
	for (t = &trace_types; *t; t = &(*t)->next) {
		if (*t == type)
			goto found;
	}
	pr_info("Trace %s not registered\n", type->name);
	goto out;

 found:
	*t = (*t)->next;
	if (strlen(type->name) != max_tracer_type_len)
		goto out;

	max_tracer_type_len = 0;
	for (t = &trace_types; *t; t = &(*t)->next) {
		len = strlen((*t)->name);
		if (len > max_tracer_type_len)
			max_tracer_type_len = len;
	}
 out:
	mutex_unlock(&trace_types_lock);
}

notrace void tracing_reset(struct trace_array_cpu *data)
{
	data->trace_idx = 0;
	data->trace_head = data->trace_tail = head_page(data);
	data->trace_head_idx = 0;
	data->trace_tail_idx = 0;
}

#ifdef CONFIG_FTRACE
static notrace void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (unlikely(!tracer_enabled))
		return;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		ftrace(tr, data, ip, parent_ip, flags);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
};
#endif

notrace void tracing_start_function_trace(void)
{
	register_ftrace_function(&trace_ops);
}

notrace void tracing_stop_function_trace(void)
{
	unregister_ftrace_function(&trace_ops);
}

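/*
 * The output code does not dereference task structs, so the comm of
 * each traced pid is cached here: map_pid_to_cmdline maps a pid to a
 * slot in saved_cmdlines, and map_cmdline_to_pid maps the slot back so
 * a recycled slot can invalidate its stale pid mapping.
 */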
#define SAVED_CMDLINES 128
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static DEFINE_SPINLOCK(trace_cmdline_lock);
atomic_t trace_record_cmdline_disabled;

static void trace_init_cmdlines(void)
{
	memset(&map_pid_to_cmdline, -1, sizeof(map_pid_to_cmdline));
	memset(&map_cmdline_to_pid, -1, sizeof(map_cmdline_to_pid));
	cmdline_idx = 0;
}

notrace void trace_stop_cmdline_recording(void);

static notrace void trace_save_cmdline(struct task_struct *tsk)
{
	unsigned map;
	unsigned idx;

	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!spin_trylock(&trace_cmdline_lock))
		return;

	idx = map_pid_to_cmdline[tsk->pid];
	if (idx >= SAVED_CMDLINES) {
		idx = (cmdline_idx + 1) % SAVED_CMDLINES;

		map = map_cmdline_to_pid[idx];
		if (map <= PID_MAX_DEFAULT)
			map_pid_to_cmdline[map] = (unsigned)-1;

		map_pid_to_cmdline[tsk->pid] = idx;
		/* keep the reverse map in sync so the slot can be recycled */
		map_cmdline_to_pid[idx] = tsk->pid;

		cmdline_idx = idx;
	}

	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

	spin_unlock(&trace_cmdline_lock);
}

static notrace char *trace_find_cmdline(int pid)
{
	char *cmdline = "<...>";
	unsigned map;

	if (!pid)
		return "<idle>";

	if (pid > PID_MAX_DEFAULT)
		goto out;

	map = map_pid_to_cmdline[pid];
	if (map >= SAVED_CMDLINES)
		goto out;

	cmdline = saved_cmdlines[map];

 out:
	return cmdline;
}

notrace void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled))
		return;

	trace_save_cmdline(tsk);
}

static inline notrace struct list_head *
trace_next_list(struct trace_array_cpu *data, struct list_head *next)
{
	/*
	 * Roundrobin - but skip the head (which is not a real page):
	 */
	next = next->next;
	if (unlikely(next == &data->trace_pages))
		next = next->next;
	BUG_ON(next == &data->trace_pages);

	return next;
}

static inline notrace void *
trace_next_page(struct trace_array_cpu *data, void *addr)
{
	struct list_head *next;
	struct page *page;

	page = virt_to_page(addr);

	next = trace_next_list(data, &page->lru);
	page = list_entry(next, struct page, lru);

	return page_address(page);
}

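/*
 * Reserve the next entry slot in the per-cpu ring buffer and advance
 * the head index. If the head catches up with the tail, the oldest
 * entry is overwritten and the tail is pushed forward (moving to the
 * next page when it reaches the end of the current one).
 */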
static inline notrace struct trace_entry *
tracing_get_trace_entry(struct trace_array *tr, struct trace_array_cpu *data)
{
	unsigned long idx, idx_next;
	struct trace_entry *entry;

	data->trace_idx++;
	idx = data->trace_head_idx;
	idx_next = idx + 1;

	BUG_ON(idx * TRACE_ENTRY_SIZE >= PAGE_SIZE);

	entry = data->trace_head + idx * TRACE_ENTRY_SIZE;

	if (unlikely(idx_next >= ENTRIES_PER_PAGE)) {
		data->trace_head = trace_next_page(data, data->trace_head);
		idx_next = 0;
	}

	if (data->trace_head == data->trace_tail &&
	    idx_next == data->trace_tail_idx) {
		/* overrun */
		data->trace_tail_idx++;
		if (data->trace_tail_idx >= ENTRIES_PER_PAGE) {
			data->trace_tail =
				trace_next_page(data, data->trace_tail);
			data->trace_tail_idx = 0;
		}
	}

	data->trace_head_idx = idx_next;

	return entry;
}

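/*
 * Fill in the fields common to every trace entry: the global ordering
 * counter, preempt count, pid, per-cpu timestamp, and the irq/softirq/
 * need-resched flags derived from the current context.
 */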
static inline notrace void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
{
	struct task_struct *tsk = current;
	unsigned long pc;

	pc = preempt_count();

	entry->idx = atomic_inc_return(&tracer_counter);
	entry->preempt_count = pc & 0xff;
	entry->pid = tsk->pid;
	entry->t = now(raw_smp_processor_id());
	entry->flags = (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}

notrace void
ftrace(struct trace_array *tr, struct trace_array_cpu *data,
       unsigned long ip, unsigned long parent_ip, unsigned long flags)
{
	struct trace_entry *entry;

	entry = tracing_get_trace_entry(tr, data);
	tracing_generic_entry_update(entry, flags);
	entry->type = TRACE_FN;
	entry->fn.ip = ip;
	entry->fn.parent_ip = parent_ip;
}

notrace void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct trace_array_cpu *data,
			   struct task_struct *prev, struct task_struct *next,
			   unsigned long flags)
{
	struct trace_entry *entry;

	entry = tracing_get_trace_entry(tr, data);
	tracing_generic_entry_update(entry, flags);
	entry->type = TRACE_CTX;
	entry->ctx.prev_pid = prev->pid;
	entry->ctx.prev_prio = prev->prio;
	entry->ctx.prev_state = prev->state;
	entry->ctx.next_pid = next->pid;
	entry->ctx.next_prio = next->prio;
}

enum trace_file_type {
	TRACE_FILE_LAT_FMT = 1,
};

static struct trace_entry *
trace_entry_idx(struct trace_array *tr, struct trace_array_cpu *data,
		struct trace_iterator *iter, int cpu)
{
	struct page *page;
	struct trace_entry *array;

	if (iter->next_idx[cpu] >= tr->entries ||
	    iter->next_idx[cpu] >= data->trace_idx)
		return NULL;

	if (!iter->next_page[cpu]) {
		/* Initialize the iterator for this cpu trace buffer */
		WARN_ON(!data->trace_tail);
		page = virt_to_page(data->trace_tail);
		iter->next_page[cpu] = &page->lru;
		iter->next_page_idx[cpu] = data->trace_tail_idx;
	}

	page = list_entry(iter->next_page[cpu], struct page, lru);
	BUG_ON(&data->trace_pages == &page->lru);

	array = page_address(page);

	/* Still possible to catch up to the tail */
	if (iter->next_idx[cpu] && array == data->trace_tail &&
	    iter->next_page_idx[cpu] == data->trace_tail_idx)
		return NULL;

	WARN_ON(iter->next_page_idx[cpu] >= ENTRIES_PER_PAGE);
	return &array[iter->next_page_idx[cpu]];
}

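/*
 * Merge step for the readers: look at the next unread entry of every
 * per-cpu buffer and return the one with the lowest ordering index,
 * i.e. the oldest entry across all CPUs.
 */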
static notrace struct trace_entry *
find_next_entry(struct trace_iterator *iter, int *ent_cpu)
{
	struct trace_array *tr = iter->tr;
	struct trace_entry *ent, *next = NULL;
	int next_cpu = -1;
	int cpu;

	for_each_possible_cpu(cpu) {
		if (!head_page(tr->data[cpu]))
			continue;
		ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu);
		if (ent &&
		    (!next || (long)(next->idx - ent->idx) > 0)) {
			next = ent;
			next_cpu = cpu;
		}
	}

	if (ent_cpu)
		*ent_cpu = next_cpu;

	return next;
}

static void *find_next_entry_inc(struct trace_iterator *iter)
{
	struct trace_entry *next;
	int next_cpu = -1;

	next = find_next_entry(iter, &next_cpu);
	if (next) {
		iter->idx++;
		iter->next_idx[next_cpu]++;
		iter->next_page_idx[next_cpu]++;

		if (iter->next_page_idx[next_cpu] >= ENTRIES_PER_PAGE) {
			struct trace_array_cpu *data = iter->tr->data[next_cpu];

			iter->next_page_idx[next_cpu] = 0;
			iter->next_page[next_cpu] =
				trace_next_list(data, iter->next_page[next_cpu]);
		}
	}
	iter->prev_ent = iter->ent;
	iter->prev_cpu = iter->cpu;

	iter->ent = next;
	iter->cpu = next_cpu;

	return next ? iter : NULL;
}

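/*
 * seq_file iteration callbacks for the trace and latency_trace files:
 * s_start() positions the iterator at *pos (rewinding the per-cpu
 * cursors when the file is read from the beginning), s_next() steps to
 * the next-oldest entry across all CPUs, and s_stop() drops the locks
 * taken in s_start().
 */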
static notrace void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	void *last_ent = iter->ent;
	int i = (int)*pos;
	void *ent;

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	if (iter->idx < 0)
		ent = find_next_entry_inc(iter);
	else
		ent = iter;

	while (ent && iter->idx < i)
		ent = find_next_entry_inc(iter);

	iter->pos = *pos;

	if (last_ent && !ent)
		seq_puts(m, "\n\nvim:ft=help\n");

	return ent;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = 0;
	int i;

	mutex_lock(&trace_types_lock);

	if (!current_trace || current_trace != iter->trace)
		return NULL;

	atomic_inc(&trace_record_cmdline_disabled);

	/* let the tracer grab locks here if needed */
	if (current_trace->start)
		current_trace->start(iter);

	if (*pos != iter->pos) {
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;
		iter->prev_ent = NULL;
		iter->prev_cpu = -1;

		for_each_possible_cpu(i) {
			iter->next_idx[i] = 0;
			iter->next_page[i] = NULL;
		}

		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		l = *pos - 1;
		p = s_next(m, p, &l);
	}

	return p;
}

static void s_stop(struct seq_file *m, void *p)
{
	struct trace_iterator *iter = m->private;

	atomic_dec(&trace_record_cmdline_disabled);

	/* let the tracer release locks here if needed */
	if (current_trace && current_trace == iter->trace && iter->trace->stop)
		iter->trace->stop(iter);

	mutex_unlock(&trace_types_lock);
}

static void
seq_print_sym_short(struct seq_file *m, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];

	kallsyms_lookup(address, NULL, NULL, NULL, str);

	seq_printf(m, fmt, str);
#endif
}

static void
seq_print_sym_offset(struct seq_file *m, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];

	sprint_symbol(str, address);
	seq_printf(m, fmt, str);
#endif
}

#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif

static notrace void
seq_print_ip_sym(struct seq_file *m, unsigned long ip, unsigned long sym_flags)
{
	if (!ip) {
		seq_printf(m, "0");
		return;
	}

	if (sym_flags & TRACE_ITER_SYM_OFFSET)
		seq_print_sym_offset(m, "%s", ip);
	else
		seq_print_sym_short(m, "%s", ip);

	if (sym_flags & TRACE_ITER_SYM_ADDR)
		seq_printf(m, " <" IP_FMT ">", ip);
}

static notrace void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                _------=> CPU#            \n");
	seq_puts(m, "#               / _-----=> irqs-off        \n");
	seq_puts(m, "#              | / _----=> need-resched    \n");
	seq_puts(m, "#              || / _---=> hardirq/softirq \n");
	seq_puts(m, "#              ||| / _--=> preempt-depth   \n");
	seq_puts(m, "#              |||| /                      \n");
	seq_puts(m, "#              |||||     delay             \n");
	seq_puts(m, "#  cmd     pid ||||| time  |   caller      \n");
	seq_puts(m, "#     \\   /    |||||   \\   |   /           \n");
}

static notrace void print_func_help_header(struct seq_file *m)
{
	seq_puts(m, "#           TASK-PID   CPU#    TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |      |          |         |\n");
}

static notrace void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_array *tr = iter->tr;
	struct trace_array_cpu *data = tr->data[tr->cpu];
	struct tracer *type = current_trace;
	unsigned long total = 0;
	unsigned long entries = 0;
	int cpu;
	const char *name = "preemption";

	if (type)
		name = type->name;

	for_each_possible_cpu(cpu) {
		if (head_page(tr->data[cpu])) {
			total += tr->data[cpu]->trace_idx;
			if (tr->data[cpu]->trace_idx > tr->entries)
				entries += tr->entries;
			else
				entries += tr->data[cpu]->trace_idx;
		}
	}

	seq_printf(m, "%s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "-----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, " latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries,
		   total,
		   tr->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT_DESKTOP)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "    -----------------\n");
	seq_printf(m, "    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid, data->uid, data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, " => started at: ");
		seq_print_ip_sym(m, data->critical_start, sym_flags);
		seq_puts(m, "\n => ended at:   ");
		seq_print_ip_sym(m, data->critical_end, sym_flags);
		seq_puts(m, "\n");
	}

	seq_puts(m, "\n");
}

static notrace void
lat_print_generic(struct seq_file *m, struct trace_entry *entry, int cpu)
{
	int hardirq, softirq;
	char *comm;

	comm = trace_find_cmdline(entry->pid);

	seq_printf(m, "%8.8s-%-5d ", comm, entry->pid);
	seq_printf(m, "%d", cpu);
	seq_printf(m, "%c%c",
		   (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.',
		   ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'));

	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
	if (hardirq && softirq)
		seq_putc(m, 'H');
	else {
		if (hardirq)
			seq_putc(m, 'h');
		else {
			if (softirq)
				seq_putc(m, 's');
			else
				seq_putc(m, '.');
		}
	}

	if (entry->preempt_count)
		seq_printf(m, "%x", entry->preempt_count);
	else
		seq_puts(m, ".");
}

unsigned long preempt_mark_thresh = 100;

static notrace void
lat_print_timestamp(struct seq_file *m, unsigned long long abs_usecs,
		    unsigned long rel_usecs)
{
	seq_printf(m, " %4lldus", abs_usecs);
	if (rel_usecs > preempt_mark_thresh)
		seq_puts(m, "!: ");
	else if (rel_usecs > 1)
		seq_puts(m, "+: ");
	else
		seq_puts(m, " : ");
}

static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

static notrace void
print_lat_fmt(struct seq_file *m, struct trace_iterator *iter,
	      unsigned int trace_idx, int cpu)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *next_entry = find_next_entry(iter, NULL);
	unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
	struct trace_entry *entry = iter->ent;
	unsigned long abs_usecs;
	unsigned long rel_usecs;
	char *comm;
	int S;

	if (!next_entry)
		next_entry = entry;
	rel_usecs = ns2usecs(next_entry->t - entry->t);
	abs_usecs = ns2usecs(entry->t - iter->tr->time_start);

	if (verbose) {
		comm = trace_find_cmdline(entry->pid);
		seq_printf(m, "%16s %5d %d %d %08x %08x [%08lx]"
			   " %ld.%03ldms (+%ld.%03ldms): ",
			   comm,
			   entry->pid, cpu, entry->flags,
			   entry->preempt_count, trace_idx,
			   ns2usecs(entry->t),
			   abs_usecs/1000,
			   abs_usecs % 1000, rel_usecs/1000, rel_usecs % 1000);
	} else {
		lat_print_generic(m, entry, cpu);
		lat_print_timestamp(m, abs_usecs, rel_usecs);
	}
	switch (entry->type) {
	case TRACE_FN:
		seq_print_ip_sym(m, entry->fn.ip, sym_flags);
		seq_puts(m, " (");
		seq_print_ip_sym(m, entry->fn.parent_ip, sym_flags);
		seq_puts(m, ")\n");
		break;
	case TRACE_CTX:
		S = entry->ctx.prev_state < sizeof(state_to_char) ?
			state_to_char[entry->ctx.prev_state] : 'X';
		comm = trace_find_cmdline(entry->ctx.next_pid);
		seq_printf(m, " %d:%d:%c --> %d:%d %s\n",
			   entry->ctx.prev_pid,
			   entry->ctx.prev_prio,
			   S,
			   entry->ctx.next_pid,
			   entry->ctx.next_prio,
			   comm);
		break;
	default:
		seq_printf(m, "Unknown type %d\n", entry->type);
	}
}

static notrace void sync_time_offset(struct trace_iterator *iter)
{
	struct trace_array_cpu *prev_array, *array;
	struct trace_entry *prev_entry, *entry;
	cycle_t prev_t, t;

	entry = iter->ent;
	prev_entry = iter->prev_ent;
	if (!prev_entry)
		return;

	prev_array = iter->tr->data[iter->prev_cpu];
	array = iter->tr->data[iter->cpu];

	prev_t = prev_entry->t + prev_array->time_offset;
	t = entry->t + array->time_offset;

	/*
	 * If time goes backwards we increase the offset of
	 * the current array, to not have observable time warps.
	 * This will quickly synchronize the time offsets of
	 * multiple CPUs:
	 */
	if (t < prev_t)
		array->time_offset += prev_t - t;
}

static notrace void
print_trace_fmt(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	unsigned long usec_rem;
	unsigned long long t;
	unsigned long secs;
	char *comm;
	int S;

	sync_time_offset(iter);
	entry = iter->ent;

	comm = trace_find_cmdline(iter->ent->pid);

	t = ns2usecs(entry->t + iter->tr->data[iter->cpu]->time_offset);
	usec_rem = do_div(t, 1000000ULL);
	secs = (unsigned long)t;

	seq_printf(m, "%16s-%-5d ", comm, entry->pid);
	seq_printf(m, "[%02d] ", iter->cpu);
	seq_printf(m, "%5lu.%06lu: ", secs, usec_rem);

	switch (entry->type) {
	case TRACE_FN:
		seq_print_ip_sym(m, entry->fn.ip, sym_flags);
		if ((sym_flags & TRACE_ITER_PRINT_PARENT) &&
		    entry->fn.parent_ip) {
			seq_printf(m, " <-");
			seq_print_ip_sym(m, entry->fn.parent_ip, sym_flags);
		}
		seq_printf(m, "\n");
		break;
	case TRACE_CTX:
		S = entry->ctx.prev_state < sizeof(state_to_char) ?
			state_to_char[entry->ctx.prev_state] : 'X';
		seq_printf(m, " %d:%d:%c ==> %d:%d\n",
			   entry->ctx.prev_pid,
			   entry->ctx.prev_prio,
			   S,
			   entry->ctx.next_pid,
			   entry->ctx.next_prio);
		break;
	}
}

static int trace_empty(struct trace_iterator *iter)
{
	struct trace_array_cpu *data;
	int cpu;

	for_each_possible_cpu(cpu) {
		data = iter->tr->data[cpu];

		if (head_page(data) && data->trace_idx)
			return 0;
	}
	return 1;
}

static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
		}
		if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
			/* print nothing if the buffers are empty */
			if (trace_empty(iter))
				return 0;
			print_trace_header(m, iter);
			if (!(trace_flags & TRACE_ITER_VERBOSE))
				print_lat_help_header(m);
		} else {
			if (!(trace_flags & TRACE_ITER_VERBOSE))
				print_func_help_header(m);
		}
	} else {
		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
			print_lat_fmt(m, iter, iter->idx, iter->cpu);
		else
			print_trace_fmt(m, iter);
	}

	return 0;
}

static struct seq_operations tracer_seq_ops = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

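/*
 * Common open path for the trace and latency_trace files: allocate a
 * trace_iterator, point it at either the global buffer or max_tr
 * (when the current tracer keeps a max snapshot), hook it up to the
 * seq_file, and pause tracing while the buffers are being dumped.
 */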
static struct trace_iterator notrace *
__tracing_open(struct inode *inode, struct file *file, int *ret)
{
	struct trace_iterator *iter;

	if (tracing_disabled) {
		*ret = -ENODEV;
		return NULL;
	}

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		*ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&trace_types_lock);
	if (current_trace && current_trace->print_max)
		iter->tr = &max_tr;
	else
		iter->tr = inode->i_private;
	iter->trace = current_trace;
	iter->pos = -1;

	/* TODO stop tracer */
	*ret = seq_open(file, &tracer_seq_ops);
	if (!*ret) {
		struct seq_file *m = file->private_data;
		m->private = iter;

		/* stop the trace while dumping */
		if (iter->tr->ctrl)
			tracer_enabled = 0;

		if (iter->trace && iter->trace->open)
			iter->trace->open(iter);
	} else {
		kfree(iter);
		iter = NULL;
	}
	mutex_unlock(&trace_types_lock);

 out:
	return iter;
}

int tracing_open_generic(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	filp->private_data = inode->i_private;
	return 0;
}

int tracing_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct trace_iterator *iter = m->private;

	mutex_lock(&trace_types_lock);
	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	/* reenable tracing if it was previously enabled */
	if (iter->tr->ctrl)
		tracer_enabled = 1;
	mutex_unlock(&trace_types_lock);

	seq_release(inode, file);
	kfree(iter);
	return 0;
}

static int tracing_open(struct inode *inode, struct file *file)
{
	int ret;

	__tracing_open(inode, file, &ret);

	return ret;
}

static int tracing_lt_open(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter;
	int ret;

	iter = __tracing_open(inode, file, &ret);

	if (!ret)
		iter->iter_flags |= TRACE_FILE_LAT_FMT;

	return ret;
}

static notrace void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct tracer *t = m->private;

	(*pos)++;

	if (t)
		t = t->next;

	m->private = t;

	return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct tracer *t = m->private;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);
	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct tracer *t = v;

	if (!t)
		return 0;

	seq_printf(m, "%s", t->name);
	if (t->next)
		seq_putc(m, ' ');
	else
		seq_putc(m, '\n');

	return 0;
}

static struct seq_operations show_traces_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int show_traces_open(struct inode *inode, struct file *file)
{
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	ret = seq_open(file, &show_traces_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = trace_types;
	}

	return ret;
}

static struct file_operations tracing_fops = {
	.open = tracing_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = tracing_release,
};

static struct file_operations tracing_lt_fops = {
	.open = tracing_lt_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = tracing_release,
};

static struct file_operations show_traces_fops = {
	.open = show_traces_open,
	.read = seq_read,
	.release = seq_release,
};

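/*
 * The iter_ctrl debugfs file exposes the trace_options flags: reading
 * it lists every option, prefixed with "no" when the option is clear,
 * and writing an option name (optionally prefixed with "no") sets or
 * clears the corresponding bit in trace_flags.
 */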
static ssize_t
tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	char *buf;
	int r = 0;
	int len = 0;
	int i;

	/* calculate max size */
	for (i = 0; trace_options[i]; i++) {
		len += strlen(trace_options[i]);
		len += 3; /* "no" and space */
	}

	/* +2 for \n and \0 */
	buf = kmalloc(len + 2, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; trace_options[i]; i++) {
		if (trace_flags & (1 << i))
			r += sprintf(buf + r, "%s ", trace_options[i]);
		else
			r += sprintf(buf + r, "no%s ", trace_options[i]);
	}

	r += sprintf(buf + r, "\n");
	WARN_ON(r >= len + 2);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    buf, r);

	kfree(buf);

	return r;
}

static ssize_t
tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp = buf;
	int neg = 0;
	int i;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	if (strncmp(buf, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}

	for (i = 0; trace_options[i]; i++) {
		int len = strlen(trace_options[i]);

		if (strncmp(cmp, trace_options[i], len) == 0) {
			if (neg)
				trace_flags &= ~(1 << i);
			else
				trace_flags |= (1 << i);
			break;
		}
	}

	filp->f_pos += cnt;

	return cnt;
}

static struct file_operations tracing_iter_fops = {
	.open = tracing_open_generic,
	.read = tracing_iter_ctrl_read,
	.write = tracing_iter_ctrl_write,
};

static const char readme_msg[] =
	"tracing mini-HOWTO:\n\n"
	"# mkdir /debug\n"
	"# mount -t debugfs nodev /debug\n\n"
	"# cat /debug/tracing/available_tracers\n"
	"wakeup preemptirqsoff preemptoff irqsoff ftrace sched_switch none\n\n"
	"# cat /debug/tracing/current_tracer\n"
	"none\n"
	"# echo sched_switch > /debug/tracing/current_tracer\n"
	"# cat /debug/tracing/current_tracer\n"
	"sched_switch\n"
	"# cat /debug/tracing/iter_ctrl\n"
	"noprint-parent nosym-offset nosym-addr noverbose\n"
	"# echo print-parent > /debug/tracing/iter_ctrl\n"
	"# echo 1 > /debug/tracing/tracing_enabled\n"
	"# cat /debug/tracing/trace > /tmp/trace.txt\n"
	"# echo 0 > /debug/tracing/tracing_enabled\n"
;

static ssize_t
tracing_readme_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, cnt, ppos,
				       readme_msg, strlen(readme_msg));
}

static struct file_operations tracing_readme_fops = {
	.open = tracing_open_generic,
	.read = tracing_readme_read,
};

static ssize_t
tracing_ctrl_read(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = sprintf(buf, "%ld\n", tr->ctrl);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_ctrl_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	long val;
	char buf[64];

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	val = simple_strtoul(buf, NULL, 10);

	val = !!val;

	mutex_lock(&trace_types_lock);
	if (tr->ctrl ^ val) {
		if (val)
			tracer_enabled = 1;
		else
			tracer_enabled = 0;

		tr->ctrl = val;

		if (current_trace && current_trace->ctrl_update)
			current_trace->ctrl_update(tr);
	}
	mutex_unlock(&trace_types_lock);

	filp->f_pos += cnt;

	return cnt;
}

static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	char buf[max_tracer_type_len+2];
	int r;

	mutex_lock(&trace_types_lock);
	if (current_trace)
		r = sprintf(buf, "%s\n", current_trace->name);
	else
		r = sprintf(buf, "\n");
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos,
				       buf, r);
}

static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = &global_trace;
	struct tracer *t;
	char buf[max_tracer_type_len+1];
	int i;

	if (cnt > max_tracer_type_len)
		cnt = max_tracer_type_len;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/* strip ending whitespace. */
	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
		buf[i] = 0;

	mutex_lock(&trace_types_lock);
	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t || t == current_trace)
		goto out;

	if (current_trace && current_trace->reset)
		current_trace->reset(tr);

	current_trace = t;
	if (t->init)
		t->init(tr);

 out:
	mutex_unlock(&trace_types_lock);

	filp->f_pos += cnt;

	return cnt;
}

static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, 64, "%ld\n",
		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
	if (r > 64)
		r = 64;
	return simple_read_from_buffer(ubuf, cnt, ppos,
				       buf, r);
}

static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	long *ptr = filp->private_data;
	long val;
	char buf[64];

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	val = simple_strtoul(buf, NULL, 10);

	*ptr = val * 1000;

	return cnt;
}

static struct file_operations tracing_max_lat_fops = {
	.open = tracing_open_generic,
	.read = tracing_max_lat_read,
	.write = tracing_max_lat_write,
};

static struct file_operations tracing_ctrl_fops = {
	.open = tracing_open_generic,
	.read = tracing_ctrl_read,
	.write = tracing_ctrl_write,
};

static struct file_operations set_tracer_fops = {
	.open = tracing_open_generic,
	.read = tracing_set_trace_read,
	.write = tracing_set_trace_write,
};

#ifdef CONFIG_DYNAMIC_FTRACE

static ssize_t
tracing_read_long(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	unsigned long *p = filp->private_data;
	char buf[64];
	int r;

	r = sprintf(buf, "%ld\n", *p);
	return simple_read_from_buffer(ubuf, cnt, ppos,
				       buf, r);
}

static struct file_operations tracing_read_long_fops = {
	.open = tracing_open_generic,
	.read = tracing_read_long,
};

#endif

static struct dentry *d_tracer;

struct dentry *tracing_init_dentry(void)
{
	static int once;

	if (d_tracer)
		return d_tracer;

	d_tracer = debugfs_create_dir("tracing", NULL);

	if (!d_tracer && !once) {
		once = 1;
		pr_warning("Could not create debugfs directory 'tracing'\n");
		return NULL;
	}

	return d_tracer;
}

#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

static __init void tracer_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("tracing_enabled", 0644, d_tracer,
				    &global_trace, &tracing_ctrl_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'tracing_enabled' entry\n");

	entry = debugfs_create_file("iter_ctrl", 0644, d_tracer,
				    NULL, &tracing_iter_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'iter_ctrl' entry\n");

	entry = debugfs_create_file("latency_trace", 0444, d_tracer,
				    &global_trace, &tracing_lt_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'latency_trace' entry\n");

	entry = debugfs_create_file("trace", 0444, d_tracer,
				    &global_trace, &tracing_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'trace' entry\n");

	entry = debugfs_create_file("available_tracers", 0444, d_tracer,
				    &global_trace, &show_traces_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'available_tracers' entry\n");

	entry = debugfs_create_file("current_tracer", 0444, d_tracer,
				    &global_trace, &set_tracer_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'current_tracer' entry\n");

	entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer,
				    &tracing_max_latency,
				    &tracing_max_lat_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'tracing_max_latency' entry\n");

	entry = debugfs_create_file("tracing_thresh", 0644, d_tracer,
				    &tracing_thresh, &tracing_max_lat_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'tracing_thresh' entry\n");

	entry = debugfs_create_file("README", 0644, d_tracer,
				    NULL, &tracing_readme_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'README' entry\n");

#ifdef CONFIG_DYNAMIC_FTRACE
	entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
				    &ftrace_update_tot_cnt,
				    &tracing_read_long_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'dyn_ftrace_total_info' entry\n");
#endif
}

/* dummy trace to disable tracing */
static struct tracer no_tracer __read_mostly =
{
	.name = "none",
};

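/*
 * Grow every per-cpu trace buffer (and the max_tr buffer when it is
 * configured) by one page. Pages are allocated for all CPUs first so
 * that a failure can free everything and leave the buffers unchanged.
 */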
static int trace_alloc_page(void)
{
	struct trace_array_cpu *data;
	struct page *page, *tmp;
	LIST_HEAD(pages);
	void *array;
	int i;

	/* first allocate a page for each CPU */
	for_each_possible_cpu(i) {
		array = (void *)__get_free_page(GFP_KERNEL);
		if (array == NULL) {
			printk(KERN_ERR "tracer: failed to allocate page"
			       " for trace buffer!\n");
			goto free_pages;
		}

		page = virt_to_page(array);
		list_add(&page->lru, &pages);

/* Only allocate if we are actually using the max trace */
#ifdef CONFIG_TRACER_MAX_TRACE
		array = (void *)__get_free_page(GFP_KERNEL);
		if (array == NULL) {
			printk(KERN_ERR "tracer: failed to allocate page"
			       " for trace buffer!\n");
			goto free_pages;
		}

		page = virt_to_page(array);
		list_add(&page->lru, &pages);
#endif
	}

	/* Now that we successfully allocate a page per CPU, add them */
	for_each_possible_cpu(i) {
		data = global_trace.data[i];
		page = list_entry(pages.next, struct page, lru);
		list_del_init(&page->lru);
		list_add_tail(&page->lru, &data->trace_pages);
		ClearPageLRU(page);

#ifdef CONFIG_TRACER_MAX_TRACE
		data = max_tr.data[i];
		page = list_entry(pages.next, struct page, lru);
		list_del_init(&page->lru);
		list_add_tail(&page->lru, &data->trace_pages);
		SetPageLRU(page);
#endif
	}
	global_trace.entries += ENTRIES_PER_PAGE;

	return 0;

 free_pages:
	list_for_each_entry_safe(page, tmp, &pages, lru) {
		list_del_init(&page->lru);
		__free_page(page);
	}
	return -ENOMEM;
}

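/*
 * Boot-time initialization: allocate the first page of every per-cpu
 * buffer, grow the buffers until trace_nr_entries entries fit, set up
 * the debugfs files and the cmdline cache, and register the dummy
 * "none" tracer before tracing is enabled.
 */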
__init static int tracer_alloc_buffers(void)
{
	struct trace_array_cpu *data;
	void *array;
	struct page *page;
	int pages = 0;
	int ret = -ENOMEM;
	int i;

	/* Allocate the first page for all buffers */
	for_each_possible_cpu(i) {
		data = global_trace.data[i] = &per_cpu(global_trace_cpu, i);
		max_tr.data[i] = &per_cpu(max_data, i);

		array = (void *)__get_free_page(GFP_KERNEL);
		if (array == NULL) {
			printk(KERN_ERR "tracer: failed to allocate page"
			       " for trace buffer!\n");
			goto free_buffers;
		}

		/* set the array to the list */
		INIT_LIST_HEAD(&data->trace_pages);
		page = virt_to_page(array);
		list_add(&page->lru, &data->trace_pages);
		/* use the LRU flag to differentiate the two buffers */
		ClearPageLRU(page);

/* Only allocate if we are actually using the max trace */
#ifdef CONFIG_TRACER_MAX_TRACE
		array = (void *)__get_free_page(GFP_KERNEL);
		if (array == NULL) {
			printk(KERN_ERR "tracer: failed to allocate page"
			       " for trace buffer!\n");
			goto free_buffers;
		}

		INIT_LIST_HEAD(&max_tr.data[i]->trace_pages);
		page = virt_to_page(array);
		list_add(&page->lru, &max_tr.data[i]->trace_pages);
		SetPageLRU(page);
#endif
	}

	/*
	 * Since we allocate by orders of pages, we may be able to
	 * round up a bit.
	 */
	global_trace.entries = ENTRIES_PER_PAGE;
	pages++;

	while (global_trace.entries < trace_nr_entries) {
		if (trace_alloc_page())
			break;
		pages++;
	}
	max_tr.entries = global_trace.entries;

	pr_info("tracer: %d pages allocated for %ld",
		pages, trace_nr_entries);
	pr_info(" entries of %ld bytes\n", (long)TRACE_ENTRY_SIZE);
	pr_info(" actual entries %ld\n", global_trace.entries);

	tracer_init_debugfs();

	trace_init_cmdlines();

	register_tracer(&no_tracer);
	current_trace = &no_tracer;

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	return 0;

 free_buffers:
	for (i--; i >= 0; i--) {
		struct page *page, *tmp;
		struct trace_array_cpu *data = global_trace.data[i];

		if (data) {
			list_for_each_entry_safe(page, tmp,
						 &data->trace_pages, lru) {
				list_del_init(&page->lru);
				__free_page(page);
			}
		}

#ifdef CONFIG_TRACER_MAX_TRACE
		data = max_tr.data[i];
		if (data) {
			list_for_each_entry_safe(page, tmp,
						 &data->trace_pages, lru) {
				list_del_init(&page->lru);
				__free_page(page);
			}
		}
#endif
	}

	return ret;
}

fs_initcall(tracer_alloc_buffers);