/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/utsrelease.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/kdebug.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/kprobes.h>
#include <linux/writeback.h>
#include <linux/stacktrace.h>
#include <linux/ring_buffer.h>
#include <linux/irqflags.h>

#include "trace.h"

#define TRACE_BUFFER_FLAGS	(RB_FL_OVERWRITE)

unsigned long __read_mostly	tracing_max_latency = (cycle_t)ULONG_MAX;
unsigned long __read_mostly	tracing_thresh;

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
int tracing_disabled = 1;

static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);

static inline void ftrace_disable_cpu(void)
{
	preempt_disable();
	local_inc(&__get_cpu_var(ftrace_cpu_disabled));
}

static inline void ftrace_enable_cpu(void)
{
	local_dec(&__get_cpu_var(ftrace_cpu_disabled));
	preempt_enable();
}

static cpumask_t __read_mostly		tracing_buffer_mask;

#define for_each_tracing_cpu(cpu)	\
	for_each_cpu_mask(cpu, tracing_buffer_mask)

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops to true.
 */
int ftrace_dump_on_oops;

static int tracing_set_tracer(char *buf);

static int __init set_ftrace(char *str)
{
	tracing_set_tracer(str);
	return 1;
}
__setup("ftrace", set_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	ftrace_dump_on_oops = 1;
	return 1;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
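
/*
 * Example (boot-time usage): booting with
 *	ftrace=<tracer> ftrace_dump_on_oops
 * selects a tracer by name (provided that tracer is already registered
 * when the option is parsed) and arms the dump-on-oops behaviour
 * described above.
 */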

long
ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}
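
/*
 * Rounds to the nearest microsecond, e.g. ns2usecs(1499) == 1 and
 * ns2usecs(1500) == 2, because of the +500 bias before the divide.
 */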

cycle_t ftrace_now(int cpu)
{
	u64 ts = ring_buffer_time_stamp(cpu);
	ring_buffer_normalize_time_stamp(cpu, &ts);
	return ts;
}

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array	global_trace;

static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);

/*
 * The max_tr is used to snapshot the global_trace when a maximum
 * latency is reached. Some tracers will use this to store a maximum
 * trace while it continues examining live traces.
 *
 * The buffers for the max_tr are set up the same as the global_trace.
 * When a snapshot is taken, the linked list of the max_tr is swapped
 * with the linked list of the global_trace and the buffers are reset
 * for the global_trace so the tracing can continue.
 */
static struct trace_array	max_tr;

static DEFINE_PER_CPU(struct trace_array_cpu, max_data);

/* tracer_enabled is used to toggle activation of a tracer */
static int			tracer_enabled = 1;

/**
 * tracing_is_enabled - return tracer_enabled status
 *
 * This function is used by other tracers to know the status
 * of the tracer_enabled flag. Tracers may use this function
 * to know if they should enable their features when starting
 * up. See irqsoff tracer for an example (start_irqsoff_tracer).
 */
int tracing_is_enabled(void)
{
	return tracer_enabled;
}

/* function tracing enabled */
int ftrace_function_enabled;

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If a dump on oops happens, we do not want to wait for all
 * that output. Anyway, this is boot time and run time
 * configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/* current_trace points to the tracer that is currently active */
static struct tracer		*current_trace __read_mostly;

/*
 * max_tracer_type_len is used to simplify allocating the
 * buffers to read userspace tracer names. We keep track of
 * the longest tracer name registered.
 */
static int			max_tracer_type_len;

/*
 * trace_types_lock is used to protect the trace_types list.
 * This lock is also used to keep user access serialized.
 * Accesses from userspace will grab this lock while userspace
 * activities happen inside the kernel.
 */
static DEFINE_MUTEX(trace_types_lock);

/* trace_wait is a waitqueue for tasks blocked on trace_poll */
static DECLARE_WAIT_QUEUE_HEAD(trace_wait);

/* trace_flags holds iter_ctrl options */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK;

/**
 * trace_wake_up - wake up tasks waiting for trace input
 *
 * Simply wakes up any task that is blocked on the trace_wait
 * queue. It is used with trace_poll for tasks polling the trace.
 */
void trace_wake_up(void)
{
	/*
	 * The runqueue_is_locked() can fail, but this is the best we
	 * have for now:
	 */
	if (!(trace_flags & TRACE_ITER_BLOCK) && !runqueue_is_locked())
		wake_up(&trace_wait);
}

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;
	int ret;

	if (!str)
		return 0;
	ret = strict_strtoul(str, 0, &buf_size);
	/* nr_entries cannot be zero */
	if (ret < 0 || buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);
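
/*
 * Example (boot-time usage): "trace_buf_size=1048576" on the kernel
 * command line requests a roughly 1 MB trace buffer; as noted above,
 * the number of bytes is rounded to page size.
 */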

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"sched-tree",
	"ftrace_printk",
	"ftrace_preempt",
	NULL
};
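
/*
 * Each name above toggles one TRACE_ITER_* bit via the iter_ctrl
 * debugfs file, e.g. (path depends on where debugfs is mounted):
 *
 *	echo stacktrace > /debugfs/tracing/iter_ctrl
 *	echo nostacktrace > /debugfs/tracing/iter_ctrl
 */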

/*
 * ftrace_max_lock is used to protect the swapping of buffers
 * when taking a max snapshot. The buffers themselves are
 * protected by per_cpu spinlocks. But the action of the swap
 * needs its own lock.
 *
 * This is defined as a raw_spinlock_t in order to help
 * with performance when lockdep debugging is enabled.
 */
static raw_spinlock_t ftrace_max_lock =
	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /debugfs/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data = tr->data[cpu];

	max_tr.cpu = cpu;
	max_tr.time_start = data->preempt_timestamp;

	data = max_tr.data[cpu];
	data->saved_latency = tracing_max_latency;

	memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
	data->pid = tsk->pid;
	data->uid = tsk->uid;
	data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	data->policy = tsk->policy;
	data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(current);
}

/**
 * trace_seq_printf - sequence printing of trace information
 * @s: trace sequence descriptor
 * @fmt: printf format string
 *
 * The tracer may use either sequence operations or its own
 * copy to user routines. To simplify formatting of a trace,
 * trace_seq_printf is used to store strings into a special
 * buffer (@s). Then the output may be either used by
 * the sequencer or pulled into another buffer.
 */
int
trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
	int len = (PAGE_SIZE - 1) - s->len;
	va_list ap;
	int ret;

	if (!len)
		return 0;

	va_start(ap, fmt);
	ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
	va_end(ap);

	/* If we can't write it all, don't bother writing anything */
	if (ret >= len)
		return 0;

	s->len += ret;

	return len;
}
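
/*
 * Callers treat the return value as a boolean: non-zero means the
 * formatted string fit in the page-sized buffer, 0 means it was
 * dropped, e.g.:
 *
 *	if (!trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem))
 *		return TRACE_TYPE_PARTIAL_LINE;
 */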

/**
 * trace_seq_puts - trace sequence printing of simple string
 * @s: trace sequence descriptor
 * @str: simple string to record
 *
 * The tracer may use either the sequence operations or its own
 * copy to user routines. This function records a simple string
 * into a special buffer (@s) for later retrieval by a sequencer
 * or other mechanism.
 */
static int
trace_seq_puts(struct trace_seq *s, const char *str)
{
	int len = strlen(str);

	if (len > ((PAGE_SIZE - 1) - s->len))
		return 0;

	memcpy(s->buffer + s->len, str, len);
	s->len += len;

	return len;
}

static int
trace_seq_putc(struct trace_seq *s, unsigned char c)
{
	if (s->len >= (PAGE_SIZE - 1))
		return 0;

	s->buffer[s->len++] = c;

	return 1;
}

static int
trace_seq_putmem(struct trace_seq *s, void *mem, size_t len)
{
	if (len > ((PAGE_SIZE - 1) - s->len))
		return 0;

	memcpy(s->buffer + s->len, mem, len);
	s->len += len;

	return len;
}

#define MAX_MEMHEX_BYTES	8
#define HEX_CHARS		(MAX_MEMHEX_BYTES*2 + 1)

static int
trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
{
	unsigned char hex[HEX_CHARS];
	unsigned char *data = mem;
	int i, j;

#ifdef __BIG_ENDIAN
	for (i = 0, j = 0; i < len; i++) {
#else
	for (i = len-1, j = 0; i >= 0; i--) {
#endif
		hex[j++] = hex_asc_hi(data[i]);
		hex[j++] = hex_asc_lo(data[i]);
	}
	hex[j++] = ' ';

	return trace_seq_putmem(s, hex, j);
}
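
/*
 * Example: a 16-bit value 0x1234 stored little-endian as {0x34, 0x12}
 * is walked from the highest byte down, so the buffer receives "1234 "
 * and multi-byte values read naturally in the output.
 */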

static void
trace_seq_reset(struct trace_seq *s)
{
	s->len = 0;
	s->readpos = 0;
}

ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
	int len;
	int ret;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
	if (ret)
		return -EFAULT;

	/* only advance by what was actually copied out */
	s->readpos += cnt;
	return cnt;
}

static void
trace_print_seq(struct seq_file *m, struct trace_seq *s)
{
	int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;

	s->buffer[len] = 0;
	seq_puts(m, s->buffer);

	trace_seq_reset(s);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf = tr->buffer;

	WARN_ON_ONCE(!irqs_disabled());
	__raw_spin_lock(&ftrace_max_lock);

	tr->buffer = max_tr.buffer;
	max_tr.buffer = buf;

	ftrace_disable_cpu();
	ring_buffer_reset(tr->buffer);
	ftrace_enable_cpu();

	__update_max_tr(tr, tsk, cpu);
	__raw_spin_unlock(&ftrace_max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	WARN_ON_ONCE(!irqs_disabled());
	__raw_spin_lock(&ftrace_max_lock);

	ftrace_disable_cpu();

	ring_buffer_reset(max_tr.buffer);
	ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);

	ftrace_enable_cpu();

	WARN_ON_ONCE(ret);

	__update_max_tr(tr, tsk, cpu);
	__raw_spin_unlock(&ftrace_max_lock);
}

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int len;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	mutex_lock(&trace_types_lock);
	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Trace %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

#ifdef CONFIG_FTRACE_STARTUP_TEST
	if (type->selftest) {
		struct tracer *saved_tracer = current_trace;
		struct trace_array *tr = &global_trace;
		int i;
		/*
		 * Run a selftest on this tracer.
		 * Here we reset the trace buffer, and set the current
		 * tracer to be this tracer. The tracer can then run some
		 * internal tracing to verify that everything is in order.
		 * If we fail, we do not register this tracer.
		 */
		for_each_tracing_cpu(i) {
			tracing_reset(tr, i);
		}
		current_trace = type;
		/* the test is responsible for initializing and enabling */
		pr_info("Testing tracer %s: ", type->name);
		ret = type->selftest(type, tr);
		/* the test is responsible for resetting too */
		current_trace = saved_tracer;
		if (ret) {
			printk(KERN_CONT "FAILED!\n");
			goto out;
		}
		/* Only reset on passing, to avoid touching corrupted buffers */
		for_each_tracing_cpu(i) {
			tracing_reset(tr, i);
		}
		printk(KERN_CONT "PASSED\n");
	}
#endif

	type->next = trace_types;
	trace_types = type;
	len = strlen(type->name);
	if (len > max_tracer_type_len)
		max_tracer_type_len = len;

 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}
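
/*
 * A minimal sketch of a tracer plugin using register_tracer(). Field
 * names follow struct tracer in this tree's trace.h; the init/reset
 * signatures below are assumptions for illustration only:
 *
 *	static void example_trace_init(struct trace_array *tr) { }
 *	static void example_trace_reset(struct trace_array *tr) { }
 *
 *	static struct tracer example_tracer __read_mostly = {
 *		.name	= "example",
 *		.init	= example_trace_init,
 *		.reset	= example_trace_reset,
 *	};
 *
 *	static int __init init_example_tracer(void)
 *	{
 *		return register_tracer(&example_tracer);
 *	}
 *	device_initcall(init_example_tracer);
 */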

void unregister_tracer(struct tracer *type)
{
	struct tracer **t;
	int len;

	mutex_lock(&trace_types_lock);
	for (t = &trace_types; *t; t = &(*t)->next) {
		if (*t == type)
			goto found;
	}
	pr_info("Trace %s not registered\n", type->name);
	goto out;

 found:
	*t = (*t)->next;
	if (strlen(type->name) != max_tracer_type_len)
		goto out;

	max_tracer_type_len = 0;
	for (t = &trace_types; *t; t = &(*t)->next) {
		len = strlen((*t)->name);
		if (len > max_tracer_type_len)
			max_tracer_type_len = len;
	}
 out:
	mutex_unlock(&trace_types_lock);
}

void tracing_reset(struct trace_array *tr, int cpu)
{
	ftrace_disable_cpu();
	ring_buffer_reset_cpu(tr->buffer, cpu);
	ftrace_enable_cpu();
}

#define SAVED_CMDLINES 128
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static DEFINE_SPINLOCK(trace_cmdline_lock);

/* temporarily disable recording */
atomic_t trace_record_cmdline_disabled __read_mostly;

static void trace_init_cmdlines(void)
{
	memset(&map_pid_to_cmdline, -1, sizeof(map_pid_to_cmdline));
	memset(&map_cmdline_to_pid, -1, sizeof(map_cmdline_to_pid));
	cmdline_idx = 0;
}

static int trace_stop_count;
static DEFINE_SPINLOCK(tracing_start_lock);

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	spin_lock_irqsave(&tracing_start_lock, flags);
	if (--trace_stop_count) {
		if (trace_stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			trace_stop_count = 0;
		}
		goto out;
	}

	buffer = global_trace.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

	buffer = max_tr.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

	ftrace_start();
 out:
	spin_unlock_irqrestore(&tracing_start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	ftrace_stop();
	spin_lock_irqsave(&tracing_start_lock, flags);
	if (trace_stop_count++)
		goto out;

	buffer = global_trace.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

	buffer = max_tr.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

 out:
	spin_unlock_irqrestore(&tracing_start_lock, flags);
}
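
/*
 * tracing_stop()/tracing_start() nest via trace_stop_count: only the
 * first stop disables the ring buffers and only the matching final
 * start re-enables them, e.g.
 *
 *	tracing_stop();
 *	tracing_stop();
 *	tracing_start();	recording still off
 *	tracing_start();	recording back on
 */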

void trace_stop_cmdline_recording(void);

static void trace_save_cmdline(struct task_struct *tsk)
{
	unsigned map;
	unsigned idx;

	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!spin_trylock(&trace_cmdline_lock))
		return;

	idx = map_pid_to_cmdline[tsk->pid];
	if (idx >= SAVED_CMDLINES) {
		idx = (cmdline_idx + 1) % SAVED_CMDLINES;

		map = map_cmdline_to_pid[idx];
		if (map <= PID_MAX_DEFAULT)
			map_pid_to_cmdline[map] = (unsigned)-1;

		/*
		 * Keep the reverse map in sync, so the slot's previous
		 * owner can be invalidated when the slot is recycled.
		 */
		map_cmdline_to_pid[idx] = tsk->pid;

		map_pid_to_cmdline[tsk->pid] = idx;

		cmdline_idx = idx;
	}

	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

	spin_unlock(&trace_cmdline_lock);
}

static char *trace_find_cmdline(int pid)
{
	char *cmdline = "<...>";
	unsigned map;

	if (!pid)
		return "<idle>";

	if (pid > PID_MAX_DEFAULT)
		goto out;

	map = map_pid_to_cmdline[pid];
	if (map >= SAVED_CMDLINES)
		goto out;

	cmdline = saved_cmdlines[map];

 out:
	return cmdline;
}
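
/*
 * The pid<->comm bookkeeping above is a fixed 128-slot ring:
 * map_pid_to_cmdline[] points each recorded pid at a slot of
 * saved_cmdlines[], map_cmdline_to_pid[] is the reverse map used to
 * invalidate a slot's previous owner when the slot is recycled, and
 * any pid whose slot was recycled prints as "<...>".
 */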

void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled))
		return;

	trace_save_cmdline(tsk);
}

void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count = pc & 0xff;
	entry->pid = (tsk) ? tsk->pid : 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}

void
trace_function(struct trace_array *tr, struct trace_array_cpu *data,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;
	unsigned long irq_flags;

	/* If we are reading the ring buffer, don't trace */
	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
		return;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);
	entry->ent.type = TRACE_FN;
	entry->ip = ip;
	entry->parent_ip = parent_ip;
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
}

void
ftrace(struct trace_array *tr, struct trace_array_cpu *data,
       unsigned long ip, unsigned long parent_ip, unsigned long flags,
       int pc)
{
	if (likely(!atomic_read(&data->disabled)))
		trace_function(tr, data, ip, parent_ip, flags, pc);
}

static void ftrace_trace_stack(struct trace_array *tr,
			       struct trace_array_cpu *data,
			       unsigned long flags,
			       int skip, int pc)
{
#ifdef CONFIG_STACKTRACE
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;
	unsigned long irq_flags;

	if (!(trace_flags & TRACE_ITER_STACKTRACE))
		return;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);
	entry->ent.type = TRACE_STACK;

	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries = 0;
	trace.max_entries = FTRACE_STACK_ENTRIES;
	trace.skip = skip;
	trace.entries = entry->caller;

	save_stack_trace(&trace);
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
#endif
}

void __trace_stack(struct trace_array *tr,
		   struct trace_array_cpu *data,
		   unsigned long flags,
		   int skip)
{
	ftrace_trace_stack(tr, data, flags, skip, preempt_count());
}

static void
ftrace_trace_special(void *__tr, void *__data,
		     unsigned long arg1, unsigned long arg2, unsigned long arg3,
		     int pc)
{
	struct ring_buffer_event *event;
	struct trace_array_cpu *data = __data;
	struct trace_array *tr = __tr;
	struct special_entry *entry;
	unsigned long irq_flags;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, pc);
	entry->ent.type = TRACE_SPECIAL;
	entry->arg1 = arg1;
	entry->arg2 = arg2;
	entry->arg3 = arg3;
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
	ftrace_trace_stack(tr, data, irq_flags, 4, pc);

	trace_wake_up();
}

void
__trace_special(void *__tr, void *__data,
		unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
	ftrace_trace_special(__tr, __data, arg1, arg2, arg3, preempt_count());
}

void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct trace_array_cpu *data,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned long flags, int pc)
{
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	unsigned long irq_flags;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);
	entry->ent.type = TRACE_CTX;
	entry->prev_pid = prev->pid;
	entry->prev_prio = prev->prio;
	entry->prev_state = prev->state;
	entry->next_pid = next->pid;
	entry->next_prio = next->prio;
	entry->next_state = next->state;
	entry->next_cpu = task_cpu(next);
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
	ftrace_trace_stack(tr, data, flags, 5, pc);
}

void
tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct trace_array_cpu *data,
			   struct task_struct *wakee,
			   struct task_struct *curr,
			   unsigned long flags, int pc)
{
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	unsigned long irq_flags;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);
	entry->ent.type = TRACE_WAKE;
	entry->prev_pid = curr->pid;
	entry->prev_prio = curr->prio;
	entry->prev_state = curr->state;
	entry->next_pid = wakee->pid;
	entry->next_prio = wakee->prio;
	entry->next_state = wakee->state;
	entry->next_cpu = task_cpu(wakee);
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
	ftrace_trace_stack(tr, data, flags, 6, pc);

	trace_wake_up();
}

void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu;
	int pc;

	if (tracing_disabled)
		return;

	pc = preempt_count();
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];

	if (likely(atomic_inc_return(&data->disabled) == 1))
		ftrace_trace_special(tr, data, arg1, arg2, arg3, pc);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
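
/*
 * Example (ad-hoc debugging): sprinkling
 *	ftrace_special(__LINE__, value, 0);
 * into code under investigation drops a marker with up to three
 * values into the trace without defining a new entry type; it shows
 * up in the output as a TRACE_SPECIAL line ("# arg1 arg2 arg3").
 */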

#ifdef CONFIG_FUNCTION_TRACER
static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu, resched;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	local_save_flags(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, data, ip, parent_ip, flags, pc);

	atomic_dec(&data->disabled);
	ftrace_preempt_enable(resched);
}

static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	raw_local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, data, ip, parent_ip, flags, pc);
	}

	atomic_dec(&data->disabled);
	raw_local_irq_restore(flags);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
};

void tracing_start_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (trace_flags & TRACE_ITER_PREEMPTONLY)
		trace_ops.func = function_trace_call_preempt_only;
	else
		trace_ops.func = function_trace_call;

	register_ftrace_function(&trace_ops);
	ftrace_function_enabled = 1;
}

void tracing_stop_function_trace(void)
{
	ftrace_function_enabled = 0;
	unregister_ftrace_function(&trace_ops);
}
#endif

enum trace_file_type {
	TRACE_FILE_LAT_FMT	= 1,
};

static void trace_iterator_increment(struct trace_iterator *iter, int cpu)
{
	/* Don't allow ftrace to trace into the ring buffers */
	ftrace_disable_cpu();

	iter->idx++;
	if (iter->buffer_iter[iter->cpu])
		ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);

	ftrace_enable_cpu();
}

static struct trace_entry *
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];

	/* Don't allow ftrace to trace into the ring buffers */
	ftrace_disable_cpu();

	if (buf_iter)
		event = ring_buffer_iter_peek(buf_iter, ts);
	else
		event = ring_buffer_peek(iter->tr->buffer, cpu, ts);

	ftrace_enable_cpu();

	return event ? ring_buffer_event_data(event) : NULL;
}

static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
{
	struct ring_buffer *buffer = iter->tr->buffer;
	struct trace_entry *ent, *next = NULL;
	u64 next_ts = 0, ts;
	int next_cpu = -1;
	int cpu;

	for_each_tracing_cpu(cpu) {

		if (ring_buffer_empty_cpu(buffer, cpu))
			continue;

		ent = peek_next_entry(iter, cpu, &ts);

		/*
		 * Pick the entry with the smallest timestamp:
		 */
		if (ent && (!next || ts < next_ts)) {
			next = ent;
			next_cpu = cpu;
			next_ts = ts;
		}
	}

	if (ent_cpu)
		*ent_cpu = next_cpu;

	if (ent_ts)
		*ent_ts = next_ts;

	return next;
}
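
/*
 * This is effectively one step of a k-way merge: each per-CPU ring
 * buffer is already time-ordered, so peeking at every CPU's head entry
 * and taking the smallest timestamp yields a globally ordered stream.
 */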

/* Find the next real entry, without updating the iterator itself */
static struct trace_entry *
find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
{
	return __find_next_entry(iter, ent_cpu, ent_ts);
}

/* Find the next real entry, and increment the iterator to the next entry */
static void *find_next_entry_inc(struct trace_iterator *iter)
{
	iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts);

	if (iter->ent)
		trace_iterator_increment(iter, iter->cpu);

	return iter->ent ? iter : NULL;
}

static void trace_consume(struct trace_iterator *iter)
{
	/* Don't allow ftrace to trace into the ring buffers */
	ftrace_disable_cpu();
	ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts);
	ftrace_enable_cpu();
}

static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	int i = (int)*pos;
	void *ent;

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	if (iter->idx < 0)
		ent = find_next_entry_inc(iter);
	else
		ent = iter;

	while (ent && iter->idx < i)
		ent = find_next_entry_inc(iter);

	iter->pos = *pos;

	return ent;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = 0;
	int cpu;

	mutex_lock(&trace_types_lock);

	if (!current_trace || current_trace != iter->trace) {
		mutex_unlock(&trace_types_lock);
		return NULL;
	}

	atomic_inc(&trace_record_cmdline_disabled);

	if (*pos != iter->pos) {
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;

		ftrace_disable_cpu();

		for_each_tracing_cpu(cpu) {
			ring_buffer_iter_reset(iter->buffer_iter[cpu]);
		}

		ftrace_enable_cpu();

		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		l = *pos - 1;
		p = s_next(m, p, &l);
	}

	return p;
}

static void s_stop(struct seq_file *m, void *p)
{
	atomic_dec(&trace_record_cmdline_disabled);
	mutex_unlock(&trace_types_lock);
}
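
/*
 * s_start()/s_next()/s_stop() implement the seq_file iterator contract
 * for the trace files: s_start() revalidates the tracer under
 * trace_types_lock and replays forward to *pos (the ring buffer
 * iterators only move forward), s_next() advances one merged entry,
 * and s_stop() drops the lock taken in s_start().
 */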

#ifdef CONFIG_KRETPROBES
static inline const char *kretprobed(const char *name)
{
	static const char tramp_name[] = "kretprobe_trampoline";
	int size = sizeof(tramp_name);

	if (strncmp(tramp_name, name, size) == 0)
		return "[unknown/kretprobe'd]";
	return name;
}
#else
static inline const char *kretprobed(const char *name)
{
	return name;
}
#endif /* CONFIG_KRETPROBES */

static int
seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	kallsyms_lookup(address, NULL, NULL, NULL, str);

	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	return 1;
}

static int
seq_print_sym_offset(struct trace_seq *s, const char *fmt,
		     unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	sprint_symbol(str, address);
	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	return 1;
}

#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif

static int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
{
	int ret;

	if (!ip)
		return trace_seq_printf(s, "0");

	if (sym_flags & TRACE_ITER_SYM_OFFSET)
		ret = seq_print_sym_offset(s, "%s", ip);
	else
		ret = seq_print_sym_short(s, "%s", ip);

	if (!ret)
		return 0;

	if (sym_flags & TRACE_ITER_SYM_ADDR)
		ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
	return ret;
}

static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                _------=> CPU#            \n");
	seq_puts(m, "#               / _-----=> irqs-off        \n");
	seq_puts(m, "#              | / _----=> need-resched    \n");
	seq_puts(m, "#             || / _---=> hardirq/softirq  \n");
	seq_puts(m, "#            ||| / _--=> preempt-depth     \n");
	seq_puts(m, "#            |||| /                        \n");
	seq_puts(m, "#            |||||     delay               \n");
	seq_puts(m, "#  cmd     pid   ||||| time  |   caller    \n");
	seq_puts(m, "#     \\   /      |||||   \\   |   /         \n");
}

static void print_func_help_header(struct seq_file *m)
{
	seq_puts(m, "#           TASK-PID    CPU#    TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |       |          |         |\n");
}

static void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_array *tr = iter->tr;
	struct trace_array_cpu *data = tr->data[tr->cpu];
	struct tracer *type = current_trace;
	unsigned long total;
	unsigned long entries;
	const char *name = "preemption";

	if (type)
		name = type->name;

	entries = ring_buffer_entries(iter->tr->buffer);
	total = entries +
		ring_buffer_overruns(iter->tr->buffer);

	seq_printf(m, "%s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "-----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, " latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries,
		   total,
		   tr->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "    -----------------\n");
	seq_printf(m, "    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid, data->uid, data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, " => started at: ");
		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n => ended at:   ");
		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n");
	}

	seq_puts(m, "\n");
}

static void
lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
{
	int hardirq, softirq;
	char *comm;

	comm = trace_find_cmdline(entry->pid);

	trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid);
	trace_seq_printf(s, "%3d", cpu);
	trace_seq_printf(s, "%c%c",
			 (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
			  (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' : '.',
			 ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'));

	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
	if (hardirq && softirq) {
		trace_seq_putc(s, 'H');
	} else {
		if (hardirq) {
			trace_seq_putc(s, 'h');
		} else {
			if (softirq)
				trace_seq_putc(s, 's');
			else
				trace_seq_putc(s, '.');
		}
	}

	if (entry->preempt_count)
		trace_seq_printf(s, "%x", entry->preempt_count);
	else
		trace_seq_puts(s, ".");
}

unsigned long preempt_mark_thresh = 100;

static void
lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
		    unsigned long rel_usecs)
{
	trace_seq_printf(s, " %4lldus", abs_usecs);
	if (rel_usecs > preempt_mark_thresh)
		trace_seq_puts(s, "!: ");
	else if (rel_usecs > 1)
		trace_seq_puts(s, "+: ");
	else
		trace_seq_puts(s, " : ");
}
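
/*
 * Example: in the latency output a trailing "!:" flags an entry that
 * is followed by a gap larger than preempt_mark_thresh (100us) before
 * the next entry, "+:" a gap of more than 1us, and " :" anything
 * shorter.
 */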

static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

/*
 * The message is supposed to contain an ending newline.
 * If the printing stops prematurely, try to add a newline of our own.
 */
void trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter)
{
	struct trace_entry *ent;
	struct trace_field_cont *cont;
	bool ok = true;

	ent = peek_next_entry(iter, iter->cpu, NULL);
	if (!ent || ent->type != TRACE_CONT) {
		trace_seq_putc(s, '\n');
		return;
	}

	do {
		cont = (struct trace_field_cont *)ent;
		if (ok)
			ok = (trace_seq_printf(s, "%s", cont->buf) > 0);

		ftrace_disable_cpu();

		if (iter->buffer_iter[iter->cpu])
			ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
		else
			ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);

		ftrace_enable_cpu();

		ent = peek_next_entry(iter, iter->cpu, NULL);
	} while (ent && ent->type == TRACE_CONT);

	if (!ok)
		trace_seq_putc(s, '\n');
}

static void test_cpu_buff_start(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;

	if (cpu_isset(iter->cpu, iter->started))
		return;

	cpu_set(iter->cpu, iter->started);
	trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu);
}
static enum print_line_t
print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
{
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *next_entry;
	unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
	struct trace_entry *entry = iter->ent;
	unsigned long abs_usecs;
	unsigned long rel_usecs;
	u64 next_ts;
	char *comm;
	int S, T;
	int i;
	unsigned state;

	if (entry->type == TRACE_CONT)
		return TRACE_TYPE_HANDLED;

	test_cpu_buff_start(iter);

	next_entry = find_next_entry(iter, NULL, &next_ts);
	if (!next_entry)
		next_ts = iter->ts;
	rel_usecs = ns2usecs(next_ts - iter->ts);
	abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);

	if (verbose) {
		comm = trace_find_cmdline(entry->pid);
		trace_seq_printf(s, "%16s %5d %3d %d %08x %08x [%08lx]"
				 " %ld.%03ldms (+%ld.%03ldms): ",
				 comm,
				 entry->pid, cpu, entry->flags,
				 entry->preempt_count, trace_idx,
				 ns2usecs(iter->ts),
				 abs_usecs/1000,
				 abs_usecs % 1000, rel_usecs/1000,
				 rel_usecs % 1000);
	} else {
		lat_print_generic(s, entry, cpu);
		lat_print_timestamp(s, abs_usecs, rel_usecs);
	}
	switch (entry->type) {
	case TRACE_FN: {
		struct ftrace_entry *field;

		trace_assign_type(field, entry);

		seq_print_ip_sym(s, field->ip, sym_flags);
		trace_seq_puts(s, " (");
		seq_print_ip_sym(s, field->parent_ip, sym_flags);
		trace_seq_puts(s, ")\n");
		break;
	}
	case TRACE_CTX:
	case TRACE_WAKE: {
		struct ctx_switch_entry *field;

		trace_assign_type(field, entry);

		T = field->next_state < sizeof(state_to_char) ?
			state_to_char[field->next_state] : 'X';

		state = field->prev_state ?
			__ffs(field->prev_state) + 1 : 0;
		S = state < sizeof(state_to_char) - 1 ? state_to_char[state] : 'X';
		comm = trace_find_cmdline(field->next_pid);
		trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
				 field->prev_pid,
				 field->prev_prio,
				 S, entry->type == TRACE_CTX ? "==>" : "  +",
				 field->next_cpu,
				 field->next_pid,
				 field->next_prio,
				 T, comm);
		break;
	}
	case TRACE_SPECIAL: {
		struct special_entry *field;

		trace_assign_type(field, entry);

		trace_seq_printf(s, "# %ld %ld %ld\n",
				 field->arg1,
				 field->arg2,
				 field->arg3);
		break;
	}
	case TRACE_STACK: {
		struct stack_entry *field;

		trace_assign_type(field, entry);

		for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
			if (i)
				trace_seq_puts(s, " <= ");
			seq_print_ip_sym(s, field->caller[i], sym_flags);
		}
		trace_seq_puts(s, "\n");
		break;
	}
	case TRACE_PRINT: {
		struct print_entry *field;

		trace_assign_type(field, entry);

		seq_print_ip_sym(s, field->ip, sym_flags);
		trace_seq_printf(s, ": %s", field->buf);
		if (entry->flags & TRACE_FLAG_CONT)
			trace_seq_print_cont(s, iter);
		break;
	}
	default:
		trace_seq_printf(s, "Unknown type %d\n", entry->type);
	}
	return TRACE_TYPE_HANDLED;
}
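
/*
 * Default human-readable output: "comm-pid [cpu] timestamp: payload".
 * Returns TRACE_TYPE_PARTIAL_LINE when the seq buffer fills before the
 * whole entry has been written.
 */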
static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	unsigned long usec_rem;
	unsigned long long t;
	unsigned long secs;
	char *comm;
	int ret;
	int S, T;
	int i;

	entry = iter->ent;

	if (entry->type == TRACE_CONT)
		return TRACE_TYPE_HANDLED;

	test_cpu_buff_start(iter);

	comm = trace_find_cmdline(iter->ent->pid);

	t = ns2usecs(iter->ts);
	usec_rem = do_div(t, 1000000ULL);
	secs = (unsigned long)t;

	ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	ret = trace_seq_printf(s, "[%03d] ", iter->cpu);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	switch (entry->type) {
	case TRACE_FN: {
		struct ftrace_entry *field;

		trace_assign_type(field, entry);

		ret = seq_print_ip_sym(s, field->ip, sym_flags);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		if ((sym_flags & TRACE_ITER_PRINT_PARENT) &&
		    field->parent_ip) {
			ret = trace_seq_printf(s, " <-");
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
			ret = seq_print_ip_sym(s,
					       field->parent_ip,
					       sym_flags);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
		ret = trace_seq_printf(s, "\n");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		break;
	}
	case TRACE_CTX:
	case TRACE_WAKE: {
		struct ctx_switch_entry *field;

		trace_assign_type(field, entry);

		S = field->prev_state < sizeof(state_to_char) ?
			state_to_char[field->prev_state] : 'X';
		T = field->next_state < sizeof(state_to_char) ?
			state_to_char[field->next_state] : 'X';
		ret = trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c\n",
				       field->prev_pid,
				       field->prev_prio,
				       S,
				       entry->type == TRACE_CTX ? "==>" : "  +",
				       field->next_cpu,
				       field->next_pid,
				       field->next_prio,
				       T);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		break;
	}
	case TRACE_SPECIAL: {
		struct special_entry *field;

		trace_assign_type(field, entry);

		ret = trace_seq_printf(s, "# %ld %ld %ld\n",
				       field->arg1,
				       field->arg2,
				       field->arg3);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		break;
	}
	case TRACE_STACK: {
		struct stack_entry *field;

		trace_assign_type(field, entry);

		for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
			if (i) {
				ret = trace_seq_puts(s, " <= ");
				if (!ret)
					return TRACE_TYPE_PARTIAL_LINE;
			}
			ret = seq_print_ip_sym(s, field->caller[i],
					       sym_flags);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
		ret = trace_seq_puts(s, "\n");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		break;
	}
	case TRACE_PRINT: {
		struct print_entry *field;

		trace_assign_type(field, entry);

		seq_print_ip_sym(s, field->ip, sym_flags);
		trace_seq_printf(s, ": %s", field->buf);
		if (entry->flags & TRACE_FLAG_CONT)
			trace_seq_print_cont(s, iter);
		break;
	}
	}
	return TRACE_TYPE_HANDLED;
}
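
/*
 * Raw format: numeric fields only, one entry per line, intended for
 * post-processing tools rather than human readers.
 */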
static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	int ret;
	int S, T;

	entry = iter->ent;

	if (entry->type == TRACE_CONT)
		return TRACE_TYPE_HANDLED;

	ret = trace_seq_printf(s, "%d %d %llu ",
			       entry->pid, iter->cpu, iter->ts);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	switch (entry->type) {
	case TRACE_FN: {
		struct ftrace_entry *field;

		trace_assign_type(field, entry);

		/* ip and parent_ip are unsigned long, so print with %lx */
		ret = trace_seq_printf(s, "%lx %lx\n",
				       field->ip,
				       field->parent_ip);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		break;
	}
	case TRACE_CTX:
	case TRACE_WAKE: {
		struct ctx_switch_entry *field;

		trace_assign_type(field, entry);

		S = field->prev_state < sizeof(state_to_char) ?
			state_to_char[field->prev_state] : 'X';
		T = field->next_state < sizeof(state_to_char) ?
			state_to_char[field->next_state] : 'X';
		if (entry->type == TRACE_WAKE)
			S = '+';
		ret = trace_seq_printf(s, "%d %d %c %d %d %d %c\n",
				       field->prev_pid,
				       field->prev_prio,
				       S,
				       field->next_cpu,
				       field->next_pid,
				       field->next_prio,
				       T);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		break;
	}
	case TRACE_SPECIAL:
	case TRACE_STACK: {
		struct special_entry *field;

		trace_assign_type(field, entry);

		ret = trace_seq_printf(s, "# %ld %ld %ld\n",
				       field->arg1,
				       field->arg2,
				       field->arg3);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		break;
	}
	case TRACE_PRINT: {
		struct print_entry *field;

		trace_assign_type(field, entry);

		trace_seq_printf(s, "# %lx %s", field->ip, field->buf);
		if (entry->flags & TRACE_FLAG_CONT)
			trace_seq_print_cont(s, iter);
		break;
	}
	}
	return TRACE_TYPE_HANDLED;
}
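
/*
 * Helpers for the hex and binary formats: emit one field and bail out
 * of the calling function with TRACE_TYPE_PARTIAL_LINE if the seq
 * buffer is full.
 */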
#define SEQ_PUT_FIELD_RET(s, x)				\
do {							\
	if (!trace_seq_putmem(s, &(x), sizeof(x)))	\
		return TRACE_TYPE_PARTIAL_LINE;		\
} while (0)

#define SEQ_PUT_HEX_FIELD_RET(s, x)			\
do {							\
	BUILD_BUG_ON(sizeof(x) > MAX_MEMHEX_BYTES);	\
	if (!trace_seq_putmem_hex(s, &(x), sizeof(x)))	\
		return TRACE_TYPE_PARTIAL_LINE;		\
} while (0)
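
/*
 * Hex format: every field hex-dumped back to back, terminated by a
 * newline byte.
 */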
static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned char newline = '\n';
	struct trace_entry *entry;
	int S, T;

	entry = iter->ent;

	if (entry->type == TRACE_CONT)
		return TRACE_TYPE_HANDLED;

	SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
	SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
	SEQ_PUT_HEX_FIELD_RET(s, iter->ts);

	switch (entry->type) {
	case TRACE_FN: {
		struct ftrace_entry *field;

		trace_assign_type(field, entry);

		SEQ_PUT_HEX_FIELD_RET(s, field->ip);
		SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);
		break;
	}
	case TRACE_CTX:
	case TRACE_WAKE: {
		struct ctx_switch_entry *field;

		trace_assign_type(field, entry);

		S = field->prev_state < sizeof(state_to_char) ?
			state_to_char[field->prev_state] : 'X';
		T = field->next_state < sizeof(state_to_char) ?
			state_to_char[field->next_state] : 'X';
		if (entry->type == TRACE_WAKE)
			S = '+';
		SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
		SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
		SEQ_PUT_HEX_FIELD_RET(s, S);
		SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
		SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
		SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
		SEQ_PUT_HEX_FIELD_RET(s, T);
		break;
	}
	case TRACE_SPECIAL:
	case TRACE_STACK: {
		struct special_entry *field;

		trace_assign_type(field, entry);

		SEQ_PUT_HEX_FIELD_RET(s, field->arg1);
		SEQ_PUT_HEX_FIELD_RET(s, field->arg2);
		SEQ_PUT_HEX_FIELD_RET(s, field->arg3);
		break;
	}
	}
	SEQ_PUT_FIELD_RET(s, newline);

	return TRACE_TYPE_HANDLED;
}
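
/*
 * Binary format: raw struct fields copied straight into the seq
 * buffer, the most compact of the output modes.
 */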
static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;

	entry = iter->ent;

	if (entry->type == TRACE_CONT)
		return TRACE_TYPE_HANDLED;

	SEQ_PUT_FIELD_RET(s, entry->pid);
	SEQ_PUT_FIELD_RET(s, entry->cpu);
	SEQ_PUT_FIELD_RET(s, iter->ts);

	switch (entry->type) {
	case TRACE_FN: {
		struct ftrace_entry *field;

		trace_assign_type(field, entry);

		SEQ_PUT_FIELD_RET(s, field->ip);
		SEQ_PUT_FIELD_RET(s, field->parent_ip);
		break;
	}
	case TRACE_CTX: {
		struct ctx_switch_entry *field;

		trace_assign_type(field, entry);

		SEQ_PUT_FIELD_RET(s, field->prev_pid);
		SEQ_PUT_FIELD_RET(s, field->prev_prio);
		SEQ_PUT_FIELD_RET(s, field->prev_state);
		SEQ_PUT_FIELD_RET(s, field->next_pid);
		SEQ_PUT_FIELD_RET(s, field->next_prio);
		SEQ_PUT_FIELD_RET(s, field->next_state);
		break;
	}
	case TRACE_SPECIAL:
	case TRACE_STACK: {
		struct special_entry *field;

		trace_assign_type(field, entry);

		SEQ_PUT_FIELD_RET(s, field->arg1);
		SEQ_PUT_FIELD_RET(s, field->arg2);
		SEQ_PUT_FIELD_RET(s, field->arg3);
		break;
	}
	}
	return TRACE_TYPE_HANDLED;
}
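
/*
 * Return true when every per-CPU buffer covered by the iterator has
 * been fully consumed.
 */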
static int trace_empty(struct trace_iterator *iter)
{
	int cpu;

	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu]) {
			if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
				return 0;
		}
	}

	return 1;
}
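
/*
 * Dispatch one entry to the right formatter: a tracer-specific
 * print_line() hook wins, then the bin/hex/raw iterator options,
 * then the latency format, and finally the default format.
 */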
static enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	enum print_line_t ret;

	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
		return print_lat_fmt(iter, iter->idx, iter->cpu);

	return print_trace_fmt(iter);
}
static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
		}
		if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
			/* print nothing if the buffers are empty */
			if (trace_empty(iter))
				return 0;
			print_trace_header(m, iter);
			if (!(trace_flags & TRACE_ITER_VERBOSE))
				print_lat_help_header(m);
		} else {
			if (!(trace_flags & TRACE_ITER_VERBOSE))
				print_func_help_header(m);
		}
	} else {
		print_trace_line(iter);
		trace_print_seq(m, &iter->seq);
	}

	return 0;
}

static struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};
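
/*
 * Common open path for the "trace" and "latency_trace" files: allocate
 * the iterator, attach per-CPU ring buffer iterators, and stop tracing
 * while the snapshot is being read.
 */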
static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, int *ret)
{
	struct trace_iterator *iter;
	struct seq_file *m;
	int cpu;

	if (tracing_disabled) {
		*ret = -ENODEV;
		return NULL;
	}

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		*ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&trace_types_lock);
	if (current_trace && current_trace->print_max)
		iter->tr = &max_tr;
	else
		iter->tr = inode->i_private;
	iter->trace = current_trace;
	iter->pos = -1;

	for_each_tracing_cpu(cpu) {
		iter->buffer_iter[cpu] =
			ring_buffer_read_start(iter->tr->buffer, cpu);
		if (!iter->buffer_iter[cpu]) {
			*ret = -ENOMEM;
			goto fail_buffer;
		}
	}

	/* TODO stop tracer */
	*ret = seq_open(file, &tracer_seq_ops);
	if (*ret)
		goto fail_buffer;

	m = file->private_data;
	m->private = iter;

	/* stop the trace while dumping */
	tracing_stop();

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	mutex_unlock(&trace_types_lock);

 out:
	return iter;

 fail_buffer:
	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}
	mutex_unlock(&trace_types_lock);
	kfree(iter);

	return ERR_PTR(-ENOMEM);
}
int tracing_open_generic(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	filp->private_data = inode->i_private;
	return 0;
}
int tracing_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct trace_iterator *iter = m->private;
	int cpu;

	mutex_lock(&trace_types_lock);
	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}

	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	/* reenable tracing if it was previously enabled */
	tracing_start();
	mutex_unlock(&trace_types_lock);

	seq_release(inode, file);
	kfree(iter);
	return 0;
}
static int tracing_open(struct inode *inode, struct file *file)
{
	int ret;

	__tracing_open(inode, file, &ret);

	return ret;
}

static int tracing_lt_open(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter;
	int ret;

	iter = __tracing_open(inode, file, &ret);

	if (!ret)
		iter->iter_flags |= TRACE_FILE_LAT_FMT;

	return ret;
}
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct tracer *t = m->private;

	(*pos)++;

	if (t)
		t = t->next;

	m->private = t;

	return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct tracer *t = m->private;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);
	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct tracer *t = v;

	if (!t)
		return 0;

	seq_printf(m, "%s", t->name);
	if (t->next)
		seq_putc(m, ' ');
	else
		seq_putc(m, '\n');

	return 0;
}

static struct seq_operations show_traces_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};
static int show_traces_open(struct inode *inode, struct file *file)
{
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	ret = seq_open(file, &show_traces_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = trace_types;
	}

	return ret;
}

static struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_release,
};

static struct file_operations tracing_lt_fops = {
	.open		= tracing_lt_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_release,
};

static struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.read		= seq_read,
	.release	= seq_release,
};
/*
 * Only trace on a CPU if the bitmask is set:
 */
static cpumask_t tracing_cpumask = CPU_MASK_ALL;

/*
 * When tracing/tracing_cpumask is modified then this holds
 * the new bitmask we are about to install:
 */
static cpumask_t tracing_cpumask_new;

/*
 * The tracer itself will not take this lock, but still we want
 * to provide a consistent cpumask to user-space:
 */
static DEFINE_MUTEX(tracing_cpumask_update_lock);

/*
 * Temporary storage for the character representation of the
 * CPU bitmask (and one more byte for the newline):
 */
static char mask_str[NR_CPUS + 1];
static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	int len;

	mutex_lock(&tracing_cpumask_update_lock);

	len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
	if (count - len < 2) {
		count = -EINVAL;
		goto out_err;
	}
	len += sprintf(mask_str + len, "\n");
	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);

 out_err:
	mutex_unlock(&tracing_cpumask_update_lock);

	return count;
}
static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	int err, cpu;

	mutex_lock(&tracing_cpumask_update_lock);
	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_unlock;

	raw_local_irq_disable();
	__raw_spin_lock(&ftrace_max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpu_isset(cpu, tracing_cpumask) &&
		    !cpu_isset(cpu, tracing_cpumask_new)) {
			atomic_inc(&global_trace.data[cpu]->disabled);
		}
		if (!cpu_isset(cpu, tracing_cpumask) &&
		    cpu_isset(cpu, tracing_cpumask_new)) {
			atomic_dec(&global_trace.data[cpu]->disabled);
		}
	}
	__raw_spin_unlock(&ftrace_max_lock);
	raw_local_irq_enable();

	tracing_cpumask = tracing_cpumask_new;

	mutex_unlock(&tracing_cpumask_update_lock);

	return count;

 err_unlock:
	mutex_unlock(&tracing_cpumask_update_lock);

	return err;
}

static struct file_operations tracing_cpumask_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_cpumask_read,
	.write		= tracing_cpumask_write,
};
static ssize_t
tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	char *buf;
	int r = 0;
	int len = 0;
	int i;

	/* calculate max size */
	for (i = 0; trace_options[i]; i++) {
		len += strlen(trace_options[i]);
		len += 3; /* "no" and space */
	}

	/* +2 for \n and \0 */
	buf = kmalloc(len + 2, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; trace_options[i]; i++) {
		if (trace_flags & (1 << i))
			r += sprintf(buf + r, "%s ", trace_options[i]);
		else
			r += sprintf(buf + r, "no%s ", trace_options[i]);
	}

	r += sprintf(buf + r, "\n");
	WARN_ON(r >= len + 2);

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	kfree(buf);

	return r;
}
static ssize_t
tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp = buf;
	int neg = 0;
	int i;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	if (strncmp(buf, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}

	for (i = 0; trace_options[i]; i++) {
		int len = strlen(trace_options[i]);

		if (strncmp(cmp, trace_options[i], len) == 0) {
			if (neg)
				trace_flags &= ~(1 << i);
			else
				trace_flags |= (1 << i);
			break;
		}
	}
	/*
	 * If no option could be set, return an error:
	 */
	if (!trace_options[i])
		return -EINVAL;

	filp->f_pos += cnt;

	return cnt;
}

static struct file_operations tracing_iter_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_iter_ctrl_read,
	.write		= tracing_iter_ctrl_write,
};
static const char readme_msg[] =
	"tracing mini-HOWTO:\n\n"
	"# mkdir /debug\n"
	"# mount -t debugfs nodev /debug\n\n"
	"# cat /debug/tracing/available_tracers\n"
	"wakeup preemptirqsoff preemptoff irqsoff ftrace sched_switch none\n\n"
	"# cat /debug/tracing/current_tracer\n"
	"none\n"
	"# echo sched_switch > /debug/tracing/current_tracer\n"
	"# cat /debug/tracing/current_tracer\n"
	"sched_switch\n"
	"# cat /debug/tracing/iter_ctrl\n"
	"noprint-parent nosym-offset nosym-addr noverbose\n"
	"# echo print-parent > /debug/tracing/iter_ctrl\n"
	"# echo 1 > /debug/tracing/tracing_enabled\n"
	"# cat /debug/tracing/trace > /tmp/trace.txt\n"
	"# echo 0 > /debug/tracing/tracing_enabled\n"
;

static ssize_t
tracing_readme_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, cnt, ppos,
				       readme_msg, strlen(readme_msg));
}

static struct file_operations tracing_readme_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_readme_read,
};
static ssize_t
tracing_ctrl_read(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	r = sprintf(buf, "%u\n", tracer_enabled);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_ctrl_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	val = !!val;

	mutex_lock(&trace_types_lock);
	if (tracer_enabled ^ val) {
		if (val) {
			tracer_enabled = 1;
			if (current_trace->start)
				current_trace->start(tr);
			tracing_start();
		} else {
			tracer_enabled = 0;
			tracing_stop();
			if (current_trace->stop)
				current_trace->stop(tr);
		}
	}
	mutex_unlock(&trace_types_lock);

	filp->f_pos += cnt;

	return cnt;
}
static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	char buf[max_tracer_type_len+2];
	int r;

	mutex_lock(&trace_types_lock);
	if (current_trace)
		r = sprintf(buf, "%s\n", current_trace->name);
	else
		r = sprintf(buf, "\n");
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static int tracing_set_tracer(char *buf)
{
	struct trace_array *tr = &global_trace;
	struct tracer *t;
	int ret = 0;

	mutex_lock(&trace_types_lock);
	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (t == current_trace)
		goto out;

	if (current_trace && current_trace->reset)
		current_trace->reset(tr);

	current_trace = t;
	if (t->init)
		t->init(tr);

 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}
static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	char buf[max_tracer_type_len+1];
	int i;
	ssize_t ret;

	if (cnt > max_tracer_type_len)
		cnt = max_tracer_type_len;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/* strip ending whitespace. */
	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
		buf[i] = 0;

	ret = tracing_set_tracer(buf);
	if (!ret)
		ret = cnt;

	if (ret > 0)
		filp->f_pos += ret;

	return ret;
}
static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n",
		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	long *ptr = filp->private_data;
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	*ptr = val * 1000;

	return cnt;
}
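
/*
 * trace_pipe is a consuming reader; only one opener is allowed at a
 * time, which this counter enforces.
 */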
static atomic_t tracing_reader;

static int tracing_open_pipe(struct inode *inode, struct file *filp)
{
	struct trace_iterator *iter;

	if (tracing_disabled)
		return -ENODEV;

	/* We only allow for one reader of the pipe */
	if (atomic_inc_return(&tracing_reader) != 1) {
		atomic_dec(&tracing_reader);
		return -EBUSY;
	}

	/* create a buffer to store the information to pass to userspace */
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		atomic_dec(&tracing_reader);
		return -ENOMEM;
	}

	mutex_lock(&trace_types_lock);

	/* trace pipe does not show start of buffer */
	cpus_setall(iter->started);

	iter->tr = &global_trace;
	iter->trace = current_trace;
	filp->private_data = iter;

	if (iter->trace->pipe_open)
		iter->trace->pipe_open(iter);
	mutex_unlock(&trace_types_lock);

	return 0;
}

static int tracing_release_pipe(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter = file->private_data;

	kfree(iter);
	atomic_dec(&tracing_reader);

	return 0;
}
static unsigned int
tracing_poll_pipe(struct file *filp, poll_table *poll_table)
{
	struct trace_iterator *iter = filp->private_data;

	if (trace_flags & TRACE_ITER_BLOCK) {
		/*
		 * Always select as readable when in blocking mode
		 */
		return POLLIN | POLLRDNORM;
	} else {
		if (!trace_empty(iter))
			return POLLIN | POLLRDNORM;
		poll_wait(filp, &trace_wait, poll_table);
		if (!trace_empty(iter))
			return POLLIN | POLLRDNORM;

		return 0;
	}
}
/*
 * Consumer reader.
 */
static ssize_t
tracing_read_pipe(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct trace_iterator *iter = filp->private_data;
	ssize_t sret;

	/* return any leftover data */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (sret != -EBUSY)
		return sret;

	trace_seq_reset(&iter->seq);

	mutex_lock(&trace_types_lock);
	if (iter->trace->read) {
		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
		if (sret)
			goto out;
	}

waitagain:
	sret = 0;
	while (trace_empty(iter)) {

		if ((filp->f_flags & O_NONBLOCK)) {
			sret = -EAGAIN;
			goto out;
		}

		/*
		 * This is a makeshift waitqueue. The reason we don't use
		 * an actual wait queue is because:
		 * 1) we only ever have one waiter
		 * 2) the tracer traces all functions, and we don't want
		 *    the overhead of calling wake_up and friends
		 *    (and tracing them too)
		 * Anyway, this is really a very primitive wakeup.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		iter->tr->waiter = current;

		mutex_unlock(&trace_types_lock);

		/* sleep for 100 msecs, and try again. */
		schedule_timeout(HZ/10);

		mutex_lock(&trace_types_lock);

		iter->tr->waiter = NULL;

		if (signal_pending(current)) {
			sret = -EINTR;
			goto out;
		}

		if (iter->trace != current_trace)
			goto out;

		/*
		 * We block until we read something and tracing is disabled.
		 * We still block if tracing is disabled, but we have never
		 * read anything. This allows a user to cat this file, and
		 * then enable tracing. But after we have read something,
		 * we give an EOF when tracing is again disabled.
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
		if (!tracer_enabled && iter->pos)
			break;

		continue;
	}

	/* stop when tracing is finished */
	if (trace_empty(iter))
		goto out;

	if (cnt >= PAGE_SIZE)
		cnt = PAGE_SIZE - 1;

	/* reset all but tr, trace, and overruns */
	memset(&iter->seq, 0,
	       sizeof(struct trace_iterator) -
	       offsetof(struct trace_iterator, seq));
	iter->pos = -1;

	while (find_next_entry_inc(iter) != NULL) {
		enum print_line_t ret;
		int len = iter->seq.len;

		ret = print_trace_line(iter);
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			/* don't print partial lines */
			iter->seq.len = len;
			break;
		}

		trace_consume(iter);

		if (iter->seq.len >= cnt)
			break;
	}

	/* Now copy what we have to the user */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (iter->seq.readpos >= iter->seq.len)
		trace_seq_reset(&iter->seq);

	/*
	 * If there was nothing to send to the user, in spite of consuming
	 * trace entries, go back to wait for more entries.
	 */
	if (sret == -EBUSY)
		goto waitagain;

out:
	mutex_unlock(&trace_types_lock);

	return sret;
}
static ssize_t
tracing_entries_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = sprintf(buf, "%lu\n", tr->entries);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
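
/*
 * Resize both the global and the max-latency ring buffers. Tracing is
 * stopped and all per-CPU buffers disabled for the duration; if the
 * max buffer cannot be resized to match, the global buffer is shrunk
 * back so the two stay the same size.
 */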
static ssize_t
tracing_entries_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	unsigned long val;
	char buf[64];
	int ret, cpu;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	/* must have at least 1 entry */
	if (!val)
		return -EINVAL;

	mutex_lock(&trace_types_lock);

	tracing_stop();

	/* disable all cpu buffers */
	for_each_tracing_cpu(cpu) {
		if (global_trace.data[cpu])
			atomic_inc(&global_trace.data[cpu]->disabled);
		if (max_tr.data[cpu])
			atomic_inc(&max_tr.data[cpu]->disabled);
	}

	if (val != global_trace.entries) {
		ret = ring_buffer_resize(global_trace.buffer, val);
		if (ret < 0) {
			cnt = ret;
			goto out;
		}

		ret = ring_buffer_resize(max_tr.buffer, val);
		if (ret < 0) {
			int r;
			cnt = ret;
			r = ring_buffer_resize(global_trace.buffer,
					       global_trace.entries);
			if (r < 0) {
				/*
				 * AARGH! We are left with different
				 * size max buffer!!!!
				 */
				WARN_ON(1);
				tracing_disabled = 1;
			}
			goto out;
		}

		global_trace.entries = val;
	}

	filp->f_pos += cnt;

	/* If check pages failed, return ENOMEM */
	if (tracing_disabled)
		cnt = -ENOMEM;
 out:
	for_each_tracing_cpu(cpu) {
		if (global_trace.data[cpu])
			atomic_dec(&global_trace.data[cpu]->disabled);
		if (max_tr.data[cpu])
			atomic_dec(&max_tr.data[cpu]->disabled);
	}

	tracing_start();
	max_tr.entries = global_trace.entries;
	mutex_unlock(&trace_types_lock);

	return cnt;
}
static int mark_printk(const char *fmt, ...)
{
	int ret;
	va_list args;
	va_start(args, fmt);
	ret = trace_vprintk(0, fmt, args);
	va_end(args);
	return ret;
}
static ssize_t
tracing_mark_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *fpos)
{
	char *buf;
	char *end;

	if (tracing_disabled)
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	buf = kmalloc(cnt + 1, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		kfree(buf);
		return -EFAULT;
	}

	/* Cut from the first NUL or newline. */
	buf[cnt] = '\0';
	end = strchr(buf, '\n');
	if (end)
		*end = '\0';

	cnt = mark_printk("%s\n", buf);
	kfree(buf);
	*fpos += cnt;

	return cnt;
}
static struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
};

static struct file_operations tracing_ctrl_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_ctrl_read,
	.write		= tracing_ctrl_write,
};

static struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
};

static struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.release	= tracing_release_pipe,
};

static struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
};

static struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic,
	.write		= tracing_mark_write,
};
#ifdef CONFIG_DYNAMIC_FTRACE

int __weak ftrace_arch_read_dyn_info(char *buf, int size)
{
	return 0;
}

static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	static char ftrace_dyn_info_buffer[1024];
	static DEFINE_MUTEX(dyn_info_mutex);
	unsigned long *p = filp->private_data;
	char *buf = ftrace_dyn_info_buffer;
	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
	int r;

	mutex_lock(&dyn_info_mutex);
	r = sprintf(buf, "%ld ", *p);

	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
	buf[r++] = '\n';

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	mutex_unlock(&dyn_info_mutex);

	return r;
}

static struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
};
#endif
static struct dentry *d_tracer;

struct dentry *tracing_init_dentry(void)
{
	static int once;

	if (d_tracer)
		return d_tracer;

	d_tracer = debugfs_create_dir("tracing", NULL);

	if (!d_tracer && !once) {
		once = 1;
		pr_warning("Could not create debugfs directory 'tracing'\n");
		return NULL;
	}

	return d_tracer;
}
#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

static __init int tracer_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("tracing_enabled", 0644, d_tracer,
				    &global_trace, &tracing_ctrl_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'tracing_enabled' entry\n");

	entry = debugfs_create_file("iter_ctrl", 0644, d_tracer,
				    NULL, &tracing_iter_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'iter_ctrl' entry\n");

	entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer,
				    NULL, &tracing_cpumask_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'tracing_cpumask' entry\n");

	entry = debugfs_create_file("latency_trace", 0444, d_tracer,
				    &global_trace, &tracing_lt_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'latency_trace' entry\n");

	entry = debugfs_create_file("trace", 0444, d_tracer,
				    &global_trace, &tracing_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'trace' entry\n");

	entry = debugfs_create_file("available_tracers", 0444, d_tracer,
				    &global_trace, &show_traces_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'available_tracers' entry\n");

	entry = debugfs_create_file("current_tracer", 0444, d_tracer,
				    &global_trace, &set_tracer_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'current_tracer' entry\n");

	entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer,
				    &tracing_max_latency,
				    &tracing_max_lat_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'tracing_max_latency' entry\n");

	entry = debugfs_create_file("tracing_thresh", 0644, d_tracer,
				    &tracing_thresh, &tracing_max_lat_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'tracing_thresh' entry\n");

	entry = debugfs_create_file("README", 0644, d_tracer,
				    NULL, &tracing_readme_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'README' entry\n");

	entry = debugfs_create_file("trace_pipe", 0644, d_tracer,
				    NULL, &tracing_pipe_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'trace_pipe' entry\n");

	entry = debugfs_create_file("trace_entries", 0644, d_tracer,
				    &global_trace, &tracing_entries_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'trace_entries' entry\n");

	entry = debugfs_create_file("trace_marker", 0220, d_tracer,
				    NULL, &tracing_mark_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'trace_marker' entry\n");

#ifdef CONFIG_DYNAMIC_FTRACE
	entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
				    &ftrace_update_tot_cnt,
				    &tracing_dyn_info_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'dyn_ftrace_total_info' entry\n");
#endif
#ifdef CONFIG_SYSPROF_TRACER
	init_tracer_sysprof_debugfs(d_tracer);
#endif
	return 0;
}
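
/*
 * Format a printk-style message into a TRACE_PRINT entry in the ring
 * buffer. Runs with preemption disabled; a static buffer protected by
 * a spinlock keeps the formatted string off the caller's stack.
 */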
int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
	static DEFINE_SPINLOCK(trace_buf_lock);
	static char trace_buf[TRACE_BUF_SIZE];

	struct ring_buffer_event *event;
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	struct print_entry *entry;
	unsigned long flags, irq_flags;
	int cpu, len = 0, size, pc;

	if (tracing_disabled)
		return 0;

	pc = preempt_count();
	preempt_disable_notrace();
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];

	if (unlikely(atomic_read(&data->disabled)))
		goto out;

	spin_lock_irqsave(&trace_buf_lock, flags);
	len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);

	len = min(len, TRACE_BUF_SIZE-1);
	trace_buf[len] = 0;

	size = sizeof(*entry) + len + 1;
	event = ring_buffer_lock_reserve(tr->buffer, size, &irq_flags);
	if (!event)
		goto out_unlock;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);
	entry->ent.type = TRACE_PRINT;
	entry->ip = ip;

	memcpy(&entry->buf, trace_buf, len);
	entry->buf[len] = 0;
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

 out_unlock:
	spin_unlock_irqrestore(&trace_buf_lock, flags);

 out:
	preempt_enable_notrace();

	return len;
}
EXPORT_SYMBOL_GPL(trace_vprintk);
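
/*
 * Varargs front end for trace_vprintk(); callers normally reach this
 * through the ftrace_printk() wrapper, which supplies the caller's
 * instruction pointer. Output is gated by the "printk" iter_ctrl flag.
 */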
int __ftrace_printk(unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = trace_vprintk(ip, fmt, ap);
	va_end(ap);
	return ret;
}
EXPORT_SYMBOL_GPL(__ftrace_printk);
static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump();
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call	= trace_panic_handler,
	.next		= NULL,
	.priority	= 150	/* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call	= trace_die_handler,
	.priority	= 200
};
/*
 * printk is set to a max of 1024; we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_INFO

static void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->len >= TRACE_MAX_PRINT)
		s->len = TRACE_MAX_PRINT;

	/* should be zero ended, but we are paranoid. */
	s->buffer[s->len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_reset(s);
}
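
/*
 * Dump the ftrace buffer to the console, typically from a panic or
 * oops notifier. Tracing is killed outright and all per-CPU buffers
 * disabled first, and only the first caller ever produces output.
 */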
void ftrace_dump(void)
{
	static DEFINE_SPINLOCK(ftrace_dump_lock);
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static cpumask_t mask;
	static int dump_ran;
	unsigned long flags;
	int cnt = 0, cpu;

	/* only one dump */
	spin_lock_irqsave(&ftrace_dump_lock, flags);
	if (dump_ran)
		goto out;

	dump_ran = 1;

	/* No turning back! */
	ftrace_kill();

	for_each_tracing_cpu(cpu) {
		atomic_inc(&global_trace.data[cpu]->disabled);
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	iter.tr = &global_trace;
	iter.trace = current_trace;

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all we can read, and then
	 * release the locks again.
	 */

	cpus_clear(mask);

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (find_next_entry_inc(&iter) != NULL) {
			print_trace_line(&iter);
			trace_consume(&iter);
		}

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out:
	spin_unlock_irqrestore(&ftrace_dump_lock, flags);
}
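
/*
 * Early-boot setup: allocate the global (and, with
 * CONFIG_TRACER_MAX_TRACE, the max-latency) ring buffers, wire up the
 * per-CPU trace data, register the default tracers and the panic/die
 * notifiers, then enable tracing.
 */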
__init static int tracer_alloc_buffers(void)
{
	struct trace_array_cpu *data;
	int i;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	tracing_buffer_mask = cpu_possible_map;

	global_trace.buffer = ring_buffer_alloc(trace_buf_size,
						TRACE_BUFFER_FLAGS);
	if (!global_trace.buffer) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		return 0;
	}
	global_trace.entries = ring_buffer_size(global_trace.buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	max_tr.buffer = ring_buffer_alloc(trace_buf_size,
					  TRACE_BUFFER_FLAGS);
	if (!max_tr.buffer) {
		printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
		WARN_ON(1);
		ring_buffer_free(global_trace.buffer);
		return 0;
	}
	max_tr.entries = ring_buffer_size(max_tr.buffer);
	WARN_ON(max_tr.entries != global_trace.entries);
#endif

	/* Hook up the per-CPU trace data for all buffers */
	for_each_tracing_cpu(i) {
		data = global_trace.data[i] = &per_cpu(global_trace_cpu, i);
		max_tr.data[i] = &per_cpu(max_data, i);
	}

	trace_init_cmdlines();

	register_tracer(&nop_trace);
#ifdef CONFIG_BOOT_TRACER
	register_tracer(&boot_tracer);
	current_trace = &boot_tracer;
	current_trace->init(&global_trace);
#else
	current_trace = &nop_trace;
#endif

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	return 0;
}
early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);