trace.c
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *   Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/ring_buffer.h>
#include <linux/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/gfp.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

#define TRACE_BUFFER_FLAGS	(RB_FL_OVERWRITE)

unsigned long __read_mostly	tracing_max_latency;
unsigned long __read_mostly	tracing_thresh;

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
static int ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the entries
 * inserted during the selftest, although concurrent insertions into
 * the ring buffer (such as trace_printk) could occur at the same
 * time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
static bool __read_mostly tracing_selftest_disabled;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int dummy_set_flag(u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
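
/*
 * ftrace_disable_cpu()/ftrace_enable_cpu() bump a per-cpu counter that
 * keeps the function tracer from recursing into ring buffer operations
 * on this CPU (see the check in trace_function()).
 */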
static inline void ftrace_disable_cpu(void)
{
	preempt_disable();
	local_inc(&__get_cpu_var(ftrace_cpu_disabled));
}

static inline void ftrace_enable_cpu(void)
{
	local_dec(&__get_cpu_var(ftrace_cpu_disabled));
	preempt_enable();
}

static cpumask_var_t __read_mostly	tracing_buffer_mask;

/* Define which cpu buffers are currently read in trace_pipe */
static cpumask_var_t			tracing_reader_cpumask;

#define for_each_tracing_cpu(cpu)	\
	for_each_cpu(cpu, tracing_buffer_mask)

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It defaults to off, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops to true.
 */
int ftrace_dump_on_oops;

static int tracing_set_tracer(const char *buf);

#define BOOTUP_TRACER_SIZE		100
static char bootup_tracer_buf[BOOTUP_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static int __init set_ftrace(char *str)
{
	strncpy(bootup_tracer_buf, str, BOOTUP_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = 1;
	return 1;
}
__setup("ftrace=", set_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	ftrace_dump_on_oops = 1;
	return 1;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
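
/*
 * ns2usecs - convert nanoseconds to microseconds.
 * Adding 500 before dividing by 1000 rounds to the nearest
 * microsecond instead of truncating.
 */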
long
ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in memory is used to hold the
 * linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array	global_trace;

static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);

cycle_t ftrace_now(int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!global_trace.buffer)
		return trace_clock_local();
	ts = ring_buffer_time_stamp(global_trace.buffer, cpu);
	ring_buffer_normalize_time_stamp(global_trace.buffer, cpu, &ts);

	return ts;
}

/*
 * The max_tr is used to snapshot the global_trace when a maximum
 * latency is reached. Some tracers will use this to store a maximum
 * trace while it continues examining live traces.
 *
 * The buffers for the max_tr are set up the same as the global_trace.
 * When a snapshot is taken, the linked list of the max_tr is swapped
 * with the linked list of the global_trace and the buffers are reset for
 * the global_trace so the tracing can continue.
 */
static struct trace_array	max_tr;

static DEFINE_PER_CPU(struct trace_array_cpu, max_data);

/* tracer_enabled is used to toggle activation of a tracer */
static int			tracer_enabled = 1;

/**
 * tracing_is_enabled - return tracer_enabled status
 *
 * This function is used by other tracers to know the status
 * of the tracer_enabled flag. Tracers may use this function
 * to decide whether to enable their features when starting
 * up. See irqsoff tracer for an example (start_irqsoff_tracer).
 */
int tracing_is_enabled(void)
{
	return tracer_enabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this is
 * configurable at both boot time and run time.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/* current_trace points to the tracer that is currently active */
static struct tracer		*current_trace __read_mostly;

/*
 * max_tracer_type_len is used to simplify the allocating of
 * buffers to read userspace tracer names. We keep track of
 * the longest tracer name registered.
 */
static int			max_tracer_type_len;

/*
 * trace_types_lock is used to protect the trace_types list.
 * This lock is also used to keep user access serialized.
 * Accesses from userspace will grab this lock while userspace
 * activities happen inside the kernel.
 */
static DEFINE_MUTEX(trace_types_lock);

/* trace_wait is a waitqueue for tasks blocked on trace_poll */
static DECLARE_WAIT_QUEUE_HEAD(trace_wait);

/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO;

/**
 * trace_wake_up - wake up tasks waiting for trace input
 *
 * Simply wakes up any task that is blocked on the trace_wait
 * queue. This is used with trace_poll for tasks polling the trace.
 */
void trace_wake_up(void)
{
	/*
	 * The runqueue_is_locked() can fail, but this is the best we
	 * have for now:
	 */
	if (!(trace_flags & TRACE_ITER_BLOCK) && !runqueue_is_locked())
		wake_up(&trace_wait);
}

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;
	int ret;

	if (!str)
		return 0;
	ret = strict_strtoul(str, 0, &buf_size);
	/* nr_entries cannot be zero */
	if (ret < 0 || buf_size == 0)
		return 0;
	trace_buf_size = buf_size;

	return 1;
}
__setup("trace_buf_size=", set_buf_size);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"sched-tree",
	"trace_printk",
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	"printk-msg-only",
	"context-info",
	"latency-format",
	"global-clock",
	NULL
};

/*
 * ftrace_max_lock is used to protect the swapping of buffers
 * when taking a max snapshot. The buffers themselves are
 * protected by per_cpu spinlocks. But the action of the swap
 * needs its own lock.
 *
 * This is defined as a raw_spinlock_t in order to help
 * with performance when lockdep debugging is enabled.
 */
static raw_spinlock_t ftrace_max_lock =
	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /debugfs/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data = tr->data[cpu];

	max_tr.cpu = cpu;
	max_tr.time_start = data->preempt_timestamp;

	data = max_tr.data[cpu];
	data->saved_latency = tracing_max_latency;

	memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
	data->pid = tsk->pid;
	data->uid = task_uid(tsk);
	data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	data->policy = tsk->policy;
	data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}
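
/*
 * trace_seq_to_user - copy the unread part of a trace_seq to user space.
 * Returns the number of bytes copied, -EBUSY if nothing is pending,
 * or -EFAULT if the copy to user space failed entirely.
 */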
ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
	int len;
	int ret;

	if (!cnt)
		return 0;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
	if (ret == cnt)
		return -EFAULT;

	cnt -= ret;

	s->readpos += cnt;
	return cnt;
}
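
/*
 * trace_seq_to_buffer - like trace_seq_to_user, but copies the pending
 * bytes into a kernel buffer instead of user space.
 */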
ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;
	void *ret;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	ret = memcpy(buf, s->buffer + s->readpos, cnt);
	if (!ret)
		return -EFAULT;

	s->readpos += cnt;
	return cnt;
}

static void
trace_print_seq(struct seq_file *m, struct trace_seq *s)
{
	int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;

	s->buffer[len] = 0;
	seq_puts(m, s->buffer);

	trace_seq_init(s);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf = tr->buffer;

	WARN_ON_ONCE(!irqs_disabled());
	__raw_spin_lock(&ftrace_max_lock);

	tr->buffer = max_tr.buffer;
	max_tr.buffer = buf;

	ftrace_disable_cpu();
	ring_buffer_reset(tr->buffer);
	ftrace_enable_cpu();

	__update_max_tr(tr, tsk, cpu);
	__raw_spin_unlock(&ftrace_max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	WARN_ON_ONCE(!irqs_disabled());
	__raw_spin_lock(&ftrace_max_lock);

	ftrace_disable_cpu();

	ring_buffer_reset(max_tr.buffer);
	ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);

	ftrace_enable_cpu();

	WARN_ON_ONCE(ret && ret != -EAGAIN);

	__update_max_tr(tr, tsk, cpu);
	__raw_spin_unlock(&ftrace_max_lock);
}

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
__releases(kernel_lock)
__acquires(kernel_lock)
{
	struct tracer *t;
	int len;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	/*
	 * When this gets called we hold the BKL which means that
	 * preemption is disabled. Various trace selftests however
	 * need to disable and enable preemption for successful tests.
	 * So we drop the BKL here and grab it after the tests again.
	 */
	unlock_kernel();
	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Trace %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;
	if (!type->wait_pipe)
		type->wait_pipe = default_wait_pipe;

#ifdef CONFIG_FTRACE_STARTUP_TEST
	if (type->selftest && !tracing_selftest_disabled) {
		struct tracer *saved_tracer = current_trace;
		struct trace_array *tr = &global_trace;
		int i;

		/*
		 * Run a selftest on this tracer.
		 * Here we reset the trace buffer, and set the current
		 * tracer to be this tracer. The tracer can then run some
		 * internal tracing to verify that everything is in order.
		 * If we fail, we do not register this tracer.
		 */
		for_each_tracing_cpu(i)
			tracing_reset(tr, i);

		current_trace = type;
		/* the test is responsible for initializing and enabling */
		pr_info("Testing tracer %s: ", type->name);
		ret = type->selftest(type, tr);
		/* the test is responsible for resetting too */
		current_trace = saved_tracer;
		if (ret) {
			printk(KERN_CONT "FAILED!\n");
			goto out;
		}
		/* Only reset on passing, to avoid touching corrupted buffers */
		for_each_tracing_cpu(i)
			tracing_reset(tr, i);

		printk(KERN_CONT "PASSED\n");
	}
#endif

	type->next = trace_types;
	trace_types = type;
	len = strlen(type->name);
	if (len > max_tracer_type_len)
		max_tracer_type_len = len;

out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, BOOTUP_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(type->name);
	default_bootup_tracer = NULL;
	/* Disable other selftests, since running a tracer will break them. */
	tracing_selftest_disabled = 1;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

out_unlock:
	lock_kernel();
	return ret;
}

void unregister_tracer(struct tracer *type)
{
	struct tracer **t;
	int len;

	mutex_lock(&trace_types_lock);
	for (t = &trace_types; *t; t = &(*t)->next) {
		if (*t == type)
			goto found;
	}
	pr_info("Trace %s not registered\n", type->name);
	goto out;

found:
	*t = (*t)->next;

	if (type == current_trace && tracer_enabled) {
		tracer_enabled = 0;
		tracing_stop();
		if (current_trace->stop)
			current_trace->stop(&global_trace);
		current_trace = &nop_trace;
	}

	if (strlen(type->name) != max_tracer_type_len)
		goto out;

	max_tracer_type_len = 0;
	for (t = &trace_types; *t; t = &(*t)->next) {
		len = strlen((*t)->name);
		if (len > max_tracer_type_len)
			max_tracer_type_len = len;
	}
out:
	mutex_unlock(&trace_types_lock);
}
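
/*
 * tracing_reset - clear the ring buffer for one CPU of @tr, keeping
 * the function tracer out of the buffer while it is being reset.
 */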
void tracing_reset(struct trace_array *tr, int cpu)
{
	ftrace_disable_cpu();
	ring_buffer_reset_cpu(tr->buffer, cpu);
	ftrace_enable_cpu();
}

void tracing_reset_online_cpus(struct trace_array *tr)
{
	int cpu;

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		tracing_reset(tr, cpu);
}

#define SAVED_CMDLINES 128
#define NO_CMDLINE_MAP UINT_MAX
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED;

/* temporarily disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;
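
/*
 * memset() stores the low byte of NO_CMDLINE_MAP (0xff) into every
 * byte, which leaves each unsigned slot equal to UINT_MAX, i.e.
 * NO_CMDLINE_MAP itself.
 */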
static void trace_init_cmdlines(void)
{
	memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
	memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
	cmdline_idx = 0;
}

static int trace_stop_count;
static DEFINE_SPINLOCK(tracing_start_lock);

/**
 * ftrace_off_permanent - disable all ftrace code permanently
 *
 * This should only be called when a serious anomaly has
 * been detected. This will turn off the function tracing,
 * ring buffers, and other tracing utilities. It takes no
 * locks and can be called from any context.
 */
void ftrace_off_permanent(void)
{
	tracing_disabled = 1;
	ftrace_stop();
	tracing_off_permanent();
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	spin_lock_irqsave(&tracing_start_lock, flags);
	if (--trace_stop_count) {
		if (trace_stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			trace_stop_count = 0;
		}
		goto out;
	}

	buffer = global_trace.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

	buffer = max_tr.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

	ftrace_start();
out:
	spin_unlock_irqrestore(&tracing_start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Lightweight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	ftrace_stop();
	spin_lock_irqsave(&tracing_start_lock, flags);
	if (trace_stop_count++)
		goto out;

	buffer = global_trace.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

	buffer = max_tr.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

out:
	spin_unlock_irqrestore(&tracing_start_lock, flags);
}

void trace_stop_cmdline_recording(void);

static void trace_save_cmdline(struct task_struct *tsk)
{
	unsigned pid, idx;

	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!__raw_spin_trylock(&trace_cmdline_lock))
		return;

	idx = map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		idx = (cmdline_idx + 1) % SAVED_CMDLINES;

		/*
		 * Check whether the cmdline buffer at idx has a pid
		 * mapped. We are going to overwrite that entry so we
		 * need to clear the map_pid_to_cmdline. Otherwise we
		 * would read the new comm for the old pid.
		 */
		pid = map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		map_cmdline_to_pid[idx] = tsk->pid;
		map_pid_to_cmdline[tsk->pid] = idx;

		cmdline_idx = idx;
	}

	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

	__raw_spin_unlock(&trace_cmdline_lock);
}

void trace_find_cmdline(int pid, char comm[])
{
	unsigned map;

	if (!pid) {
		strcpy(comm, "<idle>");
		return;
	}

	if (pid > PID_MAX_DEFAULT) {
		strcpy(comm, "<...>");
		return;
	}

	__raw_spin_lock(&trace_cmdline_lock);
	map = map_pid_to_cmdline[pid];
	if (map != NO_CMDLINE_MAP)
		strcpy(comm, saved_cmdlines[map]);
	else
		strcpy(comm, "<...>");
	__raw_spin_unlock(&trace_cmdline_lock);
}

void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled) || !tracer_enabled ||
	    !tracing_is_on())
		return;

	trace_save_cmdline(tsk);
}
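
/*
 * Fill in the fields common to every trace entry: pid/tgid of the
 * current task, the preempt count, and flags describing the interrupt
 * and scheduling state at the time of the event.
 */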
void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count = pc & 0xff;
	entry->pid = (tsk) ? tsk->pid : 0;
	entry->tgid = (tsk) ? tsk->tgid : 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}
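
/*
 * Reserve an event of @len bytes on the ring buffer of @tr and
 * pre-fill its common fields; returns NULL when the reservation fails.
 */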
struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
						    unsigned char type,
						    unsigned long len,
						    unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(tr->buffer, len);
	if (event != NULL) {
		struct trace_entry *ent = ring_buffer_event_data(event);

		tracing_generic_entry_update(ent, flags, pc);
		ent->type = type;
	}

	return event;
}

static void ftrace_trace_stack(struct trace_array *tr,
			       unsigned long flags, int skip, int pc);
static void ftrace_trace_userstack(struct trace_array *tr,
				   unsigned long flags, int pc);

void trace_buffer_unlock_commit(struct trace_array *tr,
				struct ring_buffer_event *event,
				unsigned long flags, int pc)
{
	ring_buffer_unlock_commit(tr->buffer, event);

	ftrace_trace_stack(tr, flags, 6, pc);
	ftrace_trace_userstack(tr, flags, pc);
	trace_wake_up();
}

struct ring_buffer_event *
trace_current_buffer_lock_reserve(unsigned char type, unsigned long len,
				  unsigned long flags, int pc)
{
	return trace_buffer_lock_reserve(&global_trace,
					 type, len, flags, pc);
}

void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
					unsigned long flags, int pc)
{
	return trace_buffer_unlock_commit(&global_trace, event, flags, pc);
}

void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	/* If we are reading the ring buffer, don't trace */
	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
		return;

	event = trace_buffer_lock_reserve(tr, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;
	ring_buffer_unlock_commit(tr->buffer, event);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static void __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned long flags,
				int pc)
{
	struct ring_buffer_event *event;
	struct ftrace_graph_ent_entry *entry;

	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
		return;

	event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->graph_ent = *trace;
	ring_buffer_unlock_commit(global_trace.buffer, event);
}

static void __trace_graph_return(struct trace_array *tr,
				 struct ftrace_graph_ret *trace,
				 unsigned long flags,
				 int pc)
{
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *entry;

	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
		return;

	event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ret = *trace;
	ring_buffer_unlock_commit(global_trace.buffer, event);
}
#endif

void
ftrace(struct trace_array *tr, struct trace_array_cpu *data,
       unsigned long ip, unsigned long parent_ip, unsigned long flags,
       int pc)
{
	if (likely(!atomic_read(&data->disabled)))
		trace_function(tr, ip, parent_ip, flags, pc);
}

static void __ftrace_trace_stack(struct trace_array *tr,
				 unsigned long flags,
				 int skip, int pc)
{
#ifdef CONFIG_STACKTRACE
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;

	event = trace_buffer_lock_reserve(tr, TRACE_STACK,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries = 0;
	trace.max_entries = FTRACE_STACK_ENTRIES;
	trace.skip = skip;
	trace.entries = entry->caller;

	save_stack_trace(&trace);
	ring_buffer_unlock_commit(tr->buffer, event);
#endif
}

static void ftrace_trace_stack(struct trace_array *tr,
			       unsigned long flags,
			       int skip, int pc)
{
	if (!(trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(tr, flags, skip, pc);
}

void __trace_stack(struct trace_array *tr,
		   unsigned long flags,
		   int skip, int pc)
{
	__ftrace_trace_stack(tr, flags, skip, pc);
}

static void ftrace_trace_userstack(struct trace_array *tr,
				   unsigned long flags, int pc)
{
#ifdef CONFIG_STACKTRACE
	struct ring_buffer_event *event;
	struct userstack_entry *entry;
	struct stack_trace trace;

	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
		return;

	event = trace_buffer_lock_reserve(tr, TRACE_USER_STACK,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);

	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries = 0;
	trace.max_entries = FTRACE_STACK_ENTRIES;
	trace.skip = 0;
	trace.entries = entry->caller;

	save_stack_trace_user(&trace);
	ring_buffer_unlock_commit(tr->buffer, event);
#endif
}

#ifdef UNUSED
static void __trace_userstack(struct trace_array *tr, unsigned long flags)
{
	ftrace_trace_userstack(tr, flags, preempt_count());
}
#endif /* UNUSED */

static void
ftrace_trace_special(void *__tr,
		     unsigned long arg1, unsigned long arg2, unsigned long arg3,
		     int pc)
{
	struct ring_buffer_event *event;
	struct trace_array *tr = __tr;
	struct special_entry *entry;

	event = trace_buffer_lock_reserve(tr, TRACE_SPECIAL,
					  sizeof(*entry), 0, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->arg1 = arg1;
	entry->arg2 = arg2;
	entry->arg3 = arg3;
	trace_buffer_unlock_commit(tr, event, 0, pc);
}

void
__trace_special(void *__tr, void *__data,
		unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
	ftrace_trace_special(__tr, arg1, arg2, arg3, preempt_count());
}

void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned long flags, int pc)
{
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;

	event = trace_buffer_lock_reserve(tr, TRACE_CTX,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->prev_pid = prev->pid;
	entry->prev_prio = prev->prio;
	entry->prev_state = prev->state;
	entry->next_pid = next->pid;
	entry->next_prio = next->prio;
	entry->next_state = next->state;
	entry->next_cpu = task_cpu(next);
	trace_buffer_unlock_commit(tr, event, flags, pc);
}

void
tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct task_struct *wakee,
			   struct task_struct *curr,
			   unsigned long flags, int pc)
{
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;

	event = trace_buffer_lock_reserve(tr, TRACE_WAKE,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->prev_pid = curr->pid;
	entry->prev_prio = curr->prio;
	entry->prev_state = curr->state;
	entry->next_pid = wakee->pid;
	entry->next_prio = wakee->prio;
	entry->next_state = wakee->state;
	entry->next_cpu = task_cpu(wakee);

	ring_buffer_unlock_commit(tr->buffer, event);
	ftrace_trace_stack(tr, flags, 6, pc);
	ftrace_trace_userstack(tr, flags, pc);
}
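
/*
 * ftrace_special - record three arbitrary values as a TRACE_SPECIAL
 * event in the global trace, guarding against recursion via the
 * per-cpu disabled counter.
 */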
void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu;
	int pc;

	if (tracing_disabled)
		return;

	pc = preempt_count();
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];

	if (likely(atomic_inc_return(&data->disabled) == 1))
		ftrace_trace_special(tr, arg1, arg2, arg3, pc);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (!ftrace_trace_task(current))
		return 0;

	if (!ftrace_graph_addr(trace->func))
		return 0;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_entry(tr, trace, flags, pc);
	}
	/* Only do the atomic if it is not already set */
	if (!test_tsk_trace_graph(current))
		set_tsk_trace_graph(current);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return 1;
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	if (!trace->depth)
		clear_tsk_trace_graph(current);
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/* size (in u32/char units) of the static printk buffers below */
#define TRACE_BUF_SIZE		1024

/**
 * trace_vbprintk - write binary msg to tracing buffer
 *
 */
int trace_vbprintk(unsigned long ip, int depth, const char *fmt, va_list args)
{
	static raw_spinlock_t trace_buf_lock =
		(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
	static u32 trace_buf[TRACE_BUF_SIZE];

	struct ring_buffer_event *event;
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	struct bprint_entry *entry;
	unsigned long flags;
	int resched;
	int cpu, len = 0, size, pc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];

	if (unlikely(atomic_read(&data->disabled)))
		goto out;

	/* Lockdep uses trace_printk for lock tracing */
	local_irq_save(flags);
	__raw_spin_lock(&trace_buf_lock);
	len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);

	if (len > TRACE_BUF_SIZE || len < 0)
		goto out_unlock;

	size = sizeof(*entry) + sizeof(u32) * len;
	event = trace_buffer_lock_reserve(tr, TRACE_BPRINT, size, flags, pc);
	if (!event)
		goto out_unlock;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->depth = depth;
	entry->fmt = fmt;

	memcpy(entry->buf, trace_buf, sizeof(u32) * len);
	ring_buffer_unlock_commit(tr->buffer, event);

out_unlock:
	__raw_spin_unlock(&trace_buf_lock);
	local_irq_restore(flags);

out:
	ftrace_preempt_enable(resched);
	unpause_graph_tracing();

	return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);

int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
{
	static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
	static char trace_buf[TRACE_BUF_SIZE];

	struct ring_buffer_event *event;
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	int cpu, len = 0, size, pc;
	struct print_entry *entry;
	unsigned long irq_flags;

	if (tracing_disabled || tracing_selftest_running)
		return 0;

	pc = preempt_count();
	preempt_disable_notrace();
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];

	if (unlikely(atomic_read(&data->disabled)))
		goto out;

	pause_graph_tracing();
	raw_local_irq_save(irq_flags);
	__raw_spin_lock(&trace_buf_lock);
	len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);

	len = min(len, TRACE_BUF_SIZE-1);
	trace_buf[len] = 0;

	size = sizeof(*entry) + len + 1;
	event = trace_buffer_lock_reserve(tr, TRACE_PRINT, size, irq_flags, pc);
	if (!event)
		goto out_unlock;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->depth = depth;

	memcpy(&entry->buf, trace_buf, len);
	entry->buf[len] = 0;
	ring_buffer_unlock_commit(tr->buffer, event);

out_unlock:
	__raw_spin_unlock(&trace_buf_lock);
	raw_local_irq_restore(irq_flags);
	unpause_graph_tracing();
out:
	preempt_enable_notrace();

	return len;
}
EXPORT_SYMBOL_GPL(trace_vprintk);

enum trace_file_type {
	TRACE_FILE_LAT_FMT	= 1,
	TRACE_FILE_ANNOTATE	= 2,
};

static void trace_iterator_increment(struct trace_iterator *iter)
{
	/* Don't allow ftrace to trace into the ring buffers */
	ftrace_disable_cpu();

	iter->idx++;
	if (iter->buffer_iter[iter->cpu])
		ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);

	ftrace_enable_cpu();
}

static struct trace_entry *
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];

	/* Don't allow ftrace to trace into the ring buffers */
	ftrace_disable_cpu();

	if (buf_iter)
		event = ring_buffer_iter_peek(buf_iter, ts);
	else
		event = ring_buffer_peek(iter->tr->buffer, cpu, ts);

	ftrace_enable_cpu();

	return event ? ring_buffer_event_data(event) : NULL;
}
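
/*
 * __find_next_entry - return the entry that should be printed next:
 * the oldest event still pending across the per-cpu ring buffers.
 * When non-NULL, *ent_cpu and *ent_ts receive the CPU and timestamp
 * of that entry.
 */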
static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
{
	struct ring_buffer *buffer = iter->tr->buffer;
	struct trace_entry *ent, *next = NULL;
	int cpu_file = iter->cpu_file;
	u64 next_ts = 0, ts;
	int next_cpu = -1;
	int cpu;

	/*
	 * If we are in a per-cpu trace file, don't bother iterating
	 * over all CPUs; peek at that CPU directly.
	 */
	if (cpu_file > TRACE_PIPE_ALL_CPU) {
		if (ring_buffer_empty_cpu(buffer, cpu_file))
			return NULL;
		ent = peek_next_entry(iter, cpu_file, ent_ts);
		if (ent_cpu)
			*ent_cpu = cpu_file;

		return ent;
	}

	for_each_tracing_cpu(cpu) {

		if (ring_buffer_empty_cpu(buffer, cpu))
			continue;

		ent = peek_next_entry(iter, cpu, &ts);

		/*
		 * Pick the entry with the smallest timestamp:
		 */
		if (ent && (!next || ts < next_ts)) {
			next = ent;
			next_cpu = cpu;
			next_ts = ts;
		}
	}

	if (ent_cpu)
		*ent_cpu = next_cpu;

	if (ent_ts)
		*ent_ts = next_ts;

	return next;
}

/* Find the next real entry, without updating the iterator itself */
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts)
{
	return __find_next_entry(iter, ent_cpu, ent_ts);
}

/* Find the next real entry, and increment the iterator to the next entry */
static void *find_next_entry_inc(struct trace_iterator *iter)
{
	iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts);

	if (iter->ent)
		trace_iterator_increment(iter);

	return iter->ent ? iter : NULL;
}

static void trace_consume(struct trace_iterator *iter)
{
	/* Don't allow ftrace to trace into the ring buffers */
	ftrace_disable_cpu();
	ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts);
	ftrace_enable_cpu();
}

static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	int i = (int)*pos;
	void *ent;

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	if (iter->idx < 0)
		ent = find_next_entry_inc(iter);
	else
		ent = iter;

	while (ent && iter->idx < i)
		ent = find_next_entry_inc(iter);

	iter->pos = *pos;

	return ent;
}

/*
 * No locking is necessary here.  The worst thing that can happen
 * is losing events consumed at the same time by a trace_pipe reader.
 * Other than that, we don't risk crashing the ring buffer, because
 * it serializes the readers.
 *
 * The current tracer is copied to avoid taking a global lock
 * all around.
 */
static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	static struct tracer *old_tracer;
	int cpu_file = iter->cpu_file;
	void *p = NULL;
	loff_t l = 0;
	int cpu;

	/* copy the tracer to avoid using a global lock all around */
	mutex_lock(&trace_types_lock);
	if (unlikely(old_tracer != current_trace && current_trace)) {
		old_tracer = current_trace;
		*iter->trace = *current_trace;
	}
	mutex_unlock(&trace_types_lock);

	atomic_inc(&trace_record_cmdline_disabled);

	if (*pos != iter->pos) {
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;

		ftrace_disable_cpu();

		if (cpu_file == TRACE_PIPE_ALL_CPU) {
			for_each_tracing_cpu(cpu)
				ring_buffer_iter_reset(iter->buffer_iter[cpu]);
		} else
			ring_buffer_iter_reset(iter->buffer_iter[cpu_file]);

		ftrace_enable_cpu();

		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		l = *pos - 1;
		p = s_next(m, p, &l);
	}

	return p;
}

static void s_stop(struct seq_file *m, void *p)
{
	atomic_dec(&trace_record_cmdline_disabled);
}
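
/*
 * s_start()/s_next()/s_stop()/s_show() implement the seq_file
 * protocol.  A simplified sketch of the calling sequence the seq_file
 * core performs on a read (assumed, see fs/seq_file.c for the real
 * loop, which also handles buffer overflow and restarts):
 *
 *	p = s_start(m, &pos);
 *	while (p) {
 *		s_show(m, p);
 *		p = s_next(m, p, &pos);
 *	}
 *	s_stop(m, p);
 */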

static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                  _------=> CPU#            \n");
	seq_puts(m, "#                 / _-----=> irqs-off        \n");
	seq_puts(m, "#                | / _----=> need-resched    \n");
	seq_puts(m, "#                || / _---=> hardirq/softirq \n");
	seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
	seq_puts(m, "#                |||| /                      \n");
	seq_puts(m, "#                |||||     delay             \n");
	seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
	seq_puts(m, "#     \\   /      |||||   \\   |   /           \n");
}

static void print_func_help_header(struct seq_file *m)
{
	seq_puts(m, "#           TASK-PID    CPU#    TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |       |          |         |\n");
}

static void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_array *tr = iter->tr;
	struct trace_array_cpu *data = tr->data[tr->cpu];
	struct tracer *type = current_trace;
	unsigned long total;
	unsigned long entries;
	const char *name = "preemption";

	if (type)
		name = type->name;

	entries = ring_buffer_entries(iter->tr->buffer);
	total = entries +
		ring_buffer_overruns(iter->tr->buffer);

	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "# -----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries,
		   total,
		   tr->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "#    -----------------\n");
	seq_printf(m, "#    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid, data->uid, data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "#    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, "#  => started at: ");
		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#  => ended at:   ");
		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "#\n");
	}

	seq_puts(m, "#\n");
}

static void test_cpu_buff_start(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;

	if (!(trace_flags & TRACE_ITER_ANNOTATE))
		return;

	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
		return;

	if (cpumask_test_cpu(iter->cpu, iter->started))
		return;

	cpumask_set_cpu(iter->cpu, iter->started);
	trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu);
}

static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	test_cpu_buff_start(iter);

	event = ftrace_find_event(entry->type);

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
			if (!trace_print_lat_context(iter))
				goto partial;
		} else {
			if (!trace_print_context(iter))
				goto partial;
		}
	}

	if (event)
		return event->trace(iter, sym_flags);

	if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (!trace_seq_printf(s, "%d %d %llu ",
				      entry->pid, iter->cpu, iter->ts))
			goto partial;
	}

	event = ftrace_find_event(entry->type);
	if (event)
		return event->raw(iter, 0);

	if (!trace_seq_printf(s, "%d ?\n", entry->type))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned char newline = '\n';
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
		SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
		SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
	}

	event = ftrace_find_event(entry->type);
	if (event) {
		enum print_line_t ret = event->hex(iter, 0);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	SEQ_PUT_FIELD_RET(s, newline);

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t print_bprintk_msg_only(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	struct bprint_entry *field;
	int ret;

	trace_assign_type(field, entry);

	ret = trace_seq_bprintf(s, field->fmt, field->buf);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t print_printk_msg_only(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	struct print_entry *field;
	int ret;

	trace_assign_type(field, entry);

	ret = trace_seq_printf(s, "%s", field->buf);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_FIELD_RET(s, entry->pid);
		SEQ_PUT_FIELD_RET(s, iter->cpu);
		SEQ_PUT_FIELD_RET(s, iter->ts);
	}

	event = ftrace_find_event(entry->type);
	return event ? event->binary(iter, 0) : TRACE_TYPE_HANDLED;
}

static int trace_empty(struct trace_iterator *iter)
{
	int cpu;

	/* If we are looking at one CPU buffer, only check that one */
	if (iter->cpu_file != TRACE_PIPE_ALL_CPU) {
		cpu = iter->cpu_file;
		if (iter->buffer_iter[cpu]) {
			if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
				return 0;
		}
		return 1;
	}

	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu]) {
			if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
				return 0;
		}
	}

	return 1;
}

static enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	enum print_line_t ret;

	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

	if (iter->ent->type == TRACE_BPRINT &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return print_bprintk_msg_only(iter);

	if (iter->ent->type == TRACE_PRINT &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return print_printk_msg_only(iter);

	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	return print_trace_fmt(iter);
}
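
/*
 * Note the dispatch order above: a tracer's own print_line() wins,
 * then the printk-msgonly cases, then bin over hex over raw, and
 * finally the default human-readable format.  E.g. with both "hex"
 * and "raw" set in trace_options, the output is hex.
 */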

static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
		}
		if (iter->trace && iter->trace->print_header)
			iter->trace->print_header(m);
		else if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
			/* print nothing if the buffers are empty */
			if (trace_empty(iter))
				return 0;
			print_trace_header(m, iter);
			if (!(trace_flags & TRACE_ITER_VERBOSE))
				print_lat_help_header(m);
		} else {
			if (!(trace_flags & TRACE_ITER_VERBOSE))
				print_func_help_header(m);
		}
	} else {
		print_trace_line(iter);
		trace_print_seq(m, &iter->seq);
	}

	return 0;
}

static struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};

static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file)
{
	long cpu_file = (long) inode->i_private;
	void *fail_ret = ERR_PTR(-ENOMEM);
	struct trace_iterator *iter;
	struct seq_file *m;
	int cpu, ret;

	if (tracing_disabled)
		return ERR_PTR(-ENODEV);

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return ERR_PTR(-ENOMEM);

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	mutex_lock(&trace_types_lock);
	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace)
		goto fail;

	if (current_trace)
		*iter->trace = *current_trace;

	if (current_trace && current_trace->print_max)
		iter->tr = &max_tr;
	else
		iter->tr = &global_trace;
	iter->pos = -1;
	mutex_init(&iter->mutex);
	iter->cpu_file = cpu_file;

	/* Notify the tracer early; before we stop tracing. */
	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->tr->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
		for_each_tracing_cpu(cpu) {
			iter->buffer_iter[cpu] =
				ring_buffer_read_start(iter->tr->buffer, cpu);
		}
	} else {
		cpu = iter->cpu_file;
		iter->buffer_iter[cpu] =
			ring_buffer_read_start(iter->tr->buffer, cpu);
	}

	/* TODO stop tracer */
	ret = seq_open(file, &tracer_seq_ops);
	if (ret < 0) {
		fail_ret = ERR_PTR(ret);
		goto fail_buffer;
	}

	m = file->private_data;
	m->private = iter;

	/* stop the trace while dumping */
	tracing_stop();

	mutex_unlock(&trace_types_lock);

	return iter;

 fail_buffer:
	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}
 fail:
	mutex_unlock(&trace_types_lock);
	kfree(iter->trace);
	kfree(iter);

	return fail_ret;
}

int tracing_open_generic(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	filp->private_data = inode->i_private;
	return 0;
}

static int tracing_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct trace_iterator *iter = m->private;
	int cpu;

	mutex_lock(&trace_types_lock);
	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}

	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	/* reenable tracing if it was previously enabled */
	tracing_start();
	mutex_unlock(&trace_types_lock);

	seq_release(inode, file);
	mutex_destroy(&iter->mutex);
	kfree(iter->trace);
	kfree(iter);
	return 0;
}

static int tracing_open(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter;
	int ret = 0;

	iter = __tracing_open(inode, file);
	if (IS_ERR(iter))
		ret = PTR_ERR(iter);
	else if (trace_flags & TRACE_ITER_LATENCY_FMT)
		iter->iter_flags |= TRACE_FILE_LAT_FMT;

	return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct tracer *t = m->private;

	(*pos)++;

	if (t)
		t = t->next;

	m->private = t;

	return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct tracer *t = m->private;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);
	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct tracer *t = v;

	if (!t)
		return 0;

	seq_printf(m, "%s", t->name);
	if (t->next)
		seq_putc(m, ' ');
	else
		seq_putc(m, '\n');

	return 0;
}

static struct seq_operations show_traces_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int show_traces_open(struct inode *inode, struct file *file)
{
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	ret = seq_open(file, &show_traces_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = trace_types;
	}

	return ret;
}

static const struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_release,
};

static const struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.read		= seq_read,
	.release	= seq_release,
};

/*
 * Only trace on a CPU if the bitmask is set:
 */
static cpumask_var_t tracing_cpumask;

/*
 * The tracer itself will not take this lock, but still we want
 * to provide a consistent cpumask to user-space:
 */
static DEFINE_MUTEX(tracing_cpumask_update_lock);

/*
 * Temporary storage for the character representation of the
 * CPU bitmask (and one more byte for the newline):
 */
static char mask_str[NR_CPUS + 1];

static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	int len;

	mutex_lock(&tracing_cpumask_update_lock);

	len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
	if (count - len < 2) {
		count = -EINVAL;
		goto out_err;
	}
	len += sprintf(mask_str + len, "\n");
	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);

out_err:
	mutex_unlock(&tracing_cpumask_update_lock);

	return count;
}

static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	int err, cpu;
	cpumask_var_t tracing_cpumask_new;

	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	mutex_lock(&tracing_cpumask_update_lock);
	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_unlock;

	local_irq_disable();
	__raw_spin_lock(&ftrace_max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tracing_cpumask) &&
				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_inc(&global_trace.data[cpu]->disabled);
		}
		if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_dec(&global_trace.data[cpu]->disabled);
		}
	}
	__raw_spin_unlock(&ftrace_max_lock);
	local_irq_enable();

	cpumask_copy(tracing_cpumask, tracing_cpumask_new);

	mutex_unlock(&tracing_cpumask_update_lock);
	free_cpumask_var(tracing_cpumask_new);

	return count;

err_unlock:
	mutex_unlock(&tracing_cpumask_update_lock);
	/* free the newly allocated mask, not the live tracing_cpumask */
	free_cpumask_var(tracing_cpumask_new);

	return err;
}
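
/*
 * Usage example: the mask is parsed with cpumask_parse_user(), so user
 * space writes a standard hex cpumask string.  E.g. to restrict
 * tracing to CPUs 0 and 1 (assuming debugfs is mounted at /debug):
 *
 *	# echo 3 > /debug/tracing/tracing_cpumask
 *
 * Bits cleared by the write bump the per-cpu disabled counter; bits
 * newly set decrement it again.
 */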

static const struct file_operations tracing_cpumask_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_cpumask_read,
	.write		= tracing_cpumask_write,
};

static ssize_t
tracing_trace_options_read(struct file *filp, char __user *ubuf,
			   size_t cnt, loff_t *ppos)
{
	struct tracer_opt *trace_opts;
	u32 tracer_flags;
	int len = 0;
	char *buf;
	int r = 0;
	int i;

	/* calculate max size */
	for (i = 0; trace_options[i]; i++) {
		len += strlen(trace_options[i]);
		len += 3; /* "no" and newline */
	}

	mutex_lock(&trace_types_lock);
	tracer_flags = current_trace->flags->val;
	trace_opts = current_trace->flags->opts;

	/*
	 * Increase the size with the names of the options specific
	 * to the current tracer.
	 */
	for (i = 0; trace_opts[i].name; i++) {
		len += strlen(trace_opts[i].name);
		len += 3; /* "no" and newline */
	}

	/* +2 for \n and \0 */
	buf = kmalloc(len + 2, GFP_KERNEL);
	if (!buf) {
		mutex_unlock(&trace_types_lock);
		return -ENOMEM;
	}

	for (i = 0; trace_options[i]; i++) {
		if (trace_flags & (1 << i))
			r += sprintf(buf + r, "%s\n", trace_options[i]);
		else
			r += sprintf(buf + r, "no%s\n", trace_options[i]);
	}

	for (i = 0; trace_opts[i].name; i++) {
		if (tracer_flags & trace_opts[i].bit)
			r += sprintf(buf + r, "%s\n",
				     trace_opts[i].name);
		else
			r += sprintf(buf + r, "no%s\n",
				     trace_opts[i].name);
	}
	mutex_unlock(&trace_types_lock);

	WARN_ON(r >= len + 2);

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	kfree(buf);
	return r;
}

/* Try to assign a tracer specific option */
static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
{
	struct tracer_flags *trace_flags = trace->flags;
	struct tracer_opt *opts = NULL;
	int ret = 0, i = 0;
	int len;

	for (i = 0; trace_flags->opts[i].name; i++) {
		opts = &trace_flags->opts[i];
		len = strlen(opts->name);

		if (strncmp(cmp, opts->name, len) == 0) {
			ret = trace->set_flag(trace_flags->val,
					      opts->bit, !neg);
			break;
		}
	}
	/* Not found */
	if (!trace_flags->opts[i].name)
		return -EINVAL;

	/* Refused to handle */
	if (ret)
		return ret;

	if (neg)
		trace_flags->val &= ~opts->bit;
	else
		trace_flags->val |= opts->bit;

	return 0;
}

static void set_tracer_flags(unsigned int mask, int enabled)
{
	/* do nothing if flag is already set */
	if (!!(trace_flags & mask) == !!enabled)
		return;

	if (enabled)
		trace_flags |= mask;
	else
		trace_flags &= ~mask;

	if (mask == TRACE_ITER_GLOBAL_CLK) {
		u64 (*func)(void);

		if (enabled)
			func = trace_clock_global;
		else
			func = trace_clock_local;

		mutex_lock(&trace_types_lock);
		ring_buffer_set_clock(global_trace.buffer, func);

		if (max_tr.buffer)
			ring_buffer_set_clock(max_tr.buffer, func);
		mutex_unlock(&trace_types_lock);
	}
}

static ssize_t
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
			    size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp = buf;
	int neg = 0;
	int ret;
	int i;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	if (strncmp(buf, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}

	for (i = 0; trace_options[i]; i++) {
		int len = strlen(trace_options[i]);

		if (strncmp(cmp, trace_options[i], len) == 0) {
			set_tracer_flags(1 << i, !neg);
			break;
		}
	}

	/* If no option could be set, test the specific tracer options */
	if (!trace_options[i]) {
		mutex_lock(&trace_types_lock);
		ret = set_tracer_option(current_trace, cmp, neg);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	filp->f_pos += cnt;

	return cnt;
}
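
/*
 * Usage example: writing an option name sets it, and a "no" prefix
 * clears it (see the readme_msg mini-HOWTO below):
 *
 *	# echo print-parent > /debug/tracing/trace_options
 *	# echo nosym-addr > /debug/tracing/trace_options
 *
 * Names that match no global option fall through to the current
 * tracer's private options via set_tracer_option().
 */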

static const struct file_operations tracing_iter_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_trace_options_read,
	.write		= tracing_trace_options_write,
};

static const char readme_msg[] =
	"tracing mini-HOWTO:\n\n"
	"# mkdir /debug\n"
	"# mount -t debugfs nodev /debug\n\n"
	"# cat /debug/tracing/available_tracers\n"
	"wakeup preemptirqsoff preemptoff irqsoff ftrace sched_switch none\n\n"
	"# cat /debug/tracing/current_tracer\n"
	"none\n"
	"# echo sched_switch > /debug/tracing/current_tracer\n"
	"# cat /debug/tracing/current_tracer\n"
	"sched_switch\n"
	"# cat /debug/tracing/trace_options\n"
	"noprint-parent nosym-offset nosym-addr noverbose\n"
	"# echo print-parent > /debug/tracing/trace_options\n"
	"# echo 1 > /debug/tracing/tracing_enabled\n"
	"# cat /debug/tracing/trace > /tmp/trace.txt\n"
	"# echo 0 > /debug/tracing/tracing_enabled\n"
;

static ssize_t
tracing_readme_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, cnt, ppos,
				       readme_msg, strlen(readme_msg));
}

static const struct file_operations tracing_readme_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_readme_read,
};

static ssize_t
tracing_ctrl_read(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	r = sprintf(buf, "%u\n", tracer_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_ctrl_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	val = !!val;

	mutex_lock(&trace_types_lock);
	if (tracer_enabled ^ val) {
		if (val) {
			tracer_enabled = 1;
			if (current_trace->start)
				current_trace->start(tr);
			tracing_start();
		} else {
			tracer_enabled = 0;
			tracing_stop();
			if (current_trace->stop)
				current_trace->stop(tr);
		}
	}
	mutex_unlock(&trace_types_lock);

	filp->f_pos += cnt;

	return cnt;
}

static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	char buf[max_tracer_type_len+2];
	int r;

	mutex_lock(&trace_types_lock);
	if (current_trace)
		r = sprintf(buf, "%s\n", current_trace->name);
	else
		r = sprintf(buf, "\n");
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

int tracer_init(struct tracer *t, struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);
	return t->init(tr);
}

static int tracing_resize_ring_buffer(unsigned long size)
{
	int ret;

	/*
	 * If kernel or user changes the size of the ring buffer
	 * we use the size that was given, and we can forget about
	 * expanding it later.
	 */
	ring_buffer_expanded = 1;

	ret = ring_buffer_resize(global_trace.buffer, size);
	if (ret < 0)
		return ret;

	ret = ring_buffer_resize(max_tr.buffer, size);
	if (ret < 0) {
		int r;

		r = ring_buffer_resize(global_trace.buffer,
				       global_trace.entries);
		if (r < 0) {
			/*
			 * AARGH! We are left with a differently sized
			 * max buffer!
			 * The max buffer is our "snapshot" buffer.
			 * When a tracer needs a snapshot (one of the
			 * latency tracers), it swaps the max buffer
			 * with the saved snapshot.  We succeeded in
			 * updating the size of the main buffer, but
			 * failed to update the size of the max buffer.
			 * Then, when we tried to reset the main buffer
			 * to its original size, we failed there too.
			 * This is very unlikely to happen, but if it
			 * does, warn and kill all tracing.
			 */
			WARN_ON(1);
			tracing_disabled = 1;
		}
		return ret;
	}

	global_trace.entries = size;

	return ret;
}

/**
 * tracing_update_buffers - used by tracing facility to expand ring buffers
 *
 * To save memory when tracing is never used on a system with it
 * configured in, the ring buffers start at a minimum size.  Once
 * a user starts to use the tracing facility, the buffers need to
 * grow to their default size.
 *
 * This function is to be called when a tracer is about to be used.
 */
int tracing_update_buffers(void)
{
	int ret = 0;

	mutex_lock(&trace_types_lock);
	if (!ring_buffer_expanded)
		ret = tracing_resize_ring_buffer(trace_buf_size);
	mutex_unlock(&trace_types_lock);

	return ret;
}

struct trace_option_dentry;

static struct trace_option_dentry *
create_trace_option_files(struct tracer *tracer);

static void
destroy_trace_option_files(struct trace_option_dentry *topts);

static int tracing_set_tracer(const char *buf)
{
	static struct trace_option_dentry *topts;
	struct trace_array *tr = &global_trace;
	struct tracer *t;
	int ret = 0;

	mutex_lock(&trace_types_lock);

	if (!ring_buffer_expanded) {
		ret = tracing_resize_ring_buffer(trace_buf_size);
		if (ret < 0)
			goto out;
		ret = 0;
	}

	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (t == current_trace)
		goto out;

	trace_branch_disable();
	if (current_trace && current_trace->reset)
		current_trace->reset(tr);

	destroy_trace_option_files(topts);

	current_trace = t;

	topts = create_trace_option_files(current_trace);

	if (t->init) {
		ret = tracer_init(t, tr);
		if (ret)
			goto out;
	}

	trace_branch_enable(tr);
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}
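
/*
 * Usage example (matches the mini-HOWTO above): switching tracers
 * from user space is a write to current_tracer:
 *
 *	# echo sched_switch > /debug/tracing/current_tracer
 *
 * The first switch also expands the ring buffer from its boot-time
 * minimum via tracing_resize_ring_buffer().
 */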

static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	char buf[max_tracer_type_len+1];
	int i;
	size_t ret;
	int err;

	ret = cnt;

	if (cnt > max_tracer_type_len)
		cnt = max_tracer_type_len;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/* strip trailing whitespace */
	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
		buf[i] = 0;

	err = tracing_set_tracer(buf);
	if (err)
		return err;

	filp->f_pos += ret;

	return ret;
}

static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n",
		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	*ptr = val * 1000;

	return cnt;
}
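
/*
 * Units note: this file is read and written in microseconds, while
 * the value is stored in nanoseconds; hence the "* 1000" above and
 * the nsecs_to_usecs() in the read path.  E.g. writing "100" stores
 * 100000 ns.
 */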

static int tracing_open_pipe(struct inode *inode, struct file *filp)
{
	long cpu_file = (long) inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (tracing_disabled)
		return -ENODEV;

	mutex_lock(&trace_types_lock);

	/* We only allow one reader per cpu */
	if (cpu_file == TRACE_PIPE_ALL_CPU) {
		if (!cpumask_empty(tracing_reader_cpumask)) {
			ret = -EBUSY;
			goto out;
		}
		cpumask_setall(tracing_reader_cpumask);
	} else {
		if (!cpumask_test_cpu(cpu_file, tracing_reader_cpumask))
			cpumask_set_cpu(cpu_file, tracing_reader_cpumask);
		else {
			ret = -EBUSY;
			goto out;
		}
	}

	/* create a buffer to store the information to pass to userspace */
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace) {
		ret = -ENOMEM;
		goto fail;
	}
	if (current_trace)
		*iter->trace = *current_trace;

	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto fail;
	}

	/* trace pipe does not show the start of the buffer */
	cpumask_setall(iter->started);

	iter->cpu_file = cpu_file;
	iter->tr = &global_trace;
	mutex_init(&iter->mutex);
	filp->private_data = iter;

	if (iter->trace->pipe_open)
		iter->trace->pipe_open(iter);

out:
	mutex_unlock(&trace_types_lock);
	return ret;

fail:
	kfree(iter->trace);
	kfree(iter);
	mutex_unlock(&trace_types_lock);
	return ret;
}

static int tracing_release_pipe(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter = file->private_data;

	mutex_lock(&trace_types_lock);

	if (iter->cpu_file == TRACE_PIPE_ALL_CPU)
		cpumask_clear(tracing_reader_cpumask);
	else
		cpumask_clear_cpu(iter->cpu_file, tracing_reader_cpumask);

	mutex_unlock(&trace_types_lock);

	free_cpumask_var(iter->started);
	mutex_destroy(&iter->mutex);
	kfree(iter->trace);
	kfree(iter);

	return 0;
}

static unsigned int
tracing_poll_pipe(struct file *filp, poll_table *poll_table)
{
	struct trace_iterator *iter = filp->private_data;

	if (trace_flags & TRACE_ITER_BLOCK) {
		/*
		 * Always select as readable when in blocking mode
		 */
		return POLLIN | POLLRDNORM;
	} else {
		if (!trace_empty(iter))
			return POLLIN | POLLRDNORM;
		poll_wait(filp, &trace_wait, poll_table);
		if (!trace_empty(iter))
			return POLLIN | POLLRDNORM;

		return 0;
	}
}

void default_wait_pipe(struct trace_iterator *iter)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);

	if (trace_empty(iter))
		schedule();

	finish_wait(&trace_wait, &wait);
}

/*
 * This is a makeshift waitqueue.
 * A tracer might use this callback in some rare cases:
 *
 *  1) the current tracer might hold the runqueue lock when it wakes up
 *     a reader, hence a deadlock (sched, function, and function graph tracers)
 *  2) the function tracers trace all functions, and we don't want
 *     the overhead of calling wake_up and friends
 *     (and tracing them too)
 *
 * Anyway, this is a really primitive wakeup.
 */
void poll_wait_pipe(struct trace_iterator *iter)
{
	set_current_state(TASK_INTERRUPTIBLE);
	/* sleep for 100 msecs, and try again. */
	schedule_timeout(HZ / 10);
}

/* Must be called with iter->mutex held (it is dropped while sleeping). */
static int tracing_wait_pipe(struct file *filp)
{
	struct trace_iterator *iter = filp->private_data;

	while (trace_empty(iter)) {

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		mutex_unlock(&iter->mutex);

		iter->trace->wait_pipe(iter);

		mutex_lock(&iter->mutex);

		if (signal_pending(current))
			return -EINTR;

		/*
		 * We block until we read something and tracing is disabled.
		 * We still block if tracing is disabled, but we have never
		 * read anything.  This allows a user to cat this file, and
		 * then enable tracing.  But after we have read something,
		 * we give an EOF when tracing is again disabled.
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
		if (!tracer_enabled && iter->pos)
			break;
	}

	return 1;
}

/*
 * Consumer reader.
 */
static ssize_t
tracing_read_pipe(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct trace_iterator *iter = filp->private_data;
	static struct tracer *old_tracer;
	ssize_t sret;

	/* return any leftover data */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (sret != -EBUSY)
		return sret;

	trace_seq_init(&iter->seq);

	/* copy the tracer to avoid using a global lock all around */
	mutex_lock(&trace_types_lock);
	if (unlikely(old_tracer != current_trace && current_trace)) {
		old_tracer = current_trace;
		*iter->trace = *current_trace;
	}
	mutex_unlock(&trace_types_lock);

	/*
	 * Avoid more than one consumer on a single file descriptor.
	 * This is just a matter of trace coherency; the ring buffer
	 * itself is protected.
	 */
	mutex_lock(&iter->mutex);
	if (iter->trace->read) {
		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
		if (sret)
			goto out;
	}

waitagain:
	sret = tracing_wait_pipe(filp);
	if (sret <= 0)
		goto out;

	/* stop when tracing is finished */
	if (trace_empty(iter)) {
		sret = 0;
		goto out;
	}

	if (cnt >= PAGE_SIZE)
		cnt = PAGE_SIZE - 1;

	/* reset all but tr, trace, and overruns */
	memset(&iter->seq, 0,
	       sizeof(struct trace_iterator) -
	       offsetof(struct trace_iterator, seq));
	iter->pos = -1;

	while (find_next_entry_inc(iter) != NULL) {
		enum print_line_t ret;
		int len = iter->seq.len;

		ret = print_trace_line(iter);
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			/* don't print partial lines */
			iter->seq.len = len;
			break;
		}
		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);

		if (iter->seq.len >= cnt)
			break;
	}

	/* Now copy what we have to the user */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (iter->seq.readpos >= iter->seq.len)
		trace_seq_init(&iter->seq);

	/*
	 * If there was nothing to send to the user, in spite of consuming
	 * trace entries, go back to wait for more entries.
	 */
	if (sret == -EBUSY)
		goto waitagain;

out:
	mutex_unlock(&iter->mutex);

	return sret;
}

static void tracing_pipe_buf_release(struct pipe_inode_info *pipe,
				     struct pipe_buffer *buf)
{
	__free_page(buf->page);
}

static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
				     unsigned int idx)
{
	__free_page(spd->pages[idx]);
}

static struct pipe_buf_operations tracing_pipe_buf_ops = {
	.can_merge		= 0,
	.map			= generic_pipe_buf_map,
	.unmap			= generic_pipe_buf_unmap,
	.confirm		= generic_pipe_buf_confirm,
	.release		= tracing_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= generic_pipe_buf_get,
};

static size_t
tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
{
	size_t count;
	int ret;

	/* Seq buffer is page-sized, exactly what we need. */
	for (;;) {
		count = iter->seq.len;
		ret = print_trace_line(iter);
		count = iter->seq.len - count;
		if (rem < count) {
			rem = 0;
			iter->seq.len -= count;
			break;
		}

		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			iter->seq.len -= count;
			break;
		}

		trace_consume(iter);
		rem -= count;
		if (!find_next_entry_inc(iter)) {
			rem = 0;
			iter->ent = NULL;
			break;
		}
	}

	return rem;
}

static ssize_t tracing_splice_read_pipe(struct file *filp,
					loff_t *ppos,
					struct pipe_inode_info *pipe,
					size_t len,
					unsigned int flags)
{
	struct page *pages[PIPE_BUFFERS];
	struct partial_page partial[PIPE_BUFFERS];
	struct trace_iterator *iter = filp->private_data;
	struct splice_pipe_desc spd = {
		.pages		= pages,
		.partial	= partial,
		.nr_pages	= 0, /* This gets updated below. */
		.flags		= flags,
		.ops		= &tracing_pipe_buf_ops,
		.spd_release	= tracing_spd_release_pipe,
	};
	static struct tracer *old_tracer;
	ssize_t ret;
	size_t rem;
	unsigned int i;

	/* copy the tracer to avoid using a global lock all around */
	mutex_lock(&trace_types_lock);
	if (unlikely(old_tracer != current_trace && current_trace)) {
		old_tracer = current_trace;
		*iter->trace = *current_trace;
	}
	mutex_unlock(&trace_types_lock);

	mutex_lock(&iter->mutex);

	if (iter->trace->splice_read) {
		ret = iter->trace->splice_read(iter, filp,
					       ppos, pipe, len, flags);
		if (ret)
			goto out_err;
	}

	ret = tracing_wait_pipe(filp);
	if (ret <= 0)
		goto out_err;

	if (!iter->ent && !find_next_entry_inc(iter)) {
		ret = -EFAULT;
		goto out_err;
	}

	/* Fill as many pages as possible. */
	for (i = 0, rem = len; i < PIPE_BUFFERS && rem; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			break;

		rem = tracing_fill_pipe_page(rem, iter);

		/* Copy the data into the page, so we can start over. */
		ret = trace_seq_to_buffer(&iter->seq,
					  page_address(pages[i]),
					  iter->seq.len);
		if (ret < 0) {
			__free_page(pages[i]);
			break;
		}
		partial[i].offset = 0;
		partial[i].len = iter->seq.len;

		trace_seq_init(&iter->seq);
	}

	mutex_unlock(&iter->mutex);

	spd.nr_pages = i;

	return splice_to_pipe(pipe, &spd);

out_err:
	mutex_unlock(&iter->mutex);

	return ret;
}

static ssize_t
tracing_entries_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[96];
	int r;

	mutex_lock(&trace_types_lock);
	if (!ring_buffer_expanded)
		r = sprintf(buf, "%lu (expanded: %lu)\n",
			    tr->entries >> 10,
			    trace_buf_size >> 10);
	else
		r = sprintf(buf, "%lu\n", tr->entries >> 10);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_entries_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	unsigned long val;
	char buf[64];
	int ret, cpu;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	/* must have at least 1 entry */
	if (!val)
		return -EINVAL;

	mutex_lock(&trace_types_lock);

	tracing_stop();

	/* disable all cpu buffers */
	for_each_tracing_cpu(cpu) {
		if (global_trace.data[cpu])
			atomic_inc(&global_trace.data[cpu]->disabled);
		if (max_tr.data[cpu])
			atomic_inc(&max_tr.data[cpu]->disabled);
	}

	/* value is in KB */
	val <<= 10;

	if (val != global_trace.entries) {
		ret = tracing_resize_ring_buffer(val);
		if (ret < 0) {
			cnt = ret;
			goto out;
		}
	}

	filp->f_pos += cnt;

	/* If check pages failed, return ENOMEM */
	if (tracing_disabled)
		cnt = -ENOMEM;
 out:
	for_each_tracing_cpu(cpu) {
		if (global_trace.data[cpu])
			atomic_dec(&global_trace.data[cpu]->disabled);
		if (max_tr.data[cpu])
			atomic_dec(&max_tr.data[cpu]->disabled);
	}

	tracing_start();

	max_tr.entries = global_trace.entries;
	mutex_unlock(&trace_types_lock);

	return cnt;
}
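
/*
 * Usage example: the value is in KB (see the "value is in KB" shift
 * above), so, assuming this file is exposed as buffer_size_kb in the
 * tracing debugfs directory (registered elsewhere in this file):
 *
 *	# echo 1024 > /debug/tracing/buffer_size_kb
 *
 * resizes the ring buffer to 1 MB.  Tracing is stopped and all cpu
 * buffers are disabled for the duration of the resize.
 */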

static int mark_printk(const char *fmt, ...)
{
	int ret;
	va_list args;
	va_start(args, fmt);
	ret = trace_vprintk(0, -1, fmt, args);
	va_end(args);
	return ret;
}

static ssize_t
tracing_mark_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *fpos)
{
	char *buf;
	char *end;

	if (tracing_disabled)
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	buf = kmalloc(cnt + 1, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		kfree(buf);
		return -EFAULT;
	}

	/* Cut from the first NUL or newline. */
	buf[cnt] = '\0';
	end = strchr(buf, '\n');
	if (end)
		*end = '\0';

	cnt = mark_printk("%s\n", buf);
	kfree(buf);
	*fpos += cnt;

	return cnt;
}
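
/*
 * Usage example: user space injects a marker into the trace stream by
 * writing to this file (registered via tracing_mark_fops elsewhere in
 * this file, assuming the usual trace_marker debugfs name):
 *
 *	# echo "hit the bug here" > /debug/tracing/trace_marker
 *
 * The string is truncated at the first newline and recorded via
 * mark_printk().
 */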

static const struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
};

static const struct file_operations tracing_ctrl_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_ctrl_read,
	.write		= tracing_ctrl_write,
};

static const struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
};

static const struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.splice_read	= tracing_splice_read_pipe,
	.release	= tracing_release_pipe,
};

static const struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
};

static const struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic,
	.write		= tracing_mark_write,
};

struct ftrace_buffer_info {
	struct trace_array	*tr;
	void			*spare;
	int			cpu;
	unsigned int		read;
};

static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
	int cpu = (int)(long)inode->i_private;
	struct ftrace_buffer_info *info;

	if (tracing_disabled)
		return -ENODEV;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->tr	= &global_trace;
	info->cpu	= cpu;
	info->spare	= ring_buffer_alloc_read_page(info->tr->buffer);
	/* Force reading the ring buffer on the first read */
	info->read	= (unsigned int)-1;

	if (!info->spare)
		goto out;

	filp->private_data = info;

	return 0;

 out:
	kfree(info);
	return -ENOMEM;
}

static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	unsigned int pos;
	ssize_t ret;
	size_t size;

	if (!count)
		return 0;

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)
		goto read;

	info->read = 0;

	ret = ring_buffer_read_page(info->tr->buffer,
				    &info->spare,
				    count,
				    info->cpu, 0);
	if (ret < 0)
		return 0;

	pos = ring_buffer_page_len(info->spare);

	if (pos < PAGE_SIZE)
		memset(info->spare + pos, 0, PAGE_SIZE - pos);

read:
	size = PAGE_SIZE - info->read;
	if (size > count)
		size = count;

	ret = copy_to_user(ubuf, info->spare + info->read, size);
	if (ret == size)
		return -EFAULT;
	size -= ret;

	*ppos += size;
	info->read += size;

	return size;
}

static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;

	ring_buffer_free_read_page(info->tr->buffer, info->spare);
	kfree(info);

	return 0;
}

struct buffer_ref {
	struct ring_buffer	*buffer;
	void			*page;
	int			ref;
};

static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	buf->private = 0;
}

static int buffer_pipe_buf_steal(struct pipe_inode_info *pipe,
				 struct pipe_buffer *buf)
{
	return 1;
}

static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	ref->ref++;
}

/* Pipe buffer operations for a buffer. */
static struct pipe_buf_operations buffer_pipe_buf_ops = {
	.can_merge		= 0,
	.map			= generic_pipe_buf_map,
	.unmap			= generic_pipe_buf_unmap,
	.confirm		= generic_pipe_buf_confirm,
	.release		= buffer_pipe_buf_release,
	.steal			= buffer_pipe_buf_steal,
	.get			= buffer_pipe_buf_get,
};

/*
 * Callback from splice_to_pipe(), used to release some pages at the
 * end of the spd in case we errored out while filling the pipe.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	spd->partial[i].private = 0;
}

static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct partial_page partial[PIPE_BUFFERS];
	struct page *pages[PIPE_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages,
		.partial	= partial,
		.flags		= flags,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int size, i;
	size_t ret;

	/* We can't seek on a buffer input */
	if (unlikely(*ppos))
		return -ESPIPE;

	for (i = 0; i < PIPE_BUFFERS && len; i++, len -= size) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref)
			break;

		/*
		 * The pipe holds the initial reference; release paths
		 * do "if (--ref->ref) return;", so start at 1.
		 */
		ref->ref = 1;
		ref->buffer = info->tr->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer);
		if (!ref->page) {
			kfree(ref);
			break;
		}

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, info->cpu, 0);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer,
						   ref->page);
			kfree(ref);
			break;
		}

		/*
		 * Zero out any leftover data; this is going to
		 * user land.
		 */
		size = ring_buffer_page_len(ref->page);
		if (size < PAGE_SIZE)
			memset(ref->page + size, 0, PAGE_SIZE - size);

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
	}

	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		if (flags & SPLICE_F_NONBLOCK)
			ret = -EAGAIN;
		else
			ret = 0;
		/* TODO: block */
		return ret;
	}

	ret = splice_to_pipe(pipe, &spd);

	return ret;
}

static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

#ifdef CONFIG_DYNAMIC_FTRACE

int __weak ftrace_arch_read_dyn_info(char *buf, int size)
{
	return 0;
}

static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	static char ftrace_dyn_info_buffer[1024];
	static DEFINE_MUTEX(dyn_info_mutex);
	unsigned long *p = filp->private_data;
	char *buf = ftrace_dyn_info_buffer;
	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
	int r;

	mutex_lock(&dyn_info_mutex);
	r = sprintf(buf, "%ld ", *p);

	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
	buf[r++] = '\n';

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	mutex_unlock(&dyn_info_mutex);

	return r;
}

static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
};
#endif
  2871. static struct dentry *d_tracer;
  2872. struct dentry *tracing_init_dentry(void)
  2873. {
  2874. static int once;
  2875. if (d_tracer)
  2876. return d_tracer;
  2877. d_tracer = debugfs_create_dir("tracing", NULL);
  2878. if (!d_tracer && !once) {
  2879. once = 1;
  2880. pr_warning("Could not create debugfs directory 'tracing'\n");
  2881. return NULL;
  2882. }
  2883. return d_tracer;
  2884. }
static struct dentry *d_percpu;

struct dentry *tracing_dentry_percpu(void)
{
	static int once;
	struct dentry *d_tracer;

	if (d_percpu)
		return d_percpu;

	d_tracer = tracing_init_dentry();

	if (!d_tracer)
		return NULL;

	d_percpu = debugfs_create_dir("per_cpu", d_tracer);

	if (!d_percpu && !once) {
		once = 1;
		pr_warning("Could not create debugfs directory 'per_cpu'\n");
		return NULL;
	}

	return d_percpu;
}

static void tracing_init_debugfs_percpu(long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu();
	struct dentry *entry, *d_cpu;
	/* strlen("cpu") + up to 3 digits (cpu <= 999) + '\0' => 7 */
	char cpu_dir[7];

	if (cpu > 999 || cpu < 0)
		return;

	sprintf(cpu_dir, "cpu%ld", cpu);
	d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	entry = debugfs_create_file("trace_pipe", 0444, d_cpu,
				    (void *) cpu, &tracing_pipe_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'trace_pipe' entry\n");

	/* per cpu trace */
	entry = debugfs_create_file("trace", 0444, d_cpu,
				    (void *) cpu, &tracing_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'trace' entry\n");

	entry = debugfs_create_file("trace_pipe_raw", 0444, d_cpu,
				    (void *) cpu, &tracing_buffers_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'trace_pipe_raw' entry\n");
}

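/*
 * Resulting layout, assuming debugfs is mounted at /sys/kernel/debug:
 *
 *	/sys/kernel/debug/tracing/per_cpu/cpuN/trace
 *	/sys/kernel/debug/tracing/per_cpu/cpuN/trace_pipe
 *	/sys/kernel/debug/tracing/per_cpu/cpuN/trace_pipe_raw
 */
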
#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct dentry			*entry;
};

static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	char buf[64];
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = 0;
	switch (val) {
	case 0:
		/* do nothing if already cleared */
		if (!(topt->flags->val & topt->opt->bit))
			break;

		mutex_lock(&trace_types_lock);
		if (current_trace->set_flag)
			ret = current_trace->set_flag(topt->flags->val,
						      topt->opt->bit, 0);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
		topt->flags->val &= ~topt->opt->bit;
		break;
	case 1:
		/* do nothing if already set */
		if (topt->flags->val & topt->opt->bit)
			break;

		mutex_lock(&trace_types_lock);
		if (current_trace->set_flag)
			ret = current_trace->set_flag(topt->flags->val,
						      topt->opt->bit, 1);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
		topt->flags->val |= topt->opt->bit;
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_fops = {
	.open = tracing_open_generic,
	.read = trace_options_read,
	.write = trace_options_write,
};

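/*
 * Usage sketch (illustrative): each tracer-specific option appears as
 * a boolean file under tracing/options/; with debugfs mounted at
 * /sys/kernel/debug:
 *
 *	echo 1 > /sys/kernel/debug/tracing/options/<opt>
 *	cat /sys/kernel/debug/tracing/options/<opt>
 *
 * Anything other than "0" or "1" is rejected with -EINVAL.
 */
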
static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	long index = (long)filp->private_data;
	char *buf;

	if (trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	long index = (long)filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
		trace_flags &= ~(1 << index);
		break;
	case 1:
		trace_flags |= 1 << index;
		break;
	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
};

static struct dentry *trace_options_init_dentry(void)
{
	struct dentry *d_tracer;
	static struct dentry *t_options;

	if (t_options)
		return t_options;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	t_options = debugfs_create_dir("options", d_tracer);
	if (!t_options) {
		pr_warning("Could not create debugfs directory 'options'\n");
		return NULL;
	}

	return t_options;
}

static void
create_trace_option_file(struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;
	struct dentry *entry;

	t_options = trace_options_init_dentry();
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;

	entry = debugfs_create_file(opt->name, 0644, t_options, topt,
				    &trace_options_fops);

	topt->entry = entry;
}

static struct trace_option_dentry *
create_trace_option_files(struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;

	if (!tracer)
		return NULL;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return NULL;

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return NULL;

	for (cnt = 0; opts[cnt].name; cnt++)
		create_trace_option_file(&topts[cnt], flags,
					 &opts[cnt]);

	return topts;
}

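/*
 * The kcalloc() above allocates cnt + 1 zeroed entries, so the array is
 * terminated by an element whose ->opt is NULL; that sentinel is what
 * destroy_trace_option_files() below walks to.
 */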
static void
destroy_trace_option_files(struct trace_option_dentry *topts)
{
	int cnt;

	if (!topts)
		return;

	for (cnt = 0; topts[cnt].opt; cnt++) {
		if (topts[cnt].entry)
			debugfs_remove(topts[cnt].entry);
	}

	kfree(topts);
}

static struct dentry *
create_trace_option_core_file(const char *option, long index)
{
	struct dentry *t_options;
	struct dentry *entry;

	t_options = trace_options_init_dentry();
	if (!t_options)
		return NULL;

	entry = debugfs_create_file(option, 0644, t_options, (void *)index,
				    &trace_options_core_fops);

	return entry;
}

static __init void create_trace_options_dir(void)
{
	struct dentry *t_options;
	struct dentry *entry;
	int i;

	t_options = trace_options_init_dentry();
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++) {
		entry = create_trace_option_core_file(trace_options[i], i);
		if (!entry)
			pr_warning("Could not create debugfs %s entry\n",
				   trace_options[i]);
	}
}

static __init int tracer_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;
	int cpu;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("tracing_enabled", 0644, d_tracer,
				    &global_trace, &tracing_ctrl_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'tracing_enabled' entry\n");

	entry = debugfs_create_file("trace_options", 0644, d_tracer,
				    NULL, &tracing_iter_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'trace_options' entry\n");

	create_trace_options_dir();

	entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer,
				    NULL, &tracing_cpumask_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'tracing_cpumask' entry\n");

	entry = debugfs_create_file("trace", 0444, d_tracer,
				    (void *) TRACE_PIPE_ALL_CPU, &tracing_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'trace' entry\n");

	entry = debugfs_create_file("available_tracers", 0444, d_tracer,
				    &global_trace, &show_traces_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'available_tracers' entry\n");

	entry = debugfs_create_file("current_tracer", 0444, d_tracer,
				    &global_trace, &set_tracer_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'current_tracer' entry\n");

	entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer,
				    &tracing_max_latency,
				    &tracing_max_lat_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'tracing_max_latency' entry\n");

	entry = debugfs_create_file("tracing_thresh", 0644, d_tracer,
				    &tracing_thresh, &tracing_max_lat_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'tracing_thresh' entry\n");

	entry = debugfs_create_file("README", 0644, d_tracer,
				    NULL, &tracing_readme_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'README' entry\n");

	entry = debugfs_create_file("trace_pipe", 0444, d_tracer,
				    (void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'trace_pipe' entry\n");

	entry = debugfs_create_file("buffer_size_kb", 0644, d_tracer,
				    &global_trace, &tracing_entries_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'buffer_size_kb' entry\n");

	entry = debugfs_create_file("trace_marker", 0220, d_tracer,
				    NULL, &tracing_mark_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'trace_marker' entry\n");

#ifdef CONFIG_DYNAMIC_FTRACE
	entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
				    &ftrace_update_tot_cnt,
				    &tracing_dyn_info_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'dyn_ftrace_total_info' entry\n");
#endif
#ifdef CONFIG_SYSPROF_TRACER
	init_tracer_sysprof_debugfs(d_tracer);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_debugfs_percpu(cpu);

	return 0;
}

static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump();
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call	= trace_panic_handler,
	.next		= NULL,
	.priority	= 150	/* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};

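/*
 * Note (assuming standard notifier-chain semantics, where higher
 * priority callbacks run first): the values 150 and 200 above are meant
 * to get the ftrace dump out before most other panic/die handlers run.
 */
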
/*
 * printk is limited to a maximum of 1024 bytes; we really don't need
 * it that big.  Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define KERN_TRACE here so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

static void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->len >= TRACE_MAX_PRINT)
		s->len = TRACE_MAX_PRINT;

	/* should be zero ended, but we are paranoid. */
	s->buffer[s->len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

void ftrace_dump(void)
{
	static DEFINE_SPINLOCK(ftrace_dump_lock);
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static int dump_ran;
	unsigned long flags;
	int cnt = 0, cpu;

	/* only one dump */
	spin_lock_irqsave(&ftrace_dump_lock, flags);
	if (dump_ran)
		goto out;

	dump_ran = 1;

	/* No turning back! */
	tracing_off();
	ftrace_kill();

	for_each_tracing_cpu(cpu) {
		atomic_inc(&global_trace.data[cpu]->disabled);
	}

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Simulate the iterator */
	iter.tr = &global_trace;
	iter.trace = current_trace;
	iter.cpu_file = TRACE_PIPE_ALL_CPU;

	/*
	 * We need to stop all tracing on all CPUs to read
	 * the next buffer.  This is a bit expensive, but is
	 * not done often.  We fill what we can read, and then
	 * release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (find_next_entry_inc(&iter) != NULL) {
			print_trace_line(&iter);
			trace_consume(&iter);
		}

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out:
	spin_unlock_irqrestore(&ftrace_dump_lock, flags);
}

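/*
 * ftrace_dump() is invoked from the panic and die notifiers above when
 * ftrace_dump_on_oops is set; the static dump_ran flag ensures only the
 * first caller actually prints the buffer.
 */
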
__init static int tracer_alloc_buffers(void)
{
	struct trace_array_cpu *data;
	int ring_buf_size;
	int i;
	int ret = -ENOMEM;

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	if (!alloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
		goto out_free_tracing_cpumask;

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(tracing_cpumask, cpu_all_mask);
	cpumask_clear(tracing_reader_cpumask);

	/* TODO: make the number of buffers hot pluggable with CPUs */
	global_trace.buffer = ring_buffer_alloc(ring_buf_size,
						TRACE_BUFFER_FLAGS);
	if (!global_trace.buffer) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_cpumask;
	}
	global_trace.entries = ring_buffer_size(global_trace.buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	max_tr.buffer = ring_buffer_alloc(ring_buf_size,
					  TRACE_BUFFER_FLAGS);
	if (!max_tr.buffer) {
		printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
		WARN_ON(1);
		ring_buffer_free(global_trace.buffer);
		goto out_free_cpumask;
	}
	max_tr.entries = ring_buffer_size(max_tr.buffer);
	WARN_ON(max_tr.entries != global_trace.entries);
#endif

	/* Allocate the first page for all buffers */
	for_each_tracing_cpu(i) {
		data = global_trace.data[i] = &per_cpu(global_trace_cpu, i);
		max_tr.data[i] = &per_cpu(max_data, i);
	}

	trace_init_cmdlines();

	register_tracer(&nop_trace);
	current_trace = &nop_trace;
#ifdef CONFIG_BOOT_TRACER
	register_tracer(&boot_tracer);
#endif
	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	return 0;

out_free_cpumask:
	free_cpumask_var(tracing_reader_cpumask);
out_free_tracing_cpumask:
	free_cpumask_var(tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The name of the default boot-up tracer lives in an init
	 * section that is freed after boot.  This function runs as a
	 * late initcall; if the boot tracer was never registered,
	 * clear the pointer so a later registration cannot access the
	 * buffer that is about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);

	default_bootup_tracer = NULL;

	return 0;
}

early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);
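
/*
 * Initcall ordering: tracer_alloc_buffers() runs at early_initcall time
 * so the ring buffer exists before anything traces; the debugfs files
 * appear once filesystems are available (fs_initcall); and the stale
 * boot-tracer name is cleared at late_initcall, after every tracer has
 * had a chance to register.
 */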