ring_buffer.c

/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/ftrace_irq.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/fs.h>

#include <asm/local.h>
#include "trace.h"

/*
 * The ring buffer header is special. We must manually keep it up to date.
 */
int ring_buffer_print_entry_header(struct trace_seq *s)
{
	int ret;

	ret = trace_seq_printf(s, "# compressed entry header\n");
	ret = trace_seq_printf(s, "\ttype_len : 5 bits\n");
	ret = trace_seq_printf(s, "\ttime_delta : 27 bits\n");
	ret = trace_seq_printf(s, "\tarray : 32 bits\n");
	ret = trace_seq_printf(s, "\n");
	ret = trace_seq_printf(s, "\tpadding : type == %d\n",
			       RINGBUF_TYPE_PADDING);
	ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
			       RINGBUF_TYPE_TIME_EXTEND);
	ret = trace_seq_printf(s, "\tdata max type_len == %d\n",
			       RINGBUF_TYPE_DATA_TYPE_LEN_MAX);

	return ret;
}
/*
 * The ring buffer is made up of a list of pages. A separate list of pages is
 * allocated for each CPU. A writer may only write to a buffer that is
 * associated with the CPU it is currently executing on. A reader may read
 * from any per cpu buffer.
 *
 * The reader is special. For each per cpu buffer, the reader has its own
 * reader page. When a reader has read the entire reader page, this reader
 * page is swapped with another page in the ring buffer.
 *
 * Now, as long as the writer is off the reader page, the reader can do
 * whatever it wants with that page. The writer will never write to that
 * page again (as long as it is out of the ring buffer).
 *
 * Here's some silly ASCII art.
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |-->|   |-->|   |
 *      |            +---+   +---+   +---+
 *      |                              |
 *      |                              |
 *      +------------------------------+
 *
 *
 *   +------+
 *   |buffer|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |   |   |-->|   |
 *      |   New      +---+   +---+   +---+
 *      |  Reader------^               |
 *      |   page                       |
 *      +------------------------------+
 *
 *
 * After we make this swap, the reader can hand this page off to the splice
 * code and be done with it. It can even allocate a new page if it needs to
 * and swap that into the ring buffer.
 *
 * We will be using cmpxchg soon to make all this lockless.
 *
 */
/*
 * A fast way to enable or disable all ring buffers is to
 * call tracing_on or tracing_off. Turning off the ring buffers
 * prevents all ring buffers from being recorded to.
 * Turning this switch on makes it OK to write to the
 * ring buffer, if the ring buffer is enabled itself.
 *
 * There are three layers that must be on in order to write
 * to the ring buffer.
 *
 * 1) This global flag must be set.
 * 2) The ring buffer must be enabled for recording.
 * 3) The per cpu buffer must be enabled for recording.
 *
 * In case of an anomaly, this global flag has a bit set that
 * will permanently disable all ring buffers.
 */

/*
 * Global flag to disable all recording to ring buffers
 * This has two bits: ON, DISABLED
 *
 *  ON   DISABLED
 * ---- ----------
 *   0      0      : ring buffers are off
 *   1      0      : ring buffers are on
 *   X      1      : ring buffers are permanently disabled
 */

enum {
	RB_BUFFERS_ON_BIT	= 0,
	RB_BUFFERS_DISABLED_BIT	= 1,
};

enum {
	RB_BUFFERS_ON		= 1 << RB_BUFFERS_ON_BIT,
	RB_BUFFERS_DISABLED	= 1 << RB_BUFFERS_DISABLED_BIT,
};

static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;

#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
/**
 * tracing_on - enable all tracing buffers
 *
 * This function enables all tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * tracing_off - turn off all tracing buffers
 *
 * This function stops all tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
}
EXPORT_SYMBOL_GPL(tracing_off);

/**
 * tracing_off_permanent - permanently disable ring buffers
 *
 * This function, once called, will disable all ring buffers
 * permanently.
 */
void tracing_off_permanent(void)
{
	set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return ring_buffer_flags == RB_BUFFERS_ON;
}
EXPORT_SYMBOL_GPL(tracing_is_on);
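
/*
 * Illustrative sketch (not part of the original file): a debugging helper
 * might bracket a suspect code path with the switches above so that only
 * that window is recorded. The helper and do_suspect_work() below are made
 * up for the example.
 */
#if 0
static void example_trace_window(void)
{
	tracing_on();		/* allow recording into all ring buffers */
	do_suspect_work();	/* hypothetical code path under investigation */
	tracing_off();		/* stop recording; buffers keep their contents */
}
#endif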
#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT		4U
#define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
#define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */

#if !defined(CONFIG_64BIT) || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
# define RB_FORCE_8BYTE_ALIGNMENT	0
# define RB_ARCH_ALIGNMENT		RB_ALIGNMENT
#else
# define RB_FORCE_8BYTE_ALIGNMENT	1
# define RB_ARCH_ALIGNMENT		8U
#endif

/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX

enum {
	RB_LEN_TIME_EXTEND = 8,
	RB_LEN_TIME_STAMP = 16,
};

static inline int rb_null_event(struct ring_buffer_event *event)
{
	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
}

static void rb_event_set_padding(struct ring_buffer_event *event)
{
	/* padding has a NULL time_delta */
	event->type_len = RINGBUF_TYPE_PADDING;
	event->time_delta = 0;
}

static unsigned
rb_event_data_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->type_len)
		length = event->type_len * RB_ALIGNMENT;
	else
		length = event->array[0];
	return length + RB_EVNT_HDR_SIZE;
}

/* inline for ring buffer fast paths */
static unsigned
rb_event_length(struct ring_buffer_event *event)
{
	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			/* undefined */
			return -1;
		return event->array[0] + RB_EVNT_HDR_SIZE;

	case RINGBUF_TYPE_TIME_EXTEND:
		return RB_LEN_TIME_EXTEND;

	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;

	case RINGBUF_TYPE_DATA:
		return rb_event_data_length(event);
	default:
		BUG();
	}
	/* not hit */
	return 0;
}

/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
	unsigned length = rb_event_length(event);

	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
		return length;
	length -= RB_EVNT_HDR_SIZE;
	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
		length -= sizeof(event->array[0]);
	return length;
}
EXPORT_SYMBOL_GPL(ring_buffer_event_length);

/* inline for ring buffer fast paths */
static void *
rb_event_data(struct ring_buffer_event *event)
{
	BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
	/* If length is in len field, then array[0] has the data */
	if (event->type_len)
		return (void *)&event->array[0];
	/* Otherwise length is in array[0] and array[1] has the data */
	return (void *)&event->array[1];
}

/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
	return rb_event_data(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_data);
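
/*
 * Illustrative sketch (not part of the original file): how the two length
 * encodings above behave. A small data event stores its payload size in
 * type_len (in RB_ALIGNMENT units) and its payload at array[0]; a large one
 * has type_len == 0, the size in array[0], and the payload at array[1].
 * ring_buffer_event_length() and ring_buffer_event_data() hide that split
 * from callers.
 */
#if 0
static void example_event_layout(struct ring_buffer_event *event)
{
	/* e.g. type_len == 3 means 3 * RB_ALIGNMENT == 12 bytes of payload */
	unsigned payload_len = ring_buffer_event_length(event);
	void *payload = ring_buffer_event_data(event);

	(void)payload_len;
	(void)payload;
}
#endif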
#define for_each_buffer_cpu(buffer, cpu)		\
	for_each_cpu(cpu, buffer->cpumask)

#define TS_SHIFT	27
#define TS_MASK		((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST	(~TS_MASK)

struct buffer_data_page {
	u64		 time_stamp;	/* page time stamp */
	local_t		 commit;	/* write committed index */
	unsigned char	 data[];	/* data of buffer page */
};

/*
 * Note, the buffer_page list must be first. The buffer pages
 * are allocated in cache lines, which means that each buffer
 * page will be at the beginning of a cache line, and thus
 * the least significant bits will be zero. We use this to
 * add flags in the list struct pointers, to make the ring buffer
 * lockless.
 */
struct buffer_page {
	struct list_head list;		/* list of buffer pages */
	local_t		 write;		/* index for next write */
	unsigned	 read;		/* index for next read */
	local_t		 entries;	/* entries on this page */
	struct buffer_data_page *page;	/* Actual data page */
};

/*
 * The buffer page counters, write and entries, must be reset
 * atomically when crossing page boundaries. To synchronize this
 * update, two counters are inserted into the number. One is
 * the actual counter for the write position or count on the page.
 *
 * The other is a counter of updaters. Before an update happens
 * the update partition of the counter is incremented. This will
 * allow the updater to update the counter atomically.
 *
 * The counter is 20 bits, and the state data is 12.
 */
#define RB_WRITE_MASK		0xfffff
#define RB_WRITE_INTCNT		(1 << 20)
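
/*
 * Illustrative sketch (not part of the original file): the write field is a
 * split counter. The low 20 bits are the write index into the page; the top
 * 12 bits count in-flight updaters and are bumped by adding RB_WRITE_INTCNT.
 */
#if 0
static void example_split_counter(struct buffer_page *bpage)
{
	unsigned long raw = local_read(&bpage->write);
	unsigned long index = raw & RB_WRITE_MASK;	/* write position */
	unsigned long updaters = raw >> 20;		/* nested updaters */

	(void)index;
	(void)updaters;
}
#endif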
static void rb_init_page(struct buffer_data_page *bpage)
{
	local_set(&bpage->commit, 0);
}

/**
 * ring_buffer_page_len - the size of data on the page.
 * @page: The page to read
 *
 * Returns the amount of data on the page, including buffer page header.
 */
size_t ring_buffer_page_len(void *page)
{
	return local_read(&((struct buffer_data_page *)page)->commit)
		+ BUF_PAGE_HDR_SIZE;
}

/*
 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 * this issue out.
 */
static void free_buffer_page(struct buffer_page *bpage)
{
	free_page((unsigned long)bpage->page);
	kfree(bpage);
}

/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline int test_time_stamp(u64 delta)
{
	if (delta & TS_DELTA_TEST)
		return 1;
	return 0;
}

#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)

/* Max payload is BUF_PAGE_SIZE - header (8bytes) */
#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))

/* Max number of timestamps that can fit on a page */
#define RB_TIMESTAMPS_PER_PAGE	(BUF_PAGE_SIZE / RB_LEN_TIME_STAMP)

int ring_buffer_print_page_header(struct trace_seq *s)
{
	struct buffer_data_page field;
	int ret;

	ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
			       "offset:0;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)sizeof(field.time_stamp),
			       (unsigned int)is_signed_type(u64));

	ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)offsetof(typeof(field), commit),
			       (unsigned int)sizeof(field.commit),
			       (unsigned int)is_signed_type(long));

	ret = trace_seq_printf(s, "\tfield: char data;\t"
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)offsetof(typeof(field), data),
			       (unsigned int)BUF_PAGE_SIZE,
			       (unsigned int)is_signed_type(char));

	return ret;
}

/*
 * head_page == tail_page && head == tail then buffer is empty.
 */
struct ring_buffer_per_cpu {
	int				cpu;
	struct ring_buffer		*buffer;
	spinlock_t			reader_lock;	/* serialize readers */
	arch_spinlock_t			lock;
	struct lock_class_key		lock_key;
	struct list_head		*pages;
	struct buffer_page		*head_page;	/* read from head */
	struct buffer_page		*tail_page;	/* write to tail */
	struct buffer_page		*commit_page;	/* committed pages */
	struct buffer_page		*reader_page;
	local_t				commit_overrun;
	local_t				overrun;
	local_t				entries;
	local_t				committing;
	local_t				commits;
	unsigned long			read;
	u64				write_stamp;
	u64				read_stamp;
	atomic_t			record_disabled;
};
struct ring_buffer {
	unsigned			pages;
	unsigned			flags;
	int				cpus;
	atomic_t			record_disabled;
	cpumask_var_t			cpumask;

	struct lock_class_key		*reader_lock_key;

	struct mutex			mutex;

	struct ring_buffer_per_cpu	**buffers;

#ifdef CONFIG_HOTPLUG_CPU
	struct notifier_block		cpu_notify;
#endif
	u64				(*clock)(void);
};

struct ring_buffer_iter {
	struct ring_buffer_per_cpu	*cpu_buffer;
	unsigned long			head;
	struct buffer_page		*head_page;
	struct buffer_page		*cache_reader_page;
	unsigned long			cache_read;
	u64				read_stamp;
};

/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(b, cond)						\
	({								\
		int _____ret = unlikely(cond);				\
		if (_____ret) {						\
			if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
				struct ring_buffer_per_cpu *__b =	\
					(void *)b;			\
				atomic_inc(&__b->buffer->record_disabled); \
			} else						\
				atomic_inc(&b->record_disabled);	\
			WARN_ON(1);					\
		}							\
		_____ret;						\
	})

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

static inline u64 rb_time_stamp(struct ring_buffer *buffer)
{
	/* shift to debug/test normalization and TIME_EXTENTS */
	return buffer->clock() << DEBUG_SHIFT;
}

u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
{
	u64 time;

	preempt_disable_notrace();
	time = rb_time_stamp(buffer);
	preempt_enable_no_resched_notrace();

	return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);

void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
				      int cpu, u64 *ts)
{
	/* Just stupid testing the normalize function and deltas */
	*ts >>= DEBUG_SHIFT;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
/*
 * Making the ring buffer lockless makes things tricky.
 * Writes only happen on the CPU that they are on, so they only
 * need to worry about interrupts. Reads, however, can happen
 * on any CPU.
 *
 * The reader page is always off the ring buffer, but when the
 * reader finishes with a page, it needs to swap its page with
 * a new one from the buffer. The reader needs to take from
 * the head (writes go to the tail). But if a writer is in overwrite
 * mode and wraps, it must push the head page forward.
 *
 * Here lies the problem.
 *
 * The reader must be careful to replace only the head page, and
 * not another one. As described at the top of the file in the
 * ASCII art, the reader sets its old page to point to the next
 * page after head. It then sets the page after head to point to
 * the old reader page. But if the writer moves the head page
 * during this operation, the reader could end up with the tail.
 *
 * We use cmpxchg to help prevent this race. We also do something
 * special with the page before head. We set the LSB to 1.
 *
 * When the writer must push the page forward, it will clear the
 * bit that points to the head page, move the head, and then set
 * the bit that points to the new head page.
 *
 * We also don't want an interrupt coming in and moving the head
 * page on another writer. Thus we use the second LSB to catch
 * that too. Thus:
 *
 * head->list->prev->next        bit 1          bit 0
 * -------                       -------
 * Normal page                     0              0
 * Points to head page             0              1
 * New head page                   1              0
 *
 * Note we can not trust the prev pointer of the head page, because:
 *
 * +----+       +-----+        +-----+
 * |    |------>|  T  |---X--->|  N  |
 * |    |<------|     |        |     |
 * +----+       +-----+        +-----+
 *   ^                           ^ |
 *   |          +-----+          | |
 *   +----------|  R  |----------+ |
 *              |     |<-----------+
 *              +-----+
 *
 * Key:  ---X-->  HEAD flag set in pointer
 *         T      Tail page
 *         R      Reader page
 *         N      Next page
 *
 * (see __rb_reserve_next() to see where this happens)
 *
 * What the above shows is that the reader just swapped out
 * the reader page with a page in the buffer, but before it
 * could make the new header point back to the new page added
 * it was preempted by a writer. The writer moved forward onto
 * the new page added by the reader and is about to move forward
 * again.
 *
 * You can see, it is legitimate for the previous pointer of
 * the head (or any page) not to point back to itself. But only
 * temporarily.
 */
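
/*
 * Illustrative sketch (not part of the original file): the flag values
 * defined below live in the two least significant bits of a ->next pointer,
 * which are free because buffer pages are cache-line aligned. Masking off
 * the low bits recovers the real pointer; keeping only the low bits
 * recovers the state.
 */
#if 0
static void example_flagged_pointer(struct list_head *next)
{
	unsigned long val = (unsigned long)next;
	struct list_head *real = (struct list_head *)(val & ~3UL);
	unsigned long state = val & 3UL;	/* 1 == head, 2 == update */

	(void)real;
	(void)state;
}
#endif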
#define RB_PAGE_NORMAL		0UL
#define RB_PAGE_HEAD		1UL
#define RB_PAGE_UPDATE		2UL

#define RB_FLAG_MASK		3UL

/* PAGE_MOVED is not part of the mask */
#define RB_PAGE_MOVED		4UL

/*
 * rb_list_head - remove any bit
 */
static struct list_head *rb_list_head(struct list_head *list)
{
	unsigned long val = (unsigned long)list;

	return (struct list_head *)(val & ~RB_FLAG_MASK);
}

/*
 * rb_is_head_page - test if the given page is the head page
 *
 * Because the reader may move the head_page pointer, we can
 * not trust what the head page is (it may be pointing to
 * the reader page). But if the next page is a header page,
 * its flags will be non zero.
 */
static inline int
rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
		struct buffer_page *page, struct list_head *list)
{
	unsigned long val;

	val = (unsigned long)list->next;

	if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
		return RB_PAGE_MOVED;

	return val & RB_FLAG_MASK;
}

/*
 * rb_is_reader_page
 *
 * The unique thing about the reader page is that, if the
 * writer is ever on it, the previous pointer never points
 * back to the reader page.
 */
static int rb_is_reader_page(struct buffer_page *page)
{
	struct list_head *list = page->list.prev;

	return rb_list_head(list->next) != &page->list;
}
/*
 * rb_set_list_to_head - set a list_head to be pointing to head.
 */
static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
				struct list_head *list)
{
	unsigned long *ptr;

	ptr = (unsigned long *)&list->next;
	*ptr |= RB_PAGE_HEAD;
	*ptr &= ~RB_PAGE_UPDATE;
}

/*
 * rb_head_page_activate - sets up head page
 */
static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;

	head = cpu_buffer->head_page;
	if (!head)
		return;

	/*
	 * Set the previous list pointer to have the HEAD flag.
	 */
	rb_set_list_to_head(cpu_buffer, head->list.prev);
}

static void rb_list_head_clear(struct list_head *list)
{
	unsigned long *ptr = (unsigned long *)&list->next;

	*ptr &= ~RB_FLAG_MASK;
}

/*
 * rb_head_page_deactivate - clears head page ptr (for free list)
 */
static void
rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *hd;

	/* Go through the whole list and clear any pointers found. */
	rb_list_head_clear(cpu_buffer->pages);

	list_for_each(hd, cpu_buffer->pages)
		rb_list_head_clear(hd);
}

static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
			    struct buffer_page *head,
			    struct buffer_page *prev,
			    int old_flag, int new_flag)
{
	struct list_head *list;
	unsigned long val = (unsigned long)&head->list;
	unsigned long ret;

	list = &prev->list;

	val &= ~RB_FLAG_MASK;

	ret = cmpxchg((unsigned long *)&list->next,
		      val | old_flag, val | new_flag);

	/* check if the reader took the page */
	if ((ret & ~RB_FLAG_MASK) != val)
		return RB_PAGE_MOVED;

	return ret & RB_FLAG_MASK;
}

static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_UPDATE);
}

static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
				 struct buffer_page *head,
				 struct buffer_page *prev,
				 int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_HEAD);
}

static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_NORMAL);
}

static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
			       struct buffer_page **bpage)
{
	struct list_head *p = rb_list_head((*bpage)->list.next);

	*bpage = list_entry(p, struct buffer_page, list);
}

static struct buffer_page *
rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;
	struct buffer_page *page;
	struct list_head *list;
	int i;

	if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
		return NULL;

	/* sanity check */
	list = cpu_buffer->pages;
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
		return NULL;

	page = head = cpu_buffer->head_page;
	/*
	 * It is possible that the writer moves the header behind
	 * where we started, and we miss in one loop.
	 * A second loop should grab the header, but we'll do
	 * three loops just because I'm paranoid.
	 */
	for (i = 0; i < 3; i++) {
		do {
			if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
				cpu_buffer->head_page = page;
				return page;
			}
			rb_inc_page(cpu_buffer, &page);
		} while (page != head);
	}

	RB_WARN_ON(cpu_buffer, 1);

	return NULL;
}

static int rb_head_page_replace(struct buffer_page *old,
				struct buffer_page *new)
{
	unsigned long *ptr = (unsigned long *)&old->list.prev->next;
	unsigned long val;
	unsigned long ret;

	val = *ptr & ~RB_FLAG_MASK;
	val |= RB_PAGE_HEAD;

	ret = cmpxchg(ptr, val, (unsigned long)&new->list);

	return ret == val;
}
/*
 * rb_tail_page_update - move the tail page forward
 *
 * Returns 1 if moved tail page, 0 if someone else did.
 */
static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
			       struct buffer_page *tail_page,
			       struct buffer_page *next_page)
{
	struct buffer_page *old_tail;
	unsigned long old_entries;
	unsigned long old_write;
	int ret = 0;

	/*
	 * The tail page now needs to be moved forward.
	 *
	 * We need to reset the tail page, but without messing
	 * with possible erasing of data brought in by interrupts
	 * that have moved the tail page and are currently on it.
	 *
	 * We add a counter to the write field to denote this.
	 */
	old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
	old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);

	/*
	 * Just make sure we have seen our old_write and synchronize
	 * with any interrupts that come in.
	 */
	barrier();

	/*
	 * If the tail page is still the same as what we think
	 * it is, then it is up to us to update the tail
	 * pointer.
	 */
	if (tail_page == cpu_buffer->tail_page) {
		/* Zero the write counter */
		unsigned long val = old_write & ~RB_WRITE_MASK;
		unsigned long eval = old_entries & ~RB_WRITE_MASK;

		/*
		 * This will only succeed if an interrupt did
		 * not come in and change it. In which case, we
		 * do not want to modify it.
		 *
		 * We add (void) to let the compiler know that we do not care
		 * about the return value of these functions. We use the
		 * cmpxchg to only update if an interrupt did not already
		 * do it for us. If the cmpxchg fails, we don't care.
		 */
		(void)local_cmpxchg(&next_page->write, old_write, val);
		(void)local_cmpxchg(&next_page->entries, old_entries, eval);

		/*
		 * No need to worry about races with clearing out the commit.
		 * It only can increment when a commit takes place. But that
		 * only happens in the outer most nested commit.
		 */
		local_set(&next_page->page->commit, 0);

		old_tail = cmpxchg(&cpu_buffer->tail_page,
				   tail_page, next_page);

		if (old_tail == tail_page)
			ret = 1;
	}

	return ret;
}

static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
			  struct buffer_page *bpage)
{
	unsigned long val = (unsigned long)bpage;

	if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
		return 1;

	return 0;
}

/**
 * rb_check_list - make sure a pointer to a list has the last bits zero
 */
static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
			 struct list_head *list)
{
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
		return 1;
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
		return 1;
	return 0;
}

/**
 * rb_check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
 */
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	rb_head_page_deactivate(cpu_buffer);

	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
		return -1;
	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
		return -1;

	if (rb_check_list(cpu_buffer, head))
		return -1;

	list_for_each_entry_safe(bpage, tmp, head, list) {
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.next->prev != &bpage->list))
			return -1;
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.prev->next != &bpage->list))
			return -1;
		if (rb_check_list(cpu_buffer, &bpage->list))
			return -1;
	}

	rb_head_page_activate(cpu_buffer);

	return 0;
}
static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			     unsigned nr_pages)
{
	struct buffer_page *bpage, *tmp;
	unsigned long addr;
	LIST_HEAD(pages);
	unsigned i;

	WARN_ON(!nr_pages);

	for (i = 0; i < nr_pages; i++) {
		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
				     GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
		if (!bpage)
			goto free_pages;

		rb_check_bpage(cpu_buffer, bpage);

		list_add(&bpage->list, &pages);

		addr = __get_free_page(GFP_KERNEL);
		if (!addr)
			goto free_pages;
		bpage->page = (void *)addr;
		rb_init_page(bpage->page);
	}

	/*
	 * The ring buffer page list is a circular list that does not
	 * start and end with a list head. All page list items point to
	 * other pages.
	 */
	cpu_buffer->pages = pages.next;
	list_del(&pages);

	rb_check_pages(cpu_buffer);

	return 0;

 free_pages:
	list_for_each_entry_safe(bpage, tmp, &pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	return -ENOMEM;
}

static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *bpage;
	unsigned long addr;
	int ret;

	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
				  GFP_KERNEL, cpu_to_node(cpu));
	if (!cpu_buffer)
		return NULL;

	cpu_buffer->cpu = cpu;
	cpu_buffer->buffer = buffer;
	spin_lock_init(&cpu_buffer->reader_lock);
	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
			     GFP_KERNEL, cpu_to_node(cpu));
	if (!bpage)
		goto fail_free_buffer;

	rb_check_bpage(cpu_buffer, bpage);

	cpu_buffer->reader_page = bpage;
	addr = __get_free_page(GFP_KERNEL);
	if (!addr)
		goto fail_free_reader;
	bpage->page = (void *)addr;
	rb_init_page(bpage->page);

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);

	ret = rb_allocate_pages(cpu_buffer, buffer->pages);
	if (ret < 0)
		goto fail_free_reader;

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages, struct buffer_page, list);
	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;

	rb_head_page_activate(cpu_buffer);

	return cpu_buffer;

 fail_free_reader:
	free_buffer_page(cpu_buffer->reader_page);

 fail_free_buffer:
	kfree(cpu_buffer);
	return NULL;
}

static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	free_buffer_page(cpu_buffer->reader_page);

	rb_head_page_deactivate(cpu_buffer);

	if (head) {
		list_for_each_entry_safe(bpage, tmp, head, list) {
			list_del_init(&bpage->list);
			free_buffer_page(bpage);
		}
		bpage = list_entry(head, struct buffer_page, list);
		free_buffer_page(bpage);
	}

	kfree(cpu_buffer);
}

#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu);
#endif

/**
 * ring_buffer_alloc - allocate a new ring_buffer
 * @size: the size in bytes per cpu that is needed.
 * @flags: attributes to set for the ring buffer.
 *
 * Currently the only flag that is available is the RB_FL_OVERWRITE
 * flag. This flag means that the buffer will overwrite old data
 * when the buffer wraps. If this flag is not set, the buffer will
 * drop data when the tail hits the head.
 */
struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
					struct lock_class_key *key)
{
	struct ring_buffer *buffer;
	int bsize;
	int cpu;

	/* keep it in its own cache line */
	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
			 GFP_KERNEL);
	if (!buffer)
		return NULL;

	if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
		goto fail_free_buffer;

	buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	buffer->flags = flags;
	buffer->clock = trace_clock_local;
	buffer->reader_lock_key = key;

	/* need at least two pages */
	if (buffer->pages < 2)
		buffer->pages = 2;

	/*
	 * In case of non-hotplug cpu, if the ring-buffer is allocated
	 * in early initcall, it will not be notified of secondary cpus.
	 * In that case, we need to allocate for all possible cpus.
	 */
#ifdef CONFIG_HOTPLUG_CPU
	get_online_cpus();
	cpumask_copy(buffer->cpumask, cpu_online_mask);
#else
	cpumask_copy(buffer->cpumask, cpu_possible_mask);
#endif
	buffer->cpus = nr_cpu_ids;

	bsize = sizeof(void *) * nr_cpu_ids;
	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
				  GFP_KERNEL);
	if (!buffer->buffers)
		goto fail_free_cpumask;

	for_each_buffer_cpu(buffer, cpu) {
		buffer->buffers[cpu] =
			rb_allocate_cpu_buffer(buffer, cpu);
		if (!buffer->buffers[cpu])
			goto fail_free_buffers;
	}

#ifdef CONFIG_HOTPLUG_CPU
	buffer->cpu_notify.notifier_call = rb_cpu_notify;
	buffer->cpu_notify.priority = 0;
	register_cpu_notifier(&buffer->cpu_notify);
#endif

	put_online_cpus();
	mutex_init(&buffer->mutex);

	return buffer;

 fail_free_buffers:
	for_each_buffer_cpu(buffer, cpu) {
		if (buffer->buffers[cpu])
			rb_free_cpu_buffer(buffer->buffers[cpu]);
	}
	kfree(buffer->buffers);

 fail_free_cpumask:
	free_cpumask_var(buffer->cpumask);
	put_online_cpus();

 fail_free_buffer:
	kfree(buffer);
	return NULL;
}
EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
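
/*
 * Illustrative usage sketch (not part of the original file): allocating a
 * one-megabyte-per-cpu overwriting buffer, reserving and committing one
 * event, then freeing the buffer. The integer payload is made up for the
 * example; ring_buffer_alloc() is the wrapper macro around
 * __ring_buffer_alloc() provided by linux/ring_buffer.h.
 */
#if 0
static int example_ring_buffer_usage(void)
{
	struct ring_buffer *rb;
	struct ring_buffer_event *event;
	int *payload;

	rb = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
	if (!rb)
		return -ENOMEM;

	event = ring_buffer_lock_reserve(rb, sizeof(*payload));
	if (event) {
		payload = ring_buffer_event_data(event);
		*payload = 42;
		ring_buffer_unlock_commit(rb, event);
	}

	ring_buffer_free(rb);
	return 0;
}
#endif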
/**
 * ring_buffer_free - free a ring buffer.
 * @buffer: the buffer to free.
 */
void
ring_buffer_free(struct ring_buffer *buffer)
{
	int cpu;

	get_online_cpus();

#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu_notifier(&buffer->cpu_notify);
#endif

	for_each_buffer_cpu(buffer, cpu)
		rb_free_cpu_buffer(buffer->buffers[cpu]);

	put_online_cpus();

	kfree(buffer->buffers);
	free_cpumask_var(buffer->cpumask);

	kfree(buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_free);

void ring_buffer_set_clock(struct ring_buffer *buffer,
			   u64 (*clock)(void))
{
	buffer->clock = clock;
}

static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);

static void
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
{
	struct buffer_page *bpage;
	struct list_head *p;
	unsigned i;

	spin_lock_irq(&cpu_buffer->reader_lock);
	rb_head_page_deactivate(cpu_buffer);

	for (i = 0; i < nr_pages; i++) {
		if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
			goto out;
		p = cpu_buffer->pages->next;
		bpage = list_entry(p, struct buffer_page, list);
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
		goto out;

	rb_reset_cpu(cpu_buffer);
	rb_check_pages(cpu_buffer);

out:
	spin_unlock_irq(&cpu_buffer->reader_lock);
}

static void
rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
		struct list_head *pages, unsigned nr_pages)
{
	struct buffer_page *bpage;
	struct list_head *p;
	unsigned i;

	spin_lock_irq(&cpu_buffer->reader_lock);
	rb_head_page_deactivate(cpu_buffer);

	for (i = 0; i < nr_pages; i++) {
		if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
			goto out;
		p = pages->next;
		bpage = list_entry(p, struct buffer_page, list);
		list_del_init(&bpage->list);
		list_add_tail(&bpage->list, cpu_buffer->pages);
	}
	rb_reset_cpu(cpu_buffer);
	rb_check_pages(cpu_buffer);

out:
	spin_unlock_irq(&cpu_buffer->reader_lock);
}

/**
 * ring_buffer_resize - resize the ring buffer
 * @buffer: the buffer to resize.
 * @size: the new size.
 *
 * Minimum size is 2 * BUF_PAGE_SIZE.
 *
 * Returns -1 on failure.
 */
int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned nr_pages, rm_pages, new_pages;
	struct buffer_page *bpage, *tmp;
	unsigned long buffer_size;
	unsigned long addr;
	LIST_HEAD(pages);
	int i, cpu;

	/*
	 * Always succeed at resizing a non-existent buffer:
	 */
	if (!buffer)
		return size;

	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	size *= BUF_PAGE_SIZE;
	buffer_size = buffer->pages * BUF_PAGE_SIZE;

	/* we need a minimum of two pages */
	if (size < BUF_PAGE_SIZE * 2)
		size = BUF_PAGE_SIZE * 2;

	if (size == buffer_size)
		return size;

	atomic_inc(&buffer->record_disabled);

	/* Make sure all writers are done with this buffer. */
	synchronize_sched();

	mutex_lock(&buffer->mutex);
	get_online_cpus();

	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);

	if (size < buffer_size) {

		/* easy case, just free pages */
		if (RB_WARN_ON(buffer, nr_pages >= buffer->pages))
			goto out_fail;

		rm_pages = buffer->pages - nr_pages;

		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];
			rb_remove_pages(cpu_buffer, rm_pages);
		}
		goto out;
	}

	/*
	 * This is a bit more difficult. We only want to add pages
	 * when we can allocate enough for all CPUs. We do this
	 * by allocating all the pages and storing them on a local
	 * linked list. If we succeed in our allocation, then we
	 * add these pages to the cpu_buffers. Otherwise we just free
	 * them all and return -ENOMEM;
	 */
	if (RB_WARN_ON(buffer, nr_pages <= buffer->pages))
		goto out_fail;

	new_pages = nr_pages - buffer->pages;

	for_each_buffer_cpu(buffer, cpu) {
		for (i = 0; i < new_pages; i++) {
			bpage = kzalloc_node(ALIGN(sizeof(*bpage),
						   cache_line_size()),
					     GFP_KERNEL, cpu_to_node(cpu));
			if (!bpage)
				goto free_pages;
			list_add(&bpage->list, &pages);
			addr = __get_free_page(GFP_KERNEL);
			if (!addr)
				goto free_pages;
			bpage->page = (void *)addr;
			rb_init_page(bpage->page);
		}
	}

	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		rb_insert_pages(cpu_buffer, &pages, new_pages);
	}

	if (RB_WARN_ON(buffer, !list_empty(&pages)))
		goto out_fail;

 out:
	buffer->pages = nr_pages;
	put_online_cpus();
	mutex_unlock(&buffer->mutex);

	atomic_dec(&buffer->record_disabled);

	return size;

 free_pages:
	list_for_each_entry_safe(bpage, tmp, &pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	put_online_cpus();
	mutex_unlock(&buffer->mutex);
	atomic_dec(&buffer->record_disabled);
	return -ENOMEM;

	/*
	 * Something went totally wrong, and we are too paranoid
	 * to even clean up the mess.
	 */
 out_fail:
	put_online_cpus();
	mutex_unlock(&buffer->mutex);
	atomic_dec(&buffer->record_disabled);
	return -1;
}
EXPORT_SYMBOL_GPL(ring_buffer_resize);
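
/*
 * Illustrative sketch (not part of the original file): growing an existing
 * buffer to roughly four pages per cpu. The requested size is rounded up to
 * whole pages and clamped to the two-page minimum described above.
 */
#if 0
static int example_resize(struct ring_buffer *rb)
{
	int ret = ring_buffer_resize(rb, 4 * BUF_PAGE_SIZE);

	return ret < 0 ? ret : 0;
}
#endif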
  1174. static inline void *
  1175. __rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
  1176. {
  1177. return bpage->data + index;
  1178. }
  1179. static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
  1180. {
  1181. return bpage->page->data + index;
  1182. }
  1183. static inline struct ring_buffer_event *
  1184. rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
  1185. {
  1186. return __rb_page_index(cpu_buffer->reader_page,
  1187. cpu_buffer->reader_page->read);
  1188. }
  1189. static inline struct ring_buffer_event *
  1190. rb_iter_head_event(struct ring_buffer_iter *iter)
  1191. {
  1192. return __rb_page_index(iter->head_page, iter->head);
  1193. }
  1194. static inline unsigned long rb_page_write(struct buffer_page *bpage)
  1195. {
  1196. return local_read(&bpage->write) & RB_WRITE_MASK;
  1197. }
  1198. static inline unsigned rb_page_commit(struct buffer_page *bpage)
  1199. {
  1200. return local_read(&bpage->page->commit);
  1201. }
  1202. static inline unsigned long rb_page_entries(struct buffer_page *bpage)
  1203. {
  1204. return local_read(&bpage->entries) & RB_WRITE_MASK;
  1205. }
1206. /* Size is determined by what has been committed */
  1207. static inline unsigned rb_page_size(struct buffer_page *bpage)
  1208. {
  1209. return rb_page_commit(bpage);
  1210. }
  1211. static inline unsigned
  1212. rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
  1213. {
  1214. return rb_page_commit(cpu_buffer->commit_page);
  1215. }
  1216. static inline unsigned
  1217. rb_event_index(struct ring_buffer_event *event)
  1218. {
  1219. unsigned long addr = (unsigned long)event;
  1220. return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
  1221. }
  1222. static inline int
  1223. rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
  1224. struct ring_buffer_event *event)
  1225. {
  1226. unsigned long addr = (unsigned long)event;
  1227. unsigned long index;
  1228. index = rb_event_index(event);
  1229. addr &= PAGE_MASK;
  1230. return cpu_buffer->commit_page->page == (void *)addr &&
  1231. rb_commit_index(cpu_buffer) == index;
  1232. }
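/*
 * Worked example (editor's addition): rb_event_index() reduces an event
 * pointer to its offset inside the data area of its page. Assuming a page
 * header (BUF_PAGE_HDR_SIZE) of 16 bytes, an event that starts 0x50 bytes
 * into its page yields an index of 0x40. rb_event_is_commit() then checks
 * that the event's page is the commit page and that this index equals the
 * current commit index, i.e. that the event is the next one to be committed.
 */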
  1233. static void
  1234. rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
  1235. {
  1236. unsigned long max_count;
  1237. /*
  1238. * We only race with interrupts and NMIs on this CPU.
  1239. * If we own the commit event, then we can commit
  1240. * all others that interrupted us, since the interruptions
  1241. * are in stack format (they finish before they come
  1242. * back to us). This allows us to do a simple loop to
  1243. * assign the commit to the tail.
  1244. */
  1245. again:
  1246. max_count = cpu_buffer->buffer->pages * 100;
  1247. while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
  1248. if (RB_WARN_ON(cpu_buffer, !(--max_count)))
  1249. return;
  1250. if (RB_WARN_ON(cpu_buffer,
  1251. rb_is_reader_page(cpu_buffer->tail_page)))
  1252. return;
  1253. local_set(&cpu_buffer->commit_page->page->commit,
  1254. rb_page_write(cpu_buffer->commit_page));
  1255. rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
  1256. cpu_buffer->write_stamp =
  1257. cpu_buffer->commit_page->page->time_stamp;
  1258. /* add barrier to keep gcc from optimizing too much */
  1259. barrier();
  1260. }
  1261. while (rb_commit_index(cpu_buffer) !=
  1262. rb_page_write(cpu_buffer->commit_page)) {
  1263. local_set(&cpu_buffer->commit_page->page->commit,
  1264. rb_page_write(cpu_buffer->commit_page));
  1265. RB_WARN_ON(cpu_buffer,
  1266. local_read(&cpu_buffer->commit_page->page->commit) &
  1267. ~RB_WRITE_MASK);
  1268. barrier();
  1269. }
  1270. /* again, keep gcc from optimizing */
  1271. barrier();
  1272. /*
  1273. * If an interrupt came in just after the first while loop
  1274. * and pushed the tail page forward, we will be left with
  1275. * a dangling commit that will never go forward.
  1276. */
  1277. if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
  1278. goto again;
  1279. }
  1280. static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
  1281. {
  1282. cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
  1283. cpu_buffer->reader_page->read = 0;
  1284. }
  1285. static void rb_inc_iter(struct ring_buffer_iter *iter)
  1286. {
  1287. struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
  1288. /*
  1289. * The iterator could be on the reader page (it starts there).
  1290. * But the head could have moved, since the reader was
  1291. * found. Check for this case and assign the iterator
  1292. * to the head page instead of next.
  1293. */
  1294. if (iter->head_page == cpu_buffer->reader_page)
  1295. iter->head_page = rb_set_head_page(cpu_buffer);
  1296. else
  1297. rb_inc_page(cpu_buffer, &iter->head_page);
  1298. iter->read_stamp = iter->head_page->page->time_stamp;
  1299. iter->head = 0;
  1300. }
  1301. /**
  1302. * ring_buffer_update_event - update event type and data
1303. * @event: the event to update
  1304. * @type: the type of event
  1305. * @length: the size of the event field in the ring buffer
  1306. *
  1307. * Update the type and data fields of the event. The length
  1308. * is the actual size that is written to the ring buffer,
  1309. * and with this, we can determine what to place into the
  1310. * data field.
  1311. */
  1312. static void
  1313. rb_update_event(struct ring_buffer_event *event,
  1314. unsigned type, unsigned length)
  1315. {
  1316. event->type_len = type;
  1317. switch (type) {
  1318. case RINGBUF_TYPE_PADDING:
  1319. case RINGBUF_TYPE_TIME_EXTEND:
  1320. case RINGBUF_TYPE_TIME_STAMP:
  1321. break;
  1322. case 0:
  1323. length -= RB_EVNT_HDR_SIZE;
  1324. if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
  1325. event->array[0] = length;
  1326. else
  1327. event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
  1328. break;
  1329. default:
  1330. BUG();
  1331. }
  1332. }
  1333. /*
  1334. * rb_handle_head_page - writer hit the head page
  1335. *
  1336. * Returns: +1 to retry page
  1337. * 0 to continue
  1338. * -1 on error
  1339. */
  1340. static int
  1341. rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
  1342. struct buffer_page *tail_page,
  1343. struct buffer_page *next_page)
  1344. {
  1345. struct buffer_page *new_head;
  1346. int entries;
  1347. int type;
  1348. int ret;
  1349. entries = rb_page_entries(next_page);
  1350. /*
  1351. * The hard part is here. We need to move the head
  1352. * forward, and protect against both readers on
  1353. * other CPUs and writers coming in via interrupts.
  1354. */
  1355. type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
  1356. RB_PAGE_HEAD);
  1357. /*
  1358. * type can be one of four:
  1359. * NORMAL - an interrupt already moved it for us
  1360. * HEAD - we are the first to get here.
  1361. * UPDATE - we are the interrupt interrupting
  1362. * a current move.
  1363. * MOVED - a reader on another CPU moved the next
  1364. * pointer to its reader page. Give up
  1365. * and try again.
  1366. */
  1367. switch (type) {
  1368. case RB_PAGE_HEAD:
  1369. /*
  1370. * We changed the head to UPDATE, thus
  1371. * it is our responsibility to update
  1372. * the counters.
  1373. */
  1374. local_add(entries, &cpu_buffer->overrun);
  1375. /*
  1376. * The entries will be zeroed out when we move the
  1377. * tail page.
  1378. */
  1379. /* still more to do */
  1380. break;
  1381. case RB_PAGE_UPDATE:
  1382. /*
1383. * This is an interrupt that interrupted the
  1384. * previous update. Still more to do.
  1385. */
  1386. break;
  1387. case RB_PAGE_NORMAL:
  1388. /*
  1389. * An interrupt came in before the update
  1390. * and processed this for us.
  1391. * Nothing left to do.
  1392. */
  1393. return 1;
  1394. case RB_PAGE_MOVED:
  1395. /*
  1396. * The reader is on another CPU and just did
  1397. * a swap with our next_page.
  1398. * Try again.
  1399. */
  1400. return 1;
  1401. default:
  1402. RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
  1403. return -1;
  1404. }
  1405. /*
  1406. * Now that we are here, the old head pointer is
  1407. * set to UPDATE. This will keep the reader from
  1408. * swapping the head page with the reader page.
  1409. * The reader (on another CPU) will spin till
  1410. * we are finished.
  1411. *
  1412. * We just need to protect against interrupts
  1413. * doing the job. We will set the next pointer
  1414. * to HEAD. After that, we set the old pointer
  1415. * to NORMAL, but only if it was HEAD before.
1416. * Otherwise we are an interrupt, and only
1417. * want the outermost commit to reset it.
  1418. */
  1419. new_head = next_page;
  1420. rb_inc_page(cpu_buffer, &new_head);
  1421. ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
  1422. RB_PAGE_NORMAL);
  1423. /*
  1424. * Valid returns are:
  1425. * HEAD - an interrupt came in and already set it.
  1426. * NORMAL - One of two things:
  1427. * 1) We really set it.
  1428. * 2) A bunch of interrupts came in and moved
  1429. * the page forward again.
  1430. */
  1431. switch (ret) {
  1432. case RB_PAGE_HEAD:
  1433. case RB_PAGE_NORMAL:
  1434. /* OK */
  1435. break;
  1436. default:
  1437. RB_WARN_ON(cpu_buffer, 1);
  1438. return -1;
  1439. }
  1440. /*
  1441. * It is possible that an interrupt came in,
  1442. * set the head up, then more interrupts came in
  1443. * and moved it again. When we get back here,
  1444. * the page would have been set to NORMAL but we
  1445. * just set it back to HEAD.
  1446. *
  1447. * How do you detect this? Well, if that happened
  1448. * the tail page would have moved.
  1449. */
  1450. if (ret == RB_PAGE_NORMAL) {
  1451. /*
1452. * If the tail had moved past next, then we need
  1453. * to reset the pointer.
  1454. */
  1455. if (cpu_buffer->tail_page != tail_page &&
  1456. cpu_buffer->tail_page != next_page)
  1457. rb_head_page_set_normal(cpu_buffer, new_head,
  1458. next_page,
  1459. RB_PAGE_HEAD);
  1460. }
  1461. /*
1462. * If this was the outermost commit (the one that
  1463. * changed the original pointer from HEAD to UPDATE),
  1464. * then it is up to us to reset it to NORMAL.
  1465. */
  1466. if (type == RB_PAGE_HEAD) {
  1467. ret = rb_head_page_set_normal(cpu_buffer, next_page,
  1468. tail_page,
  1469. RB_PAGE_UPDATE);
  1470. if (RB_WARN_ON(cpu_buffer,
  1471. ret != RB_PAGE_UPDATE))
  1472. return -1;
  1473. }
  1474. return 0;
  1475. }
  1476. static unsigned rb_calculate_event_length(unsigned length)
  1477. {
  1478. struct ring_buffer_event event; /* Used only for sizeof array */
  1479. /* zero length can cause confusions */
  1480. if (!length)
  1481. length = 1;
  1482. if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
  1483. length += sizeof(event.array[0]);
  1484. length += RB_EVNT_HDR_SIZE;
  1485. length = ALIGN(length, RB_ARCH_ALIGNMENT);
  1486. return length;
  1487. }
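/*
 * Worked example (editor's addition): with a hypothetical 4-byte event
 * header, 4-byte architecture alignment and no forced 8-byte alignment,
 * a 6-byte payload becomes 6 + 4 = 10 bytes and is then rounded up to 12.
 * A zero-length request is first bumped to 1 byte so that a valid event
 * is always reserved.
 */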
  1488. static inline void
  1489. rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
  1490. struct buffer_page *tail_page,
  1491. unsigned long tail, unsigned long length)
  1492. {
  1493. struct ring_buffer_event *event;
  1494. /*
  1495. * Only the event that crossed the page boundary
  1496. * must fill the old tail_page with padding.
  1497. */
  1498. if (tail >= BUF_PAGE_SIZE) {
  1499. local_sub(length, &tail_page->write);
  1500. return;
  1501. }
  1502. event = __rb_page_index(tail_page, tail);
  1503. kmemcheck_annotate_bitfield(event, bitfield);
  1504. /*
  1505. * If this event is bigger than the minimum size, then
  1506. * we need to be careful that we don't subtract the
  1507. * write counter enough to allow another writer to slip
  1508. * in on this page.
  1509. * We put in a discarded commit instead, to make sure
  1510. * that this space is not used again.
  1511. *
  1512. * If we are less than the minimum size, we don't need to
  1513. * worry about it.
  1514. */
  1515. if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
  1516. /* No room for any events */
  1517. /* Mark the rest of the page with padding */
  1518. rb_event_set_padding(event);
  1519. /* Set the write back to the previous setting */
  1520. local_sub(length, &tail_page->write);
  1521. return;
  1522. }
  1523. /* Put in a discarded event */
  1524. event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
  1525. event->type_len = RINGBUF_TYPE_PADDING;
  1526. /* time delta must be non zero */
  1527. event->time_delta = 1;
  1528. /* Set write to end of buffer */
  1529. length = (tail + length) - BUF_PAGE_SIZE;
  1530. local_sub(length, &tail_page->write);
  1531. }
  1532. static struct ring_buffer_event *
  1533. rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
  1534. unsigned long length, unsigned long tail,
  1535. struct buffer_page *tail_page, u64 *ts)
  1536. {
  1537. struct buffer_page *commit_page = cpu_buffer->commit_page;
  1538. struct ring_buffer *buffer = cpu_buffer->buffer;
  1539. struct buffer_page *next_page;
  1540. int ret;
  1541. next_page = tail_page;
  1542. rb_inc_page(cpu_buffer, &next_page);
  1543. /*
  1544. * If for some reason, we had an interrupt storm that made
  1545. * it all the way around the buffer, bail, and warn
  1546. * about it.
  1547. */
  1548. if (unlikely(next_page == commit_page)) {
  1549. local_inc(&cpu_buffer->commit_overrun);
  1550. goto out_reset;
  1551. }
  1552. /*
  1553. * This is where the fun begins!
  1554. *
  1555. * We are fighting against races between a reader that
  1556. * could be on another CPU trying to swap its reader
  1557. * page with the buffer head.
  1558. *
  1559. * We are also fighting against interrupts coming in and
  1560. * moving the head or tail on us as well.
  1561. *
  1562. * If the next page is the head page then we have filled
  1563. * the buffer, unless the commit page is still on the
  1564. * reader page.
  1565. */
  1566. if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {
  1567. /*
  1568. * If the commit is not on the reader page, then
  1569. * move the header page.
  1570. */
  1571. if (!rb_is_reader_page(cpu_buffer->commit_page)) {
  1572. /*
  1573. * If we are not in overwrite mode,
  1574. * this is easy, just stop here.
  1575. */
  1576. if (!(buffer->flags & RB_FL_OVERWRITE))
  1577. goto out_reset;
  1578. ret = rb_handle_head_page(cpu_buffer,
  1579. tail_page,
  1580. next_page);
  1581. if (ret < 0)
  1582. goto out_reset;
  1583. if (ret)
  1584. goto out_again;
  1585. } else {
  1586. /*
  1587. * We need to be careful here too. The
  1588. * commit page could still be on the reader
  1589. * page. We could have a small buffer, and
  1590. * have filled up the buffer with events
  1591. * from interrupts and such, and wrapped.
  1592. *
1593. * Note, if the tail page is also on the
  1594. * reader_page, we let it move out.
  1595. */
  1596. if (unlikely((cpu_buffer->commit_page !=
  1597. cpu_buffer->tail_page) &&
  1598. (cpu_buffer->commit_page ==
  1599. cpu_buffer->reader_page))) {
  1600. local_inc(&cpu_buffer->commit_overrun);
  1601. goto out_reset;
  1602. }
  1603. }
  1604. }
  1605. ret = rb_tail_page_update(cpu_buffer, tail_page, next_page);
  1606. if (ret) {
  1607. /*
  1608. * Nested commits always have zero deltas, so
  1609. * just reread the time stamp
  1610. */
  1611. *ts = rb_time_stamp(buffer);
  1612. next_page->page->time_stamp = *ts;
  1613. }
  1614. out_again:
  1615. rb_reset_tail(cpu_buffer, tail_page, tail, length);
  1616. /* fail and let the caller try again */
  1617. return ERR_PTR(-EAGAIN);
  1618. out_reset:
  1619. /* reset write */
  1620. rb_reset_tail(cpu_buffer, tail_page, tail, length);
  1621. return NULL;
  1622. }
  1623. static struct ring_buffer_event *
  1624. __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
  1625. unsigned type, unsigned long length, u64 *ts)
  1626. {
  1627. struct buffer_page *tail_page;
  1628. struct ring_buffer_event *event;
  1629. unsigned long tail, write;
  1630. tail_page = cpu_buffer->tail_page;
  1631. write = local_add_return(length, &tail_page->write);
  1632. /* set write to only the index of the write */
  1633. write &= RB_WRITE_MASK;
  1634. tail = write - length;
1635. /* See if we shot past the end of this buffer page */
  1636. if (write > BUF_PAGE_SIZE)
  1637. return rb_move_tail(cpu_buffer, length, tail,
  1638. tail_page, ts);
  1639. /* We reserved something on the buffer */
  1640. event = __rb_page_index(tail_page, tail);
  1641. kmemcheck_annotate_bitfield(event, bitfield);
  1642. rb_update_event(event, type, length);
  1643. /* The passed in type is zero for DATA */
  1644. if (likely(!type))
  1645. local_inc(&tail_page->entries);
  1646. /*
  1647. * If this is the first commit on the page, then update
  1648. * its timestamp.
  1649. */
  1650. if (!tail)
  1651. tail_page->page->time_stamp = *ts;
  1652. return event;
  1653. }
  1654. static inline int
  1655. rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
  1656. struct ring_buffer_event *event)
  1657. {
  1658. unsigned long new_index, old_index;
  1659. struct buffer_page *bpage;
  1660. unsigned long index;
  1661. unsigned long addr;
  1662. new_index = rb_event_index(event);
  1663. old_index = new_index + rb_event_length(event);
  1664. addr = (unsigned long)event;
  1665. addr &= PAGE_MASK;
  1666. bpage = cpu_buffer->tail_page;
  1667. if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
  1668. unsigned long write_mask =
  1669. local_read(&bpage->write) & ~RB_WRITE_MASK;
  1670. /*
  1671. * This is on the tail page. It is possible that
  1672. * a write could come in and move the tail page
  1673. * and write to the next page. That is fine
  1674. * because we just shorten what is on this page.
  1675. */
  1676. old_index += write_mask;
  1677. new_index += write_mask;
  1678. index = local_cmpxchg(&bpage->write, old_index, new_index);
  1679. if (index == old_index)
  1680. return 1;
  1681. }
  1682. /* could not discard */
  1683. return 0;
  1684. }
  1685. static int
  1686. rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
  1687. u64 *ts, u64 *delta)
  1688. {
  1689. struct ring_buffer_event *event;
  1690. static int once;
  1691. int ret;
  1692. if (unlikely(*delta > (1ULL << 59) && !once++)) {
  1693. printk(KERN_WARNING "Delta way too big! %llu"
  1694. " ts=%llu write stamp = %llu\n",
  1695. (unsigned long long)*delta,
  1696. (unsigned long long)*ts,
  1697. (unsigned long long)cpu_buffer->write_stamp);
  1698. WARN_ON(1);
  1699. }
  1700. /*
1701. * The delta is too big, we need to add a
  1702. * new timestamp.
  1703. */
  1704. event = __rb_reserve_next(cpu_buffer,
  1705. RINGBUF_TYPE_TIME_EXTEND,
  1706. RB_LEN_TIME_EXTEND,
  1707. ts);
  1708. if (!event)
  1709. return -EBUSY;
  1710. if (PTR_ERR(event) == -EAGAIN)
  1711. return -EAGAIN;
1712. /* Only a committed time event can update the write stamp */
  1713. if (rb_event_is_commit(cpu_buffer, event)) {
  1714. /*
  1715. * If this is the first on the page, then it was
  1716. * updated with the page itself. Try to discard it
  1717. * and if we can't just make it zero.
  1718. */
  1719. if (rb_event_index(event)) {
  1720. event->time_delta = *delta & TS_MASK;
  1721. event->array[0] = *delta >> TS_SHIFT;
  1722. } else {
  1723. /* try to discard, since we do not need this */
  1724. if (!rb_try_to_discard(cpu_buffer, event)) {
  1725. /* nope, just zero it */
  1726. event->time_delta = 0;
  1727. event->array[0] = 0;
  1728. }
  1729. }
  1730. cpu_buffer->write_stamp = *ts;
  1731. /* let the caller know this was the commit */
  1732. ret = 1;
  1733. } else {
  1734. /* Try to discard the event */
  1735. if (!rb_try_to_discard(cpu_buffer, event)) {
  1736. /* Darn, this is just wasted space */
  1737. event->time_delta = 0;
  1738. event->array[0] = 0;
  1739. }
  1740. ret = 0;
  1741. }
  1742. *delta = 0;
  1743. return ret;
  1744. }
  1745. static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
  1746. {
  1747. local_inc(&cpu_buffer->committing);
  1748. local_inc(&cpu_buffer->commits);
  1749. }
  1750. static void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
  1751. {
  1752. unsigned long commits;
  1753. if (RB_WARN_ON(cpu_buffer,
  1754. !local_read(&cpu_buffer->committing)))
  1755. return;
  1756. again:
  1757. commits = local_read(&cpu_buffer->commits);
  1758. /* synchronize with interrupts */
  1759. barrier();
  1760. if (local_read(&cpu_buffer->committing) == 1)
  1761. rb_set_commit_to_write(cpu_buffer);
  1762. local_dec(&cpu_buffer->committing);
  1763. /* synchronize with interrupts */
  1764. barrier();
  1765. /*
  1766. * Need to account for interrupts coming in between the
  1767. * updating of the commit page and the clearing of the
  1768. * committing counter.
  1769. */
  1770. if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
  1771. !local_read(&cpu_buffer->committing)) {
  1772. local_inc(&cpu_buffer->committing);
  1773. goto again;
  1774. }
  1775. }
  1776. static struct ring_buffer_event *
  1777. rb_reserve_next_event(struct ring_buffer *buffer,
  1778. struct ring_buffer_per_cpu *cpu_buffer,
  1779. unsigned long length)
  1780. {
  1781. struct ring_buffer_event *event;
  1782. u64 ts, delta = 0;
  1783. int commit = 0;
  1784. int nr_loops = 0;
  1785. rb_start_commit(cpu_buffer);
  1786. #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
  1787. /*
  1788. * Due to the ability to swap a cpu buffer from a buffer
  1789. * it is possible it was swapped before we committed.
  1790. * (committing stops a swap). We check for it here and
  1791. * if it happened, we have to fail the write.
  1792. */
  1793. barrier();
  1794. if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
  1795. local_dec(&cpu_buffer->committing);
  1796. local_dec(&cpu_buffer->commits);
  1797. return NULL;
  1798. }
  1799. #endif
  1800. length = rb_calculate_event_length(length);
  1801. again:
  1802. /*
  1803. * We allow for interrupts to reenter here and do a trace.
  1804. * If one does, it will cause this original code to loop
  1805. * back here. Even with heavy interrupts happening, this
  1806. * should only happen a few times in a row. If this happens
  1807. * 1000 times in a row, there must be either an interrupt
  1808. * storm or we have something buggy.
  1809. * Bail!
  1810. */
  1811. if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
  1812. goto out_fail;
  1813. ts = rb_time_stamp(cpu_buffer->buffer);
  1814. /*
  1815. * Only the first commit can update the timestamp.
  1816. * Yes there is a race here. If an interrupt comes in
  1817. * just after the conditional and it traces too, then it
  1818. * will also check the deltas. More than one timestamp may
  1819. * also be made. But only the entry that did the actual
  1820. * commit will be something other than zero.
  1821. */
  1822. if (likely(cpu_buffer->tail_page == cpu_buffer->commit_page &&
  1823. rb_page_write(cpu_buffer->tail_page) ==
  1824. rb_commit_index(cpu_buffer))) {
  1825. u64 diff;
  1826. diff = ts - cpu_buffer->write_stamp;
  1827. /* make sure this diff is calculated here */
  1828. barrier();
  1829. /* Did the write stamp get updated already? */
  1830. if (unlikely(ts < cpu_buffer->write_stamp))
  1831. goto get_event;
  1832. delta = diff;
  1833. if (unlikely(test_time_stamp(delta))) {
  1834. commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
  1835. if (commit == -EBUSY)
  1836. goto out_fail;
  1837. if (commit == -EAGAIN)
  1838. goto again;
  1839. RB_WARN_ON(cpu_buffer, commit < 0);
  1840. }
  1841. }
  1842. get_event:
  1843. event = __rb_reserve_next(cpu_buffer, 0, length, &ts);
  1844. if (unlikely(PTR_ERR(event) == -EAGAIN))
  1845. goto again;
  1846. if (!event)
  1847. goto out_fail;
  1848. if (!rb_event_is_commit(cpu_buffer, event))
  1849. delta = 0;
  1850. event->time_delta = delta;
  1851. return event;
  1852. out_fail:
  1853. rb_end_commit(cpu_buffer);
  1854. return NULL;
  1855. }
  1856. #ifdef CONFIG_TRACING
  1857. #define TRACE_RECURSIVE_DEPTH 16
  1858. static int trace_recursive_lock(void)
  1859. {
  1860. current->trace_recursion++;
  1861. if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
  1862. return 0;
  1863. /* Disable all tracing before we do anything else */
  1864. tracing_off_permanent();
  1865. printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
  1866. "HC[%lu]:SC[%lu]:NMI[%lu]\n",
  1867. current->trace_recursion,
  1868. hardirq_count() >> HARDIRQ_SHIFT,
  1869. softirq_count() >> SOFTIRQ_SHIFT,
  1870. in_nmi());
  1871. WARN_ON_ONCE(1);
  1872. return -1;
  1873. }
  1874. static void trace_recursive_unlock(void)
  1875. {
  1876. WARN_ON_ONCE(!current->trace_recursion);
  1877. current->trace_recursion--;
  1878. }
  1879. #else
  1880. #define trace_recursive_lock() (0)
  1881. #define trace_recursive_unlock() do { } while (0)
  1882. #endif
  1883. static DEFINE_PER_CPU(int, rb_need_resched);
  1884. /**
  1885. * ring_buffer_lock_reserve - reserve a part of the buffer
  1886. * @buffer: the ring buffer to reserve from
  1887. * @length: the length of the data to reserve (excluding event header)
  1888. *
1889. * Returns a reserved event on the ring buffer to copy directly to.
  1890. * The user of this interface will need to get the body to write into
  1891. * and can use the ring_buffer_event_data() interface.
  1892. *
  1893. * The length is the length of the data needed, not the event length
  1894. * which also includes the event header.
  1895. *
  1896. * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
  1897. * If NULL is returned, then nothing has been allocated or locked.
  1898. */
  1899. struct ring_buffer_event *
  1900. ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
  1901. {
  1902. struct ring_buffer_per_cpu *cpu_buffer;
  1903. struct ring_buffer_event *event;
  1904. int cpu, resched;
  1905. if (ring_buffer_flags != RB_BUFFERS_ON)
  1906. return NULL;
  1907. /* If we are tracing schedule, we don't want to recurse */
  1908. resched = ftrace_preempt_disable();
  1909. if (atomic_read(&buffer->record_disabled))
  1910. goto out_nocheck;
  1911. if (trace_recursive_lock())
  1912. goto out_nocheck;
  1913. cpu = raw_smp_processor_id();
  1914. if (!cpumask_test_cpu(cpu, buffer->cpumask))
  1915. goto out;
  1916. cpu_buffer = buffer->buffers[cpu];
  1917. if (atomic_read(&cpu_buffer->record_disabled))
  1918. goto out;
  1919. if (length > BUF_MAX_DATA_SIZE)
  1920. goto out;
  1921. event = rb_reserve_next_event(buffer, cpu_buffer, length);
  1922. if (!event)
  1923. goto out;
  1924. /*
  1925. * Need to store resched state on this cpu.
  1926. * Only the first needs to.
  1927. */
  1928. if (preempt_count() == 1)
  1929. per_cpu(rb_need_resched, cpu) = resched;
  1930. return event;
  1931. out:
  1932. trace_recursive_unlock();
  1933. out_nocheck:
  1934. ftrace_preempt_enable(resched);
  1935. return NULL;
  1936. }
  1937. EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
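/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the reserve/commit pairing described in the comment above. The payload
 * type and helper name are hypothetical.
 */
#if 0
static int example_reserve_and_commit(struct ring_buffer *buffer, u64 value)
{
	struct ring_buffer_event *event;
	u64 *body;

	event = ring_buffer_lock_reserve(buffer, sizeof(*body));
	if (!event)
		return -EBUSY;

	body = ring_buffer_event_data(event);
	*body = value;

	/* releases the preemption/recursion state taken by the reserve */
	return ring_buffer_unlock_commit(buffer, event);
}
#endif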
  1938. static void
  1939. rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
  1940. struct ring_buffer_event *event)
  1941. {
  1942. /*
  1943. * The event first in the commit queue updates the
  1944. * time stamp.
  1945. */
  1946. if (rb_event_is_commit(cpu_buffer, event))
  1947. cpu_buffer->write_stamp += event->time_delta;
  1948. }
  1949. static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
  1950. struct ring_buffer_event *event)
  1951. {
  1952. local_inc(&cpu_buffer->entries);
  1953. rb_update_write_stamp(cpu_buffer, event);
  1954. rb_end_commit(cpu_buffer);
  1955. }
  1956. /**
1957. * ring_buffer_unlock_commit - commit a reserved event
  1958. * @buffer: The buffer to commit to
  1959. * @event: The event pointer to commit.
  1960. *
  1961. * This commits the data to the ring buffer, and releases any locks held.
  1962. *
  1963. * Must be paired with ring_buffer_lock_reserve.
  1964. */
  1965. int ring_buffer_unlock_commit(struct ring_buffer *buffer,
  1966. struct ring_buffer_event *event)
  1967. {
  1968. struct ring_buffer_per_cpu *cpu_buffer;
  1969. int cpu = raw_smp_processor_id();
  1970. cpu_buffer = buffer->buffers[cpu];
  1971. rb_commit(cpu_buffer, event);
  1972. trace_recursive_unlock();
  1973. /*
  1974. * Only the last preempt count needs to restore preemption.
  1975. */
  1976. if (preempt_count() == 1)
  1977. ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
  1978. else
  1979. preempt_enable_no_resched_notrace();
  1980. return 0;
  1981. }
  1982. EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
  1983. static inline void rb_event_discard(struct ring_buffer_event *event)
  1984. {
  1985. /* array[0] holds the actual length for the discarded event */
  1986. event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
  1987. event->type_len = RINGBUF_TYPE_PADDING;
  1988. /* time delta must be non zero */
  1989. if (!event->time_delta)
  1990. event->time_delta = 1;
  1991. }
  1992. /*
  1993. * Decrement the entries to the page that an event is on.
  1994. * The event does not even need to exist, only the pointer
  1995. * to the page it is on. This may only be called before the commit
  1996. * takes place.
  1997. */
  1998. static inline void
  1999. rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
  2000. struct ring_buffer_event *event)
  2001. {
  2002. unsigned long addr = (unsigned long)event;
  2003. struct buffer_page *bpage = cpu_buffer->commit_page;
  2004. struct buffer_page *start;
  2005. addr &= PAGE_MASK;
  2006. /* Do the likely case first */
  2007. if (likely(bpage->page == (void *)addr)) {
  2008. local_dec(&bpage->entries);
  2009. return;
  2010. }
  2011. /*
  2012. * Because the commit page may be on the reader page we
  2013. * start with the next page and check the end loop there.
  2014. */
  2015. rb_inc_page(cpu_buffer, &bpage);
  2016. start = bpage;
  2017. do {
  2018. if (bpage->page == (void *)addr) {
  2019. local_dec(&bpage->entries);
  2020. return;
  2021. }
  2022. rb_inc_page(cpu_buffer, &bpage);
  2023. } while (bpage != start);
  2024. /* commit not part of this buffer?? */
  2025. RB_WARN_ON(cpu_buffer, 1);
  2026. }
  2027. /**
  2028. * ring_buffer_commit_discard - discard an event that has not been committed
  2029. * @buffer: the ring buffer
  2030. * @event: non committed event to discard
  2031. *
  2032. * Sometimes an event that is in the ring buffer needs to be ignored.
  2033. * This function lets the user discard an event in the ring buffer
  2034. * and then that event will not be read later.
  2035. *
2036. * This function only works if it is called before the item has been
  2037. * committed. It will try to free the event from the ring buffer
  2038. * if another event has not been added behind it.
  2039. *
  2040. * If another event has been added behind it, it will set the event
  2041. * up as discarded, and perform the commit.
  2042. *
  2043. * If this function is called, do not call ring_buffer_unlock_commit on
  2044. * the event.
  2045. */
  2046. void ring_buffer_discard_commit(struct ring_buffer *buffer,
  2047. struct ring_buffer_event *event)
  2048. {
  2049. struct ring_buffer_per_cpu *cpu_buffer;
  2050. int cpu;
  2051. /* The event is discarded regardless */
  2052. rb_event_discard(event);
  2053. cpu = smp_processor_id();
  2054. cpu_buffer = buffer->buffers[cpu];
  2055. /*
  2056. * This must only be called if the event has not been
  2057. * committed yet. Thus we can assume that preemption
  2058. * is still disabled.
  2059. */
  2060. RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
  2061. rb_decrement_entry(cpu_buffer, event);
  2062. if (rb_try_to_discard(cpu_buffer, event))
  2063. goto out;
  2064. /*
  2065. * The commit is still visible by the reader, so we
  2066. * must still update the timestamp.
  2067. */
  2068. rb_update_write_stamp(cpu_buffer, event);
  2069. out:
  2070. rb_end_commit(cpu_buffer);
  2071. trace_recursive_unlock();
  2072. /*
  2073. * Only the last preempt count needs to restore preemption.
  2074. */
  2075. if (preempt_count() == 1)
  2076. ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
  2077. else
  2078. preempt_enable_no_resched_notrace();
  2079. }
  2080. EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
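/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * discarding a reserved event instead of committing it, e.g. after a
 * filter decides the data is not wanted. The filter helper is hypothetical.
 */
#if 0
static void example_filtered_write(struct ring_buffer *buffer, u64 value)
{
	struct ring_buffer_event *event;
	u64 *body;

	event = ring_buffer_lock_reserve(buffer, sizeof(*body));
	if (!event)
		return;

	body = ring_buffer_event_data(event);
	*body = value;

	if (example_filter_drops(value))
		/* do NOT call ring_buffer_unlock_commit() after this */
		ring_buffer_discard_commit(buffer, event);
	else
		ring_buffer_unlock_commit(buffer, event);
}
#endif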
  2081. /**
  2082. * ring_buffer_write - write data to the buffer without reserving
  2083. * @buffer: The ring buffer to write to.
  2084. * @length: The length of the data being written (excluding the event header)
  2085. * @data: The data to write to the buffer.
  2086. *
  2087. * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
  2088. * one function. If you already have the data to write to the buffer, it
  2089. * may be easier to simply call this function.
  2090. *
  2091. * Note, like ring_buffer_lock_reserve, the length is the length of the data
  2092. * and not the length of the event which would hold the header.
  2093. */
  2094. int ring_buffer_write(struct ring_buffer *buffer,
  2095. unsigned long length,
  2096. void *data)
  2097. {
  2098. struct ring_buffer_per_cpu *cpu_buffer;
  2099. struct ring_buffer_event *event;
  2100. void *body;
  2101. int ret = -EBUSY;
  2102. int cpu, resched;
  2103. if (ring_buffer_flags != RB_BUFFERS_ON)
  2104. return -EBUSY;
  2105. resched = ftrace_preempt_disable();
  2106. if (atomic_read(&buffer->record_disabled))
  2107. goto out;
  2108. cpu = raw_smp_processor_id();
  2109. if (!cpumask_test_cpu(cpu, buffer->cpumask))
  2110. goto out;
  2111. cpu_buffer = buffer->buffers[cpu];
  2112. if (atomic_read(&cpu_buffer->record_disabled))
  2113. goto out;
  2114. if (length > BUF_MAX_DATA_SIZE)
  2115. goto out;
  2116. event = rb_reserve_next_event(buffer, cpu_buffer, length);
  2117. if (!event)
  2118. goto out;
  2119. body = rb_event_data(event);
  2120. memcpy(body, data, length);
  2121. rb_commit(cpu_buffer, event);
  2122. ret = 0;
  2123. out:
  2124. ftrace_preempt_enable(resched);
  2125. return ret;
  2126. }
  2127. EXPORT_SYMBOL_GPL(ring_buffer_write);
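/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the one-shot write path for callers that already have the data in hand.
 */
#if 0
static int example_direct_write(struct ring_buffer *buffer)
{
	struct { u32 id; u32 val; } rec = { .id = 1, .val = 42 };

	/* length is the payload size only; the event header is added inside */
	return ring_buffer_write(buffer, sizeof(rec), &rec);
}
#endif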
  2128. static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
  2129. {
  2130. struct buffer_page *reader = cpu_buffer->reader_page;
  2131. struct buffer_page *head = rb_set_head_page(cpu_buffer);
  2132. struct buffer_page *commit = cpu_buffer->commit_page;
  2133. /* In case of error, head will be NULL */
  2134. if (unlikely(!head))
  2135. return 1;
  2136. return reader->read == rb_page_commit(reader) &&
  2137. (commit == reader ||
  2138. (commit == head &&
  2139. head->read == rb_page_commit(commit)));
  2140. }
  2141. /**
  2142. * ring_buffer_record_disable - stop all writes into the buffer
  2143. * @buffer: The ring buffer to stop writes to.
  2144. *
  2145. * This prevents all writes to the buffer. Any attempt to write
  2146. * to the buffer after this will fail and return NULL.
  2147. *
  2148. * The caller should call synchronize_sched() after this.
  2149. */
  2150. void ring_buffer_record_disable(struct ring_buffer *buffer)
  2151. {
  2152. atomic_inc(&buffer->record_disabled);
  2153. }
  2154. EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
  2155. /**
  2156. * ring_buffer_record_enable - enable writes to the buffer
  2157. * @buffer: The ring buffer to enable writes
  2158. *
  2159. * Note, multiple disables will need the same number of enables
  2160. * to truly enable the writing (much like preempt_disable).
  2161. */
  2162. void ring_buffer_record_enable(struct ring_buffer *buffer)
  2163. {
  2164. atomic_dec(&buffer->record_disabled);
  2165. }
  2166. EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
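/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * quiescing writers around an operation that wants a stable view of the
 * buffer, following the synchronize_sched() note in the comments above.
 */
#if 0
static void example_with_writers_stopped(struct ring_buffer *buffer,
					 void (*op)(struct ring_buffer *))
{
	ring_buffer_record_disable(buffer);
	/* wait for writers already inside the buffer code to finish */
	synchronize_sched();

	op(buffer);

	ring_buffer_record_enable(buffer);
}
#endif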
  2167. /**
  2168. * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
  2169. * @buffer: The ring buffer to stop writes to.
  2170. * @cpu: The CPU buffer to stop
  2171. *
  2172. * This prevents all writes to the buffer. Any attempt to write
  2173. * to the buffer after this will fail and return NULL.
  2174. *
  2175. * The caller should call synchronize_sched() after this.
  2176. */
  2177. void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
  2178. {
  2179. struct ring_buffer_per_cpu *cpu_buffer;
  2180. if (!cpumask_test_cpu(cpu, buffer->cpumask))
  2181. return;
  2182. cpu_buffer = buffer->buffers[cpu];
  2183. atomic_inc(&cpu_buffer->record_disabled);
  2184. }
  2185. EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
  2186. /**
  2187. * ring_buffer_record_enable_cpu - enable writes to the buffer
  2188. * @buffer: The ring buffer to enable writes
  2189. * @cpu: The CPU to enable.
  2190. *
  2191. * Note, multiple disables will need the same number of enables
  2192. * to truly enable the writing (much like preempt_disable).
  2193. */
  2194. void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
  2195. {
  2196. struct ring_buffer_per_cpu *cpu_buffer;
  2197. if (!cpumask_test_cpu(cpu, buffer->cpumask))
  2198. return;
  2199. cpu_buffer = buffer->buffers[cpu];
  2200. atomic_dec(&cpu_buffer->record_disabled);
  2201. }
  2202. EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
  2203. /**
  2204. * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
  2205. * @buffer: The ring buffer
  2206. * @cpu: The per CPU buffer to get the entries from.
  2207. */
  2208. unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
  2209. {
  2210. struct ring_buffer_per_cpu *cpu_buffer;
  2211. unsigned long ret;
  2212. if (!cpumask_test_cpu(cpu, buffer->cpumask))
  2213. return 0;
  2214. cpu_buffer = buffer->buffers[cpu];
  2215. ret = (local_read(&cpu_buffer->entries) - local_read(&cpu_buffer->overrun))
  2216. - cpu_buffer->read;
  2217. return ret;
  2218. }
  2219. EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
  2220. /**
  2221. * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
  2222. * @buffer: The ring buffer
  2223. * @cpu: The per CPU buffer to get the number of overruns from
  2224. */
  2225. unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
  2226. {
  2227. struct ring_buffer_per_cpu *cpu_buffer;
  2228. unsigned long ret;
  2229. if (!cpumask_test_cpu(cpu, buffer->cpumask))
  2230. return 0;
  2231. cpu_buffer = buffer->buffers[cpu];
  2232. ret = local_read(&cpu_buffer->overrun);
  2233. return ret;
  2234. }
  2235. EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
  2236. /**
  2237. * ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits
  2238. * @buffer: The ring buffer
  2239. * @cpu: The per CPU buffer to get the number of overruns from
  2240. */
  2241. unsigned long
  2242. ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
  2243. {
  2244. struct ring_buffer_per_cpu *cpu_buffer;
  2245. unsigned long ret;
  2246. if (!cpumask_test_cpu(cpu, buffer->cpumask))
  2247. return 0;
  2248. cpu_buffer = buffer->buffers[cpu];
  2249. ret = local_read(&cpu_buffer->commit_overrun);
  2250. return ret;
  2251. }
  2252. EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
  2253. /**
  2254. * ring_buffer_entries - get the number of entries in a buffer
  2255. * @buffer: The ring buffer
  2256. *
  2257. * Returns the total number of entries in the ring buffer
  2258. * (all CPU entries)
  2259. */
  2260. unsigned long ring_buffer_entries(struct ring_buffer *buffer)
  2261. {
  2262. struct ring_buffer_per_cpu *cpu_buffer;
  2263. unsigned long entries = 0;
  2264. int cpu;
  2265. /* if you care about this being correct, lock the buffer */
  2266. for_each_buffer_cpu(buffer, cpu) {
  2267. cpu_buffer = buffer->buffers[cpu];
  2268. entries += (local_read(&cpu_buffer->entries) -
  2269. local_read(&cpu_buffer->overrun)) - cpu_buffer->read;
  2270. }
  2271. return entries;
  2272. }
  2273. EXPORT_SYMBOL_GPL(ring_buffer_entries);
  2274. /**
  2275. * ring_buffer_overruns - get the number of overruns in buffer
  2276. * @buffer: The ring buffer
  2277. *
  2278. * Returns the total number of overruns in the ring buffer
  2279. * (all CPU entries)
  2280. */
  2281. unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
  2282. {
  2283. struct ring_buffer_per_cpu *cpu_buffer;
  2284. unsigned long overruns = 0;
  2285. int cpu;
  2286. /* if you care about this being correct, lock the buffer */
  2287. for_each_buffer_cpu(buffer, cpu) {
  2288. cpu_buffer = buffer->buffers[cpu];
  2289. overruns += local_read(&cpu_buffer->overrun);
  2290. }
  2291. return overruns;
  2292. }
  2293. EXPORT_SYMBOL_GPL(ring_buffer_overruns);
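/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * dumping the per-cpu and global counters exported above. The counts are
 * only approximate unless writers are stopped, as the comments note.
 */
#if 0
static void example_dump_stats(struct ring_buffer *buffer)
{
	int cpu;

	for_each_online_cpu(cpu)
		printk(KERN_INFO "cpu%d: %lu entries, %lu overruns\n", cpu,
		       ring_buffer_entries_cpu(buffer, cpu),
		       ring_buffer_overrun_cpu(buffer, cpu));

	printk(KERN_INFO "total: %lu entries, %lu overruns\n",
	       ring_buffer_entries(buffer), ring_buffer_overruns(buffer));
}
#endif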
  2294. static void rb_iter_reset(struct ring_buffer_iter *iter)
  2295. {
  2296. struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
  2297. /* Iterator usage is expected to have record disabled */
  2298. if (list_empty(&cpu_buffer->reader_page->list)) {
  2299. iter->head_page = rb_set_head_page(cpu_buffer);
  2300. if (unlikely(!iter->head_page))
  2301. return;
  2302. iter->head = iter->head_page->read;
  2303. } else {
  2304. iter->head_page = cpu_buffer->reader_page;
  2305. iter->head = cpu_buffer->reader_page->read;
  2306. }
  2307. if (iter->head)
  2308. iter->read_stamp = cpu_buffer->read_stamp;
  2309. else
  2310. iter->read_stamp = iter->head_page->page->time_stamp;
  2311. iter->cache_reader_page = cpu_buffer->reader_page;
  2312. iter->cache_read = cpu_buffer->read;
  2313. }
  2314. /**
  2315. * ring_buffer_iter_reset - reset an iterator
  2316. * @iter: The iterator to reset
  2317. *
  2318. * Resets the iterator, so that it will start from the beginning
  2319. * again.
  2320. */
  2321. void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
  2322. {
  2323. struct ring_buffer_per_cpu *cpu_buffer;
  2324. unsigned long flags;
  2325. if (!iter)
  2326. return;
  2327. cpu_buffer = iter->cpu_buffer;
  2328. spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
  2329. rb_iter_reset(iter);
  2330. spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
  2331. }
  2332. EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
  2333. /**
  2334. * ring_buffer_iter_empty - check if an iterator has no more to read
  2335. * @iter: The iterator to check
  2336. */
  2337. int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
  2338. {
  2339. struct ring_buffer_per_cpu *cpu_buffer;
  2340. cpu_buffer = iter->cpu_buffer;
  2341. return iter->head_page == cpu_buffer->commit_page &&
  2342. iter->head == rb_commit_index(cpu_buffer);
  2343. }
  2344. EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
  2345. static void
  2346. rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
  2347. struct ring_buffer_event *event)
  2348. {
  2349. u64 delta;
  2350. switch (event->type_len) {
  2351. case RINGBUF_TYPE_PADDING:
  2352. return;
  2353. case RINGBUF_TYPE_TIME_EXTEND:
  2354. delta = event->array[0];
  2355. delta <<= TS_SHIFT;
  2356. delta += event->time_delta;
  2357. cpu_buffer->read_stamp += delta;
  2358. return;
  2359. case RINGBUF_TYPE_TIME_STAMP:
  2360. /* FIXME: not implemented */
  2361. return;
  2362. case RINGBUF_TYPE_DATA:
  2363. cpu_buffer->read_stamp += event->time_delta;
  2364. return;
  2365. default:
  2366. BUG();
  2367. }
  2368. return;
  2369. }
  2370. static void
  2371. rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
  2372. struct ring_buffer_event *event)
  2373. {
  2374. u64 delta;
  2375. switch (event->type_len) {
  2376. case RINGBUF_TYPE_PADDING:
  2377. return;
  2378. case RINGBUF_TYPE_TIME_EXTEND:
  2379. delta = event->array[0];
  2380. delta <<= TS_SHIFT;
  2381. delta += event->time_delta;
  2382. iter->read_stamp += delta;
  2383. return;
  2384. case RINGBUF_TYPE_TIME_STAMP:
  2385. /* FIXME: not implemented */
  2386. return;
  2387. case RINGBUF_TYPE_DATA:
  2388. iter->read_stamp += event->time_delta;
  2389. return;
  2390. default:
  2391. BUG();
  2392. }
  2393. return;
  2394. }
  2395. static struct buffer_page *
  2396. rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
  2397. {
  2398. struct buffer_page *reader = NULL;
  2399. unsigned long flags;
  2400. int nr_loops = 0;
  2401. int ret;
  2402. local_irq_save(flags);
  2403. arch_spin_lock(&cpu_buffer->lock);
  2404. again:
  2405. /*
  2406. * This should normally only loop twice. But because the
  2407. * start of the reader inserts an empty page, it causes
  2408. * a case where we will loop three times. There should be no
  2409. * reason to loop four times (that I know of).
  2410. */
  2411. if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
  2412. reader = NULL;
  2413. goto out;
  2414. }
  2415. reader = cpu_buffer->reader_page;
  2416. /* If there's more to read, return this page */
  2417. if (cpu_buffer->reader_page->read < rb_page_size(reader))
  2418. goto out;
  2419. /* Never should we have an index greater than the size */
  2420. if (RB_WARN_ON(cpu_buffer,
  2421. cpu_buffer->reader_page->read > rb_page_size(reader)))
  2422. goto out;
  2423. /* check if we caught up to the tail */
  2424. reader = NULL;
  2425. if (cpu_buffer->commit_page == cpu_buffer->reader_page)
  2426. goto out;
  2427. /*
  2428. * Reset the reader page to size zero.
  2429. */
  2430. local_set(&cpu_buffer->reader_page->write, 0);
  2431. local_set(&cpu_buffer->reader_page->entries, 0);
  2432. local_set(&cpu_buffer->reader_page->page->commit, 0);
  2433. spin:
  2434. /*
  2435. * Splice the empty reader page into the list around the head.
  2436. */
  2437. reader = rb_set_head_page(cpu_buffer);
  2438. cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
  2439. cpu_buffer->reader_page->list.prev = reader->list.prev;
  2440. /*
  2441. * cpu_buffer->pages just needs to point to the buffer, it
  2442. * has no specific buffer page to point to. Lets move it out
2443. * of our way so we don't accidentally swap it.
  2444. */
  2445. cpu_buffer->pages = reader->list.prev;
  2446. /* The reader page will be pointing to the new head */
  2447. rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
  2448. /*
  2449. * Here's the tricky part.
  2450. *
  2451. * We need to move the pointer past the header page.
  2452. * But we can only do that if a writer is not currently
  2453. * moving it. The page before the header page has the
  2454. * flag bit '1' set if it is pointing to the page we want.
2455. * But if the writer is in the process of moving it
2456. * then it will be '2' or already moved '0'.
  2457. */
  2458. ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
  2459. /*
  2460. * If we did not convert it, then we must try again.
  2461. */
  2462. if (!ret)
  2463. goto spin;
  2464. /*
  2465. * Yeah! We succeeded in replacing the page.
  2466. *
  2467. * Now make the new head point back to the reader page.
  2468. */
  2469. rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
  2470. rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
  2471. /* Finally update the reader page to the new head */
  2472. cpu_buffer->reader_page = reader;
  2473. rb_reset_reader_page(cpu_buffer);
  2474. goto again;
  2475. out:
  2476. arch_spin_unlock(&cpu_buffer->lock);
  2477. local_irq_restore(flags);
  2478. return reader;
  2479. }
  2480. static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
  2481. {
  2482. struct ring_buffer_event *event;
  2483. struct buffer_page *reader;
  2484. unsigned length;
  2485. reader = rb_get_reader_page(cpu_buffer);
  2486. /* This function should not be called when buffer is empty */
  2487. if (RB_WARN_ON(cpu_buffer, !reader))
  2488. return;
  2489. event = rb_reader_event(cpu_buffer);
  2490. if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
  2491. cpu_buffer->read++;
  2492. rb_update_read_stamp(cpu_buffer, event);
  2493. length = rb_event_length(event);
  2494. cpu_buffer->reader_page->read += length;
  2495. }
  2496. static void rb_advance_iter(struct ring_buffer_iter *iter)
  2497. {
  2498. struct ring_buffer *buffer;
  2499. struct ring_buffer_per_cpu *cpu_buffer;
  2500. struct ring_buffer_event *event;
  2501. unsigned length;
  2502. cpu_buffer = iter->cpu_buffer;
  2503. buffer = cpu_buffer->buffer;
  2504. /*
  2505. * Check if we are at the end of the buffer.
  2506. */
  2507. if (iter->head >= rb_page_size(iter->head_page)) {
  2508. /* discarded commits can make the page empty */
  2509. if (iter->head_page == cpu_buffer->commit_page)
  2510. return;
  2511. rb_inc_iter(iter);
  2512. return;
  2513. }
  2514. event = rb_iter_head_event(iter);
  2515. length = rb_event_length(event);
  2516. /*
  2517. * This should not be called to advance the header if we are
  2518. * at the tail of the buffer.
  2519. */
  2520. if (RB_WARN_ON(cpu_buffer,
  2521. (iter->head_page == cpu_buffer->commit_page) &&
  2522. (iter->head + length > rb_commit_index(cpu_buffer))))
  2523. return;
  2524. rb_update_iter_read_stamp(iter, event);
  2525. iter->head += length;
  2526. /* check for end of page padding */
  2527. if ((iter->head >= rb_page_size(iter->head_page)) &&
  2528. (iter->head_page != cpu_buffer->commit_page))
  2529. rb_advance_iter(iter);
  2530. }
  2531. static struct ring_buffer_event *
  2532. rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts)
  2533. {
  2534. struct ring_buffer_event *event;
  2535. struct buffer_page *reader;
  2536. int nr_loops = 0;
  2537. again:
  2538. /*
  2539. * We repeat when a timestamp is encountered. It is possible
  2540. * to get multiple timestamps from an interrupt entering just
  2541. * as one timestamp is about to be written, or from discarded
  2542. * commits. The most that we can have is the number on a single page.
  2543. */
  2544. if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
  2545. return NULL;
  2546. reader = rb_get_reader_page(cpu_buffer);
  2547. if (!reader)
  2548. return NULL;
  2549. event = rb_reader_event(cpu_buffer);
  2550. switch (event->type_len) {
  2551. case RINGBUF_TYPE_PADDING:
  2552. if (rb_null_event(event))
  2553. RB_WARN_ON(cpu_buffer, 1);
  2554. /*
  2555. * Because the writer could be discarding every
  2556. * event it creates (which would probably be bad)
  2557. * if we were to go back to "again" then we may never
  2558. * catch up, and will trigger the warn on, or lock
  2559. * the box. Return the padding, and we will release
  2560. * the current locks, and try again.
  2561. */
  2562. return event;
  2563. case RINGBUF_TYPE_TIME_EXTEND:
  2564. /* Internal data, OK to advance */
  2565. rb_advance_reader(cpu_buffer);
  2566. goto again;
  2567. case RINGBUF_TYPE_TIME_STAMP:
  2568. /* FIXME: not implemented */
  2569. rb_advance_reader(cpu_buffer);
  2570. goto again;
  2571. case RINGBUF_TYPE_DATA:
  2572. if (ts) {
  2573. *ts = cpu_buffer->read_stamp + event->time_delta;
  2574. ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
  2575. cpu_buffer->cpu, ts);
  2576. }
  2577. return event;
  2578. default:
  2579. BUG();
  2580. }
  2581. return NULL;
  2582. }
  2583. EXPORT_SYMBOL_GPL(ring_buffer_peek);
  2584. static struct ring_buffer_event *
  2585. rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
  2586. {
  2587. struct ring_buffer *buffer;
  2588. struct ring_buffer_per_cpu *cpu_buffer;
  2589. struct ring_buffer_event *event;
  2590. int nr_loops = 0;
  2591. cpu_buffer = iter->cpu_buffer;
  2592. buffer = cpu_buffer->buffer;
  2593. /*
  2594. * Check if someone performed a consuming read to
  2595. * the buffer. A consuming read invalidates the iterator
  2596. * and we need to reset the iterator in this case.
  2597. */
  2598. if (unlikely(iter->cache_read != cpu_buffer->read ||
  2599. iter->cache_reader_page != cpu_buffer->reader_page))
  2600. rb_iter_reset(iter);
  2601. again:
  2602. if (ring_buffer_iter_empty(iter))
  2603. return NULL;
  2604. /*
  2605. * We repeat when a timestamp is encountered.
  2606. * We can get multiple timestamps by nested interrupts or also
  2607. * if filtering is on (discarding commits). Since discarding
  2608. * commits can be frequent we can get a lot of timestamps.
  2609. * But we limit them by not adding timestamps if they begin
  2610. * at the start of a page.
  2611. */
  2612. if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
  2613. return NULL;
  2614. if (rb_per_cpu_empty(cpu_buffer))
  2615. return NULL;
  2616. if (iter->head >= local_read(&iter->head_page->page->commit)) {
  2617. rb_inc_iter(iter);
  2618. goto again;
  2619. }
  2620. event = rb_iter_head_event(iter);
  2621. switch (event->type_len) {
  2622. case RINGBUF_TYPE_PADDING:
  2623. if (rb_null_event(event)) {
  2624. rb_inc_iter(iter);
  2625. goto again;
  2626. }
  2627. rb_advance_iter(iter);
  2628. return event;
  2629. case RINGBUF_TYPE_TIME_EXTEND:
  2630. /* Internal data, OK to advance */
  2631. rb_advance_iter(iter);
  2632. goto again;
  2633. case RINGBUF_TYPE_TIME_STAMP:
  2634. /* FIXME: not implemented */
  2635. rb_advance_iter(iter);
  2636. goto again;
  2637. case RINGBUF_TYPE_DATA:
  2638. if (ts) {
  2639. *ts = iter->read_stamp + event->time_delta;
  2640. ring_buffer_normalize_time_stamp(buffer,
  2641. cpu_buffer->cpu, ts);
  2642. }
  2643. return event;
  2644. default:
  2645. BUG();
  2646. }
  2647. return NULL;
  2648. }
  2649. EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
  2650. static inline int rb_ok_to_lock(void)
  2651. {
  2652. /*
  2653. * If an NMI die dumps out the content of the ring buffer
  2654. * do not grab locks. We also permanently disable the ring
  2655. * buffer too. A one time deal is all you get from reading
  2656. * the ring buffer from an NMI.
  2657. */
  2658. if (likely(!in_nmi()))
  2659. return 1;
  2660. tracing_off_permanent();
  2661. return 0;
  2662. }
  2663. /**
  2664. * ring_buffer_peek - peek at the next event to be read
  2665. * @buffer: The ring buffer to read
2666. * @cpu: The cpu to peek at
  2667. * @ts: The timestamp counter of this event.
  2668. *
  2669. * This will return the event that will be read next, but does
  2670. * not consume the data.
  2671. */
  2672. struct ring_buffer_event *
  2673. ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
  2674. {
  2675. struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
  2676. struct ring_buffer_event *event;
  2677. unsigned long flags;
  2678. int dolock;
  2679. if (!cpumask_test_cpu(cpu, buffer->cpumask))
  2680. return NULL;
  2681. dolock = rb_ok_to_lock();
  2682. again:
  2683. local_irq_save(flags);
  2684. if (dolock)
  2685. spin_lock(&cpu_buffer->reader_lock);
  2686. event = rb_buffer_peek(cpu_buffer, ts);
  2687. if (event && event->type_len == RINGBUF_TYPE_PADDING)
  2688. rb_advance_reader(cpu_buffer);
  2689. if (dolock)
  2690. spin_unlock(&cpu_buffer->reader_lock);
  2691. local_irq_restore(flags);
  2692. if (event && event->type_len == RINGBUF_TYPE_PADDING)
  2693. goto again;
  2694. return event;
  2695. }
  2696. /**
  2697. * ring_buffer_iter_peek - peek at the next event to be read
  2698. * @iter: The ring buffer iterator
  2699. * @ts: The timestamp counter of this event.
  2700. *
  2701. * This will return the event that will be read next, but does
  2702. * not increment the iterator.
  2703. */
  2704. struct ring_buffer_event *
  2705. ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
  2706. {
  2707. struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
  2708. struct ring_buffer_event *event;
  2709. unsigned long flags;
  2710. again:
  2711. spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
  2712. event = rb_iter_peek(iter, ts);
  2713. spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
  2714. if (event && event->type_len == RINGBUF_TYPE_PADDING)
  2715. goto again;
  2716. return event;
  2717. }
  2718. /**
  2719. * ring_buffer_consume - return an event and consume it
  2720. * @buffer: The ring buffer to get the next event from
  2721. *
  2722. * Returns the next event in the ring buffer, and that event is consumed.
2723. * Meaning that sequential reads will keep returning a different event,
  2724. * and eventually empty the ring buffer if the producer is slower.
  2725. */
  2726. struct ring_buffer_event *
  2727. ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
  2728. {
  2729. struct ring_buffer_per_cpu *cpu_buffer;
  2730. struct ring_buffer_event *event = NULL;
  2731. unsigned long flags;
  2732. int dolock;
  2733. dolock = rb_ok_to_lock();
  2734. again:
  2735. /* might be called in atomic */
  2736. preempt_disable();
  2737. if (!cpumask_test_cpu(cpu, buffer->cpumask))
  2738. goto out;
  2739. cpu_buffer = buffer->buffers[cpu];
  2740. local_irq_save(flags);
  2741. if (dolock)
  2742. spin_lock(&cpu_buffer->reader_lock);
  2743. event = rb_buffer_peek(cpu_buffer, ts);
  2744. if (event)
  2745. rb_advance_reader(cpu_buffer);
  2746. if (dolock)
  2747. spin_unlock(&cpu_buffer->reader_lock);
  2748. local_irq_restore(flags);
  2749. out:
  2750. preempt_enable();
  2751. if (event && event->type_len == RINGBUF_TYPE_PADDING)
  2752. goto again;
  2753. return event;
  2754. }
  2755. EXPORT_SYMBOL_GPL(ring_buffer_consume);
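/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * draining one cpu's buffer with the consuming-read interface. The handler
 * callback is hypothetical.
 */
#if 0
static void example_drain_cpu(struct ring_buffer *buffer, int cpu,
			      void (*handler)(void *data, u64 ts))
{
	struct ring_buffer_event *event;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts)) != NULL)
		handler(ring_buffer_event_data(event), ts);
}
#endif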

/**
 * ring_buffer_read_start - start a non consuming read of the buffer
 * @buffer: The ring buffer to read from
 * @cpu: The cpu buffer to iterate over
 *
 * This starts up an iteration through the buffer. It also disables
 * the recording to the buffer until the reading is finished.
 * This prevents the reading from being corrupted. This is not
 * a consuming read, so a producer is not expected.
 *
 * Must be paired with ring_buffer_read_finish.
 */
struct ring_buffer_iter *
ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_iter *iter;
	unsigned long flags;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return NULL;

	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	cpu_buffer = buffer->buffers[cpu];

	iter->cpu_buffer = cpu_buffer;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	arch_spin_lock(&cpu_buffer->lock);
	rb_iter_reset(iter);
	arch_spin_unlock(&cpu_buffer->lock);
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	return iter;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_start);

/**
 * ring_buffer_read_finish - finish reading the iterator of the buffer
 * @iter: The iterator retrieved by ring_buffer_read_start
 *
 * This re-enables the recording to the buffer, and frees the
 * iterator.
 */
void
ring_buffer_read_finish(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	atomic_dec(&cpu_buffer->record_disabled);
	kfree(iter);
}
EXPORT_SYMBOL_GPL(ring_buffer_read_finish);

/**
 * ring_buffer_read - read the next item in the ring buffer by the iterator
 * @iter: The ring buffer iterator
 * @ts: The time stamp of the event read.
 *
 * This reads the next event in the ring buffer and increments the iterator.
 */
struct ring_buffer_event *
ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer_event *event;
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
	unsigned long flags;

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 again:
	event = rb_iter_peek(iter, ts);
	if (!event)
		goto out;

	if (event->type_len == RINGBUF_TYPE_PADDING)
		goto again;

	rb_advance_iter(iter);
 out:
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_read);
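
/*
 * Illustrative sketch (not part of the original file): a non-consuming walk
 * of one CPU buffer using the iterator API. ring_buffer_read_start()
 * disables recording on that CPU buffer, ring_buffer_read() returns each
 * event and advances the iterator, and ring_buffer_read_finish() re-enables
 * recording and frees the iterator. The printout is a placeholder.
 */
#if 0	/* usage example only, not compiled */
static void example_walk_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_start(buffer, cpu);
	if (!iter)
		return;

	while ((event = ring_buffer_read(iter, &ts)) != NULL)
		pr_info("event at %llu, length %u\n",
			(unsigned long long)ts,
			ring_buffer_event_length(event));

	ring_buffer_read_finish(iter);
}
#endif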

/**
 * ring_buffer_size - return the size of the ring buffer (in bytes)
 * @buffer: The ring buffer.
 */
unsigned long ring_buffer_size(struct ring_buffer *buffer)
{
	return BUF_PAGE_SIZE * buffer->pages;
}
EXPORT_SYMBOL_GPL(ring_buffer_size);

static void
rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
{
	rb_head_page_deactivate(cpu_buffer);

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages, struct buffer_page, list);
	local_set(&cpu_buffer->head_page->write, 0);
	local_set(&cpu_buffer->head_page->entries, 0);
	local_set(&cpu_buffer->head_page->page->commit, 0);

	cpu_buffer->head_page->read = 0;

	cpu_buffer->tail_page = cpu_buffer->head_page;
	cpu_buffer->commit_page = cpu_buffer->head_page;

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
	local_set(&cpu_buffer->reader_page->write, 0);
	local_set(&cpu_buffer->reader_page->entries, 0);
	local_set(&cpu_buffer->reader_page->page->commit, 0);
	cpu_buffer->reader_page->read = 0;

	local_set(&cpu_buffer->commit_overrun, 0);
	local_set(&cpu_buffer->overrun, 0);
	local_set(&cpu_buffer->entries, 0);
	local_set(&cpu_buffer->committing, 0);
	local_set(&cpu_buffer->commits, 0);
	cpu_buffer->read = 0;

	cpu_buffer->write_stamp = 0;
	cpu_buffer->read_stamp = 0;

	rb_head_page_activate(cpu_buffer);
}

/**
 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
 * @buffer: The ring buffer to reset a per cpu buffer of
 * @cpu: The CPU buffer to be reset
 */
void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	unsigned long flags;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	atomic_inc(&cpu_buffer->record_disabled);

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
		goto out;

	arch_spin_lock(&cpu_buffer->lock);

	rb_reset_cpu(cpu_buffer);

	arch_spin_unlock(&cpu_buffer->lock);

 out:
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	atomic_dec(&cpu_buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);

/**
 * ring_buffer_reset - reset a ring buffer
 * @buffer: The ring buffer to reset all cpu buffers
 */
void ring_buffer_reset(struct ring_buffer *buffer)
{
	int cpu;

	for_each_buffer_cpu(buffer, cpu)
		ring_buffer_reset_cpu(buffer, cpu);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset);

/**
 * ring_buffer_empty - is the ring buffer empty?
 * @buffer: The ring buffer to test
 */
int ring_buffer_empty(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;
	int dolock;
	int cpu;
	int ret;

	dolock = rb_ok_to_lock();

	/* yes this is racy, but if you don't like the race, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		local_irq_save(flags);
		if (dolock)
			spin_lock(&cpu_buffer->reader_lock);
		ret = rb_per_cpu_empty(cpu_buffer);
		if (dolock)
			spin_unlock(&cpu_buffer->reader_lock);
		local_irq_restore(flags);

		if (!ret)
			return 0;
	}

	return 1;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty);

/**
 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
 * @buffer: The ring buffer
 * @cpu: The CPU buffer to test
 */
int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;
	int dolock;
	int ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 1;

	dolock = rb_ok_to_lock();

	cpu_buffer = buffer->buffers[cpu];
	local_irq_save(flags);
	if (dolock)
		spin_lock(&cpu_buffer->reader_lock);
	ret = rb_per_cpu_empty(cpu_buffer);
	if (dolock)
		spin_unlock(&cpu_buffer->reader_lock);
	local_irq_restore(flags);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
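
/*
 * Illustrative sketch (not part of the original file): skipping CPUs that
 * have nothing to read. Because the emptiness check is unsynchronized with
 * writers, it is only a hint; a CPU reported empty may receive events
 * immediately afterwards.
 */
#if 0	/* usage example only, not compiled */
static void example_consume_nonempty(struct ring_buffer *buffer)
{
	u64 ts;
	int cpu;

	for_each_online_cpu(cpu) {
		if (ring_buffer_empty_cpu(buffer, cpu))
			continue;
		/* consume and discard whatever is currently queued */
		while (ring_buffer_consume(buffer, cpu, &ts))
			;
	}
}
#endif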

#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
/**
 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
 * @buffer_a: One buffer to swap with
 * @buffer_b: The other buffer to swap with
 * @cpu: The CPU of the buffers to swap
 *
 * This function is useful for tracers that want to take a "snapshot"
 * of a CPU buffer and have another back up buffer lying around.
 * It is expected that the tracer handles the cpu buffer not being
 * used at the moment.
 */
int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
			 struct ring_buffer *buffer_b, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer_a;
	struct ring_buffer_per_cpu *cpu_buffer_b;
	int ret = -EINVAL;

	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
		goto out;

	/* At least make sure the two buffers are somewhat the same */
	if (buffer_a->pages != buffer_b->pages)
		goto out;

	ret = -EAGAIN;

	if (ring_buffer_flags != RB_BUFFERS_ON)
		goto out;

	if (atomic_read(&buffer_a->record_disabled))
		goto out;

	if (atomic_read(&buffer_b->record_disabled))
		goto out;

	cpu_buffer_a = buffer_a->buffers[cpu];
	cpu_buffer_b = buffer_b->buffers[cpu];

	if (atomic_read(&cpu_buffer_a->record_disabled))
		goto out;

	if (atomic_read(&cpu_buffer_b->record_disabled))
		goto out;

	/*
	 * We can't do a synchronize_sched here because this
	 * function can be called in atomic context.
	 * Normally this will be called from the same CPU as cpu.
	 * If not it's up to the caller to protect this.
	 */
	atomic_inc(&cpu_buffer_a->record_disabled);
	atomic_inc(&cpu_buffer_b->record_disabled);

	ret = -EBUSY;
	if (local_read(&cpu_buffer_a->committing))
		goto out_dec;
	if (local_read(&cpu_buffer_b->committing))
		goto out_dec;

	buffer_a->buffers[cpu] = cpu_buffer_b;
	buffer_b->buffers[cpu] = cpu_buffer_a;

	cpu_buffer_b->buffer = buffer_a;
	cpu_buffer_a->buffer = buffer_b;

	ret = 0;

 out_dec:
	atomic_dec(&cpu_buffer_a->record_disabled);
	atomic_dec(&cpu_buffer_b->record_disabled);
 out:
	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
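
/*
 * Illustrative sketch (not part of the original file): the "snapshot"
 * pattern the comment above describes. A tracer keeps a spare buffer of the
 * same size (example_snapshot_buffer, hypothetical) and swaps the current
 * CPU's buffer into it, so the live buffer keeps recording while the
 * snapshot is read at leisure.
 */
#if 0	/* usage example only, not compiled */
static int example_snapshot_this_cpu(struct ring_buffer *live,
				     struct ring_buffer *example_snapshot_buffer)
{
	int cpu = raw_smp_processor_id();

	/* on success, the data recorded so far now sits in the snapshot buffer */
	return ring_buffer_swap_cpu(example_snapshot_buffer, live, cpu);
}
#endif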
  3016. #endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */

/**
 * ring_buffer_alloc_read_page - allocate a page to read from buffer
 * @buffer: the buffer to allocate for.
 *
 * This function is used in conjunction with ring_buffer_read_page.
 * When reading a full page from the ring buffer, these functions
 * can be used to speed up the process. The calling function should
 * allocate a few pages first with this function. Then when it
 * needs to get pages from the ring buffer, it passes the result
 * of this function into ring_buffer_read_page, which will swap
 * the page that was allocated, with the read page of the buffer.
 *
 * Returns:
 *  The page allocated, or NULL on error.
 */
void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
{
	struct buffer_data_page *bpage;
	unsigned long addr;

	addr = __get_free_page(GFP_KERNEL);
	if (!addr)
		return NULL;

	bpage = (void *)addr;

	rb_init_page(bpage);

	return bpage;
}
EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);

/**
 * ring_buffer_free_read_page - free an allocated read page
 * @buffer: the buffer the page was allocated for
 * @data: the page to free
 *
 * Free a page allocated from ring_buffer_alloc_read_page.
 */
void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
{
	free_page((unsigned long)data);
}
EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);

/**
 * ring_buffer_read_page - extract a page from the ring buffer
 * @buffer: buffer to extract from
 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
 * @len: amount to extract
 * @cpu: the cpu of the buffer to extract
 * @full: should the extraction only happen when the page is full.
 *
 * This function will pull out a page from the ring buffer and consume it.
 * @data_page must be the address of the variable that was returned
 * from ring_buffer_alloc_read_page. This is because the page might be used
 * to swap with a page in the ring buffer.
 *
 * for example:
 *	rpage = ring_buffer_alloc_read_page(buffer);
 *	if (!rpage)
 *		return error;
 *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
 *	if (ret >= 0)
 *		process_page(rpage, ret);
 *
 * When @full is set, the function will not return the data unless
 * the writer is off the reader page.
 *
 * Note: it is up to the calling functions to handle sleeps and wakeups.
 *  The ring buffer can be used anywhere in the kernel and can not
 *  blindly call wake_up. The layer that uses the ring buffer must be
 *  responsible for that.
 *
 * Returns:
 *  >=0 if data has been transferred, returns the offset of consumed data.
 *  <0 if no data has been transferred.
 */
int ring_buffer_read_page(struct ring_buffer *buffer,
			  void **data_page, size_t len, int cpu, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	struct ring_buffer_event *event;
	struct buffer_data_page *bpage;
	struct buffer_page *reader;
	unsigned long flags;
	unsigned int commit;
	unsigned int read;
	u64 save_timestamp;
	int ret = -1;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	/*
	 * If len is not big enough to hold the page header, then
	 * we can not copy anything.
	 */
	if (len <= BUF_PAGE_HDR_SIZE)
		goto out;

	len -= BUF_PAGE_HDR_SIZE;

	if (!data_page)
		goto out;

	bpage = *data_page;
	if (!bpage)
		goto out;

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	reader = rb_get_reader_page(cpu_buffer);
	if (!reader)
		goto out_unlock;

	event = rb_reader_event(cpu_buffer);

	read = reader->read;
	commit = rb_page_commit(reader);

	/*
	 * If this page has been partially read or
	 * if len is not big enough to read the rest of the page or
	 * a writer is still on the page, then
	 * we must copy the data from the page to the buffer.
	 * Otherwise, we can simply swap the page with the one passed in.
	 */
	if (read || (len < (commit - read)) ||
	    cpu_buffer->reader_page == cpu_buffer->commit_page) {
		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
		unsigned int rpos = read;
		unsigned int pos = 0;
		unsigned int size;

		if (full)
			goto out_unlock;

		if (len > (commit - read))
			len = (commit - read);

		size = rb_event_length(event);

		if (len < size)
			goto out_unlock;

		/* save the current timestamp, since the user will need it */
		save_timestamp = cpu_buffer->read_stamp;

		/* Need to copy one event at a time */
		do {
			memcpy(bpage->data + pos, rpage->data + rpos, size);

			len -= size;

			rb_advance_reader(cpu_buffer);
			rpos = reader->read;
			pos += size;

			event = rb_reader_event(cpu_buffer);
			size = rb_event_length(event);
		} while (len > size);

		/* update bpage */
		local_set(&bpage->commit, pos);
		bpage->time_stamp = save_timestamp;

		/* we copied everything to the beginning */
		read = 0;
	} else {
		/* update the entry counter */
		cpu_buffer->read += rb_page_entries(reader);

		/* swap the pages */
		rb_init_page(bpage);
		bpage = reader->page;
		reader->page = *data_page;
		local_set(&reader->write, 0);
		local_set(&reader->entries, 0);
		reader->read = 0;
		*data_page = bpage;
	}
	ret = read;

 out_unlock:
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

 out:
	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_page);
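
/*
 * Illustrative sketch (not part of the original file): the full
 * alloc/read/free cycle for page reads. ring_buffer_read_page() either
 * swaps the passed-in page with the buffer's reader page or copies into it,
 * so the caller must always pass the address of the variable returned by
 * ring_buffer_alloc_read_page(). example_handle_page() is a hypothetical
 * consumer of the filled page.
 */
#if 0	/* usage example only, not compiled */
static int example_read_one_page(struct ring_buffer *buffer, int cpu,
				 void (*example_handle_page)(void *page, int offset))
{
	void *rpage;
	int ret;

	rpage = ring_buffer_alloc_read_page(buffer);
	if (!rpage)
		return -ENOMEM;

	/* pass 0 for @full: accept a partially filled page as well */
	ret = ring_buffer_read_page(buffer, &rpage, PAGE_SIZE, cpu, 0);
	if (ret >= 0)
		example_handle_page(rpage, ret);

	ring_buffer_free_read_page(buffer, rpage);
	return ret;
}
#endif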

#ifdef CONFIG_TRACING
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	unsigned long *p = filp->private_data;
	char buf[64];
	int r;

	if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
		r = sprintf(buf, "permanently disabled\n");
	else
		r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	unsigned long *p = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	if (val)
		set_bit(RB_BUFFERS_ON_BIT, p);
	else
		clear_bit(RB_BUFFERS_ON_BIT, p);

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
};

static __init int rb_init_debugfs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();

	trace_create_file("tracing_on", 0644, d_tracer,
			  &ring_buffer_flags, &rb_simple_fops);

	return 0;
}

fs_initcall(rb_init_debugfs);
#endif

#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu)
{
	struct ring_buffer *buffer =
		container_of(self, struct ring_buffer, cpu_notify);
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (cpumask_test_cpu(cpu, buffer->cpumask))
			return NOTIFY_OK;

		buffer->buffers[cpu] =
			rb_allocate_cpu_buffer(buffer, cpu);
		if (!buffer->buffers[cpu]) {
			WARN(1, "failed to allocate ring buffer on CPU %ld\n",
			     cpu);
			return NOTIFY_OK;
		}
		smp_wmb();
		cpumask_set_cpu(cpu, buffer->cpumask);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		/*
		 * Do nothing.
		 *  If we were to free the buffer, then the user would
		 *  lose any trace that was in the buffer.
		 */
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
#endif