ring_buffer.c

  1. /*
  2. * Generic ring buffer
  3. *
  4. * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
  5. */
  6. #include <linux/ring_buffer.h>
  7. #include <linux/trace_clock.h>
  8. #include <linux/ftrace_irq.h>
  9. #include <linux/spinlock.h>
  10. #include <linux/debugfs.h>
  11. #include <linux/uaccess.h>
  12. #include <linux/hardirq.h>
  13. #include <linux/kmemcheck.h>
  14. #include <linux/module.h>
  15. #include <linux/percpu.h>
  16. #include <linux/mutex.h>
  17. #include <linux/init.h>
  18. #include <linux/hash.h>
  19. #include <linux/list.h>
  20. #include <linux/cpu.h>
  21. #include <linux/fs.h>
  22. #include <asm/local.h>
  23. #include "trace.h"
  24. /*
  25. * The ring buffer header is special. We must keep it up to date manually.
  26. */
  27. int ring_buffer_print_entry_header(struct trace_seq *s)
  28. {
  29. int ret;
  30. ret = trace_seq_printf(s, "# compressed entry header\n");
  31. ret = trace_seq_printf(s, "\ttype_len : 5 bits\n");
  32. ret = trace_seq_printf(s, "\ttime_delta : 27 bits\n");
  33. ret = trace_seq_printf(s, "\tarray : 32 bits\n");
  34. ret = trace_seq_printf(s, "\n");
  35. ret = trace_seq_printf(s, "\tpadding : type == %d\n",
  36. RINGBUF_TYPE_PADDING);
  37. ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
  38. RINGBUF_TYPE_TIME_EXTEND);
  39. ret = trace_seq_printf(s, "\tdata max type_len == %d\n",
  40. RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
  41. return ret;
  42. }
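/*
 * Illustrative sketch, not part of the original file: the compressed
 * header printed above corresponds to the event layout declared in
 * <linux/ring_buffer.h>, roughly:
 *
 *	struct ring_buffer_event {
 *		u32	type_len:5, time_delta:27;
 *		u32	array[];
 *	};
 *
 * A type_len of 0 means the payload length lives in array[0]; a
 * time_delta that does not fit in 27 bits is carried by a separate
 * RINGBUF_TYPE_TIME_EXTEND event.
 */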
  43. /*
  44. * The ring buffer is made up of a list of pages. A separate list of pages is
  45. * allocated for each CPU. A writer may only write to a buffer that is
  46. * associated with the CPU it is currently executing on. A reader may read
  47. * from any per cpu buffer.
  48. *
  49. * The reader is special. For each per cpu buffer, the reader has its own
  50. * reader page. When a reader has read the entire reader page, this reader
  51. * page is swapped with another page in the ring buffer.
  52. *
  53. * Now, as long as the writer is off the reader page, the reader can do
  54. * whatever it wants with that page. The writer will never write to that page
  55. * again (as long as it is out of the ring buffer).
  56. *
  57. * Here's some silly ASCII art.
  58. *
  59. *   +------+
  60. *   |reader|          RING BUFFER
  61. *   |page  |
  62. *   +------+        +---+   +---+   +---+
  63. *                   |   |-->|   |-->|   |
  64. *                   +---+   +---+   +---+
  65. *                     ^               |
  66. *                     |               |
  67. *                     +---------------+
  68. *
  69. *
  70. *   +------+
  71. *   |reader|          RING BUFFER
  72. *   |page  |------------------v
  73. *   +------+        +---+   +---+   +---+
  74. *                   |   |-->|   |-->|   |
  75. *                   +---+   +---+   +---+
  76. *                     ^               |
  77. *                     |               |
  78. *                     +---------------+
  79. *
  80. *
  81. *   +------+
  82. *   |reader|          RING BUFFER
  83. *   |page  |------------------v
  84. *   +------+        +---+   +---+   +---+
  85. *      ^            |   |-->|   |-->|   |
  86. *      |            +---+   +---+   +---+
  87. *      |                              |
  88. *      |                              |
  89. *      +------------------------------+
  90. *
  91. *
  92. *   +------+
  93. *   |buffer|          RING BUFFER
  94. *   |page  |------------------v
  95. *   +------+        +---+   +---+   +---+
  96. *      ^            |   |   |   |-->|   |
  97. *      |   New      +---+   +---+   +---+
  98. *      |  Reader------^               |
  99. *      |   page                       |
  100. *      +------------------------------+
  101. *
  102. *
  103. * After we make this swap, the reader can hand this page off to the splice
  104. * code and be done with it. It can even allocate a new page if it needs to
  105. * and swap that into the ring buffer.
  106. *
  107. * We will be using cmpxchg soon to make all this lockless.
  108. *
  109. */
  110. /*
  111. * A fast way to enable or disable all ring buffers is to
  112. * call tracing_on or tracing_off. Turning off the ring buffers
  113. * prevents all ring buffers from being recorded to.
  114. * Turning this switch on makes it OK to write to the
  115. * ring buffer, if the ring buffer is enabled itself.
  116. *
  117. * There are three layers that must be on in order to write
  118. * to the ring buffer.
  119. *
  120. * 1) This global flag must be set.
  121. * 2) The ring buffer must be enabled for recording.
  122. * 3) The per cpu buffer must be enabled for recording.
  123. *
  124. * In case of an anomaly, this global flag has a bit set that
  125. * will permanently disable all ring buffers.
  126. */
  127. /*
  128. * Global flag to disable all recording to ring buffers
  129. * This has two bits: ON, DISABLED
  130. *
  131. *  ON   DISABLED
  132. * ---- ----------
  133. *   0      0      : ring buffers are off
  134. *   1      0      : ring buffers are on
  135. *   X      1      : ring buffers are permanently disabled
  136. */
  137. enum {
  138. RB_BUFFERS_ON_BIT = 0,
  139. RB_BUFFERS_DISABLED_BIT = 1,
  140. };
  141. enum {
  142. RB_BUFFERS_ON = 1 << RB_BUFFERS_ON_BIT,
  143. RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT,
  144. };
  145. static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
  146. #define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
  147. /**
  148. * tracing_on - enable all tracing buffers
  149. *
  150. * This function enables all tracing buffers that may have been
  151. * disabled with tracing_off.
  152. */
  153. void tracing_on(void)
  154. {
  155. set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
  156. }
  157. EXPORT_SYMBOL_GPL(tracing_on);
  158. /**
  159. * tracing_off - turn off all tracing buffers
  160. *
  161. * This function stops all tracing buffers from recording data.
  162. * It does not disable any overhead the tracers themselves may
  163. * be causing. This function simply causes all recording to
  164. * the ring buffers to fail.
  165. */
  166. void tracing_off(void)
  167. {
  168. clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
  169. }
  170. EXPORT_SYMBOL_GPL(tracing_off);
  171. /**
  172. * tracing_off_permanent - permanently disable ring buffers
  173. *
  174. * This function, once called, will disable all ring buffers
  175. * permanently.
  176. */
  177. void tracing_off_permanent(void)
  178. {
  179. set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
  180. }
  181. /**
  182. * tracing_is_on - show state of ring buffers enabled
  183. */
  184. int tracing_is_on(void)
  185. {
  186. return ring_buffer_flags == RB_BUFFERS_ON;
  187. }
  188. EXPORT_SYMBOL_GPL(tracing_is_on);
  189. #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
  190. #define RB_ALIGNMENT 4U
  191. #define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
  192. #define RB_EVNT_MIN_SIZE 8U /* two 32-bit words */
  193. #if !defined(CONFIG_64BIT) || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
  194. # define RB_FORCE_8BYTE_ALIGNMENT 0
  195. # define RB_ARCH_ALIGNMENT RB_ALIGNMENT
  196. #else
  197. # define RB_FORCE_8BYTE_ALIGNMENT 1
  198. # define RB_ARCH_ALIGNMENT 8U
  199. #endif
  200. /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
  201. #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
  202. enum {
  203. RB_LEN_TIME_EXTEND = 8,
  204. RB_LEN_TIME_STAMP = 16,
  205. };
  206. static inline int rb_null_event(struct ring_buffer_event *event)
  207. {
  208. return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
  209. }
  210. static void rb_event_set_padding(struct ring_buffer_event *event)
  211. {
  212. /* padding has a NULL time_delta */
  213. event->type_len = RINGBUF_TYPE_PADDING;
  214. event->time_delta = 0;
  215. }
  216. static unsigned
  217. rb_event_data_length(struct ring_buffer_event *event)
  218. {
  219. unsigned length;
  220. if (event->type_len)
  221. length = event->type_len * RB_ALIGNMENT;
  222. else
  223. length = event->array[0];
  224. return length + RB_EVNT_HDR_SIZE;
  225. }
  226. /* inline for ring buffer fast paths */
  227. static unsigned
  228. rb_event_length(struct ring_buffer_event *event)
  229. {
  230. switch (event->type_len) {
  231. case RINGBUF_TYPE_PADDING:
  232. if (rb_null_event(event))
  233. /* undefined */
  234. return -1;
  235. return event->array[0] + RB_EVNT_HDR_SIZE;
  236. case RINGBUF_TYPE_TIME_EXTEND:
  237. return RB_LEN_TIME_EXTEND;
  238. case RINGBUF_TYPE_TIME_STAMP:
  239. return RB_LEN_TIME_STAMP;
  240. case RINGBUF_TYPE_DATA:
  241. return rb_event_data_length(event);
  242. default:
  243. BUG();
  244. }
  245. /* not hit */
  246. return 0;
  247. }
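/*
 * Illustrative sketch, not part of the original file (the real encoding is
 * done later by the reserve/update path): how a data event's payload length
 * is stored, ignoring RB_FORCE_8BYTE_ALIGNMENT.  Small payloads keep their
 * length in the 5-bit type_len field, in RB_ALIGNMENT units; larger ones set
 * type_len to 0 and put the byte count in array[0].
 */
static inline void rb_encode_data_length_sketch(struct ring_buffer_event *event,
						unsigned length)
{
	if (length <= RB_MAX_SMALL_DATA) {
		/* payload starts at array[0] */
		event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
	} else {
		/* payload starts at array[1] */
		event->type_len = 0;
		event->array[0] = length;
	}
}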
  248. /**
  249. * ring_buffer_event_length - return the length of the event
  250. * @event: the event to get the length of
  251. */
  252. unsigned ring_buffer_event_length(struct ring_buffer_event *event)
  253. {
  254. unsigned length = rb_event_length(event);
  255. if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
  256. return length;
  257. length -= RB_EVNT_HDR_SIZE;
  258. if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
  259. length -= sizeof(event->array[0]);
  260. return length;
  261. }
  262. EXPORT_SYMBOL_GPL(ring_buffer_event_length);
  263. /* inline for ring buffer fast paths */
  264. static void *
  265. rb_event_data(struct ring_buffer_event *event)
  266. {
  267. BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
  268. /* If length is in len field, then array[0] has the data */
  269. if (event->type_len)
  270. return (void *)&event->array[0];
  271. /* Otherwise length is in array[0] and array[1] has the data */
  272. return (void *)&event->array[1];
  273. }
  274. /**
  275. * ring_buffer_event_data - return the data of the event
  276. * @event: the event to get the data from
  277. */
  278. void *ring_buffer_event_data(struct ring_buffer_event *event)
  279. {
  280. return rb_event_data(event);
  281. }
  282. EXPORT_SYMBOL_GPL(ring_buffer_event_data);
  283. #define for_each_buffer_cpu(buffer, cpu) \
  284. for_each_cpu(cpu, buffer->cpumask)
  285. #define TS_SHIFT 27
  286. #define TS_MASK ((1ULL << TS_SHIFT) - 1)
  287. #define TS_DELTA_TEST (~TS_MASK)
  288. struct buffer_data_page {
  289. u64 time_stamp; /* page time stamp */
  290. local_t commit; /* write committed index */
  291. unsigned char data[]; /* data of buffer page */
  292. };
  293. /*
  294. * Note, the buffer_page list must be first. The buffer pages
  295. * are allocated in cache lines, which means that each buffer
  296. * page will be at the beginning of a cache line, and thus
  297. * the least significant bits will be zero. We use this to
  298. * add flags in the list struct pointers, to make the ring buffer
  299. * lockless.
  300. */
  301. struct buffer_page {
  302. struct list_head list; /* list of buffer pages */
  303. local_t write; /* index for next write */
  304. unsigned read; /* index for next read */
  305. local_t entries; /* entries on this page */
  306. struct buffer_data_page *page; /* Actual data page */
  307. };
  308. /*
  309. * The buffer page counters, write and entries, must be reset
  310. * atomically when crossing page boundaries. To synchronize this
  311. * update, two counters are inserted into the number. One is
  312. * the actual counter for the write position or count on the page.
  313. *
  314. * The other is a counter of updaters. Before an update happens
  315. * the update partition of the counter is incremented. This will
  316. * allow the updater to update the counter atomically.
  317. *
  318. * The counter is 20 bits, and the state data is 12.
  319. */
  320. #define RB_WRITE_MASK 0xfffff
  321. #define RB_WRITE_INTCNT (1 << 20)
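/*
 * Illustrative sketch, not part of the original file: how the split write
 * field described above decodes.  The low 20 bits are the write index on
 * the page; the bits above RB_WRITE_MASK count the nested updaters that
 * bumped the field with RB_WRITE_INTCNT.
 */
static inline unsigned long rb_write_index_sketch(unsigned long write)
{
	return write & RB_WRITE_MASK;	/* position within the page */
}

static inline unsigned long rb_write_updaters_sketch(unsigned long write)
{
	return write >> 20;		/* number of nested updaters */
}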
  322. static void rb_init_page(struct buffer_data_page *bpage)
  323. {
  324. local_set(&bpage->commit, 0);
  325. }
  326. /**
  327. * ring_buffer_page_len - the size of data on the page.
  328. * @page: The page to read
  329. *
  330. * Returns the amount of data on the page, including buffer page header.
  331. */
  332. size_t ring_buffer_page_len(void *page)
  333. {
  334. return local_read(&((struct buffer_data_page *)page)->commit)
  335. + BUF_PAGE_HDR_SIZE;
  336. }
  337. /*
  338. * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
  339. * this issue out.
  340. */
  341. static void free_buffer_page(struct buffer_page *bpage)
  342. {
  343. free_page((unsigned long)bpage->page);
  344. kfree(bpage);
  345. }
  346. /*
  347. * We need to fit the time_stamp delta into 27 bits.
  348. */
  349. static inline int test_time_stamp(u64 delta)
  350. {
  351. if (delta & TS_DELTA_TEST)
  352. return 1;
  353. return 0;
  354. }
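/*
 * Illustrative sketch, not part of the original file: when test_time_stamp()
 * reports that a delta does not fit in the 27-bit time_delta field, the
 * writer emits a RINGBUF_TYPE_TIME_EXTEND event instead, splitting the
 * delta across time_delta and array[0] roughly like this:
 */
static inline void rb_time_extend_sketch(struct ring_buffer_event *event,
					 u64 delta)
{
	event->type_len = RINGBUF_TYPE_TIME_EXTEND;
	event->time_delta = delta & TS_MASK;	/* low TS_SHIFT bits */
	event->array[0] = delta >> TS_SHIFT;	/* remaining high bits */
}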
  355. #define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
  356. /* Max payload is BUF_PAGE_SIZE - header (8 bytes) */
  357. #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
  358. /* Max number of timestamps that can fit on a page */
  359. #define RB_TIMESTAMPS_PER_PAGE (BUF_PAGE_SIZE / RB_LEN_TIME_STAMP)
  360. int ring_buffer_print_page_header(struct trace_seq *s)
  361. {
  362. struct buffer_data_page field;
  363. int ret;
  364. ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
  365. "offset:0;\tsize:%u;\tsigned:%u;\n",
  366. (unsigned int)sizeof(field.time_stamp),
  367. (unsigned int)is_signed_type(u64));
  368. ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
  369. "offset:%u;\tsize:%u;\tsigned:%u;\n",
  370. (unsigned int)offsetof(typeof(field), commit),
  371. (unsigned int)sizeof(field.commit),
  372. (unsigned int)is_signed_type(long));
  373. ret = trace_seq_printf(s, "\tfield: char data;\t"
  374. "offset:%u;\tsize:%u;\tsigned:%u;\n",
  375. (unsigned int)offsetof(typeof(field), data),
  376. (unsigned int)BUF_PAGE_SIZE,
  377. (unsigned int)is_signed_type(char));
  378. return ret;
  379. }
  380. /*
  381. * head_page == tail_page && head == tail then buffer is empty.
  382. */
  383. struct ring_buffer_per_cpu {
  384. int cpu;
  385. struct ring_buffer *buffer;
  386. spinlock_t reader_lock; /* serialize readers */
  387. arch_spinlock_t lock;
  388. struct lock_class_key lock_key;
  389. struct list_head *pages;
  390. struct buffer_page *head_page; /* read from head */
  391. struct buffer_page *tail_page; /* write to tail */
  392. struct buffer_page *commit_page; /* committed pages */
  393. struct buffer_page *reader_page;
  394. local_t commit_overrun;
  395. local_t overrun;
  396. local_t entries;
  397. local_t committing;
  398. local_t commits;
  399. unsigned long read;
  400. u64 write_stamp;
  401. u64 read_stamp;
  402. atomic_t record_disabled;
  403. };
  404. struct ring_buffer {
  405. unsigned pages;
  406. unsigned flags;
  407. int cpus;
  408. atomic_t record_disabled;
  409. cpumask_var_t cpumask;
  410. struct lock_class_key *reader_lock_key;
  411. struct mutex mutex;
  412. struct ring_buffer_per_cpu **buffers;
  413. #ifdef CONFIG_HOTPLUG_CPU
  414. struct notifier_block cpu_notify;
  415. #endif
  416. u64 (*clock)(void);
  417. };
  418. struct ring_buffer_iter {
  419. struct ring_buffer_per_cpu *cpu_buffer;
  420. unsigned long head;
  421. struct buffer_page *head_page;
  422. struct buffer_page *cache_reader_page;
  423. unsigned long cache_read;
  424. u64 read_stamp;
  425. };
  426. /* buffer may be either ring_buffer or ring_buffer_per_cpu */
  427. #define RB_WARN_ON(b, cond) \
  428. ({ \
  429. int _____ret = unlikely(cond); \
  430. if (_____ret) { \
  431. if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
  432. struct ring_buffer_per_cpu *__b = \
  433. (void *)b; \
  434. atomic_inc(&__b->buffer->record_disabled); \
  435. } else \
  436. atomic_inc(&b->record_disabled); \
  437. WARN_ON(1); \
  438. } \
  439. _____ret; \
  440. })
  441. /* Up this if you want to test the TIME_EXTENTS and normalization */
  442. #define DEBUG_SHIFT 0
  443. static inline u64 rb_time_stamp(struct ring_buffer *buffer)
  444. {
  445. /* shift to debug/test normalization and TIME_EXTENTS */
  446. return buffer->clock() << DEBUG_SHIFT;
  447. }
  448. u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
  449. {
  450. u64 time;
  451. preempt_disable_notrace();
  452. time = rb_time_stamp(buffer);
  453. preempt_enable_no_resched_notrace();
  454. return time;
  455. }
  456. EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
  457. void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
  458. int cpu, u64 *ts)
  459. {
  460. /* Just stupid testing the normalize function and deltas */
  461. *ts >>= DEBUG_SHIFT;
  462. }
  463. EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
  464. /*
  465. * Making the ring buffer lockless makes things tricky.
  466. * Writes only happen on the CPU that they are on, so writers
  467. * only need to worry about interrupts. Reads, however, can
  468. * happen on any CPU.
  469. *
  470. * The reader page is always off the ring buffer, but when the
  471. * reader finishes with a page, it needs to swap its page with
  472. * a new one from the buffer. The reader needs to take from
  473. * the head (writes go to the tail). But if a writer is in overwrite
  474. * mode and wraps, it must push the head page forward.
  475. *
  476. * Here lies the problem.
  477. *
  478. * The reader must be careful to replace only the head page, and
  479. * not another one. As described at the top of the file in the
  480. * ASCII art, the reader sets its old page to point to the next
  481. * page after head. It then sets the page after head to point to
  482. * the old reader page. But if the writer moves the head page
  483. * during this operation, the reader could end up with the tail.
  484. *
  485. * We use cmpxchg to help prevent this race. We also do something
  486. * special with the page before head. We set the LSB to 1.
  487. *
  488. * When the writer must push the page forward, it will clear the
  489. * bit that points to the head page, move the head, and then set
  490. * the bit that points to the new head page.
  491. *
  492. * We also don't want an interrupt coming in and moving the head
  493. * page on another writer. Thus we use the second LSB to catch
  494. * that as well:
  495. *
  496. * head->list->prev->next        bit 1          bit 0
  497. * ----------------------        -----          -----
  498. * Normal page                     0              0
  499. * Points to head page             0              1
  500. * New head page                   1              0
  501. *
  502. * Note we can not trust the prev pointer of the head page, because:
  503. *
  504. *  +----+       +-----+        +-----+
  505. *  |    |------>|  T  |---X--->|  N  |
  506. *  |    |<------|     |        |     |
  507. *  +----+       +-----+        +-----+
  508. *     ^                           ^ |
  509. *     |          +-----+          | |
  510. *     +----------|  R  |----------+ |
  511. *                |     |<-----------+
  512. *                +-----+
  513. *
  514. * Key: ---X--> HEAD flag set in pointer
  515. * T Tail page
  516. * R Reader page
  517. * N Next page
  518. *
  519. * (see __rb_reserve_next() to see where this happens)
  520. *
  521. * What the above shows is that the reader just swapped out
  522. * the reader page with a page in the buffer, but before it
  523. * could make the new header point back to the new page added
  524. * it was preempted by a writer. The writer moved forward onto
  525. * the new page added by the reader and is about to move forward
  526. * again.
  527. *
  528. * As you can see, it is legitimate for the previous pointer of
  529. * the head (or any page) not to point back to itself, but only
  530. * temporarily.
  531. */
  532. #define RB_PAGE_NORMAL 0UL
  533. #define RB_PAGE_HEAD 1UL
  534. #define RB_PAGE_UPDATE 2UL
  535. #define RB_FLAG_MASK 3UL
  536. /* PAGE_MOVED is not part of the mask */
  537. #define RB_PAGE_MOVED 4UL
  538. /*
  539. * rb_list_head - remove any bit
  540. */
  541. static struct list_head *rb_list_head(struct list_head *list)
  542. {
  543. unsigned long val = (unsigned long)list;
  544. return (struct list_head *)(val & ~RB_FLAG_MASK);
  545. }
  546. /*
  547. * rb_is_head_page - test if the given page is the head page
  548. *
  549. * Because the reader may move the head_page pointer, we cannot
  550. * trust what the head page is (it may be pointing to
  551. * the reader page). But if the next page is the head page,
  552. * its flags will be non-zero.
  553. */
  554. static inline int
  555. rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
  556. struct buffer_page *page, struct list_head *list)
  557. {
  558. unsigned long val;
  559. val = (unsigned long)list->next;
  560. if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
  561. return RB_PAGE_MOVED;
  562. return val & RB_FLAG_MASK;
  563. }
  564. /*
  565. * rb_is_reader_page
  566. *
  567. * The unique thing about the reader page is that, if the
  568. * writer is ever on it, the previous pointer never points
  569. * back to the reader page.
  570. */
  571. static int rb_is_reader_page(struct buffer_page *page)
  572. {
  573. struct list_head *list = page->list.prev;
  574. return rb_list_head(list->next) != &page->list;
  575. }
  576. /*
  577. * rb_set_list_to_head - set a list_head to be pointing to head.
  578. */
  579. static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
  580. struct list_head *list)
  581. {
  582. unsigned long *ptr;
  583. ptr = (unsigned long *)&list->next;
  584. *ptr |= RB_PAGE_HEAD;
  585. *ptr &= ~RB_PAGE_UPDATE;
  586. }
  587. /*
  588. * rb_head_page_activate - sets up head page
  589. */
  590. static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
  591. {
  592. struct buffer_page *head;
  593. head = cpu_buffer->head_page;
  594. if (!head)
  595. return;
  596. /*
  597. * Set the previous list pointer to have the HEAD flag.
  598. */
  599. rb_set_list_to_head(cpu_buffer, head->list.prev);
  600. }
  601. static void rb_list_head_clear(struct list_head *list)
  602. {
  603. unsigned long *ptr = (unsigned long *)&list->next;
  604. *ptr &= ~RB_FLAG_MASK;
  605. }
  606. /*
  607. * rb_head_page_deactivate - clears head page ptr (for free list)
  608. */
  609. static void
  610. rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
  611. {
  612. struct list_head *hd;
  613. /* Go through the whole list and clear any pointers found. */
  614. rb_list_head_clear(cpu_buffer->pages);
  615. list_for_each(hd, cpu_buffer->pages)
  616. rb_list_head_clear(hd);
  617. }
  618. static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
  619. struct buffer_page *head,
  620. struct buffer_page *prev,
  621. int old_flag, int new_flag)
  622. {
  623. struct list_head *list;
  624. unsigned long val = (unsigned long)&head->list;
  625. unsigned long ret;
  626. list = &prev->list;
  627. val &= ~RB_FLAG_MASK;
  628. ret = cmpxchg((unsigned long *)&list->next,
  629. val | old_flag, val | new_flag);
  630. /* check if the reader took the page */
  631. if ((ret & ~RB_FLAG_MASK) != val)
  632. return RB_PAGE_MOVED;
  633. return ret & RB_FLAG_MASK;
  634. }
  635. static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
  636. struct buffer_page *head,
  637. struct buffer_page *prev,
  638. int old_flag)
  639. {
  640. return rb_head_page_set(cpu_buffer, head, prev,
  641. old_flag, RB_PAGE_UPDATE);
  642. }
  643. static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
  644. struct buffer_page *head,
  645. struct buffer_page *prev,
  646. int old_flag)
  647. {
  648. return rb_head_page_set(cpu_buffer, head, prev,
  649. old_flag, RB_PAGE_HEAD);
  650. }
  651. static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
  652. struct buffer_page *head,
  653. struct buffer_page *prev,
  654. int old_flag)
  655. {
  656. return rb_head_page_set(cpu_buffer, head, prev,
  657. old_flag, RB_PAGE_NORMAL);
  658. }
  659. static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
  660. struct buffer_page **bpage)
  661. {
  662. struct list_head *p = rb_list_head((*bpage)->list.next);
  663. *bpage = list_entry(p, struct buffer_page, list);
  664. }
  665. static struct buffer_page *
  666. rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
  667. {
  668. struct buffer_page *head;
  669. struct buffer_page *page;
  670. struct list_head *list;
  671. int i;
  672. if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
  673. return NULL;
  674. /* sanity check */
  675. list = cpu_buffer->pages;
  676. if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
  677. return NULL;
  678. page = head = cpu_buffer->head_page;
  679. /*
  680. * It is possible that the writer moves the header behind
  681. * where we started, and we miss in one loop.
  682. * A second loop should grab the header, but we'll do
  683. * three loops just because I'm paranoid.
  684. */
  685. for (i = 0; i < 3; i++) {
  686. do {
  687. if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
  688. cpu_buffer->head_page = page;
  689. return page;
  690. }
  691. rb_inc_page(cpu_buffer, &page);
  692. } while (page != head);
  693. }
  694. RB_WARN_ON(cpu_buffer, 1);
  695. return NULL;
  696. }
  697. static int rb_head_page_replace(struct buffer_page *old,
  698. struct buffer_page *new)
  699. {
  700. unsigned long *ptr = (unsigned long *)&old->list.prev->next;
  701. unsigned long val;
  702. unsigned long ret;
  703. val = *ptr & ~RB_FLAG_MASK;
  704. val |= RB_PAGE_HEAD;
  705. ret = cmpxchg(ptr, val, (unsigned long)&new->list);
  706. return ret == val;
  707. }
  708. /*
  709. * rb_tail_page_update - move the tail page forward
  710. *
  711. * Returns 1 if moved tail page, 0 if someone else did.
  712. */
  713. static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
  714. struct buffer_page *tail_page,
  715. struct buffer_page *next_page)
  716. {
  717. struct buffer_page *old_tail;
  718. unsigned long old_entries;
  719. unsigned long old_write;
  720. int ret = 0;
  721. /*
  722. * The tail page now needs to be moved forward.
  723. *
  724. * We need to reset the tail page, but without messing
  725. * with possible erasing of data brought in by interrupts
  726. * that have moved the tail page and are currently on it.
  727. *
  728. * We add a counter to the write field to denote this.
  729. */
  730. old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
  731. old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
  732. /*
  733. * Just make sure we have seen our old_write and synchronize
  734. * with any interrupts that come in.
  735. */
  736. barrier();
  737. /*
  738. * If the tail page is still the same as what we think
  739. * it is, then it is up to us to update the tail
  740. * pointer.
  741. */
  742. if (tail_page == cpu_buffer->tail_page) {
  743. /* Zero the write counter */
  744. unsigned long val = old_write & ~RB_WRITE_MASK;
  745. unsigned long eval = old_entries & ~RB_WRITE_MASK;
  746. /*
  747. * This will only succeed if an interrupt did
  748. * not come in and change it; in that case, we
  749. * do not want to modify it.
  750. *
  751. * We add (void) to let the compiler know that we do not care
  752. * about the return value of these functions. We use the
  753. * cmpxchg to only update if an interrupt did not already
  754. * do it for us. If the cmpxchg fails, we don't care.
  755. */
  756. (void)local_cmpxchg(&next_page->write, old_write, val);
  757. (void)local_cmpxchg(&next_page->entries, old_entries, eval);
  758. /*
  759. * No need to worry about races with clearing out the commit:
  760. * it can only increment when a commit takes place, and that
  761. * only happens in the outermost nested commit.
  762. */
  763. local_set(&next_page->page->commit, 0);
  764. old_tail = cmpxchg(&cpu_buffer->tail_page,
  765. tail_page, next_page);
  766. if (old_tail == tail_page)
  767. ret = 1;
  768. }
  769. return ret;
  770. }
  771. static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
  772. struct buffer_page *bpage)
  773. {
  774. unsigned long val = (unsigned long)bpage;
  775. if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
  776. return 1;
  777. return 0;
  778. }
  779. /**
  780. * rb_check_list - make sure a pointer to a list has the last bits zero
  781. */
  782. static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
  783. struct list_head *list)
  784. {
  785. if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
  786. return 1;
  787. if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
  788. return 1;
  789. return 0;
  790. }
  791. /**
  792. * rb_check_pages - integrity check of buffer pages
  793. * @cpu_buffer: CPU buffer with pages to test
  794. *
  795. * As a safety measure we check to make sure the data pages have not
  796. * been corrupted.
  797. */
  798. static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
  799. {
  800. struct list_head *head = cpu_buffer->pages;
  801. struct buffer_page *bpage, *tmp;
  802. rb_head_page_deactivate(cpu_buffer);
  803. if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
  804. return -1;
  805. if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
  806. return -1;
  807. if (rb_check_list(cpu_buffer, head))
  808. return -1;
  809. list_for_each_entry_safe(bpage, tmp, head, list) {
  810. if (RB_WARN_ON(cpu_buffer,
  811. bpage->list.next->prev != &bpage->list))
  812. return -1;
  813. if (RB_WARN_ON(cpu_buffer,
  814. bpage->list.prev->next != &bpage->list))
  815. return -1;
  816. if (rb_check_list(cpu_buffer, &bpage->list))
  817. return -1;
  818. }
  819. rb_head_page_activate(cpu_buffer);
  820. return 0;
  821. }
  822. static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
  823. unsigned nr_pages)
  824. {
  825. struct buffer_page *bpage, *tmp;
  826. unsigned long addr;
  827. LIST_HEAD(pages);
  828. unsigned i;
  829. WARN_ON(!nr_pages);
  830. for (i = 0; i < nr_pages; i++) {
  831. bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
  832. GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
  833. if (!bpage)
  834. goto free_pages;
  835. rb_check_bpage(cpu_buffer, bpage);
  836. list_add(&bpage->list, &pages);
  837. addr = __get_free_page(GFP_KERNEL);
  838. if (!addr)
  839. goto free_pages;
  840. bpage->page = (void *)addr;
  841. rb_init_page(bpage->page);
  842. }
  843. /*
  844. * The ring buffer page list is a circular list that does not
  845. * start and end with a list head. All page list items point to
  846. * other pages.
  847. */
  848. cpu_buffer->pages = pages.next;
  849. list_del(&pages);
  850. rb_check_pages(cpu_buffer);
  851. return 0;
  852. free_pages:
  853. list_for_each_entry_safe(bpage, tmp, &pages, list) {
  854. list_del_init(&bpage->list);
  855. free_buffer_page(bpage);
  856. }
  857. return -ENOMEM;
  858. }
  859. static struct ring_buffer_per_cpu *
  860. rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
  861. {
  862. struct ring_buffer_per_cpu *cpu_buffer;
  863. struct buffer_page *bpage;
  864. unsigned long addr;
  865. int ret;
  866. cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
  867. GFP_KERNEL, cpu_to_node(cpu));
  868. if (!cpu_buffer)
  869. return NULL;
  870. cpu_buffer->cpu = cpu;
  871. cpu_buffer->buffer = buffer;
  872. spin_lock_init(&cpu_buffer->reader_lock);
  873. lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
  874. cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
  875. bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
  876. GFP_KERNEL, cpu_to_node(cpu));
  877. if (!bpage)
  878. goto fail_free_buffer;
  879. rb_check_bpage(cpu_buffer, bpage);
  880. cpu_buffer->reader_page = bpage;
  881. addr = __get_free_page(GFP_KERNEL);
  882. if (!addr)
  883. goto fail_free_reader;
  884. bpage->page = (void *)addr;
  885. rb_init_page(bpage->page);
  886. INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
  887. ret = rb_allocate_pages(cpu_buffer, buffer->pages);
  888. if (ret < 0)
  889. goto fail_free_reader;
  890. cpu_buffer->head_page
  891. = list_entry(cpu_buffer->pages, struct buffer_page, list);
  892. cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
  893. rb_head_page_activate(cpu_buffer);
  894. return cpu_buffer;
  895. fail_free_reader:
  896. free_buffer_page(cpu_buffer->reader_page);
  897. fail_free_buffer:
  898. kfree(cpu_buffer);
  899. return NULL;
  900. }
  901. static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
  902. {
  903. struct list_head *head = cpu_buffer->pages;
  904. struct buffer_page *bpage, *tmp;
  905. free_buffer_page(cpu_buffer->reader_page);
  906. rb_head_page_deactivate(cpu_buffer);
  907. if (head) {
  908. list_for_each_entry_safe(bpage, tmp, head, list) {
  909. list_del_init(&bpage->list);
  910. free_buffer_page(bpage);
  911. }
  912. bpage = list_entry(head, struct buffer_page, list);
  913. free_buffer_page(bpage);
  914. }
  915. kfree(cpu_buffer);
  916. }
  917. #ifdef CONFIG_HOTPLUG_CPU
  918. static int rb_cpu_notify(struct notifier_block *self,
  919. unsigned long action, void *hcpu);
  920. #endif
  921. /**
  922. * ring_buffer_alloc - allocate a new ring_buffer
  923. * @size: the size in bytes per cpu that is needed.
  924. * @flags: attributes to set for the ring buffer.
  925. *
  926. * Currently the only flag that is available is the RB_FL_OVERWRITE
  927. * flag. This flag means that the buffer will overwrite old data
  928. * when the buffer wraps. If this flag is not set, the buffer will
  929. * drop data when the tail hits the head.
  930. */
  931. struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
  932. struct lock_class_key *key)
  933. {
  934. struct ring_buffer *buffer;
  935. int bsize;
  936. int cpu;
  937. /* keep it in its own cache line */
  938. buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
  939. GFP_KERNEL);
  940. if (!buffer)
  941. return NULL;
  942. if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
  943. goto fail_free_buffer;
  944. buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
  945. buffer->flags = flags;
  946. buffer->clock = trace_clock_local;
  947. buffer->reader_lock_key = key;
  948. /* need at least two pages */
  949. if (buffer->pages < 2)
  950. buffer->pages = 2;
  951. /*
  952. * In the case of a non-hotplug cpu, if the ring-buffer is allocated
  953. * in an early initcall, it will not be notified of secondary cpus,
  954. * so we need to allocate for all possible cpus.
  955. */
  956. #ifdef CONFIG_HOTPLUG_CPU
  957. get_online_cpus();
  958. cpumask_copy(buffer->cpumask, cpu_online_mask);
  959. #else
  960. cpumask_copy(buffer->cpumask, cpu_possible_mask);
  961. #endif
  962. buffer->cpus = nr_cpu_ids;
  963. bsize = sizeof(void *) * nr_cpu_ids;
  964. buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
  965. GFP_KERNEL);
  966. if (!buffer->buffers)
  967. goto fail_free_cpumask;
  968. for_each_buffer_cpu(buffer, cpu) {
  969. buffer->buffers[cpu] =
  970. rb_allocate_cpu_buffer(buffer, cpu);
  971. if (!buffer->buffers[cpu])
  972. goto fail_free_buffers;
  973. }
  974. #ifdef CONFIG_HOTPLUG_CPU
  975. buffer->cpu_notify.notifier_call = rb_cpu_notify;
  976. buffer->cpu_notify.priority = 0;
  977. register_cpu_notifier(&buffer->cpu_notify);
  978. #endif
  979. put_online_cpus();
  980. mutex_init(&buffer->mutex);
  981. return buffer;
  982. fail_free_buffers:
  983. for_each_buffer_cpu(buffer, cpu) {
  984. if (buffer->buffers[cpu])
  985. rb_free_cpu_buffer(buffer->buffers[cpu]);
  986. }
  987. kfree(buffer->buffers);
  988. fail_free_cpumask:
  989. free_cpumask_var(buffer->cpumask);
  990. put_online_cpus();
  991. fail_free_buffer:
  992. kfree(buffer);
  993. return NULL;
  994. }
  995. EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
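/*
 * Illustrative usage sketch, not part of the original file: callers use
 * the ring_buffer_alloc() wrapper from <linux/ring_buffer.h>, which
 * supplies the lock_class_key for them, and later release the buffer
 * with ring_buffer_free().
 */
static struct ring_buffer *rb_alloc_usage_sketch(void)
{
	/* one megabyte per cpu, overwrite the oldest data when full */
	return ring_buffer_alloc(1024 * 1024, RB_FL_OVERWRITE);
}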
  996. /**
  997. * ring_buffer_free - free a ring buffer.
  998. * @buffer: the buffer to free.
  999. */
  1000. void
  1001. ring_buffer_free(struct ring_buffer *buffer)
  1002. {
  1003. int cpu;
  1004. get_online_cpus();
  1005. #ifdef CONFIG_HOTPLUG_CPU
  1006. unregister_cpu_notifier(&buffer->cpu_notify);
  1007. #endif
  1008. for_each_buffer_cpu(buffer, cpu)
  1009. rb_free_cpu_buffer(buffer->buffers[cpu]);
  1010. put_online_cpus();
  1011. kfree(buffer->buffers);
  1012. free_cpumask_var(buffer->cpumask);
  1013. kfree(buffer);
  1014. }
  1015. EXPORT_SYMBOL_GPL(ring_buffer_free);
  1016. void ring_buffer_set_clock(struct ring_buffer *buffer,
  1017. u64 (*clock)(void))
  1018. {
  1019. buffer->clock = clock;
  1020. }
  1021. static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
  1022. static void
  1023. rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
  1024. {
  1025. struct buffer_page *bpage;
  1026. struct list_head *p;
  1027. unsigned i;
  1028. spin_lock_irq(&cpu_buffer->reader_lock);
  1029. rb_head_page_deactivate(cpu_buffer);
  1030. for (i = 0; i < nr_pages; i++) {
  1031. if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
  1032. return;
  1033. p = cpu_buffer->pages->next;
  1034. bpage = list_entry(p, struct buffer_page, list);
  1035. list_del_init(&bpage->list);
  1036. free_buffer_page(bpage);
  1037. }
  1038. if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
  1039. return;
  1040. rb_reset_cpu(cpu_buffer);
  1041. rb_check_pages(cpu_buffer);
  1042. spin_unlock_irq(&cpu_buffer->reader_lock);
  1043. }
  1044. static void
  1045. rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
  1046. struct list_head *pages, unsigned nr_pages)
  1047. {
  1048. struct buffer_page *bpage;
  1049. struct list_head *p;
  1050. unsigned i;
  1051. spin_lock_irq(&cpu_buffer->reader_lock);
  1052. rb_head_page_deactivate(cpu_buffer);
  1053. for (i = 0; i < nr_pages; i++) {
  1054. if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
  1055. return;
  1056. p = pages->next;
  1057. bpage = list_entry(p, struct buffer_page, list);
  1058. list_del_init(&bpage->list);
  1059. list_add_tail(&bpage->list, cpu_buffer->pages);
  1060. }
  1061. rb_reset_cpu(cpu_buffer);
  1062. rb_check_pages(cpu_buffer);
  1063. spin_unlock_irq(&cpu_buffer->reader_lock);
  1064. }
  1065. /**
  1066. * ring_buffer_resize - resize the ring buffer
  1067. * @buffer: the buffer to resize.
  1068. * @size: the new size.
  1069. *
  1070. * Minimum size is 2 * BUF_PAGE_SIZE.
  1071. *
  1072. * Returns -1 on failure.
  1073. */
  1074. int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
  1075. {
  1076. struct ring_buffer_per_cpu *cpu_buffer;
  1077. unsigned nr_pages, rm_pages, new_pages;
  1078. struct buffer_page *bpage, *tmp;
  1079. unsigned long buffer_size;
  1080. unsigned long addr;
  1081. LIST_HEAD(pages);
  1082. int i, cpu;
  1083. /*
  1084. * Always succeed at resizing a non-existent buffer:
  1085. */
  1086. if (!buffer)
  1087. return size;
  1088. size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
  1089. size *= BUF_PAGE_SIZE;
  1090. buffer_size = buffer->pages * BUF_PAGE_SIZE;
  1091. /* we need a minimum of two pages */
  1092. if (size < BUF_PAGE_SIZE * 2)
  1093. size = BUF_PAGE_SIZE * 2;
  1094. if (size == buffer_size)
  1095. return size;
  1096. atomic_inc(&buffer->record_disabled);
  1097. /* Make sure all writers are done with this buffer. */
  1098. synchronize_sched();
  1099. mutex_lock(&buffer->mutex);
  1100. get_online_cpus();
  1101. nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
  1102. if (size < buffer_size) {
  1103. /* easy case, just free pages */
  1104. if (RB_WARN_ON(buffer, nr_pages >= buffer->pages))
  1105. goto out_fail;
  1106. rm_pages = buffer->pages - nr_pages;
  1107. for_each_buffer_cpu(buffer, cpu) {
  1108. cpu_buffer = buffer->buffers[cpu];
  1109. rb_remove_pages(cpu_buffer, rm_pages);
  1110. }
  1111. goto out;
  1112. }
  1113. /*
  1114. * This is a bit more difficult. We only want to add pages
  1115. * when we can allocate enough for all CPUs. We do this
  1116. * by allocating all the pages and storing them on a local
  1117. * linked list. If we succeed in our allocation, then we
  1118. * add these pages to the cpu_buffers. Otherwise we just free
  1119. * them all and return -ENOMEM.
  1120. */
  1121. if (RB_WARN_ON(buffer, nr_pages <= buffer->pages))
  1122. goto out_fail;
  1123. new_pages = nr_pages - buffer->pages;
  1124. for_each_buffer_cpu(buffer, cpu) {
  1125. for (i = 0; i < new_pages; i++) {
  1126. bpage = kzalloc_node(ALIGN(sizeof(*bpage),
  1127. cache_line_size()),
  1128. GFP_KERNEL, cpu_to_node(cpu));
  1129. if (!bpage)
  1130. goto free_pages;
  1131. list_add(&bpage->list, &pages);
  1132. addr = __get_free_page(GFP_KERNEL);
  1133. if (!addr)
  1134. goto free_pages;
  1135. bpage->page = (void *)addr;
  1136. rb_init_page(bpage->page);
  1137. }
  1138. }
  1139. for_each_buffer_cpu(buffer, cpu) {
  1140. cpu_buffer = buffer->buffers[cpu];
  1141. rb_insert_pages(cpu_buffer, &pages, new_pages);
  1142. }
  1143. if (RB_WARN_ON(buffer, !list_empty(&pages)))
  1144. goto out_fail;
  1145. out:
  1146. buffer->pages = nr_pages;
  1147. put_online_cpus();
  1148. mutex_unlock(&buffer->mutex);
  1149. atomic_dec(&buffer->record_disabled);
  1150. return size;
  1151. free_pages:
  1152. list_for_each_entry_safe(bpage, tmp, &pages, list) {
  1153. list_del_init(&bpage->list);
  1154. free_buffer_page(bpage);
  1155. }
  1156. put_online_cpus();
  1157. mutex_unlock(&buffer->mutex);
  1158. atomic_dec(&buffer->record_disabled);
  1159. return -ENOMEM;
  1160. /*
  1161. * Something went totally wrong, and we are too paranoid
  1162. * to even clean up the mess.
  1163. */
  1164. out_fail:
  1165. put_online_cpus();
  1166. mutex_unlock(&buffer->mutex);
  1167. atomic_dec(&buffer->record_disabled);
  1168. return -1;
  1169. }
  1170. EXPORT_SYMBOL_GPL(ring_buffer_resize);
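/*
 * Illustrative usage sketch, not part of the original file: the resize
 * request is rounded up to whole buffer pages and the new size in bytes
 * is returned on success, so callers only need to check for a negative
 * return value.
 */
static int rb_resize_usage_sketch(struct ring_buffer *buffer,
				  unsigned long new_size)
{
	int ret = ring_buffer_resize(buffer, new_size);

	return ret < 0 ? ret : 0;
}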
  1171. static inline void *
  1172. __rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
  1173. {
  1174. return bpage->data + index;
  1175. }
  1176. static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
  1177. {
  1178. return bpage->page->data + index;
  1179. }
  1180. static inline struct ring_buffer_event *
  1181. rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
  1182. {
  1183. return __rb_page_index(cpu_buffer->reader_page,
  1184. cpu_buffer->reader_page->read);
  1185. }
  1186. static inline struct ring_buffer_event *
  1187. rb_iter_head_event(struct ring_buffer_iter *iter)
  1188. {
  1189. return __rb_page_index(iter->head_page, iter->head);
  1190. }
  1191. static inline unsigned long rb_page_write(struct buffer_page *bpage)
  1192. {
  1193. return local_read(&bpage->write) & RB_WRITE_MASK;
  1194. }
  1195. static inline unsigned rb_page_commit(struct buffer_page *bpage)
  1196. {
  1197. return local_read(&bpage->page->commit);
  1198. }
  1199. static inline unsigned long rb_page_entries(struct buffer_page *bpage)
  1200. {
  1201. return local_read(&bpage->entries) & RB_WRITE_MASK;
  1202. }
1203. /* Size is determined by what has been committed */
  1204. static inline unsigned rb_page_size(struct buffer_page *bpage)
  1205. {
  1206. return rb_page_commit(bpage);
  1207. }
  1208. static inline unsigned
  1209. rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
  1210. {
  1211. return rb_page_commit(cpu_buffer->commit_page);
  1212. }
  1213. static inline unsigned
  1214. rb_event_index(struct ring_buffer_event *event)
  1215. {
  1216. unsigned long addr = (unsigned long)event;
  1217. return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
  1218. }
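/*
 * Note (added for clarity): rb_event_index() is the offset of an event
 * within the data area of its page.  For example, an event that starts
 * BUF_PAGE_HDR_SIZE + 0x20 bytes into its page has index 0x20, no
 * matter which page of the buffer it lives on.
 */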
  1219. static inline int
  1220. rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
  1221. struct ring_buffer_event *event)
  1222. {
  1223. unsigned long addr = (unsigned long)event;
  1224. unsigned long index;
  1225. index = rb_event_index(event);
  1226. addr &= PAGE_MASK;
  1227. return cpu_buffer->commit_page->page == (void *)addr &&
  1228. rb_commit_index(cpu_buffer) == index;
  1229. }
  1230. static void
  1231. rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
  1232. {
  1233. unsigned long max_count;
  1234. /*
  1235. * We only race with interrupts and NMIs on this CPU.
  1236. * If we own the commit event, then we can commit
  1237. * all others that interrupted us, since the interruptions
  1238. * are in stack format (they finish before they come
  1239. * back to us). This allows us to do a simple loop to
  1240. * assign the commit to the tail.
  1241. */
  1242. again:
  1243. max_count = cpu_buffer->buffer->pages * 100;
  1244. while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
  1245. if (RB_WARN_ON(cpu_buffer, !(--max_count)))
  1246. return;
  1247. if (RB_WARN_ON(cpu_buffer,
  1248. rb_is_reader_page(cpu_buffer->tail_page)))
  1249. return;
  1250. local_set(&cpu_buffer->commit_page->page->commit,
  1251. rb_page_write(cpu_buffer->commit_page));
  1252. rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
  1253. cpu_buffer->write_stamp =
  1254. cpu_buffer->commit_page->page->time_stamp;
  1255. /* add barrier to keep gcc from optimizing too much */
  1256. barrier();
  1257. }
  1258. while (rb_commit_index(cpu_buffer) !=
  1259. rb_page_write(cpu_buffer->commit_page)) {
  1260. local_set(&cpu_buffer->commit_page->page->commit,
  1261. rb_page_write(cpu_buffer->commit_page));
  1262. RB_WARN_ON(cpu_buffer,
  1263. local_read(&cpu_buffer->commit_page->page->commit) &
  1264. ~RB_WRITE_MASK);
  1265. barrier();
  1266. }
  1267. /* again, keep gcc from optimizing */
  1268. barrier();
  1269. /*
  1270. * If an interrupt came in just after the first while loop
  1271. * and pushed the tail page forward, we will be left with
  1272. * a dangling commit that will never go forward.
  1273. */
  1274. if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
  1275. goto again;
  1276. }
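/*
 * Note (added for clarity): the max_count guard above bounds the walk to
 * roughly 100 trips around the buffer.  If the commit page still has not
 * caught up with the tail page by then, the state is assumed corrupted
 * and RB_WARN_ON() fires instead of spinning forever.
 */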
  1277. static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
  1278. {
  1279. cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
  1280. cpu_buffer->reader_page->read = 0;
  1281. }
  1282. static void rb_inc_iter(struct ring_buffer_iter *iter)
  1283. {
  1284. struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
  1285. /*
  1286. * The iterator could be on the reader page (it starts there).
  1287. * But the head could have moved, since the reader was
  1288. * found. Check for this case and assign the iterator
  1289. * to the head page instead of next.
  1290. */
  1291. if (iter->head_page == cpu_buffer->reader_page)
  1292. iter->head_page = rb_set_head_page(cpu_buffer);
  1293. else
  1294. rb_inc_page(cpu_buffer, &iter->head_page);
  1295. iter->read_stamp = iter->head_page->page->time_stamp;
  1296. iter->head = 0;
  1297. }
  1298. /**
  1299. * ring_buffer_update_event - update event type and data
1300. * @event: the event to update
  1301. * @type: the type of event
  1302. * @length: the size of the event field in the ring buffer
  1303. *
  1304. * Update the type and data fields of the event. The length
  1305. * is the actual size that is written to the ring buffer,
  1306. * and with this, we can determine what to place into the
  1307. * data field.
  1308. */
  1309. static void
  1310. rb_update_event(struct ring_buffer_event *event,
  1311. unsigned type, unsigned length)
  1312. {
  1313. event->type_len = type;
  1314. switch (type) {
  1315. case RINGBUF_TYPE_PADDING:
  1316. case RINGBUF_TYPE_TIME_EXTEND:
  1317. case RINGBUF_TYPE_TIME_STAMP:
  1318. break;
  1319. case 0:
  1320. length -= RB_EVNT_HDR_SIZE;
  1321. if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
  1322. event->array[0] = length;
  1323. else
  1324. event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
  1325. break;
  1326. default:
  1327. BUG();
  1328. }
  1329. }
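/*
 * Illustrative note (added; assumes RB_ALIGNMENT == 4 and that 8-byte
 * alignment is not forced): for a data event (type 0) whose payload fits
 * in RB_MAX_SMALL_DATA, the payload length is folded into type_len
 * rather than using array[0].  A 12-byte payload is stored as
 * type_len = DIV_ROUND_UP(12, 4) = 3, and the reader recovers the length
 * as type_len * RB_ALIGNMENT.  Larger payloads keep type_len == 0 and
 * put the length in array[0].
 */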
  1330. /*
  1331. * rb_handle_head_page - writer hit the head page
  1332. *
  1333. * Returns: +1 to retry page
  1334. * 0 to continue
  1335. * -1 on error
  1336. */
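/*
 * Background note (added, per the ring buffer design): the pointer that
 * points to the head page (the ->next pointer of the page before it)
 * carries the RB_PAGE_* flag in its low bits.  The rb_head_page_set_*()
 * helpers cmpxchg that pointer, so the writer moving HEAD to UPDATE, a
 * nested interrupt, and a reader swapping in its page can all race
 * safely; the value returned here tells us which case we hit.
 */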
  1337. static int
  1338. rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
  1339. struct buffer_page *tail_page,
  1340. struct buffer_page *next_page)
  1341. {
  1342. struct buffer_page *new_head;
  1343. int entries;
  1344. int type;
  1345. int ret;
  1346. entries = rb_page_entries(next_page);
  1347. /*
  1348. * The hard part is here. We need to move the head
  1349. * forward, and protect against both readers on
  1350. * other CPUs and writers coming in via interrupts.
  1351. */
  1352. type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
  1353. RB_PAGE_HEAD);
  1354. /*
  1355. * type can be one of four:
  1356. * NORMAL - an interrupt already moved it for us
  1357. * HEAD - we are the first to get here.
  1358. * UPDATE - we are the interrupt interrupting
  1359. * a current move.
  1360. * MOVED - a reader on another CPU moved the next
  1361. * pointer to its reader page. Give up
  1362. * and try again.
  1363. */
  1364. switch (type) {
  1365. case RB_PAGE_HEAD:
  1366. /*
  1367. * We changed the head to UPDATE, thus
  1368. * it is our responsibility to update
  1369. * the counters.
  1370. */
  1371. local_add(entries, &cpu_buffer->overrun);
  1372. /*
  1373. * The entries will be zeroed out when we move the
  1374. * tail page.
  1375. */
  1376. /* still more to do */
  1377. break;
  1378. case RB_PAGE_UPDATE:
  1379. /*
1380. * This is an interrupt that interrupted the
1381. * previous update. Still more to do.
  1382. */
  1383. break;
  1384. case RB_PAGE_NORMAL:
  1385. /*
  1386. * An interrupt came in before the update
  1387. * and processed this for us.
  1388. * Nothing left to do.
  1389. */
  1390. return 1;
  1391. case RB_PAGE_MOVED:
  1392. /*
  1393. * The reader is on another CPU and just did
  1394. * a swap with our next_page.
  1395. * Try again.
  1396. */
  1397. return 1;
  1398. default:
  1399. RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
  1400. return -1;
  1401. }
  1402. /*
  1403. * Now that we are here, the old head pointer is
  1404. * set to UPDATE. This will keep the reader from
  1405. * swapping the head page with the reader page.
  1406. * The reader (on another CPU) will spin till
  1407. * we are finished.
  1408. *
  1409. * We just need to protect against interrupts
  1410. * doing the job. We will set the next pointer
  1411. * to HEAD. After that, we set the old pointer
  1412. * to NORMAL, but only if it was HEAD before.
1413. * Otherwise we are an interrupt, and only
1414. * want the outermost commit to reset it.
  1415. */
  1416. new_head = next_page;
  1417. rb_inc_page(cpu_buffer, &new_head);
  1418. ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
  1419. RB_PAGE_NORMAL);
  1420. /*
  1421. * Valid returns are:
  1422. * HEAD - an interrupt came in and already set it.
  1423. * NORMAL - One of two things:
  1424. * 1) We really set it.
  1425. * 2) A bunch of interrupts came in and moved
  1426. * the page forward again.
  1427. */
  1428. switch (ret) {
  1429. case RB_PAGE_HEAD:
  1430. case RB_PAGE_NORMAL:
  1431. /* OK */
  1432. break;
  1433. default:
  1434. RB_WARN_ON(cpu_buffer, 1);
  1435. return -1;
  1436. }
  1437. /*
  1438. * It is possible that an interrupt came in,
  1439. * set the head up, then more interrupts came in
  1440. * and moved it again. When we get back here,
  1441. * the page would have been set to NORMAL but we
  1442. * just set it back to HEAD.
  1443. *
  1444. * How do you detect this? Well, if that happened
  1445. * the tail page would have moved.
  1446. */
  1447. if (ret == RB_PAGE_NORMAL) {
  1448. /*
1449. * If the tail had moved past next, then we need
  1450. * to reset the pointer.
  1451. */
  1452. if (cpu_buffer->tail_page != tail_page &&
  1453. cpu_buffer->tail_page != next_page)
  1454. rb_head_page_set_normal(cpu_buffer, new_head,
  1455. next_page,
  1456. RB_PAGE_HEAD);
  1457. }
  1458. /*
1459. * If this was the outermost commit (the one that
  1460. * changed the original pointer from HEAD to UPDATE),
  1461. * then it is up to us to reset it to NORMAL.
  1462. */
  1463. if (type == RB_PAGE_HEAD) {
  1464. ret = rb_head_page_set_normal(cpu_buffer, next_page,
  1465. tail_page,
  1466. RB_PAGE_UPDATE);
  1467. if (RB_WARN_ON(cpu_buffer,
  1468. ret != RB_PAGE_UPDATE))
  1469. return -1;
  1470. }
  1471. return 0;
  1472. }
  1473. static unsigned rb_calculate_event_length(unsigned length)
  1474. {
  1475. struct ring_buffer_event event; /* Used only for sizeof array */
  1476. /* zero length can cause confusions */
  1477. if (!length)
  1478. length = 1;
  1479. if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
  1480. length += sizeof(event.array[0]);
  1481. length += RB_EVNT_HDR_SIZE;
  1482. length = ALIGN(length, RB_ARCH_ALIGNMENT);
  1483. return length;
  1484. }
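/*
 * Worked example (added; assumes RB_ALIGNMENT == 4, RB_EVNT_HDR_SIZE == 4
 * and 8-byte alignment not forced): a request to reserve 10 bytes of data
 * becomes 10 + 4 bytes of header = 14, which ALIGN() rounds up to 16
 * bytes consumed on the page.  A zero-length request is bumped to 1 byte
 * first, so it still reserves a full aligned event.
 */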
  1485. static inline void
  1486. rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
  1487. struct buffer_page *tail_page,
  1488. unsigned long tail, unsigned long length)
  1489. {
  1490. struct ring_buffer_event *event;
  1491. /*
  1492. * Only the event that crossed the page boundary
  1493. * must fill the old tail_page with padding.
  1494. */
  1495. if (tail >= BUF_PAGE_SIZE) {
  1496. local_sub(length, &tail_page->write);
  1497. return;
  1498. }
  1499. event = __rb_page_index(tail_page, tail);
  1500. kmemcheck_annotate_bitfield(event, bitfield);
  1501. /*
  1502. * If this event is bigger than the minimum size, then
  1503. * we need to be careful that we don't subtract the
  1504. * write counter enough to allow another writer to slip
  1505. * in on this page.
  1506. * We put in a discarded commit instead, to make sure
  1507. * that this space is not used again.
  1508. *
  1509. * If we are less than the minimum size, we don't need to
  1510. * worry about it.
  1511. */
  1512. if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
  1513. /* No room for any events */
  1514. /* Mark the rest of the page with padding */
  1515. rb_event_set_padding(event);
  1516. /* Set the write back to the previous setting */
  1517. local_sub(length, &tail_page->write);
  1518. return;
  1519. }
  1520. /* Put in a discarded event */
  1521. event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
  1522. event->type_len = RINGBUF_TYPE_PADDING;
  1523. /* time delta must be non zero */
  1524. event->time_delta = 1;
  1525. /* Set write to end of buffer */
  1526. length = (tail + length) - BUF_PAGE_SIZE;
  1527. local_sub(length, &tail_page->write);
  1528. }
  1529. static struct ring_buffer_event *
  1530. rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
  1531. unsigned long length, unsigned long tail,
  1532. struct buffer_page *tail_page, u64 *ts)
  1533. {
  1534. struct buffer_page *commit_page = cpu_buffer->commit_page;
  1535. struct ring_buffer *buffer = cpu_buffer->buffer;
  1536. struct buffer_page *next_page;
  1537. int ret;
  1538. next_page = tail_page;
  1539. rb_inc_page(cpu_buffer, &next_page);
  1540. /*
  1541. * If for some reason, we had an interrupt storm that made
  1542. * it all the way around the buffer, bail, and warn
  1543. * about it.
  1544. */
  1545. if (unlikely(next_page == commit_page)) {
  1546. local_inc(&cpu_buffer->commit_overrun);
  1547. goto out_reset;
  1548. }
  1549. /*
  1550. * This is where the fun begins!
  1551. *
  1552. * We are fighting against races between a reader that
  1553. * could be on another CPU trying to swap its reader
  1554. * page with the buffer head.
  1555. *
  1556. * We are also fighting against interrupts coming in and
  1557. * moving the head or tail on us as well.
  1558. *
  1559. * If the next page is the head page then we have filled
  1560. * the buffer, unless the commit page is still on the
  1561. * reader page.
  1562. */
  1563. if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {
  1564. /*
  1565. * If the commit is not on the reader page, then
  1566. * move the header page.
  1567. */
  1568. if (!rb_is_reader_page(cpu_buffer->commit_page)) {
  1569. /*
  1570. * If we are not in overwrite mode,
  1571. * this is easy, just stop here.
  1572. */
  1573. if (!(buffer->flags & RB_FL_OVERWRITE))
  1574. goto out_reset;
  1575. ret = rb_handle_head_page(cpu_buffer,
  1576. tail_page,
  1577. next_page);
  1578. if (ret < 0)
  1579. goto out_reset;
  1580. if (ret)
  1581. goto out_again;
  1582. } else {
  1583. /*
  1584. * We need to be careful here too. The
  1585. * commit page could still be on the reader
  1586. * page. We could have a small buffer, and
  1587. * have filled up the buffer with events
  1588. * from interrupts and such, and wrapped.
  1589. *
1590. * Note, if the tail page is also on the
1591. * reader_page, we let it move out.
  1592. */
  1593. if (unlikely((cpu_buffer->commit_page !=
  1594. cpu_buffer->tail_page) &&
  1595. (cpu_buffer->commit_page ==
  1596. cpu_buffer->reader_page))) {
  1597. local_inc(&cpu_buffer->commit_overrun);
  1598. goto out_reset;
  1599. }
  1600. }
  1601. }
  1602. ret = rb_tail_page_update(cpu_buffer, tail_page, next_page);
  1603. if (ret) {
  1604. /*
  1605. * Nested commits always have zero deltas, so
  1606. * just reread the time stamp
  1607. */
  1608. *ts = rb_time_stamp(buffer);
  1609. next_page->page->time_stamp = *ts;
  1610. }
  1611. out_again:
  1612. rb_reset_tail(cpu_buffer, tail_page, tail, length);
  1613. /* fail and let the caller try again */
  1614. return ERR_PTR(-EAGAIN);
  1615. out_reset:
  1616. /* reset write */
  1617. rb_reset_tail(cpu_buffer, tail_page, tail, length);
  1618. return NULL;
  1619. }
  1620. static struct ring_buffer_event *
  1621. __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
  1622. unsigned type, unsigned long length, u64 *ts)
  1623. {
  1624. struct buffer_page *tail_page;
  1625. struct ring_buffer_event *event;
  1626. unsigned long tail, write;
  1627. tail_page = cpu_buffer->tail_page;
  1628. write = local_add_return(length, &tail_page->write);
  1629. /* set write to only the index of the write */
  1630. write &= RB_WRITE_MASK;
  1631. tail = write - length;
1632. /* See if we shot past the end of this buffer page */
  1633. if (write > BUF_PAGE_SIZE)
  1634. return rb_move_tail(cpu_buffer, length, tail,
  1635. tail_page, ts);
  1636. /* We reserved something on the buffer */
  1637. event = __rb_page_index(tail_page, tail);
  1638. kmemcheck_annotate_bitfield(event, bitfield);
  1639. rb_update_event(event, type, length);
  1640. /* The passed in type is zero for DATA */
  1641. if (likely(!type))
  1642. local_inc(&tail_page->entries);
  1643. /*
  1644. * If this is the first commit on the page, then update
  1645. * its timestamp.
  1646. */
  1647. if (!tail)
  1648. tail_page->page->time_stamp = *ts;
  1649. return event;
  1650. }
  1651. static inline int
  1652. rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
  1653. struct ring_buffer_event *event)
  1654. {
  1655. unsigned long new_index, old_index;
  1656. struct buffer_page *bpage;
  1657. unsigned long index;
  1658. unsigned long addr;
  1659. new_index = rb_event_index(event);
  1660. old_index = new_index + rb_event_length(event);
  1661. addr = (unsigned long)event;
  1662. addr &= PAGE_MASK;
  1663. bpage = cpu_buffer->tail_page;
  1664. if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
  1665. unsigned long write_mask =
  1666. local_read(&bpage->write) & ~RB_WRITE_MASK;
  1667. /*
  1668. * This is on the tail page. It is possible that
  1669. * a write could come in and move the tail page
  1670. * and write to the next page. That is fine
  1671. * because we just shorten what is on this page.
  1672. */
  1673. old_index += write_mask;
  1674. new_index += write_mask;
  1675. index = local_cmpxchg(&bpage->write, old_index, new_index);
  1676. if (index == old_index)
  1677. return 1;
  1678. }
  1679. /* could not discard */
  1680. return 0;
  1681. }
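/*
 * Note (added for clarity): rb_add_time_stamp() is used when the delta
 * from the last write stamp no longer fits in an event's time_delta
 * bitfield (27 bits in this version, see TS_SHIFT/TS_MASK).  It reserves
 * a RINGBUF_TYPE_TIME_EXTEND event whose time_delta holds the low bits
 * of the delta and whose array[0] holds the remaining high bits, letting
 * the reader reconstruct the full delta.
 */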
  1682. static int
  1683. rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
  1684. u64 *ts, u64 *delta)
  1685. {
  1686. struct ring_buffer_event *event;
  1687. static int once;
  1688. int ret;
  1689. if (unlikely(*delta > (1ULL << 59) && !once++)) {
  1690. printk(KERN_WARNING "Delta way too big! %llu"
  1691. " ts=%llu write stamp = %llu\n",
  1692. (unsigned long long)*delta,
  1693. (unsigned long long)*ts,
  1694. (unsigned long long)cpu_buffer->write_stamp);
  1695. WARN_ON(1);
  1696. }
  1697. /*
1698. * The delta is too big; we need to add a
1699. * new timestamp.
  1700. */
  1701. event = __rb_reserve_next(cpu_buffer,
  1702. RINGBUF_TYPE_TIME_EXTEND,
  1703. RB_LEN_TIME_EXTEND,
  1704. ts);
  1705. if (!event)
  1706. return -EBUSY;
  1707. if (PTR_ERR(event) == -EAGAIN)
  1708. return -EAGAIN;
1709. /* Only a committed time event can update the write stamp */
  1710. if (rb_event_is_commit(cpu_buffer, event)) {
  1711. /*
  1712. * If this is the first on the page, then it was
  1713. * updated with the page itself. Try to discard it
  1714. * and if we can't just make it zero.
  1715. */
  1716. if (rb_event_index(event)) {
  1717. event->time_delta = *delta & TS_MASK;
  1718. event->array[0] = *delta >> TS_SHIFT;
  1719. } else {
  1720. /* try to discard, since we do not need this */
  1721. if (!rb_try_to_discard(cpu_buffer, event)) {
  1722. /* nope, just zero it */
  1723. event->time_delta = 0;
  1724. event->array[0] = 0;
  1725. }
  1726. }
  1727. cpu_buffer->write_stamp = *ts;
  1728. /* let the caller know this was the commit */
  1729. ret = 1;
  1730. } else {
  1731. /* Try to discard the event */
  1732. if (!rb_try_to_discard(cpu_buffer, event)) {
  1733. /* Darn, this is just wasted space */
  1734. event->time_delta = 0;
  1735. event->array[0] = 0;
  1736. }
  1737. ret = 0;
  1738. }
  1739. *delta = 0;
  1740. return ret;
  1741. }
  1742. static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
  1743. {
  1744. local_inc(&cpu_buffer->committing);
  1745. local_inc(&cpu_buffer->commits);
  1746. }
  1747. static void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
  1748. {
  1749. unsigned long commits;
  1750. if (RB_WARN_ON(cpu_buffer,
  1751. !local_read(&cpu_buffer->committing)))
  1752. return;
  1753. again:
  1754. commits = local_read(&cpu_buffer->commits);
  1755. /* synchronize with interrupts */
  1756. barrier();
  1757. if (local_read(&cpu_buffer->committing) == 1)
  1758. rb_set_commit_to_write(cpu_buffer);
  1759. local_dec(&cpu_buffer->committing);
  1760. /* synchronize with interrupts */
  1761. barrier();
  1762. /*
  1763. * Need to account for interrupts coming in between the
  1764. * updating of the commit page and the clearing of the
  1765. * committing counter.
  1766. */
  1767. if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
  1768. !local_read(&cpu_buffer->committing)) {
  1769. local_inc(&cpu_buffer->committing);
  1770. goto again;
  1771. }
  1772. }
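/*
 * Note (added for clarity): rb_reserve_next_event() below implements the
 * commit/delta protocol.  Only the event at the commit position carries
 * a real time delta; events written by nested interrupts get a delta of
 * zero, so a reader sums the deltas on a page on top of the page's
 * time_stamp to recover each event's time.
 */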
  1773. static struct ring_buffer_event *
  1774. rb_reserve_next_event(struct ring_buffer *buffer,
  1775. struct ring_buffer_per_cpu *cpu_buffer,
  1776. unsigned long length)
  1777. {
  1778. struct ring_buffer_event *event;
  1779. u64 ts, delta = 0;
  1780. int commit = 0;
  1781. int nr_loops = 0;
  1782. rb_start_commit(cpu_buffer);
  1783. #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
  1784. /*
  1785. * Due to the ability to swap a cpu buffer from a buffer
  1786. * it is possible it was swapped before we committed.
  1787. * (committing stops a swap). We check for it here and
  1788. * if it happened, we have to fail the write.
  1789. */
  1790. barrier();
  1791. if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
  1792. local_dec(&cpu_buffer->committing);
  1793. local_dec(&cpu_buffer->commits);
  1794. return NULL;
  1795. }
  1796. #endif
  1797. length = rb_calculate_event_length(length);
  1798. again:
  1799. /*
  1800. * We allow for interrupts to reenter here and do a trace.
  1801. * If one does, it will cause this original code to loop
  1802. * back here. Even with heavy interrupts happening, this
  1803. * should only happen a few times in a row. If this happens
  1804. * 1000 times in a row, there must be either an interrupt
  1805. * storm or we have something buggy.
  1806. * Bail!
  1807. */
  1808. if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
  1809. goto out_fail;
  1810. ts = rb_time_stamp(cpu_buffer->buffer);
  1811. /*
  1812. * Only the first commit can update the timestamp.
  1813. * Yes there is a race here. If an interrupt comes in
  1814. * just after the conditional and it traces too, then it
  1815. * will also check the deltas. More than one timestamp may
  1816. * also be made. But only the entry that did the actual
  1817. * commit will be something other than zero.
  1818. */
  1819. if (likely(cpu_buffer->tail_page == cpu_buffer->commit_page &&
  1820. rb_page_write(cpu_buffer->tail_page) ==
  1821. rb_commit_index(cpu_buffer))) {
  1822. u64 diff;
  1823. diff = ts - cpu_buffer->write_stamp;
  1824. /* make sure this diff is calculated here */
  1825. barrier();
  1826. /* Did the write stamp get updated already? */
  1827. if (unlikely(ts < cpu_buffer->write_stamp))
  1828. goto get_event;
  1829. delta = diff;
  1830. if (unlikely(test_time_stamp(delta))) {
  1831. commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
  1832. if (commit == -EBUSY)
  1833. goto out_fail;
  1834. if (commit == -EAGAIN)
  1835. goto again;
  1836. RB_WARN_ON(cpu_buffer, commit < 0);
  1837. }
  1838. }
  1839. get_event:
  1840. event = __rb_reserve_next(cpu_buffer, 0, length, &ts);
  1841. if (unlikely(PTR_ERR(event) == -EAGAIN))
  1842. goto again;
  1843. if (!event)
  1844. goto out_fail;
  1845. if (!rb_event_is_commit(cpu_buffer, event))
  1846. delta = 0;
  1847. event->time_delta = delta;
  1848. return event;
  1849. out_fail:
  1850. rb_end_commit(cpu_buffer);
  1851. return NULL;
  1852. }
  1853. #ifdef CONFIG_TRACING
  1854. #define TRACE_RECURSIVE_DEPTH 16
  1855. static int trace_recursive_lock(void)
  1856. {
  1857. current->trace_recursion++;
  1858. if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
  1859. return 0;
  1860. /* Disable all tracing before we do anything else */
  1861. tracing_off_permanent();
  1862. printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
  1863. "HC[%lu]:SC[%lu]:NMI[%lu]\n",
  1864. current->trace_recursion,
  1865. hardirq_count() >> HARDIRQ_SHIFT,
  1866. softirq_count() >> SOFTIRQ_SHIFT,
  1867. in_nmi());
  1868. WARN_ON_ONCE(1);
  1869. return -1;
  1870. }
  1871. static void trace_recursive_unlock(void)
  1872. {
  1873. WARN_ON_ONCE(!current->trace_recursion);
  1874. current->trace_recursion--;
  1875. }
  1876. #else
  1877. #define trace_recursive_lock() (0)
  1878. #define trace_recursive_unlock() do { } while (0)
  1879. #endif
  1880. static DEFINE_PER_CPU(int, rb_need_resched);
  1881. /**
  1882. * ring_buffer_lock_reserve - reserve a part of the buffer
  1883. * @buffer: the ring buffer to reserve from
  1884. * @length: the length of the data to reserve (excluding event header)
  1885. *
1886. * Returns a reserved event on the ring buffer to copy directly to.
  1887. * The user of this interface will need to get the body to write into
  1888. * and can use the ring_buffer_event_data() interface.
  1889. *
  1890. * The length is the length of the data needed, not the event length
  1891. * which also includes the event header.
  1892. *
  1893. * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
  1894. * If NULL is returned, then nothing has been allocated or locked.
  1895. */
  1896. struct ring_buffer_event *
  1897. ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
  1898. {
  1899. struct ring_buffer_per_cpu *cpu_buffer;
  1900. struct ring_buffer_event *event;
  1901. int cpu, resched;
  1902. if (ring_buffer_flags != RB_BUFFERS_ON)
  1903. return NULL;
  1904. /* If we are tracing schedule, we don't want to recurse */
  1905. resched = ftrace_preempt_disable();
  1906. if (atomic_read(&buffer->record_disabled))
  1907. goto out_nocheck;
  1908. if (trace_recursive_lock())
  1909. goto out_nocheck;
  1910. cpu = raw_smp_processor_id();
  1911. if (!cpumask_test_cpu(cpu, buffer->cpumask))
  1912. goto out;
  1913. cpu_buffer = buffer->buffers[cpu];
  1914. if (atomic_read(&cpu_buffer->record_disabled))
  1915. goto out;
  1916. if (length > BUF_MAX_DATA_SIZE)
  1917. goto out;
  1918. event = rb_reserve_next_event(buffer, cpu_buffer, length);
  1919. if (!event)
  1920. goto out;
  1921. /*
  1922. * Need to store resched state on this cpu.
  1923. * Only the first needs to.
  1924. */
  1925. if (preempt_count() == 1)
  1926. per_cpu(rb_need_resched, cpu) = resched;
  1927. return event;
  1928. out:
  1929. trace_recursive_unlock();
  1930. out_nocheck:
  1931. ftrace_preempt_enable(resched);
  1932. return NULL;
  1933. }
  1934. EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
  1935. static void
  1936. rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
  1937. struct ring_buffer_event *event)
  1938. {
  1939. /*
  1940. * The event first in the commit queue updates the
  1941. * time stamp.
  1942. */
  1943. if (rb_event_is_commit(cpu_buffer, event))
  1944. cpu_buffer->write_stamp += event->time_delta;
  1945. }
  1946. static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
  1947. struct ring_buffer_event *event)
  1948. {
  1949. local_inc(&cpu_buffer->entries);
  1950. rb_update_write_stamp(cpu_buffer, event);
  1951. rb_end_commit(cpu_buffer);
  1952. }
  1953. /**
1954. * ring_buffer_unlock_commit - commit a reserved event
  1955. * @buffer: The buffer to commit to
  1956. * @event: The event pointer to commit.
  1957. *
  1958. * This commits the data to the ring buffer, and releases any locks held.
  1959. *
  1960. * Must be paired with ring_buffer_lock_reserve.
  1961. */
  1962. int ring_buffer_unlock_commit(struct ring_buffer *buffer,
  1963. struct ring_buffer_event *event)
  1964. {
  1965. struct ring_buffer_per_cpu *cpu_buffer;
  1966. int cpu = raw_smp_processor_id();
  1967. cpu_buffer = buffer->buffers[cpu];
  1968. rb_commit(cpu_buffer, event);
  1969. trace_recursive_unlock();
  1970. /*
  1971. * Only the last preempt count needs to restore preemption.
  1972. */
  1973. if (preempt_count() == 1)
  1974. ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
  1975. else
  1976. preempt_enable_no_resched_notrace();
  1977. return 0;
  1978. }
  1979. EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
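/*
 * Illustrative usage sketch (added; "struct my_entry" is a made-up
 * caller-side payload, not part of this file):
 *
 *	struct ring_buffer_event *event;
 *	struct my_entry *entry;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *	if (event) {
 *		entry = ring_buffer_event_data(event);
 *		entry->value = 42;
 *		ring_buffer_unlock_commit(buffer, event);
 *	}
 */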
  1980. static inline void rb_event_discard(struct ring_buffer_event *event)
  1981. {
  1982. /* array[0] holds the actual length for the discarded event */
  1983. event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
  1984. event->type_len = RINGBUF_TYPE_PADDING;
  1985. /* time delta must be non zero */
  1986. if (!event->time_delta)
  1987. event->time_delta = 1;
  1988. }
  1989. /*
  1990. * Decrement the entries to the page that an event is on.
  1991. * The event does not even need to exist, only the pointer
  1992. * to the page it is on. This may only be called before the commit
  1993. * takes place.
  1994. */
  1995. static inline void
  1996. rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
  1997. struct ring_buffer_event *event)
  1998. {
  1999. unsigned long addr = (unsigned long)event;
  2000. struct buffer_page *bpage = cpu_buffer->commit_page;
  2001. struct buffer_page *start;
  2002. addr &= PAGE_MASK;
  2003. /* Do the likely case first */
  2004. if (likely(bpage->page == (void *)addr)) {
  2005. local_dec(&bpage->entries);
  2006. return;
  2007. }
  2008. /*
2009. * Because the commit page may be on the reader page we
2010. * start with the next page and stop the loop when we wrap back to it.
  2011. */
  2012. rb_inc_page(cpu_buffer, &bpage);
  2013. start = bpage;
  2014. do {
  2015. if (bpage->page == (void *)addr) {
  2016. local_dec(&bpage->entries);
  2017. return;
  2018. }
  2019. rb_inc_page(cpu_buffer, &bpage);
  2020. } while (bpage != start);
  2021. /* commit not part of this buffer?? */
  2022. RB_WARN_ON(cpu_buffer, 1);
  2023. }
  2024. /**
  2025. * ring_buffer_commit_discard - discard an event that has not been committed
  2026. * @buffer: the ring buffer
  2027. * @event: non committed event to discard
  2028. *
  2029. * Sometimes an event that is in the ring buffer needs to be ignored.
  2030. * This function lets the user discard an event in the ring buffer
  2031. * and then that event will not be read later.
  2032. *
2033. * This function only works if it is called before the item has been
  2034. * committed. It will try to free the event from the ring buffer
  2035. * if another event has not been added behind it.
  2036. *
  2037. * If another event has been added behind it, it will set the event
  2038. * up as discarded, and perform the commit.
  2039. *
  2040. * If this function is called, do not call ring_buffer_unlock_commit on
  2041. * the event.
  2042. */
  2043. void ring_buffer_discard_commit(struct ring_buffer *buffer,
  2044. struct ring_buffer_event *event)
  2045. {
  2046. struct ring_buffer_per_cpu *cpu_buffer;
  2047. int cpu;
  2048. /* The event is discarded regardless */
  2049. rb_event_discard(event);
  2050. cpu = smp_processor_id();
  2051. cpu_buffer = buffer->buffers[cpu];
  2052. /*
  2053. * This must only be called if the event has not been
  2054. * committed yet. Thus we can assume that preemption
  2055. * is still disabled.
  2056. */
  2057. RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
  2058. rb_decrement_entry(cpu_buffer, event);
  2059. if (rb_try_to_discard(cpu_buffer, event))
  2060. goto out;
  2061. /*
  2062. * The commit is still visible by the reader, so we
  2063. * must still update the timestamp.
  2064. */
  2065. rb_update_write_stamp(cpu_buffer, event);
  2066. out:
  2067. rb_end_commit(cpu_buffer);
  2068. trace_recursive_unlock();
  2069. /*
  2070. * Only the last preempt count needs to restore preemption.
  2071. */
  2072. if (preempt_count() == 1)
  2073. ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
  2074. else
  2075. preempt_enable_no_resched_notrace();
  2076. }
  2077. EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
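/*
 * Illustrative usage sketch (added; fill_entry() and entry_rejected()
 * are made-up helpers): a caller that decides, after filling the event,
 * that it should not be recorded discards it instead of committing:
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *	if (event) {
 *		entry = ring_buffer_event_data(event);
 *		fill_entry(entry);
 *		if (entry_rejected(entry))
 *			ring_buffer_discard_commit(buffer, event);
 *		else
 *			ring_buffer_unlock_commit(buffer, event);
 *	}
 */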
  2078. /**
  2079. * ring_buffer_write - write data to the buffer without reserving
  2080. * @buffer: The ring buffer to write to.
  2081. * @length: The length of the data being written (excluding the event header)
  2082. * @data: The data to write to the buffer.
  2083. *
  2084. * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
  2085. * one function. If you already have the data to write to the buffer, it
  2086. * may be easier to simply call this function.
  2087. *
  2088. * Note, like ring_buffer_lock_reserve, the length is the length of the data
  2089. * and not the length of the event which would hold the header.
  2090. */
  2091. int ring_buffer_write(struct ring_buffer *buffer,
  2092. unsigned long length,
  2093. void *data)
  2094. {
  2095. struct ring_buffer_per_cpu *cpu_buffer;
  2096. struct ring_buffer_event *event;
  2097. void *body;
  2098. int ret = -EBUSY;
  2099. int cpu, resched;
  2100. if (ring_buffer_flags != RB_BUFFERS_ON)
  2101. return -EBUSY;
  2102. resched = ftrace_preempt_disable();
  2103. if (atomic_read(&buffer->record_disabled))
  2104. goto out;
  2105. cpu = raw_smp_processor_id();
  2106. if (!cpumask_test_cpu(cpu, buffer->cpumask))
  2107. goto out;
  2108. cpu_buffer = buffer->buffers[cpu];
  2109. if (atomic_read(&cpu_buffer->record_disabled))
  2110. goto out;
  2111. if (length > BUF_MAX_DATA_SIZE)
  2112. goto out;
  2113. event = rb_reserve_next_event(buffer, cpu_buffer, length);
  2114. if (!event)
  2115. goto out;
  2116. body = rb_event_data(event);
  2117. memcpy(body, data, length);
  2118. rb_commit(cpu_buffer, event);
  2119. ret = 0;
  2120. out:
  2121. ftrace_preempt_enable(resched);
  2122. return ret;
  2123. }
  2124. EXPORT_SYMBOL_GPL(ring_buffer_write);
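/*
 * Illustrative usage sketch (added; "struct my_entry" is a made-up
 * payload): when the data already exists there is no need for the
 * reserve/commit pair:
 *
 *	struct my_entry entry = { .value = 42 };
 *
 *	if (ring_buffer_write(buffer, sizeof(entry), &entry))
 *		printk(KERN_WARNING "entry was dropped\n");
 */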
  2125. static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
  2126. {
  2127. struct buffer_page *reader = cpu_buffer->reader_page;
  2128. struct buffer_page *head = rb_set_head_page(cpu_buffer);
  2129. struct buffer_page *commit = cpu_buffer->commit_page;
  2130. /* In case of error, head will be NULL */
  2131. if (unlikely(!head))
  2132. return 1;
  2133. return reader->read == rb_page_commit(reader) &&
  2134. (commit == reader ||
  2135. (commit == head &&
  2136. head->read == rb_page_commit(commit)));
  2137. }
  2138. /**
  2139. * ring_buffer_record_disable - stop all writes into the buffer
  2140. * @buffer: The ring buffer to stop writes to.
  2141. *
  2142. * This prevents all writes to the buffer. Any attempt to write
  2143. * to the buffer after this will fail and return NULL.
  2144. *
  2145. * The caller should call synchronize_sched() after this.
  2146. */
  2147. void ring_buffer_record_disable(struct ring_buffer *buffer)
  2148. {
  2149. atomic_inc(&buffer->record_disabled);
  2150. }
  2151. EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
  2152. /**
  2153. * ring_buffer_record_enable - enable writes to the buffer
  2154. * @buffer: The ring buffer to enable writes
  2155. *
  2156. * Note, multiple disables will need the same number of enables
  2157. * to truly enable the writing (much like preempt_disable).
  2158. */
  2159. void ring_buffer_record_enable(struct ring_buffer *buffer)
  2160. {
  2161. atomic_dec(&buffer->record_disabled);
  2162. }
  2163. EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
  2164. /**
  2165. * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
  2166. * @buffer: The ring buffer to stop writes to.
  2167. * @cpu: The CPU buffer to stop
  2168. *
  2169. * This prevents all writes to the buffer. Any attempt to write
  2170. * to the buffer after this will fail and return NULL.
  2171. *
  2172. * The caller should call synchronize_sched() after this.
  2173. */
  2174. void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
  2175. {
  2176. struct ring_buffer_per_cpu *cpu_buffer;
  2177. if (!cpumask_test_cpu(cpu, buffer->cpumask))
  2178. return;
  2179. cpu_buffer = buffer->buffers[cpu];
  2180. atomic_inc(&cpu_buffer->record_disabled);
  2181. }
  2182. EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
  2183. /**
  2184. * ring_buffer_record_enable_cpu - enable writes to the buffer
  2185. * @buffer: The ring buffer to enable writes
  2186. * @cpu: The CPU to enable.
  2187. *
  2188. * Note, multiple disables will need the same number of enables
  2189. * to truly enable the writing (much like preempt_disable).
  2190. */
  2191. void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
  2192. {
  2193. struct ring_buffer_per_cpu *cpu_buffer;
  2194. if (!cpumask_test_cpu(cpu, buffer->cpumask))
  2195. return;
  2196. cpu_buffer = buffer->buffers[cpu];
  2197. atomic_dec(&cpu_buffer->record_disabled);
  2198. }
  2199. EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
  2200. /**
  2201. * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
  2202. * @buffer: The ring buffer
  2203. * @cpu: The per CPU buffer to get the entries from.
  2204. */
  2205. unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
  2206. {
  2207. struct ring_buffer_per_cpu *cpu_buffer;
  2208. unsigned long ret;
  2209. if (!cpumask_test_cpu(cpu, buffer->cpumask))
  2210. return 0;
  2211. cpu_buffer = buffer->buffers[cpu];
  2212. ret = (local_read(&cpu_buffer->entries) - local_read(&cpu_buffer->overrun))
  2213. - cpu_buffer->read;
  2214. return ret;
  2215. }
  2216. EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
  2217. /**
  2218. * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
  2219. * @buffer: The ring buffer
  2220. * @cpu: The per CPU buffer to get the number of overruns from
  2221. */
  2222. unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
  2223. {
  2224. struct ring_buffer_per_cpu *cpu_buffer;
  2225. unsigned long ret;
  2226. if (!cpumask_test_cpu(cpu, buffer->cpumask))
  2227. return 0;
  2228. cpu_buffer = buffer->buffers[cpu];
  2229. ret = local_read(&cpu_buffer->overrun);
  2230. return ret;
  2231. }
  2232. EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
  2233. /**
  2234. * ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits
  2235. * @buffer: The ring buffer
  2236. * @cpu: The per CPU buffer to get the number of overruns from
  2237. */
  2238. unsigned long
  2239. ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
  2240. {
  2241. struct ring_buffer_per_cpu *cpu_buffer;
  2242. unsigned long ret;
  2243. if (!cpumask_test_cpu(cpu, buffer->cpumask))
  2244. return 0;
  2245. cpu_buffer = buffer->buffers[cpu];
  2246. ret = local_read(&cpu_buffer->commit_overrun);
  2247. return ret;
  2248. }
  2249. EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
  2250. /**
  2251. * ring_buffer_entries - get the number of entries in a buffer
  2252. * @buffer: The ring buffer
  2253. *
  2254. * Returns the total number of entries in the ring buffer
  2255. * (all CPU entries)
  2256. */
  2257. unsigned long ring_buffer_entries(struct ring_buffer *buffer)
  2258. {
  2259. struct ring_buffer_per_cpu *cpu_buffer;
  2260. unsigned long entries = 0;
  2261. int cpu;
  2262. /* if you care about this being correct, lock the buffer */
  2263. for_each_buffer_cpu(buffer, cpu) {
  2264. cpu_buffer = buffer->buffers[cpu];
  2265. entries += (local_read(&cpu_buffer->entries) -
  2266. local_read(&cpu_buffer->overrun)) - cpu_buffer->read;
  2267. }
  2268. return entries;
  2269. }
  2270. EXPORT_SYMBOL_GPL(ring_buffer_entries);
  2271. /**
  2272. * ring_buffer_overruns - get the number of overruns in buffer
  2273. * @buffer: The ring buffer
  2274. *
  2275. * Returns the total number of overruns in the ring buffer
  2276. * (all CPU entries)
  2277. */
  2278. unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
  2279. {
  2280. struct ring_buffer_per_cpu *cpu_buffer;
  2281. unsigned long overruns = 0;
  2282. int cpu;
  2283. /* if you care about this being correct, lock the buffer */
  2284. for_each_buffer_cpu(buffer, cpu) {
  2285. cpu_buffer = buffer->buffers[cpu];
  2286. overruns += local_read(&cpu_buffer->overrun);
  2287. }
  2288. return overruns;
  2289. }
  2290. EXPORT_SYMBOL_GPL(ring_buffer_overruns);
  2291. static void rb_iter_reset(struct ring_buffer_iter *iter)
  2292. {
  2293. struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
  2294. /* Iterator usage is expected to have record disabled */
  2295. if (list_empty(&cpu_buffer->reader_page->list)) {
  2296. iter->head_page = rb_set_head_page(cpu_buffer);
  2297. if (unlikely(!iter->head_page))
  2298. return;
  2299. iter->head = iter->head_page->read;
  2300. } else {
  2301. iter->head_page = cpu_buffer->reader_page;
  2302. iter->head = cpu_buffer->reader_page->read;
  2303. }
  2304. if (iter->head)
  2305. iter->read_stamp = cpu_buffer->read_stamp;
  2306. else
  2307. iter->read_stamp = iter->head_page->page->time_stamp;
  2308. iter->cache_reader_page = cpu_buffer->reader_page;
  2309. iter->cache_read = cpu_buffer->read;
  2310. }
  2311. /**
  2312. * ring_buffer_iter_reset - reset an iterator
  2313. * @iter: The iterator to reset
  2314. *
  2315. * Resets the iterator, so that it will start from the beginning
  2316. * again.
  2317. */
  2318. void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
  2319. {
  2320. struct ring_buffer_per_cpu *cpu_buffer;
  2321. unsigned long flags;
  2322. if (!iter)
  2323. return;
  2324. cpu_buffer = iter->cpu_buffer;
  2325. spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
  2326. rb_iter_reset(iter);
  2327. spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
  2328. }
  2329. EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
  2330. /**
  2331. * ring_buffer_iter_empty - check if an iterator has no more to read
  2332. * @iter: The iterator to check
  2333. */
  2334. int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
  2335. {
  2336. struct ring_buffer_per_cpu *cpu_buffer;
  2337. cpu_buffer = iter->cpu_buffer;
  2338. return iter->head_page == cpu_buffer->commit_page &&
  2339. iter->head == rb_commit_index(cpu_buffer);
  2340. }
  2341. EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
  2342. static void
  2343. rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
  2344. struct ring_buffer_event *event)
  2345. {
  2346. u64 delta;
  2347. switch (event->type_len) {
  2348. case RINGBUF_TYPE_PADDING:
  2349. return;
  2350. case RINGBUF_TYPE_TIME_EXTEND:
  2351. delta = event->array[0];
  2352. delta <<= TS_SHIFT;
  2353. delta += event->time_delta;
  2354. cpu_buffer->read_stamp += delta;
  2355. return;
  2356. case RINGBUF_TYPE_TIME_STAMP:
  2357. /* FIXME: not implemented */
  2358. return;
  2359. case RINGBUF_TYPE_DATA:
  2360. cpu_buffer->read_stamp += event->time_delta;
  2361. return;
  2362. default:
  2363. BUG();
  2364. }
  2365. return;
  2366. }
  2367. static void
  2368. rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
  2369. struct ring_buffer_event *event)
  2370. {
  2371. u64 delta;
  2372. switch (event->type_len) {
  2373. case RINGBUF_TYPE_PADDING:
  2374. return;
  2375. case RINGBUF_TYPE_TIME_EXTEND:
  2376. delta = event->array[0];
  2377. delta <<= TS_SHIFT;
  2378. delta += event->time_delta;
  2379. iter->read_stamp += delta;
  2380. return;
  2381. case RINGBUF_TYPE_TIME_STAMP:
  2382. /* FIXME: not implemented */
  2383. return;
  2384. case RINGBUF_TYPE_DATA:
  2385. iter->read_stamp += event->time_delta;
  2386. return;
  2387. default:
  2388. BUG();
  2389. }
  2390. return;
  2391. }
  2392. static struct buffer_page *
  2393. rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
  2394. {
  2395. struct buffer_page *reader = NULL;
  2396. unsigned long flags;
  2397. int nr_loops = 0;
  2398. int ret;
  2399. local_irq_save(flags);
  2400. arch_spin_lock(&cpu_buffer->lock);
  2401. again:
  2402. /*
  2403. * This should normally only loop twice. But because the
  2404. * start of the reader inserts an empty page, it causes
  2405. * a case where we will loop three times. There should be no
  2406. * reason to loop four times (that I know of).
  2407. */
  2408. if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
  2409. reader = NULL;
  2410. goto out;
  2411. }
  2412. reader = cpu_buffer->reader_page;
  2413. /* If there's more to read, return this page */
  2414. if (cpu_buffer->reader_page->read < rb_page_size(reader))
  2415. goto out;
  2416. /* Never should we have an index greater than the size */
  2417. if (RB_WARN_ON(cpu_buffer,
  2418. cpu_buffer->reader_page->read > rb_page_size(reader)))
  2419. goto out;
  2420. /* check if we caught up to the tail */
  2421. reader = NULL;
  2422. if (cpu_buffer->commit_page == cpu_buffer->reader_page)
  2423. goto out;
  2424. /*
  2425. * Reset the reader page to size zero.
  2426. */
  2427. local_set(&cpu_buffer->reader_page->write, 0);
  2428. local_set(&cpu_buffer->reader_page->entries, 0);
  2429. local_set(&cpu_buffer->reader_page->page->commit, 0);
  2430. spin:
  2431. /*
  2432. * Splice the empty reader page into the list around the head.
  2433. */
  2434. reader = rb_set_head_page(cpu_buffer);
  2435. cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
  2436. cpu_buffer->reader_page->list.prev = reader->list.prev;
  2437. /*
2438. * cpu_buffer->pages just needs to point to the buffer; it
2439. * has no specific buffer page to point to. Let's move it out
2440. * of our way so we don't accidentally swap it.
  2441. */
  2442. cpu_buffer->pages = reader->list.prev;
  2443. /* The reader page will be pointing to the new head */
  2444. rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
  2445. /*
  2446. * Here's the tricky part.
  2447. *
  2448. * We need to move the pointer past the header page.
  2449. * But we can only do that if a writer is not currently
  2450. * moving it. The page before the header page has the
2451. * flag bit '1' set if it is pointing to the page we want,
2452. * but if the writer is in the process of moving it
2453. * then it will be '2', or '0' if it has already moved.
  2454. */
  2455. ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
  2456. /*
  2457. * If we did not convert it, then we must try again.
  2458. */
  2459. if (!ret)
  2460. goto spin;
  2461. /*
  2462. * Yeah! We succeeded in replacing the page.
  2463. *
  2464. * Now make the new head point back to the reader page.
  2465. */
  2466. rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
  2467. rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
  2468. /* Finally update the reader page to the new head */
  2469. cpu_buffer->reader_page = reader;
  2470. rb_reset_reader_page(cpu_buffer);
  2471. goto again;
  2472. out:
  2473. arch_spin_unlock(&cpu_buffer->lock);
  2474. local_irq_restore(flags);
  2475. return reader;
  2476. }
  2477. static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
  2478. {
  2479. struct ring_buffer_event *event;
  2480. struct buffer_page *reader;
  2481. unsigned length;
  2482. reader = rb_get_reader_page(cpu_buffer);
  2483. /* This function should not be called when buffer is empty */
  2484. if (RB_WARN_ON(cpu_buffer, !reader))
  2485. return;
  2486. event = rb_reader_event(cpu_buffer);
  2487. if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
  2488. cpu_buffer->read++;
  2489. rb_update_read_stamp(cpu_buffer, event);
  2490. length = rb_event_length(event);
  2491. cpu_buffer->reader_page->read += length;
  2492. }
  2493. static void rb_advance_iter(struct ring_buffer_iter *iter)
  2494. {
  2495. struct ring_buffer *buffer;
  2496. struct ring_buffer_per_cpu *cpu_buffer;
  2497. struct ring_buffer_event *event;
  2498. unsigned length;
  2499. cpu_buffer = iter->cpu_buffer;
  2500. buffer = cpu_buffer->buffer;
  2501. /*
  2502. * Check if we are at the end of the buffer.
  2503. */
  2504. if (iter->head >= rb_page_size(iter->head_page)) {
  2505. /* discarded commits can make the page empty */
  2506. if (iter->head_page == cpu_buffer->commit_page)
  2507. return;
  2508. rb_inc_iter(iter);
  2509. return;
  2510. }
  2511. event = rb_iter_head_event(iter);
  2512. length = rb_event_length(event);
  2513. /*
  2514. * This should not be called to advance the header if we are
  2515. * at the tail of the buffer.
  2516. */
  2517. if (RB_WARN_ON(cpu_buffer,
  2518. (iter->head_page == cpu_buffer->commit_page) &&
  2519. (iter->head + length > rb_commit_index(cpu_buffer))))
  2520. return;
  2521. rb_update_iter_read_stamp(iter, event);
  2522. iter->head += length;
  2523. /* check for end of page padding */
  2524. if ((iter->head >= rb_page_size(iter->head_page)) &&
  2525. (iter->head_page != cpu_buffer->commit_page))
  2526. rb_advance_iter(iter);
  2527. }
  2528. static struct ring_buffer_event *
  2529. rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts)
  2530. {
  2531. struct ring_buffer_event *event;
  2532. struct buffer_page *reader;
  2533. int nr_loops = 0;
  2534. again:
  2535. /*
  2536. * We repeat when a timestamp is encountered. It is possible
  2537. * to get multiple timestamps from an interrupt entering just
  2538. * as one timestamp is about to be written, or from discarded
  2539. * commits. The most that we can have is the number on a single page.
  2540. */
  2541. if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
  2542. return NULL;
  2543. reader = rb_get_reader_page(cpu_buffer);
  2544. if (!reader)
  2545. return NULL;
  2546. event = rb_reader_event(cpu_buffer);
  2547. switch (event->type_len) {
  2548. case RINGBUF_TYPE_PADDING:
  2549. if (rb_null_event(event))
  2550. RB_WARN_ON(cpu_buffer, 1);
  2551. /*
  2552. * Because the writer could be discarding every
  2553. * event it creates (which would probably be bad)
  2554. * if we were to go back to "again" then we may never
  2555. * catch up, and will trigger the warn on, or lock
  2556. * the box. Return the padding, and we will release
  2557. * the current locks, and try again.
  2558. */
  2559. return event;
  2560. case RINGBUF_TYPE_TIME_EXTEND:
  2561. /* Internal data, OK to advance */
  2562. rb_advance_reader(cpu_buffer);
  2563. goto again;
  2564. case RINGBUF_TYPE_TIME_STAMP:
  2565. /* FIXME: not implemented */
  2566. rb_advance_reader(cpu_buffer);
  2567. goto again;
  2568. case RINGBUF_TYPE_DATA:
  2569. if (ts) {
  2570. *ts = cpu_buffer->read_stamp + event->time_delta;
  2571. ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
  2572. cpu_buffer->cpu, ts);
  2573. }
  2574. return event;
  2575. default:
  2576. BUG();
  2577. }
  2578. return NULL;
  2579. }
  2580. EXPORT_SYMBOL_GPL(ring_buffer_peek);
  2581. static struct ring_buffer_event *
  2582. rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
  2583. {
  2584. struct ring_buffer *buffer;
  2585. struct ring_buffer_per_cpu *cpu_buffer;
  2586. struct ring_buffer_event *event;
  2587. int nr_loops = 0;
  2588. cpu_buffer = iter->cpu_buffer;
  2589. buffer = cpu_buffer->buffer;
  2590. /*
  2591. * Check if someone performed a consuming read to
  2592. * the buffer. A consuming read invalidates the iterator
  2593. * and we need to reset the iterator in this case.
  2594. */
  2595. if (unlikely(iter->cache_read != cpu_buffer->read ||
  2596. iter->cache_reader_page != cpu_buffer->reader_page))
  2597. rb_iter_reset(iter);
  2598. again:
  2599. if (ring_buffer_iter_empty(iter))
  2600. return NULL;
  2601. /*
  2602. * We repeat when a timestamp is encountered.
  2603. * We can get multiple timestamps by nested interrupts or also
  2604. * if filtering is on (discarding commits). Since discarding
  2605. * commits can be frequent we can get a lot of timestamps.
  2606. * But we limit them by not adding timestamps if they begin
  2607. * at the start of a page.
  2608. */
  2609. if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
  2610. return NULL;
  2611. if (rb_per_cpu_empty(cpu_buffer))
  2612. return NULL;
  2613. if (iter->head >= local_read(&iter->head_page->page->commit)) {
  2614. rb_inc_iter(iter);
  2615. goto again;
  2616. }
  2617. event = rb_iter_head_event(iter);
  2618. switch (event->type_len) {
  2619. case RINGBUF_TYPE_PADDING:
  2620. if (rb_null_event(event)) {
  2621. rb_inc_iter(iter);
  2622. goto again;
  2623. }
  2624. rb_advance_iter(iter);
  2625. return event;
  2626. case RINGBUF_TYPE_TIME_EXTEND:
  2627. /* Internal data, OK to advance */
  2628. rb_advance_iter(iter);
  2629. goto again;
  2630. case RINGBUF_TYPE_TIME_STAMP:
  2631. /* FIXME: not implemented */
  2632. rb_advance_iter(iter);
  2633. goto again;
  2634. case RINGBUF_TYPE_DATA:
  2635. if (ts) {
  2636. *ts = iter->read_stamp + event->time_delta;
  2637. ring_buffer_normalize_time_stamp(buffer,
  2638. cpu_buffer->cpu, ts);
  2639. }
  2640. return event;
  2641. default:
  2642. BUG();
  2643. }
  2644. return NULL;
  2645. }
  2646. EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
  2647. static inline int rb_ok_to_lock(void)
  2648. {
  2649. /*
  2650. * If an NMI die dumps out the content of the ring buffer
2651. * do not grab locks. We also permanently disable the ring
2652. * buffer. A one time deal is all you get from reading
  2653. * the ring buffer from an NMI.
  2654. */
  2655. if (likely(!in_nmi()))
  2656. return 1;
  2657. tracing_off_permanent();
  2658. return 0;
  2659. }
  2660. /**
  2661. * ring_buffer_peek - peek at the next event to be read
  2662. * @buffer: The ring buffer to read
2663. * @cpu: The cpu to peek at
  2664. * @ts: The timestamp counter of this event.
  2665. *
  2666. * This will return the event that will be read next, but does
  2667. * not consume the data.
  2668. */
  2669. struct ring_buffer_event *
  2670. ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
  2671. {
  2672. struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
  2673. struct ring_buffer_event *event;
  2674. unsigned long flags;
  2675. int dolock;
  2676. if (!cpumask_test_cpu(cpu, buffer->cpumask))
  2677. return NULL;
  2678. dolock = rb_ok_to_lock();
  2679. again:
  2680. local_irq_save(flags);
  2681. if (dolock)
  2682. spin_lock(&cpu_buffer->reader_lock);
  2683. event = rb_buffer_peek(cpu_buffer, ts);
  2684. if (event && event->type_len == RINGBUF_TYPE_PADDING)
  2685. rb_advance_reader(cpu_buffer);
  2686. if (dolock)
  2687. spin_unlock(&cpu_buffer->reader_lock);
  2688. local_irq_restore(flags);
  2689. if (event && event->type_len == RINGBUF_TYPE_PADDING)
  2690. goto again;
  2691. return event;
  2692. }
  2693. /**
  2694. * ring_buffer_iter_peek - peek at the next event to be read
  2695. * @iter: The ring buffer iterator
  2696. * @ts: The timestamp counter of this event.
  2697. *
  2698. * This will return the event that will be read next, but does
  2699. * not increment the iterator.
  2700. */
  2701. struct ring_buffer_event *
  2702. ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
  2703. {
  2704. struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
  2705. struct ring_buffer_event *event;
  2706. unsigned long flags;
  2707. again:
  2708. spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
  2709. event = rb_iter_peek(iter, ts);
  2710. spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
  2711. if (event && event->type_len == RINGBUF_TYPE_PADDING)
  2712. goto again;
  2713. return event;
  2714. }
  2715. /**
  2716. * ring_buffer_consume - return an event and consume it
2717. * @buffer: The ring buffer to get the next event from
 * @cpu: The per CPU buffer to get the event from
 * @ts: The timestamp counter of this event.
  2718. *
  2719. * Returns the next event in the ring buffer, and that event is consumed.
  2720. * Meaning, that sequential reads will keep returning a different event,
  2721. * and eventually empty the ring buffer if the producer is slower.
  2722. */
  2723. struct ring_buffer_event *
  2724. ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
  2725. {
  2726. struct ring_buffer_per_cpu *cpu_buffer;
  2727. struct ring_buffer_event *event = NULL;
  2728. unsigned long flags;
  2729. int dolock;
  2730. dolock = rb_ok_to_lock();
  2731. again:
  2732. /* might be called in atomic */
  2733. preempt_disable();
  2734. if (!cpumask_test_cpu(cpu, buffer->cpumask))
  2735. goto out;
  2736. cpu_buffer = buffer->buffers[cpu];
  2737. local_irq_save(flags);
  2738. if (dolock)
  2739. spin_lock(&cpu_buffer->reader_lock);
  2740. event = rb_buffer_peek(cpu_buffer, ts);
  2741. if (event)
  2742. rb_advance_reader(cpu_buffer);
  2743. if (dolock)
  2744. spin_unlock(&cpu_buffer->reader_lock);
  2745. local_irq_restore(flags);
  2746. out:
  2747. preempt_enable();
  2748. if (event && event->type_len == RINGBUF_TYPE_PADDING)
  2749. goto again;
  2750. return event;
  2751. }
  2752. EXPORT_SYMBOL_GPL(ring_buffer_consume);
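/*
 * Illustrative usage sketch (added; process_entry() is a made-up
 * consumer): drain one CPU's buffer with consuming reads:
 *
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts)) != NULL)
 *		process_entry(ring_buffer_event_data(event), ts);
 */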
/**
 * ring_buffer_read_start - start a non consuming read of the buffer
 * @buffer: The ring buffer to read from
 * @cpu: The cpu buffer to iterate over
 *
 * This starts up an iteration through the buffer. It also disables
 * the recording to the buffer until the reading is finished.
 * This prevents the reading from being corrupted. This is not
 * a consuming read, so a producer is not expected.
 *
 * Must be paired with ring_buffer_read_finish.
 */
struct ring_buffer_iter *
ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_iter *iter;
	unsigned long flags;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return NULL;

	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	cpu_buffer = buffer->buffers[cpu];

	iter->cpu_buffer = cpu_buffer;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	arch_spin_lock(&cpu_buffer->lock);
	rb_iter_reset(iter);
	arch_spin_unlock(&cpu_buffer->lock);
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	return iter;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_start);
/**
 * ring_buffer_read_finish - finish reading the iterator of the buffer
 * @iter: The iterator retrieved by ring_buffer_read_start
 *
 * This re-enables the recording to the buffer, and frees the
 * iterator.
 */
void
ring_buffer_read_finish(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	atomic_dec(&cpu_buffer->record_disabled);
	kfree(iter);
}
EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
/**
 * ring_buffer_read - read the next item in the ring buffer by the iterator
 * @iter: The ring buffer iterator
 * @ts: The time stamp of the event read.
 *
 * This reads the next event in the ring buffer and increments the iterator.
 */
struct ring_buffer_event *
ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer_event *event;
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
	unsigned long flags;

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 again:
	event = rb_iter_peek(iter, ts);
	if (!event)
		goto out;

	if (event->type_len == RINGBUF_TYPE_PADDING)
		goto again;

	rb_advance_iter(iter);
 out:
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_read);
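/*
 * Example (illustrative sketch, not part of the original source): a full
 * non-consuming pass over one CPU buffer, pairing ring_buffer_read_start()
 * with ring_buffer_read_finish().  "my_buffer" and "dump_event()" are
 * hypothetical caller-side names.
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_start(my_buffer, cpu);
 *	if (!iter)
 *		return;
 *	while ((event = ring_buffer_read(iter, &ts)) != NULL)
 *		dump_event(ring_buffer_event_data(event), ts);
 *	ring_buffer_read_finish(iter);
 */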
/**
 * ring_buffer_size - return the size of the ring buffer (in bytes)
 * @buffer: The ring buffer.
 */
unsigned long ring_buffer_size(struct ring_buffer *buffer)
{
	return BUF_PAGE_SIZE * buffer->pages;
}
EXPORT_SYMBOL_GPL(ring_buffer_size);
static void
rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
{
	rb_head_page_deactivate(cpu_buffer);

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages, struct buffer_page, list);
	local_set(&cpu_buffer->head_page->write, 0);
	local_set(&cpu_buffer->head_page->entries, 0);
	local_set(&cpu_buffer->head_page->page->commit, 0);

	cpu_buffer->head_page->read = 0;

	cpu_buffer->tail_page = cpu_buffer->head_page;
	cpu_buffer->commit_page = cpu_buffer->head_page;

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
	local_set(&cpu_buffer->reader_page->write, 0);
	local_set(&cpu_buffer->reader_page->entries, 0);
	local_set(&cpu_buffer->reader_page->page->commit, 0);
	cpu_buffer->reader_page->read = 0;

	local_set(&cpu_buffer->commit_overrun, 0);
	local_set(&cpu_buffer->overrun, 0);
	local_set(&cpu_buffer->entries, 0);
	local_set(&cpu_buffer->committing, 0);
	local_set(&cpu_buffer->commits, 0);
	cpu_buffer->read = 0;

	cpu_buffer->write_stamp = 0;
	cpu_buffer->read_stamp = 0;

	rb_head_page_activate(cpu_buffer);
}
/**
 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
 * @buffer: The ring buffer to reset a per cpu buffer of
 * @cpu: The CPU buffer to be reset
 */
void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	unsigned long flags;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	atomic_inc(&cpu_buffer->record_disabled);

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
		goto out;

	arch_spin_lock(&cpu_buffer->lock);

	rb_reset_cpu(cpu_buffer);

	arch_spin_unlock(&cpu_buffer->lock);

 out:
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	atomic_dec(&cpu_buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
/**
 * ring_buffer_reset - reset a ring buffer
 * @buffer: The ring buffer to reset all cpu buffers of
 */
void ring_buffer_reset(struct ring_buffer *buffer)
{
	int cpu;

	for_each_buffer_cpu(buffer, cpu)
		ring_buffer_reset_cpu(buffer, cpu);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset);
/**
 * ring_buffer_empty - is the ring buffer empty?
 * @buffer: The ring buffer to test
 */
int ring_buffer_empty(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;
	int dolock;
	int cpu;
	int ret;

	dolock = rb_ok_to_lock();

	/* yes this is racy, but if you don't like the race, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		local_irq_save(flags);
		if (dolock)
			spin_lock(&cpu_buffer->reader_lock);
		ret = rb_per_cpu_empty(cpu_buffer);
		if (dolock)
			spin_unlock(&cpu_buffer->reader_lock);
		local_irq_restore(flags);

		if (!ret)
			return 0;
	}

	return 1;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty);
/**
 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
 * @buffer: The ring buffer
 * @cpu: The CPU buffer to test
 */
int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;
	int dolock;
	int ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 1;

	dolock = rb_ok_to_lock();

	cpu_buffer = buffer->buffers[cpu];
	local_irq_save(flags);
	if (dolock)
		spin_lock(&cpu_buffer->reader_lock);
	ret = rb_per_cpu_empty(cpu_buffer);
	if (dolock)
		spin_unlock(&cpu_buffer->reader_lock);
	local_irq_restore(flags);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
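/*
 * Example (illustrative sketch, not part of the original source): skip
 * CPUs whose buffers have nothing to read before starting a consuming
 * pass.  "my_buffer" and "drain_cpu()" are hypothetical caller-side names.
 *
 *	int cpu;
 *
 *	for_each_online_cpu(cpu) {
 *		if (ring_buffer_empty_cpu(my_buffer, cpu))
 *			continue;
 *		drain_cpu(my_buffer, cpu);
 *	}
 */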
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
/**
 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
 * @buffer_a: One buffer to swap with
 * @buffer_b: The other buffer to swap with
 * @cpu: The CPU buffer to swap
 *
 * This function is useful for tracers that want to take a "snapshot"
 * of a CPU buffer and have another back up buffer lying around.
 * It is expected that the tracer handles the cpu buffer not being
 * used at the moment.
 */
int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
			 struct ring_buffer *buffer_b, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer_a;
	struct ring_buffer_per_cpu *cpu_buffer_b;
	int ret = -EINVAL;

	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
		goto out;

	/* At least make sure the two buffers are somewhat the same */
	if (buffer_a->pages != buffer_b->pages)
		goto out;

	ret = -EAGAIN;

	if (ring_buffer_flags != RB_BUFFERS_ON)
		goto out;

	if (atomic_read(&buffer_a->record_disabled))
		goto out;

	if (atomic_read(&buffer_b->record_disabled))
		goto out;

	cpu_buffer_a = buffer_a->buffers[cpu];
	cpu_buffer_b = buffer_b->buffers[cpu];

	if (atomic_read(&cpu_buffer_a->record_disabled))
		goto out;

	if (atomic_read(&cpu_buffer_b->record_disabled))
		goto out;

	/*
	 * We can't do a synchronize_sched here because this
	 * function can be called in atomic context.
	 * Normally this will be called from the same CPU as cpu.
	 * If not it's up to the caller to protect this.
	 */
	atomic_inc(&cpu_buffer_a->record_disabled);
	atomic_inc(&cpu_buffer_b->record_disabled);

	ret = -EBUSY;
	if (local_read(&cpu_buffer_a->committing))
		goto out_dec;
	if (local_read(&cpu_buffer_b->committing))
		goto out_dec;

	buffer_a->buffers[cpu] = cpu_buffer_b;
	buffer_b->buffers[cpu] = cpu_buffer_a;

	cpu_buffer_b->buffer = buffer_a;
	cpu_buffer_a->buffer = buffer_b;

	ret = 0;

out_dec:
	atomic_dec(&cpu_buffer_a->record_disabled);
	atomic_dec(&cpu_buffer_b->record_disabled);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
#endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
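/*
 * Example (illustrative sketch, not part of the original source): take a
 * "snapshot" of the current CPU's buffer by swapping it with a spare
 * buffer allocated with the same number of pages.  "live_buffer",
 * "spare_buffer" and "read_snapshot()" are hypothetical caller-side names.
 *
 *	if (ring_buffer_swap_cpu(spare_buffer, live_buffer,
 *				 smp_processor_id()) == 0)
 *		read_snapshot(spare_buffer);
 */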
/**
 * ring_buffer_alloc_read_page - allocate a page to read from buffer
 * @buffer: the buffer to allocate for.
 *
 * This function is used in conjunction with ring_buffer_read_page.
 * When reading a full page from the ring buffer, these functions
 * can be used to speed up the process. The calling function should
 * allocate a few pages first with this function. Then when it
 * needs to get pages from the ring buffer, it passes the result
 * of this function into ring_buffer_read_page, which will swap
 * the page that was allocated with the read page of the buffer.
 *
 * Returns:
 *  The page allocated, or NULL on error.
 */
void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
{
	struct buffer_data_page *bpage;
	unsigned long addr;

	addr = __get_free_page(GFP_KERNEL);
	if (!addr)
		return NULL;

	bpage = (void *)addr;

	rb_init_page(bpage);

	return bpage;
}
EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
/**
 * ring_buffer_free_read_page - free an allocated read page
 * @buffer: the buffer the page was allocated for
 * @data: the page to free
 *
 * Free a page allocated from ring_buffer_alloc_read_page.
 */
void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
{
	free_page((unsigned long)data);
}
EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
/**
 * ring_buffer_read_page - extract a page from the ring buffer
 * @buffer: buffer to extract from
 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
 * @len: amount to extract
 * @cpu: the cpu of the buffer to extract
 * @full: should the extraction only happen when the page is full.
 *
 * This function will pull out a page from the ring buffer and consume it.
 * @data_page must be the address of the variable that was returned
 * from ring_buffer_alloc_read_page. This is because the page might be used
 * to swap with a page in the ring buffer.
 *
 * for example:
 *	rpage = ring_buffer_alloc_read_page(buffer);
 *	if (!rpage)
 *		return error;
 *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
 *	if (ret >= 0)
 *		process_page(rpage, ret);
 *
 * When @full is set, the function will not return any data unless
 * the writer is off the reader page.
 *
 * Note: it is up to the calling functions to handle sleeps and wakeups.
 *  The ring buffer can be used anywhere in the kernel and can not
 *  blindly call wake_up. The layer that uses the ring buffer must be
 *  responsible for that.
 *
 * Returns:
 *  >=0 if data has been transferred, returns the offset of consumed data.
 *  <0 if no data has been transferred.
 */
int ring_buffer_read_page(struct ring_buffer *buffer,
			  void **data_page, size_t len, int cpu, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	struct ring_buffer_event *event;
	struct buffer_data_page *bpage;
	struct buffer_page *reader;
	unsigned long flags;
	unsigned int commit;
	unsigned int read;
	u64 save_timestamp;
	int ret = -1;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	/*
	 * If len is not big enough to hold the page header, then
	 * we can not copy anything.
	 */
	if (len <= BUF_PAGE_HDR_SIZE)
		goto out;

	len -= BUF_PAGE_HDR_SIZE;

	if (!data_page)
		goto out;

	bpage = *data_page;
	if (!bpage)
		goto out;

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	reader = rb_get_reader_page(cpu_buffer);
	if (!reader)
		goto out_unlock;

	event = rb_reader_event(cpu_buffer);

	read = reader->read;
	commit = rb_page_commit(reader);

	/*
	 * If this page has been partially read or
	 * if len is not big enough to read the rest of the page or
	 * a writer is still on the page, then
	 * we must copy the data from the page to the buffer.
	 * Otherwise, we can simply swap the page with the one passed in.
	 */
	if (read || (len < (commit - read)) ||
	    cpu_buffer->reader_page == cpu_buffer->commit_page) {
		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
		unsigned int rpos = read;
		unsigned int pos = 0;
		unsigned int size;

		if (full)
			goto out_unlock;

		if (len > (commit - read))
			len = (commit - read);

		size = rb_event_length(event);

		if (len < size)
			goto out_unlock;

		/* save the current timestamp, since the user will need it */
		save_timestamp = cpu_buffer->read_stamp;

		/* Need to copy one event at a time */
		do {
			memcpy(bpage->data + pos, rpage->data + rpos, size);

			len -= size;

			rb_advance_reader(cpu_buffer);
			rpos = reader->read;
			pos += size;

			event = rb_reader_event(cpu_buffer);
			size = rb_event_length(event);
		} while (len > size);

		/* update bpage */
		local_set(&bpage->commit, pos);
		bpage->time_stamp = save_timestamp;

		/* we copied everything to the beginning */
		read = 0;
	} else {
		/* update the entry counter */
		cpu_buffer->read += rb_page_entries(reader);

		/* swap the pages */
		rb_init_page(bpage);
		bpage = reader->page;
		reader->page = *data_page;
		local_set(&reader->write, 0);
		local_set(&reader->entries, 0);
		reader->read = 0;
		*data_page = bpage;
	}
	ret = read;

 out_unlock:
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

 out:
	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_page);
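/*
 * Example (illustrative sketch, not part of the original source): the full
 * allocate/read/free cycle for pulling whole pages out of one CPU buffer.
 * "my_buffer" and "process_page()" are hypothetical caller-side names.
 *
 *	void *rpage;
 *	int ret;
 *
 *	rpage = ring_buffer_alloc_read_page(my_buffer);
 *	if (!rpage)
 *		return -ENOMEM;
 *	do {
 *		ret = ring_buffer_read_page(my_buffer, &rpage,
 *					    PAGE_SIZE, cpu, 0);
 *		if (ret >= 0)
 *			process_page(rpage, ret);
 *	} while (ret >= 0);
 *	ring_buffer_free_read_page(my_buffer, rpage);
 */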
#ifdef CONFIG_TRACING
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	unsigned long *p = filp->private_data;
	char buf[64];
	int r;

	if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
		r = sprintf(buf, "permanently disabled\n");
	else
		r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	unsigned long *p = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	if (val)
		set_bit(RB_BUFFERS_ON_BIT, p);
	else
		clear_bit(RB_BUFFERS_ON_BIT, p);

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
};

static __init int rb_init_debugfs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();

	trace_create_file("tracing_on", 0644, d_tracer,
			  &ring_buffer_flags, &rb_simple_fops);

	return 0;
}

fs_initcall(rb_init_debugfs);
#endif
#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu)
{
	struct ring_buffer *buffer =
		container_of(self, struct ring_buffer, cpu_notify);
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (cpumask_test_cpu(cpu, buffer->cpumask))
			return NOTIFY_OK;

		buffer->buffers[cpu] =
			rb_allocate_cpu_buffer(buffer, cpu);
		if (!buffer->buffers[cpu]) {
			WARN(1, "failed to allocate ring buffer on CPU %ld\n",
			     cpu);
			return NOTIFY_OK;
		}
		smp_wmb();
		cpumask_set_cpu(cpu, buffer->cpumask);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		/*
		 * Do nothing.
		 *  If we were to free the buffer, then the user would
		 *  lose any trace that was in the buffer.
		 */
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
#endif