/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/ftrace_irq.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/fs.h>

#include "trace.h"

/*
 * The ring buffer header is special. We must manually keep it up to date.
 */
int ring_buffer_print_entry_header(struct trace_seq *s)
{
	int ret;

	ret = trace_seq_printf(s, "# compressed entry header\n");
	ret = trace_seq_printf(s, "\ttype_len : 5 bits\n");
	ret = trace_seq_printf(s, "\ttime_delta : 27 bits\n");
	ret = trace_seq_printf(s, "\tarray : 32 bits\n");
	ret = trace_seq_printf(s, "\n");
	ret = trace_seq_printf(s, "\tpadding : type == %d\n",
			       RINGBUF_TYPE_PADDING);
	ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
			       RINGBUF_TYPE_TIME_EXTEND);
	ret = trace_seq_printf(s, "\tdata max type_len == %d\n",
			       RINGBUF_TYPE_DATA_TYPE_LEN_MAX);

	return ret;
}

/*
 * The ring buffer is made up of a list of pages. A separate list of pages is
 * allocated for each CPU. A writer may only write to a buffer that is
 * associated with the CPU it is currently executing on. A reader may read
 * from any per cpu buffer.
 *
 * The reader is special. For each per cpu buffer, the reader has its own
 * reader page. When a reader has read the entire reader page, this reader
 * page is swapped with another page in the ring buffer.
 *
 * Now, as long as the writer is off the reader page, the reader can do what
 * ever it wants with that page. The writer will never write to that page
 * again (as long as it is out of the ring buffer).
 *
 * Here's some silly ASCII art.
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |-->|   |-->|   |
 *      |            +---+   +---+   +---+
 *      |                              |
 *      |                              |
 *      +------------------------------+
 *
 *
 *   +------+
 *   |buffer|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |   |   |-->|   |
 *      |   New      +---+   +---+   +---+
 *      |  Reader------^               |
 *      |   page                       |
 *      +------------------------------+
 *
 *
 * After we make this swap, the reader can hand this page off to the splice
 * code and be done with it. It can even allocate a new page if it needs to
 * and swap that into the ring buffer.
 *
 * We will be using cmpxchg soon to make all this lockless.
 *
 */

/*
 * A fast way to enable or disable all ring buffers is to
 * call tracing_on or tracing_off. Turning off the ring buffers
 * prevents all ring buffers from being recorded to.
 * Turning this switch on makes it OK to write to the
 * ring buffer, if the ring buffer is enabled itself.
 *
 * There are three layers that must be on in order to write
 * to the ring buffer.
 *
 * 1) This global flag must be set.
 * 2) The ring buffer must be enabled for recording.
 * 3) The per cpu buffer must be enabled for recording.
 *
 * In case of an anomaly, this global flag has a bit set that
 * will permanently disable all ring buffers.
 */

/*
 * Global flag to disable all recording to ring buffers
 * This has two bits: ON, DISABLED
 *
 *  ON   DISABLED
 * ---- ----------
 *   0      0      : ring buffers are off
 *   1      0      : ring buffers are on
 *   X      1      : ring buffers are permanently disabled
 */

enum {
	RB_BUFFERS_ON_BIT	= 0,
	RB_BUFFERS_DISABLED_BIT	= 1,
};

enum {
	RB_BUFFERS_ON		= 1 << RB_BUFFERS_ON_BIT,
	RB_BUFFERS_DISABLED	= 1 << RB_BUFFERS_DISABLED_BIT,
};

static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;

#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)

/**
 * tracing_on - enable all tracing buffers
 *
 * This function enables all tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * tracing_off - turn off all tracing buffers
 *
 * This function stops all tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
}
EXPORT_SYMBOL_GPL(tracing_off);

/**
 * tracing_off_permanent - permanently disable ring buffers
 *
 * This function, once called, will disable all ring buffers
 * permanently.
 */
void tracing_off_permanent(void)
{
	set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return ring_buffer_flags == RB_BUFFERS_ON;
}
EXPORT_SYMBOL_GPL(tracing_is_on);

#include "trace.h"

#define RB_EVNT_HDR_SIZE	(offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT		4U
#define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
#define RB_EVNT_MIN_SIZE	8U	/* two 32-bit words */

/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX

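/*
 * Note: the define above expands to a GCC case range (0 ... MAX), so every
 * data event, whatever its encoded length, falls into the single
 * RINGBUF_TYPE_DATA arm of the switch in rb_event_length() below.
 */
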
enum {
	RB_LEN_TIME_EXTEND = 8,
	RB_LEN_TIME_STAMP = 16,
};

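/*
 * Padding events do double duty: with a zero time_delta they mark unused
 * space at the end of a page (a "null" event, see rb_null_event()), and
 * with a non-zero time_delta they mark a discarded event whose time must
 * still be accounted for (see rb_discarded_event()).
 */
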
static inline int rb_null_event(struct ring_buffer_event *event)
{
	return event->type_len == RINGBUF_TYPE_PADDING
			&& event->time_delta == 0;
}

static inline int rb_discarded_event(struct ring_buffer_event *event)
{
	return event->type_len == RINGBUF_TYPE_PADDING && event->time_delta;
}

static void rb_event_set_padding(struct ring_buffer_event *event)
{
	event->type_len = RINGBUF_TYPE_PADDING;
	event->time_delta = 0;
}

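/*
 * Data events encode their length in one of two ways: if the payload is
 * small enough, type_len holds length / RB_ALIGNMENT; otherwise type_len
 * is zero and the byte length sits in array[0]. For example, with
 * RB_ALIGNMENT == 4, a type_len of 3 means a 12-byte payload.
 */
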
static unsigned
rb_event_data_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->type_len)
		length = event->type_len * RB_ALIGNMENT;
	else
		length = event->array[0];
	return length + RB_EVNT_HDR_SIZE;
}

/* inline for ring buffer fast paths */
static unsigned
rb_event_length(struct ring_buffer_event *event)
{
	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			/* undefined */
			return -1;
		return event->array[0] + RB_EVNT_HDR_SIZE;

	case RINGBUF_TYPE_TIME_EXTEND:
		return RB_LEN_TIME_EXTEND;

	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;

	case RINGBUF_TYPE_DATA:
		return rb_event_data_length(event);
	default:
		BUG();
	}
	/* not hit */
	return 0;
}

/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
	unsigned length = rb_event_length(event);

	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
		return length;
	length -= RB_EVNT_HDR_SIZE;
	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
		length -= sizeof(event->array[0]);
	return length;
}
EXPORT_SYMBOL_GPL(ring_buffer_event_length);

/* inline for ring buffer fast paths */
static void *
rb_event_data(struct ring_buffer_event *event)
{
	BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
	/* If length is in len field, then array[0] has the data */
	if (event->type_len)
		return (void *)&event->array[0];
	/* Otherwise length is in array[0] and array[1] has the data */
	return (void *)&event->array[1];
}

/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
	return rb_event_data(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_data);

#define for_each_buffer_cpu(buffer, cpu)		\
	for_each_cpu(cpu, buffer->cpumask)

#define TS_SHIFT	27
#define TS_MASK		((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST	(~TS_MASK)

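/*
 * Event timestamps are stored as deltas from the page time stamp and must
 * fit in TS_SHIFT (27) bits. TS_DELTA_TEST flags any delta that does not
 * fit; test_time_stamp() below uses it to decide when the delta cannot be
 * stored directly (see the time_extend entry type in the header above).
 */
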
struct buffer_data_page {
	u64		 time_stamp;	/* page time stamp */
	local_t		 commit;	/* write committed index */
	unsigned char	 data[];	/* data of buffer page */
};

/*
 * Note, the buffer_page list must be first. The buffer pages
 * are allocated in cache lines, which means that each buffer
 * page will be at the beginning of a cache line, and thus
 * the least significant bits will be zero. We use this to
 * add flags in the list struct pointers, to make the ring buffer
 * lockless.
 */
struct buffer_page {
	struct list_head list;		/* list of buffer pages */
	local_t		 write;		/* index for next write */
	unsigned	 read;		/* index for next read */
	local_t		 entries;	/* entries on this page */
	struct buffer_data_page *page;	/* Actual data page */
};

/*
 * The buffer page counters, write and entries, must be reset
 * atomically when crossing page boundaries. To synchronize this
 * update, two counters are inserted into the number. One is
 * the actual counter for the write position or count on the page.
 *
 * The other is a counter of updaters. Before an update happens
 * the update partition of the counter is incremented. This will
 * allow the updater to update the counter atomically.
 *
 * The counter is 20 bits, and the state data is 12.
 */
#define RB_WRITE_MASK		0xfffff
#define RB_WRITE_INTCNT		(1 << 20)

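/*
 * Illustrative example (values made up): a write field of 0x00300015 means
 * three updaters are nested on the page (upper 12 bits == 3) while the
 * write index on the page is 0x15 (lower 20 bits). rb_page_write() and
 * rb_page_entries() below mask with RB_WRITE_MASK to recover the index.
 */
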
static void rb_init_page(struct buffer_data_page *bpage)
{
	local_set(&bpage->commit, 0);
}

/**
 * ring_buffer_page_len - the size of data on the page.
 * @page: The page to read
 *
 * Returns the amount of data on the page, including buffer page header.
 */
size_t ring_buffer_page_len(void *page)
{
	return local_read(&((struct buffer_data_page *)page)->commit)
		+ BUF_PAGE_HDR_SIZE;
}

/*
 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 * this issue out.
 */
static void free_buffer_page(struct buffer_page *bpage)
{
	free_page((unsigned long)bpage->page);
	kfree(bpage);
}

/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline int test_time_stamp(u64 delta)
{
	if (delta & TS_DELTA_TEST)
		return 1;
	return 0;
}

#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)

/* Max payload is BUF_PAGE_SIZE - header (8 bytes) */
#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))

/* Max number of timestamps that can fit on a page */
#define RB_TIMESTAMPS_PER_PAGE	(BUF_PAGE_SIZE / RB_LEN_TIME_STAMP)

int ring_buffer_print_page_header(struct trace_seq *s)
{
	struct buffer_data_page field;
	int ret;

	ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
			       "offset:0;\tsize:%u;\n",
			       (unsigned int)sizeof(field.time_stamp));

	ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
			       "offset:%u;\tsize:%u;\n",
			       (unsigned int)offsetof(typeof(field), commit),
			       (unsigned int)sizeof(field.commit));

	ret = trace_seq_printf(s, "\tfield: char data;\t"
			       "offset:%u;\tsize:%u;\n",
			       (unsigned int)offsetof(typeof(field), data),
			       (unsigned int)BUF_PAGE_SIZE);

	return ret;
}

/*
 * head_page == tail_page && head == tail then buffer is empty.
 */
struct ring_buffer_per_cpu {
	int				cpu;
	struct ring_buffer		*buffer;
	spinlock_t			reader_lock;	/* serialize readers */
	raw_spinlock_t			lock;
	struct lock_class_key		lock_key;
	struct list_head		*pages;
	struct buffer_page		*head_page;	/* read from head */
	struct buffer_page		*tail_page;	/* write to tail */
	struct buffer_page		*commit_page;	/* committed pages */
	struct buffer_page		*reader_page;
	local_t				commit_overrun;
	local_t				overrun;
	local_t				entries;
	local_t				committing;
	local_t				commits;
	unsigned long			read;
	u64				write_stamp;
	u64				read_stamp;
	atomic_t			record_disabled;
};

struct ring_buffer {
	unsigned			pages;
	unsigned			flags;
	int				cpus;
	atomic_t			record_disabled;
	cpumask_var_t			cpumask;

	struct lock_class_key		*reader_lock_key;

	struct mutex			mutex;

	struct ring_buffer_per_cpu	**buffers;

#ifdef CONFIG_HOTPLUG_CPU
	struct notifier_block		cpu_notify;
#endif
	u64				(*clock)(void);
};

struct ring_buffer_iter {
	struct ring_buffer_per_cpu	*cpu_buffer;
	unsigned long			head;
	struct buffer_page		*head_page;
	u64				read_stamp;
};

/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(buffer, cond)				\
	({							\
		int _____ret = unlikely(cond);			\
		if (_____ret) {					\
			atomic_inc(&buffer->record_disabled);	\
			WARN_ON(1);				\
		}						\
		_____ret;					\
	})

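/*
 * Note that RB_WARN_ON() also bumps record_disabled, so once a consistency
 * check fires the offending buffer stops recording rather than corrupting
 * itself further.
 */
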
/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

static inline u64 rb_time_stamp(struct ring_buffer *buffer, int cpu)
{
	/* shift to debug/test normalization and TIME_EXTENTS */
	return buffer->clock() << DEBUG_SHIFT;
}

u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
{
	u64 time;

	preempt_disable_notrace();
	time = rb_time_stamp(buffer, cpu);
	preempt_enable_no_resched_notrace();

	return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);

void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
				      int cpu, u64 *ts)
{
	/* Just stupid testing the normalize function and deltas */
	*ts >>= DEBUG_SHIFT;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);

/*
 * Making the ring buffer lockless makes things tricky.
 * Writes only happen on the CPU that they are on, and they only
 * need to worry about interrupts, but reads can happen on any CPU.
 *
 * The reader page is always off the ring buffer, but when the
 * reader finishes with a page, it needs to swap its page with
 * a new one from the buffer. The reader needs to take from
 * the head (writes go to the tail). But if a writer is in overwrite
 * mode and wraps, it must push the head page forward.
 *
 * Here lies the problem.
 *
 * The reader must be careful to replace only the head page, and
 * not another one. As described at the top of the file in the
 * ASCII art, the reader sets its old page to point to the next
 * page after head. It then sets the page after head to point to
 * the old reader page. But if the writer moves the head page
 * during this operation, the reader could end up with the tail.
 *
 * We use cmpxchg to help prevent this race. We also do something
 * special with the page before head. We set the LSB to 1.
 *
 * When the writer must push the page forward, it will clear the
 * bit that points to the head page, move the head, and then set
 * the bit that points to the new head page.
 *
 * We also don't want an interrupt coming in and moving the head
 * page on another writer. Thus we use the second LSB to catch
 * that too. Thus:
 *
 * head->list->prev->next        bit 1          bit 0
 *                              -------        -------
 * Normal page                     0              0
 * Points to head page             0              1
 * New head page                   1              0
 *
 * Note we can not trust the prev pointer of the head page, because:
 *
 * +----+       +-----+        +-----+
 * |    |------>|  T  |---X--->|  N  |
 * |    |<------|     |        |     |
 * +----+       +-----+        +-----+
 *   ^                           ^ |
 *   |          +-----+          | |
 *   +----------|  R  |----------+ |
 *              |     |<-----------+
 *              +-----+
 *
 * Key:  ---X-->  HEAD flag set in pointer
 *         T      Tail page
 *         R      Reader page
 *         N      Next page
 *
 * (see __rb_reserve_next() to see where this happens)
 *
 * What the above shows is that the reader just swapped out
 * the reader page with a page in the buffer, but before it
 * could make the new header point back to the new page added
 * it was preempted by a writer. The writer moved forward onto
 * the new page added by the reader and is about to move forward
 * again.
 *
 * You can see, it is legitimate for the previous pointer of
 * the head (or any page) not to point back to itself. But only
 * temporarily.
 */

#define RB_PAGE_NORMAL		0UL
#define RB_PAGE_HEAD		1UL
#define RB_PAGE_UPDATE		2UL

#define RB_FLAG_MASK		3UL

/* PAGE_MOVED is not part of the mask */
#define RB_PAGE_MOVED		4UL

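/*
 * These flag bits live in the two least significant bits of the ->next
 * pointer of the list entry *before* the page they describe. Buffer pages
 * are cache-line aligned (see the buffer_page comment above), so those
 * bits are otherwise always zero, e.g.:
 *
 *	prev->next = (struct list_head *)
 *			((unsigned long)&head->list | RB_PAGE_HEAD);
 *
 * rb_list_head() below strips the flags to recover the real pointer.
 */
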
/*
 * rb_list_head - remove any bit
 */
static struct list_head *rb_list_head(struct list_head *list)
{
	unsigned long val = (unsigned long)list;

	return (struct list_head *)(val & ~RB_FLAG_MASK);
}

/*
 * rb_is_head_page - test if the given page is the head page
 *
 * Because the reader may move the head_page pointer, we can
 * not trust what the head page is (it may be pointing to
 * the reader page). But if the next page is a header page,
 * its flags will be non-zero.
 */
static inline int
rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
		struct buffer_page *page, struct list_head *list)
{
	unsigned long val;

	val = (unsigned long)list->next;

	if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
		return RB_PAGE_MOVED;

	return val & RB_FLAG_MASK;
}

/*
 * rb_is_reader_page
 *
 * The unique thing about the reader page is that, if the
 * writer is ever on it, the previous pointer never points
 * back to the reader page.
 */
static int rb_is_reader_page(struct buffer_page *page)
{
	struct list_head *list = page->list.prev;

	return rb_list_head(list->next) != &page->list;
}

/*
 * rb_set_list_to_head - set a list_head to be pointing to head.
 */
static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
				struct list_head *list)
{
	unsigned long *ptr;

	ptr = (unsigned long *)&list->next;
	*ptr |= RB_PAGE_HEAD;
	*ptr &= ~RB_PAGE_UPDATE;
}

/*
 * rb_head_page_activate - sets up head page
 */
static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;

	head = cpu_buffer->head_page;
	if (!head)
		return;

	/*
	 * Set the previous list pointer to have the HEAD flag.
	 */
	rb_set_list_to_head(cpu_buffer, head->list.prev);
}

static void rb_list_head_clear(struct list_head *list)
{
	unsigned long *ptr = (unsigned long *)&list->next;

	*ptr &= ~RB_FLAG_MASK;
}

/*
 * rb_head_page_deactivate - clears head page ptr (for free list)
 */
static void
rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *hd;

	/* Go through the whole list and clear any pointers found. */
	rb_list_head_clear(cpu_buffer->pages);

	list_for_each(hd, cpu_buffer->pages)
		rb_list_head_clear(hd);
}

static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
			    struct buffer_page *head,
			    struct buffer_page *prev,
			    int old_flag, int new_flag)
{
	struct list_head *list;
	unsigned long val = (unsigned long)&head->list;
	unsigned long ret;

	list = &prev->list;

	val &= ~RB_FLAG_MASK;

	ret = (unsigned long)cmpxchg(&list->next,
				     val | old_flag, val | new_flag);

	/* check if the reader took the page */
	if ((ret & ~RB_FLAG_MASK) != val)
		return RB_PAGE_MOVED;

	return ret & RB_FLAG_MASK;
}

static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_UPDATE);
}

static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
				 struct buffer_page *head,
				 struct buffer_page *prev,
				 int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_HEAD);
}

static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_NORMAL);
}

static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
			       struct buffer_page **bpage)
{
	struct list_head *p = rb_list_head((*bpage)->list.next);

	*bpage = list_entry(p, struct buffer_page, list);
}

static struct buffer_page *
rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;
	struct buffer_page *page;
	struct list_head *list;
	int i;

	if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
		return NULL;

	/* sanity check */
	list = cpu_buffer->pages;
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
		return NULL;

	page = head = cpu_buffer->head_page;
	/*
	 * It is possible that the writer moves the header behind
	 * where we started, and we miss in one loop.
	 * A second loop should grab the header, but we'll do
	 * three loops just because I'm paranoid.
	 */
	for (i = 0; i < 3; i++) {
		do {
			if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
				cpu_buffer->head_page = page;
				return page;
			}
			rb_inc_page(cpu_buffer, &page);
		} while (page != head);
	}

	RB_WARN_ON(cpu_buffer, 1);

	return NULL;
}

static int rb_head_page_replace(struct buffer_page *old,
				struct buffer_page *new)
{
	unsigned long *ptr = (unsigned long *)&old->list.prev->next;
	unsigned long val;
	unsigned long ret;

	val = *ptr & ~RB_FLAG_MASK;
	val |= RB_PAGE_HEAD;

	ret = cmpxchg(ptr, val, (unsigned long)&new->list);

	return ret == val;
}

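/*
 * The cmpxchg above is the reader side of the race described in the big
 * comment: it only succeeds if the old page is still flagged as the head
 * page, so a writer that moved the head in the meantime makes the
 * replacement fail and the caller has to look up the head again.
 */
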
/*
 * rb_tail_page_update - move the tail page forward
 *
 * Returns 1 if moved tail page, 0 if someone else did.
 */
static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
			       struct buffer_page *tail_page,
			       struct buffer_page *next_page)
{
	struct buffer_page *old_tail;
	unsigned long old_entries;
	unsigned long old_write;
	int ret = 0;

	/*
	 * The tail page now needs to be moved forward.
	 *
	 * We need to reset the tail page, but without messing
	 * with possible erasing of data brought in by interrupts
	 * that have moved the tail page and are currently on it.
	 *
	 * We add a counter to the write field to denote this.
	 */
	old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
	old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);

	/*
	 * Just make sure we have seen our old_write and synchronize
	 * with any interrupts that come in.
	 */
	barrier();

	/*
	 * If the tail page is still the same as what we think
	 * it is, then it is up to us to update the tail
	 * pointer.
	 */
	if (tail_page == cpu_buffer->tail_page) {
		/* Zero the write counter */
		unsigned long val = old_write & ~RB_WRITE_MASK;
		unsigned long eval = old_entries & ~RB_WRITE_MASK;

		/*
		 * This will only succeed if an interrupt did
		 * not come in and change it. In which case, we
		 * do not want to modify it.
		 *
		 * We add (void) to let the compiler know that we do not care
		 * about the return value of these functions. We use the
		 * cmpxchg to only update if an interrupt did not already
		 * do it for us. If the cmpxchg fails, we don't care.
		 */
		(void)local_cmpxchg(&next_page->write, old_write, val);
		(void)local_cmpxchg(&next_page->entries, old_entries, eval);

		/*
		 * No need to worry about races with clearing out the commit.
		 * It can only increment when a commit takes place. But that
		 * only happens in the outermost nested commit.
		 */
		local_set(&next_page->page->commit, 0);

		old_tail = cmpxchg(&cpu_buffer->tail_page,
				   tail_page, next_page);

		if (old_tail == tail_page)
			ret = 1;
	}

	return ret;
}

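/*
 * Only the context whose cmpxchg actually swaps cpu_buffer->tail_page
 * reports success here; a nested interrupt that already moved the tail
 * simply leaves ret at 0 for the interrupted writer.
 */
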
static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
			  struct buffer_page *bpage)
{
	unsigned long val = (unsigned long)bpage;

	if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
		return 1;

	return 0;
}

/**
 * rb_check_list - make sure a pointer to a list has the last bits zero
 */
static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
			 struct list_head *list)
{
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
		return 1;
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
		return 1;
	return 0;
}

/**
 * rb_check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
 */
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	rb_head_page_deactivate(cpu_buffer);

	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
		return -1;
	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
		return -1;

	if (rb_check_list(cpu_buffer, head))
		return -1;

	list_for_each_entry_safe(bpage, tmp, head, list) {
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.next->prev != &bpage->list))
			return -1;
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.prev->next != &bpage->list))
			return -1;
		if (rb_check_list(cpu_buffer, &bpage->list))
			return -1;
	}

	rb_head_page_activate(cpu_buffer);

	return 0;
}

static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			     unsigned nr_pages)
{
	struct buffer_page *bpage, *tmp;
	unsigned long addr;
	LIST_HEAD(pages);
	unsigned i;

	WARN_ON(!nr_pages);

	for (i = 0; i < nr_pages; i++) {
		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
				     GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
		if (!bpage)
			goto free_pages;

		rb_check_bpage(cpu_buffer, bpage);

		list_add(&bpage->list, &pages);

		addr = __get_free_page(GFP_KERNEL);
		if (!addr)
			goto free_pages;
		bpage->page = (void *)addr;
		rb_init_page(bpage->page);
	}

	/*
	 * The ring buffer page list is a circular list that does not
	 * start and end with a list head. All page list items point to
	 * other pages.
	 */
	cpu_buffer->pages = pages.next;
	list_del(&pages);

	rb_check_pages(cpu_buffer);

	return 0;

 free_pages:
	list_for_each_entry_safe(bpage, tmp, &pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	return -ENOMEM;
}

static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *bpage;
	unsigned long addr;
	int ret;

	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
				  GFP_KERNEL, cpu_to_node(cpu));
	if (!cpu_buffer)
		return NULL;

	cpu_buffer->cpu = cpu;
	cpu_buffer->buffer = buffer;
	spin_lock_init(&cpu_buffer->reader_lock);
	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
			     GFP_KERNEL, cpu_to_node(cpu));
	if (!bpage)
		goto fail_free_buffer;

	rb_check_bpage(cpu_buffer, bpage);

	cpu_buffer->reader_page = bpage;
	addr = __get_free_page(GFP_KERNEL);
	if (!addr)
		goto fail_free_reader;
	bpage->page = (void *)addr;
	rb_init_page(bpage->page);

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);

	ret = rb_allocate_pages(cpu_buffer, buffer->pages);
	if (ret < 0)
		goto fail_free_reader;

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages, struct buffer_page, list);
	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;

	rb_head_page_activate(cpu_buffer);

	return cpu_buffer;

 fail_free_reader:
	free_buffer_page(cpu_buffer->reader_page);

 fail_free_buffer:
	kfree(cpu_buffer);
	return NULL;
}

static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	free_buffer_page(cpu_buffer->reader_page);

	rb_head_page_deactivate(cpu_buffer);

	if (head) {
		list_for_each_entry_safe(bpage, tmp, head, list) {
			list_del_init(&bpage->list);
			free_buffer_page(bpage);
		}
		bpage = list_entry(head, struct buffer_page, list);
		free_buffer_page(bpage);
	}

	kfree(cpu_buffer);
}

#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu);
#endif

/**
 * ring_buffer_alloc - allocate a new ring_buffer
 * @size: the size in bytes per cpu that is needed.
 * @flags: attributes to set for the ring buffer.
 *
 * Currently the only flag that is available is the RB_FL_OVERWRITE
 * flag. This flag means that the buffer will overwrite old data
 * when the buffer wraps. If this flag is not set, the buffer will
 * drop data when the tail hits the head.
 */
struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
					struct lock_class_key *key)
{
	struct ring_buffer *buffer;
	int bsize;
	int cpu;

	/* keep it in its own cache line */
	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
			 GFP_KERNEL);
	if (!buffer)
		return NULL;

	if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
		goto fail_free_buffer;

	buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	buffer->flags = flags;
	buffer->clock = trace_clock_local;
	buffer->reader_lock_key = key;

	/* need at least two pages */
	if (buffer->pages < 2)
		buffer->pages = 2;

	/*
	 * In case of non-hotplug cpu, if the ring-buffer is allocated
	 * in early initcall, it will not be notified of secondary cpus.
	 * In that case, we need to allocate for all possible cpus.
	 */
#ifdef CONFIG_HOTPLUG_CPU
	get_online_cpus();
	cpumask_copy(buffer->cpumask, cpu_online_mask);
#else
	cpumask_copy(buffer->cpumask, cpu_possible_mask);
#endif
	buffer->cpus = nr_cpu_ids;

	bsize = sizeof(void *) * nr_cpu_ids;
	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
				  GFP_KERNEL);
	if (!buffer->buffers)
		goto fail_free_cpumask;

	for_each_buffer_cpu(buffer, cpu) {
		buffer->buffers[cpu] =
			rb_allocate_cpu_buffer(buffer, cpu);
		if (!buffer->buffers[cpu])
			goto fail_free_buffers;
	}

#ifdef CONFIG_HOTPLUG_CPU
	buffer->cpu_notify.notifier_call = rb_cpu_notify;
	buffer->cpu_notify.priority = 0;
	register_cpu_notifier(&buffer->cpu_notify);
#endif

	put_online_cpus();
	mutex_init(&buffer->mutex);

	return buffer;

 fail_free_buffers:
	for_each_buffer_cpu(buffer, cpu) {
		if (buffer->buffers[cpu])
			rb_free_cpu_buffer(buffer->buffers[cpu]);
	}
	kfree(buffer->buffers);

 fail_free_cpumask:
	free_cpumask_var(buffer->cpumask);
	put_online_cpus();

 fail_free_buffer:
	kfree(buffer);
	return NULL;
}
EXPORT_SYMBOL_GPL(__ring_buffer_alloc);

/**
 * ring_buffer_free - free a ring buffer.
 * @buffer: the buffer to free.
 */
void
ring_buffer_free(struct ring_buffer *buffer)
{
	int cpu;

	get_online_cpus();

#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu_notifier(&buffer->cpu_notify);
#endif

	for_each_buffer_cpu(buffer, cpu)
		rb_free_cpu_buffer(buffer->buffers[cpu]);

	put_online_cpus();

	kfree(buffer->buffers);
	free_cpumask_var(buffer->cpumask);

	kfree(buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_free);

void ring_buffer_set_clock(struct ring_buffer *buffer,
			   u64 (*clock)(void))
{
	buffer->clock = clock;
}

static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);

static void
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
{
	struct buffer_page *bpage;
	struct list_head *p;
	unsigned i;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	rb_head_page_deactivate(cpu_buffer);

	for (i = 0; i < nr_pages; i++) {
		if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
			return;
		p = cpu_buffer->pages->next;
		bpage = list_entry(p, struct buffer_page, list);
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
		return;

	rb_reset_cpu(cpu_buffer);
	rb_check_pages(cpu_buffer);

	atomic_dec(&cpu_buffer->record_disabled);
}

static void
rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
		struct list_head *pages, unsigned nr_pages)
{
	struct buffer_page *bpage;
	struct list_head *p;
	unsigned i;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	spin_lock_irq(&cpu_buffer->reader_lock);
	rb_head_page_deactivate(cpu_buffer);

	for (i = 0; i < nr_pages; i++) {
		if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
			return;
		p = pages->next;
		bpage = list_entry(p, struct buffer_page, list);
		list_del_init(&bpage->list);
		list_add_tail(&bpage->list, cpu_buffer->pages);
	}
	rb_reset_cpu(cpu_buffer);
	spin_unlock_irq(&cpu_buffer->reader_lock);
	rb_check_pages(cpu_buffer);

	atomic_dec(&cpu_buffer->record_disabled);
}

/**
 * ring_buffer_resize - resize the ring buffer
 * @buffer: the buffer to resize.
 * @size: the new size.
 *
 * The tracer is responsible for making sure that the buffer is
 * not being used while changing the size.
 * Note: We may be able to change the above requirement by using
 *  RCU synchronizations.
 *
 * Minimum size is 2 * BUF_PAGE_SIZE.
 *
 * Returns -1 on failure.
 */
int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned nr_pages, rm_pages, new_pages;
	struct buffer_page *bpage, *tmp;
	unsigned long buffer_size;
	unsigned long addr;
	LIST_HEAD(pages);
	int i, cpu;

	/*
	 * Always succeed at resizing a non-existent buffer:
	 */
	if (!buffer)
		return size;

	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	size *= BUF_PAGE_SIZE;
	buffer_size = buffer->pages * BUF_PAGE_SIZE;

	/* we need a minimum of two pages */
	if (size < BUF_PAGE_SIZE * 2)
		size = BUF_PAGE_SIZE * 2;

	if (size == buffer_size)
		return size;

	mutex_lock(&buffer->mutex);
	get_online_cpus();

	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);

	if (size < buffer_size) {

		/* easy case, just free pages */
		if (RB_WARN_ON(buffer, nr_pages >= buffer->pages))
			goto out_fail;

		rm_pages = buffer->pages - nr_pages;

		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];
			rb_remove_pages(cpu_buffer, rm_pages);
		}
		goto out;
	}

	/*
	 * This is a bit more difficult. We only want to add pages
	 * when we can allocate enough for all CPUs. We do this
	 * by allocating all the pages and storing them on a local
	 * link list. If we succeed in our allocation, then we
	 * add these pages to the cpu_buffers. Otherwise we just free
	 * them all and return -ENOMEM;
	 */
	if (RB_WARN_ON(buffer, nr_pages <= buffer->pages))
		goto out_fail;

	new_pages = nr_pages - buffer->pages;

	for_each_buffer_cpu(buffer, cpu) {
		for (i = 0; i < new_pages; i++) {
			bpage = kzalloc_node(ALIGN(sizeof(*bpage),
						   cache_line_size()),
					     GFP_KERNEL, cpu_to_node(cpu));
			if (!bpage)
				goto free_pages;
			list_add(&bpage->list, &pages);
			addr = __get_free_page(GFP_KERNEL);
			if (!addr)
				goto free_pages;
			bpage->page = (void *)addr;
			rb_init_page(bpage->page);
		}
	}

	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		rb_insert_pages(cpu_buffer, &pages, new_pages);
	}

	if (RB_WARN_ON(buffer, !list_empty(&pages)))
		goto out_fail;

 out:
	buffer->pages = nr_pages;
	put_online_cpus();
	mutex_unlock(&buffer->mutex);

	return size;

 free_pages:
	list_for_each_entry_safe(bpage, tmp, &pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	put_online_cpus();
	mutex_unlock(&buffer->mutex);
	return -ENOMEM;

	/*
	 * Something went totally wrong, and we are too paranoid
	 * to even clean up the mess.
	 */
 out_fail:
	put_online_cpus();
	mutex_unlock(&buffer->mutex);
	return -1;
}
EXPORT_SYMBOL_GPL(ring_buffer_resize);

  1161. static inline void *
  1162. __rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
  1163. {
  1164. return bpage->data + index;
  1165. }
  1166. static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
  1167. {
  1168. return bpage->page->data + index;
  1169. }
  1170. static inline struct ring_buffer_event *
  1171. rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
  1172. {
  1173. return __rb_page_index(cpu_buffer->reader_page,
  1174. cpu_buffer->reader_page->read);
  1175. }
  1176. static inline struct ring_buffer_event *
  1177. rb_iter_head_event(struct ring_buffer_iter *iter)
  1178. {
  1179. return __rb_page_index(iter->head_page, iter->head);
  1180. }
  1181. static inline unsigned long rb_page_write(struct buffer_page *bpage)
  1182. {
  1183. return local_read(&bpage->write) & RB_WRITE_MASK;
  1184. }
  1185. static inline unsigned rb_page_commit(struct buffer_page *bpage)
  1186. {
  1187. return local_read(&bpage->page->commit);
  1188. }
  1189. static inline unsigned long rb_page_entries(struct buffer_page *bpage)
  1190. {
  1191. return local_read(&bpage->entries) & RB_WRITE_MASK;
  1192. }
  1193. /* Size is determined by what has been commited */
  1194. static inline unsigned rb_page_size(struct buffer_page *bpage)
  1195. {
  1196. return rb_page_commit(bpage);
  1197. }
  1198. static inline unsigned
  1199. rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
  1200. {
  1201. return rb_page_commit(cpu_buffer->commit_page);
  1202. }
  1203. static inline unsigned
  1204. rb_event_index(struct ring_buffer_event *event)
  1205. {
  1206. unsigned long addr = (unsigned long)event;
  1207. return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
  1208. }
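
/*
 * Worked example for rb_event_index() (illustrative only, assuming 4K
 * pages and a 16-byte buffer_data_page header): an event whose address
 * has page offset 0x30 gives (addr & ~PAGE_MASK) == 0x30, so its index
 * into the page's data array is 0x30 - BUF_PAGE_HDR_SIZE == 0x20.
 */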
static inline int
rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
		   struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;
	unsigned long index;

	index = rb_event_index(event);
	addr &= PAGE_MASK;

	return cpu_buffer->commit_page->page == (void *)addr &&
		rb_commit_index(cpu_buffer) == index;
}

static void
rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
{
	unsigned long max_count;

	/*
	 * We only race with interrupts and NMIs on this CPU.
	 * If we own the commit event, then we can commit
	 * all others that interrupted us, since the interruptions
	 * are in stack format (they finish before they come
	 * back to us). This allows us to do a simple loop to
	 * assign the commit to the tail.
	 */
 again:
	max_count = cpu_buffer->buffer->pages * 100;

	while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
		if (RB_WARN_ON(cpu_buffer, !(--max_count)))
			return;
		if (RB_WARN_ON(cpu_buffer,
			       rb_is_reader_page(cpu_buffer->tail_page)))
			return;
		local_set(&cpu_buffer->commit_page->page->commit,
			  rb_page_write(cpu_buffer->commit_page));
		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
		cpu_buffer->write_stamp =
			cpu_buffer->commit_page->page->time_stamp;
		/* add barrier to keep gcc from optimizing too much */
		barrier();
	}
	while (rb_commit_index(cpu_buffer) !=
	       rb_page_write(cpu_buffer->commit_page)) {
		local_set(&cpu_buffer->commit_page->page->commit,
			  rb_page_write(cpu_buffer->commit_page));
		RB_WARN_ON(cpu_buffer,
			   local_read(&cpu_buffer->commit_page->page->commit) &
			   ~RB_WRITE_MASK);
		barrier();
	}

	/* again, keep gcc from optimizing */
	barrier();

	/*
	 * If an interrupt came in just after the first while loop
	 * and pushed the tail page forward, we will be left with
	 * a dangling commit that will never go forward.
	 */
	if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
		goto again;
}

static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
	cpu_buffer->reader_page->read = 0;
}

static void rb_inc_iter(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/*
	 * The iterator could be on the reader page (it starts there).
	 * But the head could have moved, since the reader was
	 * found. Check for this case and assign the iterator
	 * to the head page instead of next.
	 */
	if (iter->head_page == cpu_buffer->reader_page)
		iter->head_page = rb_set_head_page(cpu_buffer);
	else
		rb_inc_page(cpu_buffer, &iter->head_page);

	iter->read_stamp = iter->head_page->page->time_stamp;
	iter->head = 0;
}

/**
 * rb_update_event - update event type and data
 * @event: the event to update
 * @type: the type of event
 * @length: the size of the event field in the ring buffer
 *
 * Update the type and data fields of the event. The length
 * is the actual size that is written to the ring buffer,
 * and with this, we can determine what to place into the
 * data field.
 */
static void
rb_update_event(struct ring_buffer_event *event,
		unsigned type, unsigned length)
{
	event->type_len = type;

	switch (type) {

	case RINGBUF_TYPE_PADDING:
	case RINGBUF_TYPE_TIME_EXTEND:
	case RINGBUF_TYPE_TIME_STAMP:
		break;

	case 0:
		length -= RB_EVNT_HDR_SIZE;
		if (length > RB_MAX_SMALL_DATA)
			event->array[0] = length;
		else
			event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
		break;
	default:
		BUG();
	}
}
/*
 * rb_handle_head_page - writer hit the head page
 *
 * Returns: +1 to retry page
 *           0 to continue
 *          -1 on error
 */
static int
rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
		    struct buffer_page *tail_page,
		    struct buffer_page *next_page)
{
	struct buffer_page *new_head;
	int entries;
	int type;
	int ret;

	entries = rb_page_entries(next_page);

	/*
	 * The hard part is here. We need to move the head
	 * forward, and protect against both readers on
	 * other CPUs and writers coming in via interrupts.
	 */
	type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
				       RB_PAGE_HEAD);

	/*
	 * type can be one of four:
	 *  NORMAL - an interrupt already moved it for us
	 *  HEAD   - we are the first to get here.
	 *  UPDATE - we are the interrupt interrupting
	 *           a current move.
	 *  MOVED  - a reader on another CPU moved the next
	 *           pointer to its reader page. Give up
	 *           and try again.
	 */

	switch (type) {
	case RB_PAGE_HEAD:
		/*
		 * We changed the head to UPDATE, thus
		 * it is our responsibility to update
		 * the counters.
		 */
		local_add(entries, &cpu_buffer->overrun);

		/*
		 * The entries will be zeroed out when we move the
		 * tail page.
		 */

		/* still more to do */
		break;

	case RB_PAGE_UPDATE:
		/*
		 * This is an interrupt that interrupted the
		 * previous update. Still more to do.
		 */
		break;
	case RB_PAGE_NORMAL:
		/*
		 * An interrupt came in before the update
		 * and processed this for us.
		 * Nothing left to do.
		 */
		return 1;
	case RB_PAGE_MOVED:
		/*
		 * The reader is on another CPU and just did
		 * a swap with our next_page.
		 * Try again.
		 */
		return 1;
	default:
		RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
		return -1;
	}

	/*
	 * Now that we are here, the old head pointer is
	 * set to UPDATE. This will keep the reader from
	 * swapping the head page with the reader page.
	 * The reader (on another CPU) will spin till
	 * we are finished.
	 *
	 * We just need to protect against interrupts
	 * doing the job. We will set the next pointer
	 * to HEAD. After that, we set the old pointer
	 * to NORMAL, but only if it was HEAD before;
	 * otherwise we are an interrupt, and only
	 * want the outermost commit to reset it.
	 */
	new_head = next_page;
	rb_inc_page(cpu_buffer, &new_head);

	ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
				    RB_PAGE_NORMAL);

	/*
	 * Valid returns are:
	 *  HEAD   - an interrupt came in and already set it.
	 *  NORMAL - One of two things:
	 *            1) We really set it.
	 *            2) A bunch of interrupts came in and moved
	 *               the page forward again.
	 */
	switch (ret) {
	case RB_PAGE_HEAD:
	case RB_PAGE_NORMAL:
		/* OK */
		break;
	default:
		RB_WARN_ON(cpu_buffer, 1);
		return -1;
	}

	/*
	 * It is possible that an interrupt came in,
	 * set the head up, then more interrupts came in
	 * and moved it again. When we get back here,
	 * the page would have been set to NORMAL but we
	 * just set it back to HEAD.
	 *
	 * How do you detect this? Well, if that happened
	 * the tail page would have moved.
	 */
	if (ret == RB_PAGE_NORMAL) {
		/*
		 * If the tail had moved past next, then we need
		 * to reset the pointer.
		 */
		if (cpu_buffer->tail_page != tail_page &&
		    cpu_buffer->tail_page != next_page)
			rb_head_page_set_normal(cpu_buffer, new_head,
						next_page,
						RB_PAGE_HEAD);
	}

	/*
	 * If this was the outermost commit (the one that
	 * changed the original pointer from HEAD to UPDATE),
	 * then it is up to us to reset it to NORMAL.
	 */
	if (type == RB_PAGE_HEAD) {
		ret = rb_head_page_set_normal(cpu_buffer, next_page,
					      tail_page,
					      RB_PAGE_UPDATE);
		if (RB_WARN_ON(cpu_buffer,
			       ret != RB_PAGE_UPDATE))
			return -1;
	}

	return 0;
}
static unsigned rb_calculate_event_length(unsigned length)
{
	struct ring_buffer_event event; /* Used only for sizeof array */

	/* zero length can cause confusion */
	if (!length)
		length = 1;

	if (length > RB_MAX_SMALL_DATA)
		length += sizeof(event.array[0]);

	length += RB_EVNT_HDR_SIZE;
	length = ALIGN(length, RB_ALIGNMENT);

	return length;
}
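
/*
 * Worked example (illustrative, assuming the 4-byte event header and
 * 4-byte RB_ALIGNMENT used by this file): a request for 3 bytes of data
 * becomes 3 + RB_EVNT_HDR_SIZE = 7, then ALIGN(7, RB_ALIGNMENT) = 8
 * bytes reserved. A request above RB_MAX_SMALL_DATA first grows by
 * sizeof(event.array[0]) to make room for the explicit length word.
 */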
static inline void
rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
	      struct buffer_page *tail_page,
	      unsigned long tail, unsigned long length)
{
	struct ring_buffer_event *event;

	/*
	 * Only the event that crossed the page boundary
	 * must fill the old tail_page with padding.
	 */
	if (tail >= BUF_PAGE_SIZE) {
		local_sub(length, &tail_page->write);
		return;
	}

	event = __rb_page_index(tail_page, tail);
	kmemcheck_annotate_bitfield(event, bitfield);

	/*
	 * If this event is bigger than the minimum size, then
	 * we need to be careful that we don't subtract the
	 * write counter enough to allow another writer to slip
	 * in on this page.
	 * We put in a discarded commit instead, to make sure
	 * that this space is not used again.
	 *
	 * If we are less than the minimum size, we don't need to
	 * worry about it.
	 */
	if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
		/* No room for any events */

		/* Mark the rest of the page with padding */
		rb_event_set_padding(event);

		/* Set the write back to the previous setting */
		local_sub(length, &tail_page->write);
		return;
	}

	/* Put in a discarded event */
	event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
	event->type_len = RINGBUF_TYPE_PADDING;
	/* time delta must be non zero */
	event->time_delta = 1;
	/* Account for this as an entry */
	local_inc(&tail_page->entries);
	local_inc(&cpu_buffer->entries);

	/* Set write to end of buffer */
	length = (tail + length) - BUF_PAGE_SIZE;
	local_sub(length, &tail_page->write);
}
static struct ring_buffer_event *
rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
	     unsigned long length, unsigned long tail,
	     struct buffer_page *commit_page,
	     struct buffer_page *tail_page, u64 *ts)
{
	struct ring_buffer *buffer = cpu_buffer->buffer;
	struct buffer_page *next_page;
	int ret;

	next_page = tail_page;

	rb_inc_page(cpu_buffer, &next_page);

	/*
	 * If for some reason, we had an interrupt storm that made
	 * it all the way around the buffer, bail, and warn
	 * about it.
	 */
	if (unlikely(next_page == commit_page)) {
		local_inc(&cpu_buffer->commit_overrun);
		goto out_reset;
	}

	/*
	 * This is where the fun begins!
	 *
	 * We are fighting against races between a reader that
	 * could be on another CPU trying to swap its reader
	 * page with the buffer head.
	 *
	 * We are also fighting against interrupts coming in and
	 * moving the head or tail on us as well.
	 *
	 * If the next page is the head page then we have filled
	 * the buffer, unless the commit page is still on the
	 * reader page.
	 */
	if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {

		/*
		 * If the commit is not on the reader page, then
		 * move the header page.
		 */
		if (!rb_is_reader_page(cpu_buffer->commit_page)) {
			/*
			 * If we are not in overwrite mode,
			 * this is easy, just stop here.
			 */
			if (!(buffer->flags & RB_FL_OVERWRITE))
				goto out_reset;

			ret = rb_handle_head_page(cpu_buffer,
						  tail_page,
						  next_page);
			if (ret < 0)
				goto out_reset;
			if (ret)
				goto out_again;
		} else {
			/*
			 * We need to be careful here too. The
			 * commit page could still be on the reader
			 * page. We could have a small buffer, and
			 * have filled up the buffer with events
			 * from interrupts and such, and wrapped.
			 *
			 * Note, if the tail page is also on the
			 * reader_page, we let it move out.
			 */
			if (unlikely((cpu_buffer->commit_page !=
				      cpu_buffer->tail_page) &&
				     (cpu_buffer->commit_page ==
				      cpu_buffer->reader_page))) {
				local_inc(&cpu_buffer->commit_overrun);
				goto out_reset;
			}
		}
	}

	ret = rb_tail_page_update(cpu_buffer, tail_page, next_page);
	if (ret) {
		/*
		 * Nested commits always have zero deltas, so
		 * just reread the time stamp
		 */
		*ts = rb_time_stamp(buffer, cpu_buffer->cpu);
		next_page->page->time_stamp = *ts;
	}

 out_again:

	rb_reset_tail(cpu_buffer, tail_page, tail, length);

	/* fail and let the caller try again */
	return ERR_PTR(-EAGAIN);

 out_reset:
	/* reset write */
	rb_reset_tail(cpu_buffer, tail_page, tail, length);

	return NULL;
}
static struct ring_buffer_event *
__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
		  unsigned type, unsigned long length, u64 *ts)
{
	struct buffer_page *tail_page, *commit_page;
	struct ring_buffer_event *event;
	unsigned long tail, write;

	commit_page = cpu_buffer->commit_page;
	/* we just need to protect against interrupts */
	barrier();
	tail_page = cpu_buffer->tail_page;
	write = local_add_return(length, &tail_page->write);

	/* set write to only the index of the write */
	write &= RB_WRITE_MASK;
	tail = write - length;

	/* See if we shot past the end of this buffer page */
	if (write > BUF_PAGE_SIZE)
		return rb_move_tail(cpu_buffer, length, tail,
				    commit_page, tail_page, ts);

	/* We reserved something on the buffer */

	event = __rb_page_index(tail_page, tail);
	kmemcheck_annotate_bitfield(event, bitfield);
	rb_update_event(event, type, length);

	/* The passed in type is zero for DATA */
	if (likely(!type))
		local_inc(&tail_page->entries);

	/*
	 * If this is the first commit on the page, then update
	 * its timestamp.
	 */
	if (!tail)
		tail_page->page->time_stamp = *ts;

	return event;
}
static inline int
rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
		  struct ring_buffer_event *event)
{
	unsigned long new_index, old_index;
	struct buffer_page *bpage;
	unsigned long index;
	unsigned long addr;

	new_index = rb_event_index(event);
	old_index = new_index + rb_event_length(event);
	addr = (unsigned long)event;
	addr &= PAGE_MASK;

	bpage = cpu_buffer->tail_page;

	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
		unsigned long write_mask =
			local_read(&bpage->write) & ~RB_WRITE_MASK;
		/*
		 * This is on the tail page. It is possible that
		 * a write could come in and move the tail page
		 * and write to the next page. That is fine
		 * because we just shorten what is on this page.
		 */
		old_index += write_mask;
		new_index += write_mask;
		index = local_cmpxchg(&bpage->write, old_index, new_index);
		if (index == old_index)
			return 1;
	}

	/* could not discard */
	return 0;
}
static int
rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
		  u64 *ts, u64 *delta)
{
	struct ring_buffer_event *event;
	static int once;
	int ret;

	if (unlikely(*delta > (1ULL << 59) && !once++)) {
		printk(KERN_WARNING "Delta way too big! %llu"
		       " ts=%llu write stamp = %llu\n",
		       (unsigned long long)*delta,
		       (unsigned long long)*ts,
		       (unsigned long long)cpu_buffer->write_stamp);
		WARN_ON(1);
	}

	/*
	 * The delta is too big, we need to add a
	 * new timestamp.
	 */
	event = __rb_reserve_next(cpu_buffer,
				  RINGBUF_TYPE_TIME_EXTEND,
				  RB_LEN_TIME_EXTEND,
				  ts);
	if (!event)
		return -EBUSY;

	if (PTR_ERR(event) == -EAGAIN)
		return -EAGAIN;

	/* Only a committed time event can update the write stamp */
	if (rb_event_is_commit(cpu_buffer, event)) {
		/*
		 * If this is the first on the page, then it was
		 * updated with the page itself. Try to discard it
		 * and if we can't just make it zero.
		 */
		if (rb_event_index(event)) {
			event->time_delta = *delta & TS_MASK;
			event->array[0] = *delta >> TS_SHIFT;
		} else {
			/* try to discard, since we do not need this */
			if (!rb_try_to_discard(cpu_buffer, event)) {
				/* nope, just zero it */
				event->time_delta = 0;
				event->array[0] = 0;
			}
		}
		cpu_buffer->write_stamp = *ts;
		/* let the caller know this was the commit */
		ret = 1;
	} else {
		/* Try to discard the event */
		if (!rb_try_to_discard(cpu_buffer, event)) {
			/* Darn, this is just wasted space */
			event->time_delta = 0;
			event->array[0] = 0;
		}
		ret = 0;
	}

	*delta = 0;

	return ret;
}
static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
{
	local_inc(&cpu_buffer->committing);
	local_inc(&cpu_buffer->commits);
}

static void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
{
	unsigned long commits;

	if (RB_WARN_ON(cpu_buffer,
		       !local_read(&cpu_buffer->committing)))
		return;

 again:
	commits = local_read(&cpu_buffer->commits);
	/* synchronize with interrupts */
	barrier();
	if (local_read(&cpu_buffer->committing) == 1)
		rb_set_commit_to_write(cpu_buffer);

	local_dec(&cpu_buffer->committing);

	/* synchronize with interrupts */
	barrier();

	/*
	 * Need to account for interrupts coming in between the
	 * updating of the commit page and the clearing of the
	 * committing counter.
	 */
	if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
	    !local_read(&cpu_buffer->committing)) {
		local_inc(&cpu_buffer->committing);
		goto again;
	}
}
static struct ring_buffer_event *
rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
		      unsigned long length)
{
	struct ring_buffer_event *event;
	u64 ts, delta = 0;
	int commit = 0;
	int nr_loops = 0;

	rb_start_commit(cpu_buffer);

	length = rb_calculate_event_length(length);
 again:
	/*
	 * We allow for interrupts to reenter here and do a trace.
	 * If one does, it will cause this original code to loop
	 * back here. Even with heavy interrupts happening, this
	 * should only happen a few times in a row. If this happens
	 * 1000 times in a row, there must be either an interrupt
	 * storm or we have something buggy.
	 * Bail!
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
		goto out_fail;

	ts = rb_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu);

	/*
	 * Only the first commit can update the timestamp.
	 * Yes there is a race here. If an interrupt comes in
	 * just after the conditional and it traces too, then it
	 * will also check the deltas. More than one timestamp may
	 * also be made. But only the entry that did the actual
	 * commit will be something other than zero.
	 */
	if (likely(cpu_buffer->tail_page == cpu_buffer->commit_page &&
		   rb_page_write(cpu_buffer->tail_page) ==
		   rb_commit_index(cpu_buffer))) {
		u64 diff;

		diff = ts - cpu_buffer->write_stamp;

		/* make sure this diff is calculated here */
		barrier();

		/* Did the write stamp get updated already? */
		if (unlikely(ts < cpu_buffer->write_stamp))
			goto get_event;

		delta = diff;
		if (unlikely(test_time_stamp(delta))) {

			commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
			if (commit == -EBUSY)
				goto out_fail;

			if (commit == -EAGAIN)
				goto again;

			RB_WARN_ON(cpu_buffer, commit < 0);
		}
	}

 get_event:
	event = __rb_reserve_next(cpu_buffer, 0, length, &ts);
	if (unlikely(PTR_ERR(event) == -EAGAIN))
		goto again;

	if (!event)
		goto out_fail;

	if (!rb_event_is_commit(cpu_buffer, event))
		delta = 0;

	event->time_delta = delta;

	return event;

 out_fail:
	rb_end_commit(cpu_buffer);
	return NULL;
}
#ifdef CONFIG_TRACING

#define TRACE_RECURSIVE_DEPTH 16

static int trace_recursive_lock(void)
{
	current->trace_recursion++;

	if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
		return 0;

	/* Disable all tracing before we do anything else */
	tracing_off_permanent();

	printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
		    "HC[%lu]:SC[%lu]:NMI[%lu]\n",
		    current->trace_recursion,
		    hardirq_count() >> HARDIRQ_SHIFT,
		    softirq_count() >> SOFTIRQ_SHIFT,
		    in_nmi());

	WARN_ON_ONCE(1);
	return -1;
}

static void trace_recursive_unlock(void)
{
	WARN_ON_ONCE(!current->trace_recursion);

	current->trace_recursion--;
}

#else

#define trace_recursive_lock()		(0)
#define trace_recursive_unlock()	do { } while (0)

#endif

static DEFINE_PER_CPU(int, rb_need_resched);
/**
 * ring_buffer_lock_reserve - reserve a part of the buffer
 * @buffer: the ring buffer to reserve from
 * @length: the length of the data to reserve (excluding event header)
 *
 * Returns a reserved event on the ring buffer to copy directly to.
 * The user of this interface will need to get the body to write into
 * and can use the ring_buffer_event_data() interface.
 *
 * The length is the length of the data needed, not the event length
 * which also includes the event header.
 *
 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
 * If NULL is returned, then nothing has been allocated or locked.
 */
struct ring_buffer_event *
ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	int cpu, resched;

	if (ring_buffer_flags != RB_BUFFERS_ON)
		return NULL;

	if (atomic_read(&buffer->record_disabled))
		return NULL;

	/* If we are tracing schedule, we don't want to recurse */
	resched = ftrace_preempt_disable();

	if (trace_recursive_lock())
		goto out_nocheck;

	cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];

	if (atomic_read(&cpu_buffer->record_disabled))
		goto out;

	if (length > BUF_MAX_DATA_SIZE)
		goto out;

	event = rb_reserve_next_event(cpu_buffer, length);
	if (!event)
		goto out;

	/*
	 * Need to store resched state on this cpu.
	 * Only the first needs to.
	 */
	if (preempt_count() == 1)
		per_cpu(rb_need_resched, cpu) = resched;

	return event;

 out:
	trace_recursive_unlock();

 out_nocheck:
	ftrace_preempt_enable(resched);
	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
		      struct ring_buffer_event *event)
{
	local_inc(&cpu_buffer->entries);

	/*
	 * The event first in the commit queue updates the
	 * time stamp.
	 */
	if (rb_event_is_commit(cpu_buffer, event))
		cpu_buffer->write_stamp += event->time_delta;

	rb_end_commit(cpu_buffer);
}

/**
 * ring_buffer_unlock_commit - commit a reserved event
 * @buffer: The buffer to commit to
 * @event: The event pointer to commit.
 *
 * This commits the data to the ring buffer, and releases any locks held.
 *
 * Must be paired with ring_buffer_lock_reserve.
 */
int ring_buffer_unlock_commit(struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu = raw_smp_processor_id();

	cpu_buffer = buffer->buffers[cpu];

	rb_commit(cpu_buffer, event);

	trace_recursive_unlock();

	/*
	 * Only the last preempt count needs to restore preemption.
	 */
	if (preempt_count() == 1)
		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
	else
		preempt_enable_no_resched_notrace();

	return 0;
}
EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
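
/*
 * Illustrative sketch (not part of the original file) of the
 * reserve/commit pairing documented above. The caller and its event
 * layout are hypothetical; only the ring_buffer_* calls are the APIs
 * defined in this file and its header.
 */
#if 0
static int example_record_word(struct ring_buffer *buffer, u32 val)
{
	struct ring_buffer_event *event;
	u32 *body;

	/* reserve room for the data only; the header is added for us */
	event = ring_buffer_lock_reserve(buffer, sizeof(val));
	if (!event)
		return -EBUSY;

	body = ring_buffer_event_data(event);
	*body = val;

	/* must pair with the reserve above */
	return ring_buffer_unlock_commit(buffer, event);
}
#endif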
static inline void rb_event_discard(struct ring_buffer_event *event)
{
	/* array[0] holds the actual length for the discarded event */
	event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
	event->type_len = RINGBUF_TYPE_PADDING;
	/* time delta must be non zero */
	if (!event->time_delta)
		event->time_delta = 1;
}

/**
 * ring_buffer_event_discard - discard any event in the ring buffer
 * @event: the event to discard
 *
 * Sometimes an event that is in the ring buffer needs to be ignored.
 * This function lets the user discard an event in the ring buffer
 * and then that event will not be read later.
 *
 * Note, it is up to the user to be careful with this, and protect
 * against races. If the user discards an event that has been consumed
 * it is possible that it could corrupt the ring buffer.
 */
void ring_buffer_event_discard(struct ring_buffer_event *event)
{
	rb_event_discard(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_discard);
/**
 * ring_buffer_discard_commit - discard an event that has not been committed
 * @buffer: the ring buffer
 * @event: non committed event to discard
 *
 * This is similar to ring_buffer_event_discard but must only be
 * performed on an event that has not been committed yet. The difference
 * is that this will also try to free the event from the ring buffer
 * if another event has not been added behind it.
 *
 * If another event has been added behind it, it will set the event
 * up as discarded, and perform the commit.
 *
 * If this function is called, do not call ring_buffer_unlock_commit on
 * the event.
 */
void ring_buffer_discard_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu;

	/* The event is discarded regardless */
	rb_event_discard(event);

	cpu = smp_processor_id();
	cpu_buffer = buffer->buffers[cpu];

	/*
	 * This must only be called if the event has not been
	 * committed yet. Thus we can assume that preemption
	 * is still disabled.
	 */
	RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));

	if (rb_try_to_discard(cpu_buffer, event))
		goto out;

	/*
	 * The commit is still visible by the reader, so we
	 * must increment entries.
	 */
	local_inc(&cpu_buffer->entries);
 out:
	rb_end_commit(cpu_buffer);

	trace_recursive_unlock();

	/*
	 * Only the last preempt count needs to restore preemption.
	 */
	if (preempt_count() == 1)
		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
	else
		preempt_enable_no_resched_notrace();
}
EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
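
/*
 * Illustrative sketch (hypothetical caller and predicate): reserving an
 * event and then throwing it away, e.g. after a filter rejects it. Per
 * the comment above, ring_buffer_unlock_commit() is NOT called on the
 * discarded path.
 */
#if 0
static void example_filtered_record(struct ring_buffer *buffer, u32 val)
{
	struct ring_buffer_event *event;
	u32 *body;

	event = ring_buffer_lock_reserve(buffer, sizeof(val));
	if (!event)
		return;

	body = ring_buffer_event_data(event);
	*body = val;

	if (example_filter_rejects(val))	/* hypothetical predicate */
		ring_buffer_discard_commit(buffer, event);
	else
		ring_buffer_unlock_commit(buffer, event);
}
#endif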
/**
 * ring_buffer_write - write data to the buffer without reserving
 * @buffer: The ring buffer to write to.
 * @length: The length of the data being written (excluding the event header)
 * @data: The data to write to the buffer.
 *
 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
 * one function. If you already have the data to write to the buffer, it
 * may be easier to simply call this function.
 *
 * Note, like ring_buffer_lock_reserve, the length is the length of the data
 * and not the length of the event which would hold the header.
 */
int ring_buffer_write(struct ring_buffer *buffer,
		      unsigned long length,
		      void *data)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	void *body;
	int ret = -EBUSY;
	int cpu, resched;

	if (ring_buffer_flags != RB_BUFFERS_ON)
		return -EBUSY;

	if (atomic_read(&buffer->record_disabled))
		return -EBUSY;

	resched = ftrace_preempt_disable();

	cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];

	if (atomic_read(&cpu_buffer->record_disabled))
		goto out;

	if (length > BUF_MAX_DATA_SIZE)
		goto out;

	event = rb_reserve_next_event(cpu_buffer, length);
	if (!event)
		goto out;

	body = rb_event_data(event);

	memcpy(body, data, length);

	rb_commit(cpu_buffer, event);

	ret = 0;
 out:
	ftrace_preempt_enable(resched);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_write);
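
/*
 * Illustrative sketch (hypothetical caller): when the payload already
 * exists in memory, ring_buffer_write() replaces the reserve/copy/commit
 * sequence with one call.
 */
#if 0
static int example_log_blob(struct ring_buffer *buffer,
			    const void *blob, unsigned long len)
{
	/* len is the data length; the event header is added internally */
	return ring_buffer_write(buffer, len, (void *)blob);
}
#endif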
static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *reader = cpu_buffer->reader_page;
	struct buffer_page *head = rb_set_head_page(cpu_buffer);
	struct buffer_page *commit = cpu_buffer->commit_page;

	/* In case of error, head will be NULL */
	if (unlikely(!head))
		return 1;

	return reader->read == rb_page_commit(reader) &&
		(commit == reader ||
		 (commit == head &&
		  head->read == rb_page_commit(commit)));
}
/**
 * ring_buffer_record_disable - stop all writes into the buffer
 * @buffer: The ring buffer to stop writes to.
 *
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
 * The caller should call synchronize_sched() after this.
 */
void ring_buffer_record_disable(struct ring_buffer *buffer)
{
	atomic_inc(&buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_disable);

/**
 * ring_buffer_record_enable - enable writes to the buffer
 * @buffer: The ring buffer to enable writes
 *
 * Note, multiple disables will need the same number of enables
 * to truly enable the writing (much like preempt_disable).
 */
void ring_buffer_record_enable(struct ring_buffer *buffer)
{
	atomic_dec(&buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
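
/*
 * Illustrative sketch (hypothetical caller) of the pattern the comments
 * above ask for: quiesce writers, wait out in-flight ones, do the work,
 * then re-enable.
 */
#if 0
static void example_quiesce_and_work(struct ring_buffer *buffer)
{
	ring_buffer_record_disable(buffer);
	synchronize_sched();	/* wait for writers already in progress */

	/* ... inspect or reset the buffer here ... */

	ring_buffer_record_enable(buffer);
}
#endif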
/**
 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
 * @buffer: The ring buffer to stop writes to.
 * @cpu: The CPU buffer to stop
 *
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
 * The caller should call synchronize_sched() after this.
 */
void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	cpu_buffer = buffer->buffers[cpu];
	atomic_inc(&cpu_buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);

/**
 * ring_buffer_record_enable_cpu - enable writes to the buffer
 * @buffer: The ring buffer to enable writes
 * @cpu: The CPU to enable.
 *
 * Note, multiple disables will need the same number of enables
 * to truly enable the writing (much like preempt_disable).
 */
void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	cpu_buffer = buffer->buffers[cpu];
	atomic_dec(&cpu_buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
/**
 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the entries from.
 */
unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	ret = (local_read(&cpu_buffer->entries) - local_read(&cpu_buffer->overrun))
		- cpu_buffer->read;

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);

/**
 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the number of overruns from
 */
unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	ret = local_read(&cpu_buffer->overrun);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);

/**
 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the number of overruns from
 */
unsigned long
ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	ret = local_read(&cpu_buffer->commit_overrun);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);

/**
 * ring_buffer_entries - get the number of entries in a buffer
 * @buffer: The ring buffer
 *
 * Returns the total number of entries in the ring buffer
 * (all CPU entries)
 */
unsigned long ring_buffer_entries(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long entries = 0;
	int cpu;

	/* if you care about this being correct, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		entries += (local_read(&cpu_buffer->entries) -
			    local_read(&cpu_buffer->overrun)) - cpu_buffer->read;
	}

	return entries;
}
EXPORT_SYMBOL_GPL(ring_buffer_entries);

/**
 * ring_buffer_overruns - get the number of overruns in the buffer
 * @buffer: The ring buffer
 *
 * Returns the total number of overruns in the ring buffer
 * (all CPU entries)
 */
unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long overruns = 0;
	int cpu;

	/* if you care about this being correct, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		overruns += local_read(&cpu_buffer->overrun);
	}

	return overruns;
}
EXPORT_SYMBOL_GPL(ring_buffer_overruns);
static void rb_iter_reset(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/* Iterator usage is expected to have record disabled */
	if (list_empty(&cpu_buffer->reader_page->list)) {
		iter->head_page = rb_set_head_page(cpu_buffer);
		if (unlikely(!iter->head_page))
			return;
		iter->head = iter->head_page->read;
	} else {
		iter->head_page = cpu_buffer->reader_page;
		iter->head = cpu_buffer->reader_page->read;
	}
	if (iter->head)
		iter->read_stamp = cpu_buffer->read_stamp;
	else
		iter->read_stamp = iter->head_page->page->time_stamp;
}

/**
 * ring_buffer_iter_reset - reset an iterator
 * @iter: The iterator to reset
 *
 * Resets the iterator, so that it will start from the beginning
 * again.
 */
void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;

	if (!iter)
		return;

	cpu_buffer = iter->cpu_buffer;

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	rb_iter_reset(iter);
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);

/**
 * ring_buffer_iter_empty - check if an iterator has no more to read
 * @iter: The iterator to check
 */
int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	cpu_buffer = iter->cpu_buffer;

	return iter->head_page == cpu_buffer->commit_page &&
		iter->head == rb_commit_index(cpu_buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
static void
rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
		     struct ring_buffer_event *event)
{
	u64 delta;

	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		return;

	case RINGBUF_TYPE_TIME_EXTEND:
		delta = event->array[0];
		delta <<= TS_SHIFT;
		delta += event->time_delta;
		cpu_buffer->read_stamp += delta;
		return;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		return;

	case RINGBUF_TYPE_DATA:
		cpu_buffer->read_stamp += event->time_delta;
		return;

	default:
		BUG();
	}
	return;
}

static void
rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
			  struct ring_buffer_event *event)
{
	u64 delta;

	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		return;

	case RINGBUF_TYPE_TIME_EXTEND:
		delta = event->array[0];
		delta <<= TS_SHIFT;
		delta += event->time_delta;
		iter->read_stamp += delta;
		return;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		return;

	case RINGBUF_TYPE_DATA:
		iter->read_stamp += event->time_delta;
		return;

	default:
		BUG();
	}
	return;
}
static struct buffer_page *
rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *reader = NULL;
	unsigned long flags;
	int nr_loops = 0;
	int ret;

	local_irq_save(flags);
	__raw_spin_lock(&cpu_buffer->lock);

 again:
	/*
	 * This should normally only loop twice. But because the
	 * start of the reader inserts an empty page, it causes
	 * a case where we will loop three times. There should be no
	 * reason to loop four times (that I know of).
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
		reader = NULL;
		goto out;
	}

	reader = cpu_buffer->reader_page;

	/* If there's more to read, return this page */
	if (cpu_buffer->reader_page->read < rb_page_size(reader))
		goto out;

	/* Never should we have an index greater than the size */
	if (RB_WARN_ON(cpu_buffer,
		       cpu_buffer->reader_page->read > rb_page_size(reader)))
		goto out;

	/* check if we caught up to the tail */
	reader = NULL;
	if (cpu_buffer->commit_page == cpu_buffer->reader_page)
		goto out;

	/*
	 * Reset the reader page to size zero.
	 */
	local_set(&cpu_buffer->reader_page->write, 0);
	local_set(&cpu_buffer->reader_page->entries, 0);
	local_set(&cpu_buffer->reader_page->page->commit, 0);

 spin:
	/*
	 * Splice the empty reader page into the list around the head.
	 */
	reader = rb_set_head_page(cpu_buffer);
	cpu_buffer->reader_page->list.next = reader->list.next;
	cpu_buffer->reader_page->list.prev = reader->list.prev;

	/*
	 * cpu_buffer->pages just needs to point to the buffer, it
	 * has no specific buffer page to point to. Lets move it out
	 * of our way so we don't accidentally swap it.
	 */
	cpu_buffer->pages = reader->list.prev;

	/* The reader page will be pointing to the new head */
	rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);

	/*
	 * Here's the tricky part.
	 *
	 * We need to move the pointer past the header page.
	 * But we can only do that if a writer is not currently
	 * moving it. The page before the header page has the
	 * flag bit '1' set if it is pointing to the page we want.
	 * But if the writer is in the process of moving it,
	 * then it will be '2' or already moved '0'.
	 */
	ret = rb_head_page_replace(reader, cpu_buffer->reader_page);

	/*
	 * If we did not convert it, then we must try again.
	 */
	if (!ret)
		goto spin;

	/*
	 * Yeah! We succeeded in replacing the page.
	 *
	 * Now make the new head point back to the reader page.
	 */
	reader->list.next->prev = &cpu_buffer->reader_page->list;
	rb_inc_page(cpu_buffer, &cpu_buffer->head_page);

	/* Finally update the reader page to the new head */
	cpu_buffer->reader_page = reader;
	rb_reset_reader_page(cpu_buffer);

	goto again;

 out:
	__raw_spin_unlock(&cpu_buffer->lock);
	local_irq_restore(flags);

	return reader;
}
static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct ring_buffer_event *event;
	struct buffer_page *reader;
	unsigned length;

	reader = rb_get_reader_page(cpu_buffer);

	/* This function should not be called when buffer is empty */
	if (RB_WARN_ON(cpu_buffer, !reader))
		return;

	event = rb_reader_event(cpu_buffer);

	if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX
			|| rb_discarded_event(event))
		cpu_buffer->read++;

	rb_update_read_stamp(cpu_buffer, event);

	length = rb_event_length(event);
	cpu_buffer->reader_page->read += length;
}

static void rb_advance_iter(struct ring_buffer_iter *iter)
{
	struct ring_buffer *buffer;
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	unsigned length;

	cpu_buffer = iter->cpu_buffer;
	buffer = cpu_buffer->buffer;

	/*
	 * Check if we are at the end of the buffer.
	 */
	if (iter->head >= rb_page_size(iter->head_page)) {
		/* discarded commits can make the page empty */
		if (iter->head_page == cpu_buffer->commit_page)
			return;
		rb_inc_iter(iter);
		return;
	}

	event = rb_iter_head_event(iter);

	length = rb_event_length(event);

	/*
	 * This should not be called to advance the header if we are
	 * at the tail of the buffer.
	 */
	if (RB_WARN_ON(cpu_buffer,
		       (iter->head_page == cpu_buffer->commit_page) &&
		       (iter->head + length > rb_commit_index(cpu_buffer))))
		return;

	rb_update_iter_read_stamp(iter, event);

	iter->head += length;

	/* check for end of page padding */
	if ((iter->head >= rb_page_size(iter->head_page)) &&
	    (iter->head_page != cpu_buffer->commit_page))
		rb_advance_iter(iter);
}
static struct ring_buffer_event *
rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	struct buffer_page *reader;
	int nr_loops = 0;

	cpu_buffer = buffer->buffers[cpu];

 again:
	/*
	 * We repeat when a timestamp is encountered. It is possible
	 * to get multiple timestamps from an interrupt entering just
	 * as one timestamp is about to be written, or from discarded
	 * commits. The most that we can have is the number on a single page.
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
		return NULL;

	reader = rb_get_reader_page(cpu_buffer);
	if (!reader)
		return NULL;

	event = rb_reader_event(cpu_buffer);

	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			RB_WARN_ON(cpu_buffer, 1);
		/*
		 * Because the writer could be discarding every
		 * event it creates (which would probably be bad)
		 * if we were to go back to "again" then we may never
		 * catch up, and will trigger the warn on, or lock
		 * the box. Return the padding, and we will release
		 * the current locks, and try again.
		 */
		return event;

	case RINGBUF_TYPE_TIME_EXTEND:
		/* Internal data, OK to advance */
		rb_advance_reader(cpu_buffer);
		goto again;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		rb_advance_reader(cpu_buffer);
		goto again;

	case RINGBUF_TYPE_DATA:
		if (ts) {
			*ts = cpu_buffer->read_stamp + event->time_delta;
			ring_buffer_normalize_time_stamp(buffer,
							 cpu_buffer->cpu, ts);
		}
		return event;

	default:
		BUG();
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_peek);
static struct ring_buffer_event *
rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer *buffer;
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	int nr_loops = 0;

	if (ring_buffer_iter_empty(iter))
		return NULL;

	cpu_buffer = iter->cpu_buffer;
	buffer = cpu_buffer->buffer;

 again:
	/*
	 * We repeat when a timestamp is encountered.
	 * We can get multiple timestamps by nested interrupts or also
	 * if filtering is on (discarding commits). Since discarding
	 * commits can be frequent we can get a lot of timestamps.
	 * But we limit them by not adding timestamps if they begin
	 * at the start of a page.
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
		return NULL;

	if (rb_per_cpu_empty(cpu_buffer))
		return NULL;

	event = rb_iter_head_event(iter);

	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event)) {
			rb_inc_iter(iter);
			goto again;
		}
		rb_advance_iter(iter);
		return event;

	case RINGBUF_TYPE_TIME_EXTEND:
		/* Internal data, OK to advance */
		rb_advance_iter(iter);
		goto again;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		rb_advance_iter(iter);
		goto again;

	case RINGBUF_TYPE_DATA:
		if (ts) {
			*ts = iter->read_stamp + event->time_delta;
			ring_buffer_normalize_time_stamp(buffer,
							 cpu_buffer->cpu, ts);
		}
		return event;

	default:
		BUG();
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);

static inline int rb_ok_to_lock(void)
{
	/*
	 * If an NMI die dumps out the content of the ring buffer
	 * do not grab locks. We also permanently disable the ring
	 * buffer too. A one time deal is all you get from reading
	 * the ring buffer from an NMI.
	 */
	if (likely(!in_nmi()))
		return 1;

	tracing_off_permanent();
	return 0;
}

/**
 * ring_buffer_peek - peek at the next event to be read
 * @buffer: The ring buffer to read
 * @cpu: The cpu to peek at
 * @ts: The timestamp counter of this event.
 *
 * This will return the event that will be read next, but does
 * not consume the data.
 */
struct ring_buffer_event *
ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	struct ring_buffer_event *event;
	unsigned long flags;
	int dolock;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return NULL;

	dolock = rb_ok_to_lock();
 again:
	local_irq_save(flags);
	if (dolock)
		spin_lock(&cpu_buffer->reader_lock);
	event = rb_buffer_peek(buffer, cpu, ts);
	if (event && event->type_len == RINGBUF_TYPE_PADDING)
		rb_advance_reader(cpu_buffer);
	if (dolock)
		spin_unlock(&cpu_buffer->reader_lock);
	local_irq_restore(flags);

	if (event && event->type_len == RINGBUF_TYPE_PADDING) {
		cpu_relax();
		goto again;
	}

	return event;
}
/**
 * ring_buffer_iter_peek - peek at the next event to be read
 * @iter: The ring buffer iterator
 * @ts: The timestamp counter of this event.
 *
 * This will return the event that will be read next, but does
 * not increment the iterator.
 */
struct ring_buffer_event *
ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
	struct ring_buffer_event *event;
	unsigned long flags;

 again:
	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	event = rb_iter_peek(iter, ts);
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	if (event && event->type_len == RINGBUF_TYPE_PADDING) {
		cpu_relax();
		goto again;
	}

	return event;
}

/**
 * ring_buffer_consume - return an event and consume it
 * @buffer: The ring buffer to get the next event from
 * @cpu: The per CPU buffer to read from
 * @ts: Where to store the event's timestamp (may be NULL)
 *
 * Returns the next event in the ring buffer, and that event is consumed.
 * Meaning that sequential reads will keep returning a different event,
 * and eventually empty the ring buffer if the producer is slower.
 */
struct ring_buffer_event *
ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event = NULL;
	unsigned long flags;
	int dolock;

	dolock = rb_ok_to_lock();

 again:
	/* might be called in atomic */
	preempt_disable();

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];
	local_irq_save(flags);
	if (dolock)
		spin_lock(&cpu_buffer->reader_lock);

	event = rb_buffer_peek(buffer, cpu, ts);
	if (event)
		rb_advance_reader(cpu_buffer);

	if (dolock)
		spin_unlock(&cpu_buffer->reader_lock);
	local_irq_restore(flags);

 out:
	preempt_enable();

	if (event && event->type_len == RINGBUF_TYPE_PADDING) {
		cpu_relax();
		goto again;
	}

	return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_consume);
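
/*
 * Illustrative sketch (hypothetical caller): draining one CPU's buffer
 * with consuming reads. Each call returns the next unread event until
 * the buffer is empty.
 */
#if 0
static void example_drain_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts)) != NULL) {
		void *data = ring_buffer_event_data(event);

		/* process 'data' (ring_buffer_event_length(event) bytes) */
		(void)data;
	}
}
#endif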
/**
 * ring_buffer_read_start - start a non consuming read of the buffer
 * @buffer: The ring buffer to read from
 * @cpu: The cpu buffer to iterate over
 *
 * This starts up an iteration through the buffer. It also disables
 * the recording to the buffer until the reading is finished.
 * This prevents the reading from being corrupted. This is not
 * a consuming read, so a producer is not expected.
 *
 * Must be paired with ring_buffer_read_finish.
 */
struct ring_buffer_iter *
ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_iter *iter;
	unsigned long flags;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return NULL;

	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	cpu_buffer = buffer->buffers[cpu];

	iter->cpu_buffer = cpu_buffer;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	__raw_spin_lock(&cpu_buffer->lock);
	rb_iter_reset(iter);
	__raw_spin_unlock(&cpu_buffer->lock);
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	return iter;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_start);

/**
 * ring_buffer_read_finish - finish reading the iterator of the buffer
 * @iter: The iterator retrieved by ring_buffer_read_start
 *
 * This re-enables the recording to the buffer, and frees the
 * iterator.
 */
void
ring_buffer_read_finish(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	atomic_dec(&cpu_buffer->record_disabled);
	kfree(iter);
}
EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
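
/*
 * Illustrative sketch (hypothetical caller): a non-consuming pass over
 * one CPU's buffer with the iterator API. Writers stay disabled between
 * read_start and read_finish, as documented above.
 */
#if 0
static void example_walk_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_start(buffer, cpu);
	if (!iter)
		return;

	while ((event = ring_buffer_read(iter, &ts)) != NULL) {
		/* ... inspect ring_buffer_event_data(event) here ... */
	}

	ring_buffer_read_finish(iter);
}
#endif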
  2751. /**
  2752. * ring_buffer_read - read the next item in the ring buffer by the iterator
  2753. * @iter: The ring buffer iterator
  2754. * @ts: The time stamp of the event read.
  2755. *
  2756. * This reads the next event in the ring buffer and increments the iterator.
  2757. */
struct ring_buffer_event *
ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer_event *event;
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
	unsigned long flags;

 again:
	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	event = rb_iter_peek(iter, ts);
	if (!event)
		goto out;

	rb_advance_iter(iter);
 out:
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	if (event && event->type_len == RINGBUF_TYPE_PADDING) {
		cpu_relax();
		goto again;
	}

	return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_read);
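/*
 * Usage sketch: a complete non-consuming read session.  Recording is
 * disabled for as long as the iterator is held, so keep the walk short.
 * process_event() is a hypothetical consumer supplied by the caller.
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_start(buffer, cpu);
 *	if (!iter)
 *		return;
 *	while ((event = ring_buffer_read(iter, &ts)))
 *		process_event(ring_buffer_event_data(event), ts);
 *	ring_buffer_read_finish(iter);
 */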
/**
 * ring_buffer_size - return the size of the ring buffer (in bytes)
 * @buffer: The ring buffer.
 */
unsigned long ring_buffer_size(struct ring_buffer *buffer)
{
	return BUF_PAGE_SIZE * buffer->pages;
}
EXPORT_SYMBOL_GPL(ring_buffer_size);
static void
rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
{
	rb_head_page_deactivate(cpu_buffer);

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages, struct buffer_page, list);
	local_set(&cpu_buffer->head_page->write, 0);
	local_set(&cpu_buffer->head_page->entries, 0);
	local_set(&cpu_buffer->head_page->page->commit, 0);

	cpu_buffer->head_page->read = 0;

	cpu_buffer->tail_page = cpu_buffer->head_page;
	cpu_buffer->commit_page = cpu_buffer->head_page;

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
	local_set(&cpu_buffer->reader_page->write, 0);
	local_set(&cpu_buffer->reader_page->entries, 0);
	local_set(&cpu_buffer->reader_page->page->commit, 0);
	cpu_buffer->reader_page->read = 0;

	local_set(&cpu_buffer->commit_overrun, 0);
	local_set(&cpu_buffer->overrun, 0);
	local_set(&cpu_buffer->entries, 0);
	local_set(&cpu_buffer->committing, 0);
	local_set(&cpu_buffer->commits, 0);
	cpu_buffer->read = 0;

	cpu_buffer->write_stamp = 0;
	cpu_buffer->read_stamp = 0;

	rb_head_page_activate(cpu_buffer);
}
/**
 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
 * @buffer: The ring buffer to reset a per cpu buffer of
 * @cpu: The CPU buffer to be reset
 */
void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	unsigned long flags;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	atomic_inc(&cpu_buffer->record_disabled);

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
		goto out;

	__raw_spin_lock(&cpu_buffer->lock);

	rb_reset_cpu(cpu_buffer);

	__raw_spin_unlock(&cpu_buffer->lock);

 out:
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	atomic_dec(&cpu_buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
/**
 * ring_buffer_reset - reset a ring buffer
 * @buffer: The ring buffer to reset all cpu buffers
 */
void ring_buffer_reset(struct ring_buffer *buffer)
{
	int cpu;

	for_each_buffer_cpu(buffer, cpu)
		ring_buffer_reset_cpu(buffer, cpu);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset);
/**
 * ring_buffer_empty - is the ring buffer empty?
 * @buffer: The ring buffer to test
 */
int ring_buffer_empty(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;
	int dolock;
	int cpu;
	int ret;

	dolock = rb_ok_to_lock();

	/* yes this is racy, but if you don't like the race, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		local_irq_save(flags);
		if (dolock)
			spin_lock(&cpu_buffer->reader_lock);
		ret = rb_per_cpu_empty(cpu_buffer);
		if (dolock)
			spin_unlock(&cpu_buffer->reader_lock);
		local_irq_restore(flags);

		if (!ret)
			return 0;
	}

	return 1;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty);
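/*
 * Usage sketch: because the check above is deliberately racy, it is best
 * used as a cheap poll, e.g. to sleep until a producer has written
 * something:
 *
 *	while (ring_buffer_empty(buffer))
 *		schedule_timeout_interruptible(HZ / 10);
 */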
/**
 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
 * @buffer: The ring buffer
 * @cpu: The CPU buffer to test
 */
int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;
	int dolock;
	int ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 1;

	dolock = rb_ok_to_lock();

	cpu_buffer = buffer->buffers[cpu];
	local_irq_save(flags);
	if (dolock)
		spin_lock(&cpu_buffer->reader_lock);
	ret = rb_per_cpu_empty(cpu_buffer);
	if (dolock)
		spin_unlock(&cpu_buffer->reader_lock);
	local_irq_restore(flags);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
/**
 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
 * @buffer_a: One buffer to swap with
 * @buffer_b: The other buffer to swap with
 * @cpu: The CPU buffer to swap
 *
 * This function is useful for tracers that want to take a "snapshot"
 * of a CPU buffer and have another backup buffer lying around.
 * It is expected that the tracer handles the cpu buffer not being
 * used at the moment.
 */
int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
			 struct ring_buffer *buffer_b, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer_a;
	struct ring_buffer_per_cpu *cpu_buffer_b;
	int ret = -EINVAL;

	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
		goto out;

	/* At least make sure the two buffers are somewhat the same */
	if (buffer_a->pages != buffer_b->pages)
		goto out;

	ret = -EAGAIN;

	if (ring_buffer_flags != RB_BUFFERS_ON)
		goto out;

	if (atomic_read(&buffer_a->record_disabled))
		goto out;

	if (atomic_read(&buffer_b->record_disabled))
		goto out;

	cpu_buffer_a = buffer_a->buffers[cpu];
	cpu_buffer_b = buffer_b->buffers[cpu];

	if (atomic_read(&cpu_buffer_a->record_disabled))
		goto out;

	if (atomic_read(&cpu_buffer_b->record_disabled))
		goto out;

	/*
	 * We can't do a synchronize_sched here because this
	 * function can be called in atomic context.
	 * Normally this will be called from the same CPU as cpu.
	 * If not it's up to the caller to protect this.
	 */
	atomic_inc(&cpu_buffer_a->record_disabled);
	atomic_inc(&cpu_buffer_b->record_disabled);

	ret = -EBUSY;
	if (local_read(&cpu_buffer_a->committing))
		goto out_dec;
	if (local_read(&cpu_buffer_b->committing))
		goto out_dec;

	buffer_a->buffers[cpu] = cpu_buffer_b;
	buffer_b->buffers[cpu] = cpu_buffer_a;

	cpu_buffer_b->buffer = buffer_a;
	cpu_buffer_a->buffer = buffer_b;

	ret = 0;

 out_dec:
	atomic_dec(&cpu_buffer_a->record_disabled);
	atomic_dec(&cpu_buffer_b->record_disabled);
 out:
	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
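/*
 * Usage sketch: snapshot one CPU of a live buffer into a spare buffer of
 * the same size, then read the snapshot without racing the writers.
 * snapshot_buffer is a hypothetical buffer pre-allocated by the caller
 * with the same number of pages as live_buffer.
 *
 *	if (ring_buffer_swap_cpu(live_buffer, snapshot_buffer, cpu) == 0) {
 *		(the pages recorded so far are now reachable through
 *		 snapshot_buffer and can be read at leisure)
 *	}
 */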
/**
 * ring_buffer_alloc_read_page - allocate a page to read from buffer
 * @buffer: the buffer to allocate for.
 *
 * This function is used in conjunction with ring_buffer_read_page.
 * When reading a full page from the ring buffer, these functions
 * can be used to speed up the process. The calling function should
 * allocate a few pages first with this function. Then when it
 * needs to get pages from the ring buffer, it passes the result
 * of this function into ring_buffer_read_page, which will swap
 * the page that was allocated, with the read page of the buffer.
 *
 * Returns:
 *  The page allocated, or NULL on error.
 */
void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
{
	struct buffer_data_page *bpage;
	unsigned long addr;

	addr = __get_free_page(GFP_KERNEL);
	if (!addr)
		return NULL;

	bpage = (void *)addr;

	rb_init_page(bpage);

	return bpage;
}
EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
/**
 * ring_buffer_free_read_page - free an allocated read page
 * @buffer: the buffer the page was allocated for
 * @data: the page to free
 *
 * Free a page allocated from ring_buffer_alloc_read_page.
 */
void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
{
	free_page((unsigned long)data);
}
EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
/**
 * ring_buffer_read_page - extract a page from the ring buffer
 * @buffer: buffer to extract from
 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
 * @len: amount to extract
 * @cpu: the cpu of the buffer to extract
 * @full: should the extraction only happen when the page is full.
 *
 * This function will pull out a page from the ring buffer and consume it.
 * @data_page must be the address of the variable that was returned
 * from ring_buffer_alloc_read_page. This is because the page might be used
 * to swap with a page in the ring buffer.
 *
 * for example:
 *	rpage = ring_buffer_alloc_read_page(buffer);
 *	if (!rpage)
 *		return error;
 *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
 *	if (ret >= 0)
 *		process_page(rpage, ret);
 *
 * When @full is set, the function will not succeed unless
 * the writer is off the reader page.
 *
 * Note: it is up to the calling functions to handle sleeps and wakeups.
 *  The ring buffer can be used anywhere in the kernel and can not
 *  blindly call wake_up. The layer that uses the ring buffer must be
 *  responsible for that.
 *
 * Returns:
 *  >=0 if data has been transferred, returns the offset of consumed data.
 *  <0 if no data has been transferred.
 */
int ring_buffer_read_page(struct ring_buffer *buffer,
			  void **data_page, size_t len, int cpu, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	struct ring_buffer_event *event;
	struct buffer_data_page *bpage;
	struct buffer_page *reader;
	unsigned long flags;
	unsigned int commit;
	unsigned int read;
	u64 save_timestamp;
	int ret = -1;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	/*
	 * If len is not big enough to hold the page header, then
	 * we can not copy anything.
	 */
	if (len <= BUF_PAGE_HDR_SIZE)
		goto out;

	len -= BUF_PAGE_HDR_SIZE;

	if (!data_page)
		goto out;

	bpage = *data_page;
	if (!bpage)
		goto out;

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	reader = rb_get_reader_page(cpu_buffer);
	if (!reader)
		goto out_unlock;

	event = rb_reader_event(cpu_buffer);

	read = reader->read;
	commit = rb_page_commit(reader);

	/*
	 * If this page has been partially read or
	 * if len is not big enough to read the rest of the page or
	 * a writer is still on the page, then
	 * we must copy the data from the page to the buffer.
	 * Otherwise, we can simply swap the page with the one passed in.
	 */
	if (read || (len < (commit - read)) ||
	    cpu_buffer->reader_page == cpu_buffer->commit_page) {
		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
		unsigned int rpos = read;
		unsigned int pos = 0;
		unsigned int size;

		if (full)
			goto out_unlock;

		if (len > (commit - read))
			len = (commit - read);

		size = rb_event_length(event);

		if (len < size)
			goto out_unlock;

		/* save the current timestamp, since the user will need it */
		save_timestamp = cpu_buffer->read_stamp;

		/* Need to copy one event at a time */
		do {
			memcpy(bpage->data + pos, rpage->data + rpos, size);

			len -= size;

			rb_advance_reader(cpu_buffer);
			rpos = reader->read;
			pos += size;

			event = rb_reader_event(cpu_buffer);
			size = rb_event_length(event);
		} while (len > size);

		/* update bpage */
		local_set(&bpage->commit, pos);
		bpage->time_stamp = save_timestamp;

		/* we copied everything to the beginning */
		read = 0;
	} else {
		/* update the entry counter */
		cpu_buffer->read += rb_page_entries(reader);

		/* swap the pages */
		rb_init_page(bpage);
		bpage = reader->page;
		reader->page = *data_page;
		local_set(&reader->write, 0);
		local_set(&reader->entries, 0);
		reader->read = 0;
		*data_page = bpage;
	}
	ret = read;

 out_unlock:
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

 out:
	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_page);
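/*
 * Usage sketch: a full alloc/read/free cycle for one page.  Passing
 * PAGE_SIZE as @len lets the fast path swap the whole page instead of
 * copying; process_page() is a hypothetical consumer, as in the
 * kernel-doc above.
 *
 *	void *page = ring_buffer_alloc_read_page(buffer);
 *	int ret;
 *
 *	if (!page)
 *		return -ENOMEM;
 *	ret = ring_buffer_read_page(buffer, &page, PAGE_SIZE, cpu, 0);
 *	if (ret >= 0)
 *		process_page(page, ret);
 *	ring_buffer_free_read_page(buffer, page);
 */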
#ifdef CONFIG_TRACING
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	unsigned long *p = filp->private_data;
	char buf[64];
	int r;

	if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
		r = sprintf(buf, "permanently disabled\n");
	else
		r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	unsigned long *p = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	if (val)
		set_bit(RB_BUFFERS_ON_BIT, p);
	else
		clear_bit(RB_BUFFERS_ON_BIT, p);

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
};

static __init int rb_init_debugfs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();

	trace_create_file("tracing_on", 0644, d_tracer,
			  &ring_buffer_flags, &rb_simple_fops);

	return 0;
}

fs_initcall(rb_init_debugfs);
#endif
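/*
 * Usage sketch: with debugfs mounted in the usual place, the file created
 * above appears as /sys/kernel/debug/tracing/tracing_on; writing 0 or 1
 * clears or sets RB_BUFFERS_ON_BIT, e.g.
 *
 *	echo 0 > /sys/kernel/debug/tracing/tracing_on
 */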
#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu)
{
	struct ring_buffer *buffer =
		container_of(self, struct ring_buffer, cpu_notify);
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (cpumask_test_cpu(cpu, buffer->cpumask))
			return NOTIFY_OK;

		buffer->buffers[cpu] =
			rb_allocate_cpu_buffer(buffer, cpu);
		if (!buffer->buffers[cpu]) {
			WARN(1, "failed to allocate ring buffer on CPU %ld\n",
			     cpu);
			return NOTIFY_OK;
		}
		smp_wmb();
		cpumask_set_cpu(cpu, buffer->cpumask);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		/*
		 * Do nothing.
		 *  If we were to free the buffer, then the user would
		 *  lose any trace that was in the buffer.
		 */
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
#endif