ring_buffer.c

/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/fs.h>

#include <asm/local.h>
#include "trace.h"

/*
 * The ring buffer header is special. We must manually keep it up to date.
 */
int ring_buffer_print_entry_header(struct trace_seq *s)
{
        int ret;

        ret = trace_seq_printf(s, "# compressed entry header\n");
        ret = trace_seq_printf(s, "\ttype_len    :    5 bits\n");
        ret = trace_seq_printf(s, "\ttime_delta  :   27 bits\n");
        ret = trace_seq_printf(s, "\tarray       :   32 bits\n");
        ret = trace_seq_printf(s, "\n");
        ret = trace_seq_printf(s, "\tpadding     : type == %d\n",
                               RINGBUF_TYPE_PADDING);
        ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
                               RINGBUF_TYPE_TIME_EXTEND);
        ret = trace_seq_printf(s, "\tdata max type_len  == %d\n",
                               RINGBUF_TYPE_DATA_TYPE_LEN_MAX);

        return ret;
}
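/*
 * Editor's note: a minimal userspace sketch of the compressed entry
 * header printed above -- 5 bits of type_len and 27 bits of time_delta
 * packed into one 32-bit word, followed by a flexible array. The field
 * names mirror struct ring_buffer_event from <linux/ring_buffer.h>;
 * the sketch itself is illustrative and not part of this file.
 */
#if 0   /* standalone demo */
#include <stdint.h>
#include <stdio.h>

struct rb_event_sketch {
        uint32_t type_len   : 5;   /* 0: length in array[0]; 1-28: length/4 */
        uint32_t time_delta : 27;  /* delta from the page's time_stamp */
        uint32_t array[];          /* payload, or its length when type_len == 0 */
};

int main(void)
{
        printf("header: %zu bytes\n", sizeof(struct rb_event_sketch));
        printf("max small payload: %u bytes\n", 28 * 4); /* type_len * RB_ALIGNMENT */
        return 0;
}
#endif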
/*
 * The ring buffer is made up of a list of pages. A separate list of pages is
 * allocated for each CPU. A writer may only write to a buffer that is
 * associated with the CPU it is currently executing on. A reader may read
 * from any per cpu buffer.
 *
 * The reader is special. For each per cpu buffer, the reader has its own
 * reader page. When a reader has read the entire reader page, this reader
 * page is swapped with another page in the ring buffer.
 *
 * Now, as long as the writer is off the reader page, the reader can do
 * whatever it wants with that page. The writer will never write to that
 * page again (as long as it is out of the ring buffer).
 *
 * Here's some silly ASCII art.
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |-->|   |-->|   |
 *      |            +---+   +---+   +---+
 *      |                              |
 *      |                              |
 *      +------------------------------+
 *
 *
 *   +------+
 *   |buffer|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |   |   |-->|   |
 *      |   New      +---+   +---+   +---+
 *      |  Reader------^               |
 *      |   page                       |
 *      +------------------------------+
 *
 *
 * After we make this swap, the reader can hand this page off to the splice
 * code and be done with it. It can even allocate a new page if it needs to
 * and swap that into the ring buffer.
 *
 * We will be using cmpxchg soon to make all this lockless.
 *
 */
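/*
 * Editor's note: a toy model of the reader-page swap drawn above.
 * Pages form a circular singly linked ring; the reader owns one spare
 * page outside the ring and trades it for the current head. The real
 * code uses a doubly linked list plus cmpxchg (see further down); this
 * is only a sketch of the idea.
 */
#if 0   /* standalone demo */
#include <stdio.h>

struct page { int id; struct page *next; };

/* Swap *headp out of the ring and link the spare in its place. */
static struct page *reader_swap(struct page **headp, struct page *spare)
{
        struct page *head = *headp;
        struct page *prev = head;

        while (prev->next != head)      /* find the page before head */
                prev = prev->next;

        spare->next = head->next;       /* spare takes head's slot */
        prev->next = spare;
        head->next = NULL;              /* head is now the reader's page */
        *headp = spare->next;           /* new head: the page after it */
        return head;
}

int main(void)
{
        struct page p[3] = { {0}, {1}, {2} }, spare = {9};
        struct page *head = &p[0], *got;

        p[0].next = &p[1]; p[1].next = &p[2]; p[2].next = &p[0];
        got = reader_swap(&head, &spare);
        printf("reader got page %d, new head is page %d\n", got->id, head->id);
        return 0;
}
#endif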
/*
 * A fast way to enable or disable all ring buffers is to
 * call tracing_on or tracing_off. Turning off the ring buffers
 * prevents all ring buffers from being recorded to.
 * Turning this switch on makes it OK to write to the
 * ring buffer, if the ring buffer is enabled itself.
 *
 * There are three layers that must be on in order to write
 * to the ring buffer.
 *
 * 1) This global flag must be set.
 * 2) The ring buffer must be enabled for recording.
 * 3) The per cpu buffer must be enabled for recording.
 *
 * In case of an anomaly, this global flag has a bit set that
 * will permanently disable all ring buffers.
 */

/*
 * Global flag to disable all recording to ring buffers
 * This has two bits: ON, DISABLED
 *
 *  ON   DISABLED
 * ---- ----------
 *   0      0        : ring buffers are off
 *   1      0        : ring buffers are on
 *   X      1        : ring buffers are permanently disabled
 */
enum {
        RB_BUFFERS_ON_BIT       = 0,
        RB_BUFFERS_DISABLED_BIT = 1,
};

enum {
        RB_BUFFERS_ON           = 1 << RB_BUFFERS_ON_BIT,
        RB_BUFFERS_DISABLED     = 1 << RB_BUFFERS_DISABLED_BIT,
};

static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;

/* Used for individual buffers (after the counter) */
#define RB_BUFFER_OFF           (1 << 20)

#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)

/**
 * tracing_off_permanent - permanently disable ring buffers
 *
 * This function, once called, will disable all ring buffers
 * permanently.
 */
void tracing_off_permanent(void)
{
        set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
}

#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT            4U
#define RB_MAX_SMALL_DATA       (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
#define RB_EVNT_MIN_SIZE        8U      /* two 32bit words */

#if !defined(CONFIG_64BIT) || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
# define RB_FORCE_8BYTE_ALIGNMENT       0
# define RB_ARCH_ALIGNMENT              RB_ALIGNMENT
#else
# define RB_FORCE_8BYTE_ALIGNMENT       1
# define RB_ARCH_ALIGNMENT              8U
#endif

/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX

enum {
        RB_LEN_TIME_EXTEND = 8,
        RB_LEN_TIME_STAMP = 16,
};

#define skip_time_extend(event) \
        ((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))

static inline int rb_null_event(struct ring_buffer_event *event)
{
        return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
}

static void rb_event_set_padding(struct ring_buffer_event *event)
{
        /* padding has a NULL time_delta */
        event->type_len = RINGBUF_TYPE_PADDING;
        event->time_delta = 0;
}

static unsigned
rb_event_data_length(struct ring_buffer_event *event)
{
        unsigned length;

        if (event->type_len)
                length = event->type_len * RB_ALIGNMENT;
        else
                length = event->array[0];
        return length + RB_EVNT_HDR_SIZE;
}

/*
 * Return the length of the given event. Will return
 * the length of the time extend if the event is a
 * time extend.
 */
static inline unsigned
rb_event_length(struct ring_buffer_event *event)
{
        switch (event->type_len) {
        case RINGBUF_TYPE_PADDING:
                if (rb_null_event(event))
                        /* undefined */
                        return -1;
                return event->array[0] + RB_EVNT_HDR_SIZE;

        case RINGBUF_TYPE_TIME_EXTEND:
                return RB_LEN_TIME_EXTEND;

        case RINGBUF_TYPE_TIME_STAMP:
                return RB_LEN_TIME_STAMP;

        case RINGBUF_TYPE_DATA:
                return rb_event_data_length(event);
        default:
                BUG();
        }
        /* not hit */
        return 0;
}

/*
 * Return total length of time extend and data,
 * or just the event length for all other events.
 */
static inline unsigned
rb_event_ts_length(struct ring_buffer_event *event)
{
        unsigned len = 0;

        if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
                /* time extends include the data event after it */
                len = RB_LEN_TIME_EXTEND;
                event = skip_time_extend(event);
        }
        return len + rb_event_length(event);
}

/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 *
 * Returns the size of the data load of a data event.
 * If the event is something other than a data event, it
 * returns the size of the event itself. With the exception
 * of a TIME EXTEND, where it still returns the size of the
 * data load of the data event after it.
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
        unsigned length;

        if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
                event = skip_time_extend(event);

        length = rb_event_length(event);
        if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
                return length;
        length -= RB_EVNT_HDR_SIZE;
        if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
                length -= sizeof(event->array[0]);
        return length;
}
EXPORT_SYMBOL_GPL(ring_buffer_event_length);

/* inline for ring buffer fast paths */
static void *
rb_event_data(struct ring_buffer_event *event)
{
        if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
                event = skip_time_extend(event);
        BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
        /* If length is in len field, then array[0] has the data */
        if (event->type_len)
                return (void *)&event->array[0];
        /* Otherwise length is in array[0] and array[1] has the data */
        return (void *)&event->array[1];
}
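/*
 * Editor's note: the two data-event encodings handled above, as a
 * standalone sketch. Payloads up to RB_MAX_SMALL_DATA (112 bytes)
 * encode their length in type_len and start the data at array[0];
 * larger payloads set type_len to 0, carry the length in array[0],
 * and start the data at array[1]. Illustrative only.
 */
#if 0   /* standalone demo */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_RB_ALIGNMENT 4u

static unsigned data_word(uint32_t type_len)
{
        /* which array[] word the payload starts at */
        return type_len ? 0 : 1;
}

static unsigned data_length(uint32_t type_len, uint32_t array0)
{
        return type_len ? type_len * SKETCH_RB_ALIGNMENT : array0;
}

int main(void)
{
        /* 16-byte payload: small encoding, type_len = 16 / 4 */
        printf("small: len=%u, data at array[%u]\n",
               data_length(4, 0), data_word(4));
        /* 200-byte payload: type_len = 0, length lives in array[0] */
        printf("large: len=%u, data at array[%u]\n",
               data_length(0, 200), data_word(0));
        return 0;
}
#endif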
/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
        return rb_event_data(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_data);

#define for_each_buffer_cpu(buffer, cpu)                \
        for_each_cpu(cpu, buffer->cpumask)

#define TS_SHIFT        27
#define TS_MASK         ((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST   (~TS_MASK)

/* Flag when events were overwritten */
#define RB_MISSED_EVENTS        (1 << 31)
/* Missed count stored at end */
#define RB_MISSED_STORED        (1 << 30)

struct buffer_data_page {
        u64             time_stamp;     /* page time stamp */
        local_t         commit;         /* write committed index */
        unsigned char   data[];         /* data of buffer page */
};

/*
 * Note, the buffer_page list must be first. The buffer pages
 * are allocated in cache lines, which means that each buffer
 * page will be at the beginning of a cache line, and thus
 * the least significant bits will be zero. We use this to
 * add flags in the list struct pointers, to make the ring buffer
 * lockless.
 */
struct buffer_page {
        struct list_head list;          /* list of buffer pages */
        local_t          write;         /* index for next write */
        unsigned         read;          /* index for next read */
        local_t          entries;       /* entries on this page */
        unsigned long    real_end;      /* real end of data */
        struct buffer_data_page *page;  /* Actual data page */
};

/*
 * The buffer page counters, write and entries, must be reset
 * atomically when crossing page boundaries. To synchronize this
 * update, two counters are inserted into the number. One is
 * the actual counter for the write position or count on the page.
 *
 * The other is a counter of updaters. Before an update happens
 * the update partition of the counter is incremented. This will
 * allow the updater to update the counter atomically.
 *
 * The counter is 20 bits, and the state data is 12.
 */
#define RB_WRITE_MASK           0xfffff
#define RB_WRITE_INTCNT         (1 << 20)
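/*
 * Editor's note: a userspace sketch of the split counter described
 * above -- the low 20 bits hold the write index, and everything from
 * RB_WRITE_INTCNT up counts in-flight updaters. Bumping the updater
 * bits first lets a reset detect that a nested writer (an interrupt)
 * raced with it. C11 atomics stand in for local_t; illustrative only.
 */
#if 0   /* standalone demo */
#include <stdatomic.h>
#include <stdio.h>

#define SK_WRITE_MASK   0xfffffUL
#define SK_WRITE_INTCNT (1UL << 20)

static _Atomic unsigned long write_cnt;

int main(void)
{
        unsigned long old, zeroed;

        atomic_fetch_add(&write_cnt, 100);      /* writer advanced 100 bytes */

        /* Tail move: bump the updater count and remember what we saw. */
        old = atomic_fetch_add(&write_cnt, SK_WRITE_INTCNT) + SK_WRITE_INTCNT;

        /*
         * Reset the index but keep the updater bits. The compare-exchange
         * fails if another updater slipped in after our fetch_add.
         */
        zeroed = old & ~SK_WRITE_MASK;
        if (atomic_compare_exchange_strong(&write_cnt, &old, zeroed))
                printf("reset ok: %#lx\n", zeroed);
        else
                printf("lost the race; leave the counter alone\n");
        return 0;
}
#endif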
static void rb_init_page(struct buffer_data_page *bpage)
{
        local_set(&bpage->commit, 0);
}

/**
 * ring_buffer_page_len - the size of data on the page.
 * @page: The page to read
 *
 * Returns the amount of data on the page, including buffer page header.
 */
size_t ring_buffer_page_len(void *page)
{
        return local_read(&((struct buffer_data_page *)page)->commit)
                + BUF_PAGE_HDR_SIZE;
}

/*
 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 * this issue out.
 */
static void free_buffer_page(struct buffer_page *bpage)
{
        free_page((unsigned long)bpage->page);
        kfree(bpage);
}

/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline int test_time_stamp(u64 delta)
{
        if (delta & TS_DELTA_TEST)
                return 1;
        return 0;
}
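/*
 * Editor's note: what the 27-bit test above decides, as a standalone
 * sketch. A delta that fits in TS_MASK goes straight into the event
 * header; a larger one needs a time-extend event carrying the upper
 * bits. Illustrative only.
 */
#if 0   /* standalone demo */
#include <stdint.h>
#include <stdio.h>

#define SK_TS_SHIFT      27
#define SK_TS_MASK       ((1ULL << SK_TS_SHIFT) - 1)
#define SK_TS_DELTA_TEST (~SK_TS_MASK)

int main(void)
{
        uint64_t deltas[] = { 5000, SK_TS_MASK, SK_TS_MASK + 1 };
        int i;

        for (i = 0; i < 3; i++) {
                uint64_t d = deltas[i];

                if (d & SK_TS_DELTA_TEST)
                        printf("%llu: needs a time extend (low=%llu high=%llu)\n",
                               (unsigned long long)d,
                               (unsigned long long)(d & SK_TS_MASK),
                               (unsigned long long)(d >> SK_TS_SHIFT));
                else
                        printf("%llu: fits in the header\n",
                               (unsigned long long)d);
        }
        return 0;
}
#endif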
#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)

/* Max payload is BUF_PAGE_SIZE - header (8bytes) */
#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))

int ring_buffer_print_page_header(struct trace_seq *s)
{
        struct buffer_data_page field;
        int ret;

        ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
                               "offset:0;\tsize:%u;\tsigned:%u;\n",
                               (unsigned int)sizeof(field.time_stamp),
                               (unsigned int)is_signed_type(u64));

        ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
                               "offset:%u;\tsize:%u;\tsigned:%u;\n",
                               (unsigned int)offsetof(typeof(field), commit),
                               (unsigned int)sizeof(field.commit),
                               (unsigned int)is_signed_type(long));

        ret = trace_seq_printf(s, "\tfield: int overwrite;\t"
                               "offset:%u;\tsize:%u;\tsigned:%u;\n",
                               (unsigned int)offsetof(typeof(field), commit),
                               1,
                               (unsigned int)is_signed_type(long));

        ret = trace_seq_printf(s, "\tfield: char data;\t"
                               "offset:%u;\tsize:%u;\tsigned:%u;\n",
                               (unsigned int)offsetof(typeof(field), data),
                               (unsigned int)BUF_PAGE_SIZE,
                               (unsigned int)is_signed_type(char));

        return ret;
}

/*
 * head_page == tail_page && head == tail then buffer is empty.
 */
struct ring_buffer_per_cpu {
        int                             cpu;
        atomic_t                        record_disabled;
        struct ring_buffer              *buffer;
        raw_spinlock_t                  reader_lock;    /* serialize readers */
        arch_spinlock_t                 lock;
        struct lock_class_key           lock_key;
        struct list_head                *pages;
        struct buffer_page              *head_page;     /* read from head */
        struct buffer_page              *tail_page;     /* write to tail */
        struct buffer_page              *commit_page;   /* committed pages */
        struct buffer_page              *reader_page;
        unsigned long                   lost_events;
        unsigned long                   last_overrun;
        local_t                         entries_bytes;
        local_t                         commit_overrun;
        local_t                         overrun;
        local_t                         entries;
        local_t                         committing;
        local_t                         commits;
        unsigned long                   read;
        unsigned long                   read_bytes;
        u64                             write_stamp;
        u64                             read_stamp;
};

struct ring_buffer {
        unsigned                        pages;
        unsigned                        flags;
        int                             cpus;
        atomic_t                        record_disabled;
        cpumask_var_t                   cpumask;

        struct lock_class_key           *reader_lock_key;

        struct mutex                    mutex;

        struct ring_buffer_per_cpu      **buffers;

#ifdef CONFIG_HOTPLUG_CPU
        struct notifier_block           cpu_notify;
#endif
        u64                             (*clock)(void);
};

struct ring_buffer_iter {
        struct ring_buffer_per_cpu      *cpu_buffer;
        unsigned long                   head;
        struct buffer_page              *head_page;
        struct buffer_page              *cache_reader_page;
        unsigned long                   cache_read;
        u64                             read_stamp;
};

/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(b, cond)                                             \
        ({                                                              \
                int _____ret = unlikely(cond);                          \
                if (_____ret) {                                         \
                        if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
                                struct ring_buffer_per_cpu *__b =       \
                                        (void *)b;                      \
                                atomic_inc(&__b->buffer->record_disabled); \
                        } else                                          \
                                atomic_inc(&b->record_disabled);        \
                        WARN_ON(1);                                     \
                }                                                       \
                _____ret;                                               \
        })

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

static inline u64 rb_time_stamp(struct ring_buffer *buffer)
{
        /* shift to debug/test normalization and TIME_EXTENTS */
        return buffer->clock() << DEBUG_SHIFT;
}

u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
{
        u64 time;

        preempt_disable_notrace();
        time = rb_time_stamp(buffer);
        preempt_enable_no_resched_notrace();

        return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);

void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
                                      int cpu, u64 *ts)
{
        /* Just stupid testing the normalize function and deltas */
        *ts >>= DEBUG_SHIFT;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);

/*
 * Making the ring buffer lockless makes things tricky.
 * Although writes only happen on the CPU that they are on,
 * and they only need to worry about interrupts. Reads can
 * happen on any CPU.
 *
 * The reader page is always off the ring buffer, but when the
 * reader finishes with a page, it needs to swap its page with
 * a new one from the buffer. The reader needs to take from
 * the head (writes go to the tail). But if a writer is in overwrite
 * mode and wraps, it must push the head page forward.
 *
 * Here lies the problem.
 *
 * The reader must be careful to replace only the head page, and
 * not another one. As described at the top of the file in the
 * ASCII art, the reader sets its old page to point to the next
 * page after head. It then sets the page after head to point to
 * the old reader page. But if the writer moves the head page
 * during this operation, the reader could end up with the tail.
 *
 * We use cmpxchg to help prevent this race. We also do something
 * special with the page before head. We set the LSB to 1.
 *
 * When the writer must push the page forward, it will clear the
 * bit that points to the head page, move the head, and then set
 * the bit that points to the new head page.
 *
 * We also don't want an interrupt coming in and moving the head
 * page on another writer. Thus we use the second LSB to catch
 * that too. Thus:
 *
 *  head->list->prev->next        bit 1          bit 0
 *                               -------        -------
 *  Normal page                     0              0
 *  Points to head page             0              1
 *  New head page                   1              0
 *
 * Note we can not trust the prev pointer of the head page, because:
 *
 * +----+       +-----+        +-----+
 * |    |------>|  T  |---X--->|  N  |
 * |    |<------|     |        |     |
 * +----+       +-----+        +-----+
 *   ^                           ^ |
 *   |          +-----+          | |
 *   +----------|  R  |----------+ |
 *              |     |<-----------+
 *              +-----+
 *
 * Key:  ---X-->  HEAD flag set in pointer
 *         T      Tail page
 *         R      Reader page
 *         N      Next page
 *
 * (see __rb_reserve_next() to see where this happens)
 *
 * What the above shows is that the reader just swapped out
 * the reader page with a page in the buffer, but before it
 * could make the new header point back to the new page added
 * it was preempted by a writer. The writer moved forward onto
 * the new page added by the reader and is about to move forward
 * again.
 *
 * You can see, it is legitimate for the previous pointer of
 * the head (or any page) not to point back to itself. But only
 * temporarily.
 */

#define RB_PAGE_NORMAL          0UL
#define RB_PAGE_HEAD            1UL
#define RB_PAGE_UPDATE          2UL

#define RB_FLAG_MASK            3UL

/* PAGE_MOVED is not part of the mask */
#define RB_PAGE_MOVED           4UL

/*
 * rb_list_head - remove any bit
 */
static struct list_head *rb_list_head(struct list_head *list)
{
        unsigned long val = (unsigned long)list;

        return (struct list_head *)(val & ~RB_FLAG_MASK);
}
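/*
 * Editor's note: the pointer tagging that rb_list_head() undoes, as a
 * standalone sketch. Buffer pages are cache-line aligned, so the two
 * low bits of a list pointer are always zero and can carry the
 * HEAD/UPDATE flags; masking them off recovers the real pointer.
 * Illustrative only.
 */
#if 0   /* standalone demo, requires C11 aligned_alloc */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define SK_PAGE_HEAD  1UL
#define SK_FLAG_MASK  3UL

static void *tag(void *p, unsigned long flag)
{
        return (void *)((uintptr_t)p | flag);
}

static void *untag(void *p)             /* rb_list_head() equivalent */
{
        return (void *)((uintptr_t)p & ~SK_FLAG_MASK);
}

int main(void)
{
        void *page = aligned_alloc(64, 64);     /* cache-line aligned */
        void *ptr;

        if (!page)
                return 1;
        ptr = tag(page, SK_PAGE_HEAD);
        printf("flags=%lu, pointer recovered: %d\n",
               (unsigned long)((uintptr_t)ptr & SK_FLAG_MASK),
               untag(ptr) == page);
        free(page);
        return 0;
}
#endif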
/*
 * rb_is_head_page - test if the given page is the head page
 *
 * Because the reader may move the head_page pointer, we can
 * not trust what the head page is (it may be pointing to
 * the reader page). But if the next page is a header page,
 * its flags will be non zero.
 */
static inline int
rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
                struct buffer_page *page, struct list_head *list)
{
        unsigned long val;

        val = (unsigned long)list->next;

        if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
                return RB_PAGE_MOVED;

        return val & RB_FLAG_MASK;
}

/*
 * rb_is_reader_page
 *
 * The unique thing about the reader page is that, if the
 * writer is ever on it, the previous pointer never points
 * back to the reader page.
 */
static int rb_is_reader_page(struct buffer_page *page)
{
        struct list_head *list = page->list.prev;

        return rb_list_head(list->next) != &page->list;
}

/*
 * rb_set_list_to_head - set a list_head to be pointing to head.
 */
static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
                                struct list_head *list)
{
        unsigned long *ptr;

        ptr = (unsigned long *)&list->next;
        *ptr |= RB_PAGE_HEAD;
        *ptr &= ~RB_PAGE_UPDATE;
}

/*
 * rb_head_page_activate - sets up head page
 */
static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
{
        struct buffer_page *head;

        head = cpu_buffer->head_page;
        if (!head)
                return;

        /*
         * Set the previous list pointer to have the HEAD flag.
         */
        rb_set_list_to_head(cpu_buffer, head->list.prev);
}

static void rb_list_head_clear(struct list_head *list)
{
        unsigned long *ptr = (unsigned long *)&list->next;

        *ptr &= ~RB_FLAG_MASK;
}

/*
 * rb_head_page_deactivate - clears head page ptr (for free list)
 */
static void
rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
{
        struct list_head *hd;

        /* Go through the whole list and clear any pointers found. */
        rb_list_head_clear(cpu_buffer->pages);

        list_for_each(hd, cpu_buffer->pages)
                rb_list_head_clear(hd);
}

static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
                            struct buffer_page *head,
                            struct buffer_page *prev,
                            int old_flag, int new_flag)
{
        struct list_head *list;
        unsigned long val = (unsigned long)&head->list;
        unsigned long ret;

        list = &prev->list;

        val &= ~RB_FLAG_MASK;

        ret = cmpxchg((unsigned long *)&list->next,
                      val | old_flag, val | new_flag);

        /* check if the reader took the page */
        if ((ret & ~RB_FLAG_MASK) != val)
                return RB_PAGE_MOVED;

        return ret & RB_FLAG_MASK;
}
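/*
 * Editor's note: the flag transition rb_head_page_set() performs, as a
 * standalone sketch. One compare-exchange swaps "pointer | old_flag"
 * for "pointer | new_flag", so the update only succeeds if neither the
 * pointer nor the flag changed in between. C11 atomics stand in for
 * cmpxchg(); illustrative only.
 */
#if 0   /* standalone demo */
#include <stdatomic.h>
#include <stdio.h>

#define SK_PAGE_HEAD    1UL
#define SK_PAGE_UPDATE  2UL
#define SK_FLAG_MASK    3UL

static _Atomic unsigned long next_ptr;

static int set_flag(unsigned long page, unsigned long old_flag,
                    unsigned long new_flag)
{
        unsigned long expected = page | old_flag;

        if (atomic_compare_exchange_strong(&next_ptr, &expected,
                                           page | new_flag))
                return (int)old_flag;                /* we made the change */
        return (int)(expected & SK_FLAG_MASK);       /* someone beat us */
}

int main(void)
{
        unsigned long page = 0x1000;                 /* stand-in address */

        atomic_store(&next_ptr, page | SK_PAGE_HEAD);
        printf("HEAD -> UPDATE: saw flag %d\n",
               set_flag(page, SK_PAGE_HEAD, SK_PAGE_UPDATE));
        printf("stale retry:    saw flag %d\n",
               set_flag(page, SK_PAGE_HEAD, SK_PAGE_UPDATE));
        return 0;
}
#endif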
static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
                                   struct buffer_page *head,
                                   struct buffer_page *prev,
                                   int old_flag)
{
        return rb_head_page_set(cpu_buffer, head, prev,
                                old_flag, RB_PAGE_UPDATE);
}

static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
                                 struct buffer_page *head,
                                 struct buffer_page *prev,
                                 int old_flag)
{
        return rb_head_page_set(cpu_buffer, head, prev,
                                old_flag, RB_PAGE_HEAD);
}

static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
                                   struct buffer_page *head,
                                   struct buffer_page *prev,
                                   int old_flag)
{
        return rb_head_page_set(cpu_buffer, head, prev,
                                old_flag, RB_PAGE_NORMAL);
}

static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
                               struct buffer_page **bpage)
{
        struct list_head *p = rb_list_head((*bpage)->list.next);

        *bpage = list_entry(p, struct buffer_page, list);
}

static struct buffer_page *
rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
{
        struct buffer_page *head;
        struct buffer_page *page;
        struct list_head *list;
        int i;

        if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
                return NULL;

        /* sanity check */
        list = cpu_buffer->pages;
        if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
                return NULL;

        page = head = cpu_buffer->head_page;
        /*
         * It is possible that the writer moves the header behind
         * where we started, and we miss in one loop.
         * A second loop should grab the header, but we'll do
         * three loops just because I'm paranoid.
         */
        for (i = 0; i < 3; i++) {
                do {
                        if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
                                cpu_buffer->head_page = page;
                                return page;
                        }
                        rb_inc_page(cpu_buffer, &page);
                } while (page != head);
        }

        RB_WARN_ON(cpu_buffer, 1);

        return NULL;
}

static int rb_head_page_replace(struct buffer_page *old,
                                struct buffer_page *new)
{
        unsigned long *ptr = (unsigned long *)&old->list.prev->next;
        unsigned long val;
        unsigned long ret;

        val = *ptr & ~RB_FLAG_MASK;
        val |= RB_PAGE_HEAD;

        ret = cmpxchg(ptr, val, (unsigned long)&new->list);

        return ret == val;
}

/*
 * rb_tail_page_update - move the tail page forward
 *
 * Returns 1 if moved tail page, 0 if someone else did.
 */
static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
                               struct buffer_page *tail_page,
                               struct buffer_page *next_page)
{
        struct buffer_page *old_tail;
        unsigned long old_entries;
        unsigned long old_write;
        int ret = 0;

        /*
         * The tail page now needs to be moved forward.
         *
         * We need to reset the tail page, but without messing
         * with possible erasing of data brought in by interrupts
         * that have moved the tail page and are currently on it.
         *
         * We add a counter to the write field to denote this.
         */
        old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
        old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);

        /*
         * Just make sure we have seen our old_write and synchronize
         * with any interrupts that come in.
         */
        barrier();

        /*
         * If the tail page is still the same as what we think
         * it is, then it is up to us to update the tail
         * pointer.
         */
        if (tail_page == cpu_buffer->tail_page) {
                /* Zero the write counter */
                unsigned long val = old_write & ~RB_WRITE_MASK;
                unsigned long eval = old_entries & ~RB_WRITE_MASK;

                /*
                 * This will only succeed if an interrupt did
                 * not come in and change it. In which case, we
                 * do not want to modify it.
                 *
                 * We add (void) to let the compiler know that we do not care
                 * about the return value of these functions. We use the
                 * cmpxchg to only update if an interrupt did not already
                 * do it for us. If the cmpxchg fails, we don't care.
                 */
                (void)local_cmpxchg(&next_page->write, old_write, val);
                (void)local_cmpxchg(&next_page->entries, old_entries, eval);

                /*
                 * No need to worry about races with clearing out the commit.
                 * It can only increment when a commit takes place. But that
                 * only happens in the outermost nested commit.
                 */
                local_set(&next_page->page->commit, 0);

                old_tail = cmpxchg(&cpu_buffer->tail_page,
                                   tail_page, next_page);

                if (old_tail == tail_page)
                        ret = 1;
        }

        return ret;
}

static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
                          struct buffer_page *bpage)
{
        unsigned long val = (unsigned long)bpage;

        if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
                return 1;

        return 0;
}

/**
 * rb_check_list - make sure a pointer to a list has the last bits zero
 */
static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
                         struct list_head *list)
{
        if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
                return 1;
        if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
                return 1;
        return 0;
}

/**
 * rb_check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
 */
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
        struct list_head *head = cpu_buffer->pages;
        struct buffer_page *bpage, *tmp;

        rb_head_page_deactivate(cpu_buffer);

        if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
                return -1;
        if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
                return -1;

        if (rb_check_list(cpu_buffer, head))
                return -1;

        list_for_each_entry_safe(bpage, tmp, head, list) {
                if (RB_WARN_ON(cpu_buffer,
                               bpage->list.next->prev != &bpage->list))
                        return -1;
                if (RB_WARN_ON(cpu_buffer,
                               bpage->list.prev->next != &bpage->list))
                        return -1;
                if (rb_check_list(cpu_buffer, &bpage->list))
                        return -1;
        }

        rb_head_page_activate(cpu_buffer);

        return 0;
}

static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
                             unsigned nr_pages)
{
        struct buffer_page *bpage, *tmp;
        LIST_HEAD(pages);
        unsigned i;

        WARN_ON(!nr_pages);

        for (i = 0; i < nr_pages; i++) {
                struct page *page;
                /*
                 * __GFP_NORETRY flag makes sure that the allocation fails
                 * gracefully without invoking oom-killer and the system is
                 * not destabilized.
                 */
                bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
                                     GFP_KERNEL | __GFP_NORETRY,
                                     cpu_to_node(cpu_buffer->cpu));
                if (!bpage)
                        goto free_pages;

                rb_check_bpage(cpu_buffer, bpage);

                list_add(&bpage->list, &pages);

                page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu),
                                        GFP_KERNEL | __GFP_NORETRY, 0);
                if (!page)
                        goto free_pages;
                bpage->page = page_address(page);
                rb_init_page(bpage->page);
        }

        /*
         * The ring buffer page list is a circular list that does not
         * start and end with a list head. All page list items point to
         * other pages.
         */
        cpu_buffer->pages = pages.next;
        list_del(&pages);

        rb_check_pages(cpu_buffer);

        return 0;

free_pages:
        list_for_each_entry_safe(bpage, tmp, &pages, list) {
                list_del_init(&bpage->list);
                free_buffer_page(bpage);
        }
        return -ENOMEM;
}
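/*
 * Editor's note: the "headless" circular page list built above, as a
 * standalone sketch. Pages are collected on a temporary list head,
 * then the head itself is unlinked so every node points only at other
 * pages and the list wraps around. A minimal doubly linked list stands
 * in for the kernel's struct list_head; illustrative only.
 */
#if 0   /* standalone demo */
#include <stdio.h>

struct node { struct node *next, *prev; int id; };

static void sk_list_add(struct node *n, struct node *head)
{
        n->next = head->next;
        n->prev = head;
        head->next->prev = n;
        head->next = n;
}

static void sk_list_del(struct node *n)   /* unlink n; ring stays closed */
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
}

int main(void)
{
        struct node head = { &head, &head, -1 };
        struct node pages[3] = { {0, 0, 0}, {0, 0, 1}, {0, 0, 2} };
        struct node *first, *p;
        int i;

        for (i = 0; i < 3; i++)
                sk_list_add(&pages[i], &head);

        first = head.next;      /* becomes cpu_buffer->pages */
        sk_list_del(&head);     /* drop the list head from the ring */

        p = first;              /* walk the ring: it wraps, no head node */
        do {
                printf("page %d\n", p->id);
                p = p->next;
        } while (p != first);
        return 0;
}
#endif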
static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
{
        struct ring_buffer_per_cpu *cpu_buffer;
        struct buffer_page *bpage;
        struct page *page;
        int ret;

        cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
                                  GFP_KERNEL, cpu_to_node(cpu));
        if (!cpu_buffer)
                return NULL;

        cpu_buffer->cpu = cpu;
        cpu_buffer->buffer = buffer;
        raw_spin_lock_init(&cpu_buffer->reader_lock);
        lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
        cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

        bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
                             GFP_KERNEL, cpu_to_node(cpu));
        if (!bpage)
                goto fail_free_buffer;

        rb_check_bpage(cpu_buffer, bpage);

        cpu_buffer->reader_page = bpage;
        page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
        if (!page)
                goto fail_free_reader;
        bpage->page = page_address(page);
        rb_init_page(bpage->page);

        INIT_LIST_HEAD(&cpu_buffer->reader_page->list);

        ret = rb_allocate_pages(cpu_buffer, buffer->pages);
        if (ret < 0)
                goto fail_free_reader;

        cpu_buffer->head_page
                = list_entry(cpu_buffer->pages, struct buffer_page, list);
        cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;

        rb_head_page_activate(cpu_buffer);

        return cpu_buffer;

fail_free_reader:
        free_buffer_page(cpu_buffer->reader_page);

fail_free_buffer:
        kfree(cpu_buffer);
        return NULL;
}

static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
        struct list_head *head = cpu_buffer->pages;
        struct buffer_page *bpage, *tmp;

        free_buffer_page(cpu_buffer->reader_page);

        rb_head_page_deactivate(cpu_buffer);

        if (head) {
                list_for_each_entry_safe(bpage, tmp, head, list) {
                        list_del_init(&bpage->list);
                        free_buffer_page(bpage);
                }
                bpage = list_entry(head, struct buffer_page, list);
                free_buffer_page(bpage);
        }

        kfree(cpu_buffer);
}

#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
                         unsigned long action, void *hcpu);
#endif

/**
 * ring_buffer_alloc - allocate a new ring_buffer
 * @size: the size in bytes per cpu that is needed.
 * @flags: attributes to set for the ring buffer.
 *
 * Currently the only flag that is available is the RB_FL_OVERWRITE
 * flag. This flag means that the buffer will overwrite old data
 * when the buffer wraps. If this flag is not set, the buffer will
 * drop data when the tail hits the head.
 */
struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
                                        struct lock_class_key *key)
{
        struct ring_buffer *buffer;
        int bsize;
        int cpu;

        /* keep it in its own cache line */
        buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
                         GFP_KERNEL);
        if (!buffer)
                return NULL;

        if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
                goto fail_free_buffer;

        buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
        buffer->flags = flags;
        buffer->clock = trace_clock_local;
        buffer->reader_lock_key = key;

        /* need at least two pages */
        if (buffer->pages < 2)
                buffer->pages = 2;

        /*
         * In case of non-hotplug cpu, if the ring-buffer is allocated
         * in early initcall, it will not be notified of secondary cpus.
         * In that case, we need to allocate for all possible cpus.
         */
#ifdef CONFIG_HOTPLUG_CPU
        get_online_cpus();
        cpumask_copy(buffer->cpumask, cpu_online_mask);
#else
        cpumask_copy(buffer->cpumask, cpu_possible_mask);
#endif
        buffer->cpus = nr_cpu_ids;

        bsize = sizeof(void *) * nr_cpu_ids;
        buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
                                  GFP_KERNEL);
        if (!buffer->buffers)
                goto fail_free_cpumask;

        for_each_buffer_cpu(buffer, cpu) {
                buffer->buffers[cpu] =
                        rb_allocate_cpu_buffer(buffer, cpu);
                if (!buffer->buffers[cpu])
                        goto fail_free_buffers;
        }

#ifdef CONFIG_HOTPLUG_CPU
        buffer->cpu_notify.notifier_call = rb_cpu_notify;
        buffer->cpu_notify.priority = 0;
        register_cpu_notifier(&buffer->cpu_notify);
#endif

        put_online_cpus();

        mutex_init(&buffer->mutex);

        return buffer;

fail_free_buffers:
        for_each_buffer_cpu(buffer, cpu) {
                if (buffer->buffers[cpu])
                        rb_free_cpu_buffer(buffer->buffers[cpu]);
        }
        kfree(buffer->buffers);

fail_free_cpumask:
        free_cpumask_var(buffer->cpumask);
        put_online_cpus();

fail_free_buffer:
        kfree(buffer);
        return NULL;
}
EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
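/*
 * Editor's note: a hedged usage sketch of the allocation API exported
 * above, in kernel-module style. ring_buffer_alloc() is the wrapper
 * macro (from <linux/ring_buffer.h>) that supplies the lock_class_key
 * for __ring_buffer_alloc(); the write/consume calls are believed to
 * match the API of this kernel generation, but treat the snippet as a
 * sketch rather than code from this file.
 */
#if 0   /* illustrative module code */
#include <linux/module.h>
#include <linux/ring_buffer.h>

static int __init rb_demo_init(void)
{
        struct ring_buffer *rb;
        struct ring_buffer_event *event;
        int data = 42;
        u64 ts;

        rb = ring_buffer_alloc(64 * 1024, RB_FL_OVERWRITE); /* 64 KB per cpu */
        if (!rb)
                return -ENOMEM;

        ring_buffer_write(rb, sizeof(data), &data);     /* one data event */

        /* Consume from this CPU's buffer (simplified: no preempt care). */
        event = ring_buffer_consume(rb, smp_processor_id(), &ts, NULL);
        if (event)
                pr_info("read %d\n", *(int *)ring_buffer_event_data(event));

        ring_buffer_free(rb);
        return 0;
}
module_init(rb_demo_init);
#endif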
  1017. /**
  1018. * ring_buffer_free - free a ring buffer.
  1019. * @buffer: the buffer to free.
  1020. */
  1021. void
  1022. ring_buffer_free(struct ring_buffer *buffer)
  1023. {
  1024. int cpu;
  1025. get_online_cpus();
  1026. #ifdef CONFIG_HOTPLUG_CPU
  1027. unregister_cpu_notifier(&buffer->cpu_notify);
  1028. #endif
  1029. for_each_buffer_cpu(buffer, cpu)
  1030. rb_free_cpu_buffer(buffer->buffers[cpu]);
  1031. put_online_cpus();
  1032. kfree(buffer->buffers);
  1033. free_cpumask_var(buffer->cpumask);
  1034. kfree(buffer);
  1035. }
  1036. EXPORT_SYMBOL_GPL(ring_buffer_free);
  1037. void ring_buffer_set_clock(struct ring_buffer *buffer,
  1038. u64 (*clock)(void))
  1039. {
  1040. buffer->clock = clock;
  1041. }
  1042. static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
  1043. static void
  1044. rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
  1045. {
  1046. struct buffer_page *bpage;
  1047. struct list_head *p;
  1048. unsigned i;
  1049. raw_spin_lock_irq(&cpu_buffer->reader_lock);
  1050. rb_head_page_deactivate(cpu_buffer);
  1051. for (i = 0; i < nr_pages; i++) {
  1052. if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
  1053. goto out;
  1054. p = cpu_buffer->pages->next;
  1055. bpage = list_entry(p, struct buffer_page, list);
  1056. list_del_init(&bpage->list);
  1057. free_buffer_page(bpage);
  1058. }
  1059. if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
  1060. goto out;
  1061. rb_reset_cpu(cpu_buffer);
  1062. rb_check_pages(cpu_buffer);
  1063. out:
  1064. raw_spin_unlock_irq(&cpu_buffer->reader_lock);
  1065. }
  1066. static void
  1067. rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
  1068. struct list_head *pages, unsigned nr_pages)
  1069. {
  1070. struct buffer_page *bpage;
  1071. struct list_head *p;
  1072. unsigned i;
  1073. raw_spin_lock_irq(&cpu_buffer->reader_lock);
  1074. rb_head_page_deactivate(cpu_buffer);
  1075. for (i = 0; i < nr_pages; i++) {
  1076. if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
  1077. goto out;
  1078. p = pages->next;
  1079. bpage = list_entry(p, struct buffer_page, list);
  1080. list_del_init(&bpage->list);
  1081. list_add_tail(&bpage->list, cpu_buffer->pages);
  1082. }
  1083. rb_reset_cpu(cpu_buffer);
  1084. rb_check_pages(cpu_buffer);
  1085. out:
  1086. raw_spin_unlock_irq(&cpu_buffer->reader_lock);
  1087. }
/**
 * ring_buffer_resize - resize the ring buffer
 * @buffer: the buffer to resize.
 * @size: the new size.
 *
 * Minimum size is 2 * BUF_PAGE_SIZE.
 *
 * Returns the (page aligned) size on success, or a negative value
 * (-ENOMEM or -1) on failure.
 */
int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned nr_pages, rm_pages, new_pages;
	struct buffer_page *bpage, *tmp;
	unsigned long buffer_size;
	LIST_HEAD(pages);
	int i, cpu;

	/*
	 * Always succeed at resizing a non-existent buffer:
	 */
	if (!buffer)
		return size;

	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	size *= BUF_PAGE_SIZE;
	buffer_size = buffer->pages * BUF_PAGE_SIZE;

	/* we need a minimum of two pages */
	if (size < BUF_PAGE_SIZE * 2)
		size = BUF_PAGE_SIZE * 2;

	if (size == buffer_size)
		return size;

	atomic_inc(&buffer->record_disabled);

	/* Make sure all writers are done with this buffer. */
	synchronize_sched();

	mutex_lock(&buffer->mutex);
	get_online_cpus();

	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);

	if (size < buffer_size) {

		/* easy case, just free pages */
		if (RB_WARN_ON(buffer, nr_pages >= buffer->pages))
			goto out_fail;

		rm_pages = buffer->pages - nr_pages;

		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];
			rb_remove_pages(cpu_buffer, rm_pages);
		}
		goto out;
	}

	/*
	 * This is a bit more difficult. We only want to add pages
	 * when we can allocate enough for all CPUs. We do this
	 * by allocating all the pages and storing them on a local
	 * linked list. If we succeed in our allocation, then we
	 * add these pages to the cpu_buffers. Otherwise we just free
	 * them all and return -ENOMEM;
	 */
	if (RB_WARN_ON(buffer, nr_pages <= buffer->pages))
		goto out_fail;

	new_pages = nr_pages - buffer->pages;

	for_each_buffer_cpu(buffer, cpu) {
		for (i = 0; i < new_pages; i++) {
			struct page *page;
			/*
			 * __GFP_NORETRY flag makes sure that the allocation
			 * fails gracefully without invoking oom-killer and
			 * the system is not destabilized.
			 */
			bpage = kzalloc_node(ALIGN(sizeof(*bpage),
						   cache_line_size()),
					     GFP_KERNEL | __GFP_NORETRY,
					     cpu_to_node(cpu));
			if (!bpage)
				goto free_pages;
			list_add(&bpage->list, &pages);
			page = alloc_pages_node(cpu_to_node(cpu),
						GFP_KERNEL | __GFP_NORETRY, 0);
			if (!page)
				goto free_pages;
			bpage->page = page_address(page);
			rb_init_page(bpage->page);
		}
	}

	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		rb_insert_pages(cpu_buffer, &pages, new_pages);
	}

	if (RB_WARN_ON(buffer, !list_empty(&pages)))
		goto out_fail;

 out:
	buffer->pages = nr_pages;
	put_online_cpus();
	mutex_unlock(&buffer->mutex);

	atomic_dec(&buffer->record_disabled);

	return size;

 free_pages:
	list_for_each_entry_safe(bpage, tmp, &pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	put_online_cpus();
	mutex_unlock(&buffer->mutex);
	atomic_dec(&buffer->record_disabled);
	return -ENOMEM;

	/*
	 * Something went totally wrong, and we are too paranoid
	 * to even clean up the mess.
	 */
 out_fail:
	put_online_cpus();
	mutex_unlock(&buffer->mutex);
	atomic_dec(&buffer->record_disabled);
	return -1;
}
EXPORT_SYMBOL_GPL(ring_buffer_resize);
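
/*
 * Illustrative use (a sketch only; "my_buffer" is a hypothetical buffer
 * created elsewhere with ring_buffer_alloc()):
 *
 *	int ret = ring_buffer_resize(my_buffer, 1 << 20);
 *	if (ret < 0)
 *		pr_warn("ring buffer resize failed: %d\n", ret);
 *
 * Note that the requested size is rounded up to a multiple of
 * BUF_PAGE_SIZE, and that rounded size is what is returned on success.
 */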
void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
{
	mutex_lock(&buffer->mutex);
	if (val)
		buffer->flags |= RB_FL_OVERWRITE;
	else
		buffer->flags &= ~RB_FL_OVERWRITE;
	mutex_unlock(&buffer->mutex);
}
EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
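
/*
 * For example (sketch; "my_buffer" is hypothetical), to switch a buffer
 * from overwrite (flight-recorder) mode to producer/consumer mode, where
 * writes fail once the buffer fills instead of overwriting old data:
 *
 *	ring_buffer_change_overwrite(my_buffer, 0);
 */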
static inline void *
__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
{
	return bpage->data + index;
}

static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
{
	return bpage->page->data + index;
}

static inline struct ring_buffer_event *
rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
{
	return __rb_page_index(cpu_buffer->reader_page,
			       cpu_buffer->reader_page->read);
}

static inline struct ring_buffer_event *
rb_iter_head_event(struct ring_buffer_iter *iter)
{
	return __rb_page_index(iter->head_page, iter->head);
}

static inline unsigned long rb_page_write(struct buffer_page *bpage)
{
	return local_read(&bpage->write) & RB_WRITE_MASK;
}

static inline unsigned rb_page_commit(struct buffer_page *bpage)
{
	return local_read(&bpage->page->commit);
}

static inline unsigned long rb_page_entries(struct buffer_page *bpage)
{
	return local_read(&bpage->entries) & RB_WRITE_MASK;
}

/* Size is determined by what has been committed */
static inline unsigned rb_page_size(struct buffer_page *bpage)
{
	return rb_page_commit(bpage);
}

static inline unsigned
rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
{
	return rb_page_commit(cpu_buffer->commit_page);
}

static inline unsigned
rb_event_index(struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;

	return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
}
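
/*
 * Worked example of the index arithmetic above (assuming 4K pages and a
 * 16-byte buffer_data_page header, i.e. an 8-byte time_stamp plus an
 * 8-byte commit field on a 64-bit kernel): an event at address
 * 0xffff880012345050 sits at page offset 0x50, so rb_event_index()
 * returns 0x50 - 16 = 0x40 -- the event's offset within the page's
 * data array.
 */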
static inline int
rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
		   struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;
	unsigned long index;

	index = rb_event_index(event);
	addr &= PAGE_MASK;

	return cpu_buffer->commit_page->page == (void *)addr &&
		rb_commit_index(cpu_buffer) == index;
}
static void
rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
{
	unsigned long max_count;

	/*
	 * We only race with interrupts and NMIs on this CPU.
	 * If we own the commit event, then we can commit
	 * all others that interrupted us, since the interruptions
	 * are in stack format (they finish before they come
	 * back to us). This allows us to do a simple loop to
	 * assign the commit to the tail.
	 */
 again:
	max_count = cpu_buffer->buffer->pages * 100;

	while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
		if (RB_WARN_ON(cpu_buffer, !(--max_count)))
			return;
		if (RB_WARN_ON(cpu_buffer,
			       rb_is_reader_page(cpu_buffer->tail_page)))
			return;
		local_set(&cpu_buffer->commit_page->page->commit,
			  rb_page_write(cpu_buffer->commit_page));
		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
		cpu_buffer->write_stamp =
			cpu_buffer->commit_page->page->time_stamp;
		/* add barrier to keep gcc from optimizing too much */
		barrier();
	}
	while (rb_commit_index(cpu_buffer) !=
	       rb_page_write(cpu_buffer->commit_page)) {

		local_set(&cpu_buffer->commit_page->page->commit,
			  rb_page_write(cpu_buffer->commit_page));
		RB_WARN_ON(cpu_buffer,
			   local_read(&cpu_buffer->commit_page->page->commit) &
			   ~RB_WRITE_MASK);
		barrier();
	}

	/* again, keep gcc from optimizing */
	barrier();

	/*
	 * If an interrupt came in just after the first while loop
	 * and pushed the tail page forward, we will be left with
	 * a dangling commit that will never go forward.
	 */
	if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
		goto again;
}
static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
	cpu_buffer->reader_page->read = 0;
}

static void rb_inc_iter(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/*
	 * The iterator could be on the reader page (it starts there).
	 * But the head could have moved, since the reader was
	 * found. Check for this case and assign the iterator
	 * to the head page instead of next.
	 */
	if (iter->head_page == cpu_buffer->reader_page)
		iter->head_page = rb_set_head_page(cpu_buffer);
	else
		rb_inc_page(cpu_buffer, &iter->head_page);

	iter->read_stamp = iter->head_page->page->time_stamp;
	iter->head = 0;
}
/* Slow path, do not inline */
static noinline struct ring_buffer_event *
rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
{
	event->type_len = RINGBUF_TYPE_TIME_EXTEND;

	/* Not the first event on the page? */
	if (rb_event_index(event)) {
		event->time_delta = delta & TS_MASK;
		event->array[0] = delta >> TS_SHIFT;
	} else {
		/* nope, just zero it */
		event->time_delta = 0;
		event->array[0] = 0;
	}

	return skip_time_extend(event);
}
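
/*
 * Worked example of the split above (assuming the 27-bit TS_SHIFT /
 * TS_MASK used by this file): a delta of 0x12345678 does not fit in the
 * event's 27-bit time_delta field, so it is stored as
 *
 *	time_delta = 0x12345678 & TS_MASK   = 0x2345678
 *	array[0]   = 0x12345678 >> TS_SHIFT = 0x2
 *
 * and the reader reassembles it as (array[0] << TS_SHIFT) + time_delta,
 * exactly as rb_update_write_stamp() and rb_update_read_stamp() do.
 */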
/**
 * rb_update_event - update event type and data
 * @event: the event to update
 * @type: the type of event
 * @length: the size of the event field in the ring buffer
 *
 * Update the type and data fields of the event. The length
 * is the actual size that is written to the ring buffer,
 * and with this, we can determine what to place into the
 * data field.
 */
static void
rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
		struct ring_buffer_event *event, unsigned length,
		int add_timestamp, u64 delta)
{
	/* Only a commit updates the timestamp */
	if (unlikely(!rb_event_is_commit(cpu_buffer, event)))
		delta = 0;

	/*
	 * If we need to add a timestamp, then we
	 * add it to the start of the reserved space.
	 */
	if (unlikely(add_timestamp)) {
		event = rb_add_time_stamp(event, delta);
		length -= RB_LEN_TIME_EXTEND;
		delta = 0;
	}

	event->time_delta = delta;
	length -= RB_EVNT_HDR_SIZE;
	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
		event->type_len = 0;
		event->array[0] = length;
	} else
		event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
}
/*
 * rb_handle_head_page - writer hit the head page
 *
 * Returns: +1 to retry page
 *           0 to continue
 *          -1 on error
 */
static int
rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
		    struct buffer_page *tail_page,
		    struct buffer_page *next_page)
{
	struct buffer_page *new_head;
	int entries;
	int type;
	int ret;

	entries = rb_page_entries(next_page);

	/*
	 * The hard part is here. We need to move the head
	 * forward, and protect against both readers on
	 * other CPUs and writers coming in via interrupts.
	 */
	type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
				       RB_PAGE_HEAD);

	/*
	 * type can be one of four:
	 *  NORMAL - an interrupt already moved it for us
	 *  HEAD   - we are the first to get here.
	 *  UPDATE - we are the interrupt interrupting
	 *           a current move.
	 *  MOVED  - a reader on another CPU moved the next
	 *           pointer to its reader page. Give up
	 *           and try again.
	 */

	switch (type) {
	case RB_PAGE_HEAD:
		/*
		 * We changed the head to UPDATE, thus
		 * it is our responsibility to update
		 * the counters.
		 */
		local_add(entries, &cpu_buffer->overrun);
		local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);

		/*
		 * The entries will be zeroed out when we move the
		 * tail page.
		 */

		/* still more to do */
		break;

	case RB_PAGE_UPDATE:
		/*
		 * This is an interrupt that interrupted the
		 * previous update. Still more to do.
		 */
		break;
	case RB_PAGE_NORMAL:
		/*
		 * An interrupt came in before the update
		 * and processed this for us.
		 * Nothing left to do.
		 */
		return 1;
	case RB_PAGE_MOVED:
		/*
		 * The reader is on another CPU and just did
		 * a swap with our next_page.
		 * Try again.
		 */
		return 1;
	default:
		RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
		return -1;
	}

	/*
	 * Now that we are here, the old head pointer is
	 * set to UPDATE. This will keep the reader from
	 * swapping the head page with the reader page.
	 * The reader (on another CPU) will spin till
	 * we are finished.
	 *
	 * We just need to protect against interrupts
	 * doing the job. We will set the next pointer
	 * to HEAD. After that, we set the old pointer
	 * to NORMAL, but only if it was HEAD before,
	 * otherwise we are an interrupt, and only
	 * want the outermost commit to reset it.
	 */
	new_head = next_page;
	rb_inc_page(cpu_buffer, &new_head);

	ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
				    RB_PAGE_NORMAL);

	/*
	 * Valid returns are:
	 *  HEAD   - an interrupt came in and already set it.
	 *  NORMAL - One of two things:
	 *            1) We really set it.
	 *            2) A bunch of interrupts came in and moved
	 *               the page forward again.
	 */
	switch (ret) {
	case RB_PAGE_HEAD:
	case RB_PAGE_NORMAL:
		/* OK */
		break;
	default:
		RB_WARN_ON(cpu_buffer, 1);
		return -1;
	}

	/*
	 * It is possible that an interrupt came in,
	 * set the head up, then more interrupts came in
	 * and moved it again. When we get back here,
	 * the page would have been set to NORMAL but we
	 * just set it back to HEAD.
	 *
	 * How do you detect this? Well, if that happened
	 * the tail page would have moved.
	 */
	if (ret == RB_PAGE_NORMAL) {
		/*
		 * If the tail had moved past next, then we need
		 * to reset the pointer.
		 */
		if (cpu_buffer->tail_page != tail_page &&
		    cpu_buffer->tail_page != next_page)
			rb_head_page_set_normal(cpu_buffer, new_head,
						next_page,
						RB_PAGE_HEAD);
	}

	/*
	 * If this was the outermost commit (the one that
	 * changed the original pointer from HEAD to UPDATE),
	 * then it is up to us to reset it to NORMAL.
	 */
	if (type == RB_PAGE_HEAD) {
		ret = rb_head_page_set_normal(cpu_buffer, next_page,
					      tail_page,
					      RB_PAGE_UPDATE);
		if (RB_WARN_ON(cpu_buffer,
			       ret != RB_PAGE_UPDATE))
			return -1;
	}

	return 0;
}
static unsigned rb_calculate_event_length(unsigned length)
{
	struct ring_buffer_event event; /* Used only for sizeof array */

	/* zero length can cause confusions */
	if (!length)
		length = 1;

	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
		length += sizeof(event.array[0]);

	length += RB_EVNT_HDR_SIZE;
	length = ALIGN(length, RB_ARCH_ALIGNMENT);

	return length;
}
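
/*
 * Worked example (assuming the usual constants: a 4-byte event header,
 * RB_MAX_SMALL_DATA of 112 bytes and 4-byte alignment, with
 * RB_FORCE_8BYTE_ALIGNMENT not set): a 10-byte payload becomes
 * 10 + 4 = 14, aligned up to 16 bytes on the page. A 200-byte payload
 * is too big to encode in type_len, so an extra 4-byte array[0] slot
 * is reserved to hold the length: 200 + 4 + 4 = 208, already aligned.
 */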
static inline void
rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
	      struct buffer_page *tail_page,
	      unsigned long tail, unsigned long length)
{
	struct ring_buffer_event *event;

	/*
	 * Only the event that crossed the page boundary
	 * must fill the old tail_page with padding.
	 */
	if (tail >= BUF_PAGE_SIZE) {
		/*
		 * If the page was filled, then we still need
		 * to update the real_end. Reset it to zero
		 * and the reader will ignore it.
		 */
		if (tail == BUF_PAGE_SIZE)
			tail_page->real_end = 0;

		local_sub(length, &tail_page->write);
		return;
	}

	event = __rb_page_index(tail_page, tail);
	kmemcheck_annotate_bitfield(event, bitfield);

	/* account for padding bytes */
	local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);

	/*
	 * Save the original length to the meta data.
	 * This will be used by the reader to add lost event
	 * counter.
	 */
	tail_page->real_end = tail;

	/*
	 * If this event is bigger than the minimum size, then
	 * we need to be careful that we don't subtract the
	 * write counter enough to allow another writer to slip
	 * in on this page.
	 * We put in a discarded commit instead, to make sure
	 * that this space is not used again.
	 *
	 * If we are less than the minimum size, we don't need to
	 * worry about it.
	 */
	if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
		/* No room for any events */

		/* Mark the rest of the page with padding */
		rb_event_set_padding(event);

		/* Set the write back to the previous setting */
		local_sub(length, &tail_page->write);
		return;
	}

	/* Put in a discarded event */
	event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
	event->type_len = RINGBUF_TYPE_PADDING;
	/* time delta must be non zero */
	event->time_delta = 1;

	/* Set write to end of buffer */
	length = (tail + length) - BUF_PAGE_SIZE;
	local_sub(length, &tail_page->write);
}
/*
 * This is the slow path, force gcc not to inline it.
 */
static noinline struct ring_buffer_event *
rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
	     unsigned long length, unsigned long tail,
	     struct buffer_page *tail_page, u64 ts)
{
	struct buffer_page *commit_page = cpu_buffer->commit_page;
	struct ring_buffer *buffer = cpu_buffer->buffer;
	struct buffer_page *next_page;
	int ret;

	next_page = tail_page;

	rb_inc_page(cpu_buffer, &next_page);

	/*
	 * If for some reason, we had an interrupt storm that made
	 * it all the way around the buffer, bail, and warn
	 * about it.
	 */
	if (unlikely(next_page == commit_page)) {
		local_inc(&cpu_buffer->commit_overrun);
		goto out_reset;
	}

	/*
	 * This is where the fun begins!
	 *
	 * We are fighting against races between a reader that
	 * could be on another CPU trying to swap its reader
	 * page with the buffer head.
	 *
	 * We are also fighting against interrupts coming in and
	 * moving the head or tail on us as well.
	 *
	 * If the next page is the head page then we have filled
	 * the buffer, unless the commit page is still on the
	 * reader page.
	 */
	if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {

		/*
		 * If the commit is not on the reader page, then
		 * move the header page.
		 */
		if (!rb_is_reader_page(cpu_buffer->commit_page)) {
			/*
			 * If we are not in overwrite mode,
			 * this is easy, just stop here.
			 */
			if (!(buffer->flags & RB_FL_OVERWRITE))
				goto out_reset;

			ret = rb_handle_head_page(cpu_buffer,
						  tail_page,
						  next_page);
			if (ret < 0)
				goto out_reset;
			if (ret)
				goto out_again;
		} else {
			/*
			 * We need to be careful here too. The
			 * commit page could still be on the reader
			 * page. We could have a small buffer, and
			 * have filled up the buffer with events
			 * from interrupts and such, and wrapped.
			 *
			 * Note, if the tail page is also on the
			 * reader_page, we let it move out.
			 */
			if (unlikely((cpu_buffer->commit_page !=
				      cpu_buffer->tail_page) &&
				     (cpu_buffer->commit_page ==
				      cpu_buffer->reader_page))) {
				local_inc(&cpu_buffer->commit_overrun);
				goto out_reset;
			}
		}
	}

	ret = rb_tail_page_update(cpu_buffer, tail_page, next_page);
	if (ret) {
		/*
		 * Nested commits always have zero deltas, so
		 * just reread the time stamp
		 */
		ts = rb_time_stamp(buffer);
		next_page->page->time_stamp = ts;
	}

 out_again:

	rb_reset_tail(cpu_buffer, tail_page, tail, length);

	/* fail and let the caller try again */
	return ERR_PTR(-EAGAIN);

 out_reset:
	/* reset write */
	rb_reset_tail(cpu_buffer, tail_page, tail, length);

	return NULL;
}
static struct ring_buffer_event *
__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
		  unsigned long length, u64 ts,
		  u64 delta, int add_timestamp)
{
	struct buffer_page *tail_page;
	struct ring_buffer_event *event;
	unsigned long tail, write;

	/*
	 * If the time delta since the last event is too big to
	 * hold in the time field of the event, then we append a
	 * TIME EXTEND event ahead of the data event.
	 */
	if (unlikely(add_timestamp))
		length += RB_LEN_TIME_EXTEND;

	tail_page = cpu_buffer->tail_page;
	write = local_add_return(length, &tail_page->write);

	/* set write to only the index of the write */
	write &= RB_WRITE_MASK;
	tail = write - length;

	/* See if we shot past the end of this buffer page */
	if (unlikely(write > BUF_PAGE_SIZE))
		return rb_move_tail(cpu_buffer, length, tail,
				    tail_page, ts);

	/* We reserved something on the buffer */

	event = __rb_page_index(tail_page, tail);
	kmemcheck_annotate_bitfield(event, bitfield);
	rb_update_event(cpu_buffer, event, length, add_timestamp, delta);

	local_inc(&tail_page->entries);

	/*
	 * If this is the first commit on the page, then update
	 * its timestamp.
	 */
	if (!tail)
		tail_page->page->time_stamp = ts;

	/* account for these added bytes */
	local_add(length, &cpu_buffer->entries_bytes);

	return event;
}
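
/*
 * Worked example of the lock-free reservation above (illustrative
 * numbers only): with the page's write counter at 100, writer A
 * reserves 48 bytes; local_add_return() moves write to 148 and A's
 * tail is 100. If an interrupt then fires and writer B reserves 32
 * bytes, write moves to 180 and B's tail is 148. The atomic add gives
 * each writer a distinct, non-overlapping [tail, write) slice of the
 * page; whoever pushes write past BUF_PAGE_SIZE takes the
 * rb_move_tail() slow path instead.
 */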
static inline int
rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
		  struct ring_buffer_event *event)
{
	unsigned long new_index, old_index;
	struct buffer_page *bpage;
	unsigned long index;
	unsigned long addr;

	new_index = rb_event_index(event);
	old_index = new_index + rb_event_ts_length(event);
	addr = (unsigned long)event;
	addr &= PAGE_MASK;

	bpage = cpu_buffer->tail_page;

	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
		unsigned long write_mask =
			local_read(&bpage->write) & ~RB_WRITE_MASK;
		unsigned long event_length = rb_event_length(event);
		/*
		 * This is on the tail page. It is possible that
		 * a write could come in and move the tail page
		 * and write to the next page. That is fine
		 * because we just shorten what is on this page.
		 */
		old_index += write_mask;
		new_index += write_mask;
		index = local_cmpxchg(&bpage->write, old_index, new_index);
		if (index == old_index) {
			/* update counters */
			local_sub(event_length, &cpu_buffer->entries_bytes);
			return 1;
		}
	}

	/* could not discard */
	return 0;
}
static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
{
	local_inc(&cpu_buffer->committing);
	local_inc(&cpu_buffer->commits);
}

static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
{
	unsigned long commits;

	if (RB_WARN_ON(cpu_buffer,
		       !local_read(&cpu_buffer->committing)))
		return;

 again:
	commits = local_read(&cpu_buffer->commits);
	/* synchronize with interrupts */
	barrier();
	if (local_read(&cpu_buffer->committing) == 1)
		rb_set_commit_to_write(cpu_buffer);

	local_dec(&cpu_buffer->committing);

	/* synchronize with interrupts */
	barrier();

	/*
	 * Need to account for interrupts coming in between the
	 * updating of the commit page and the clearing of the
	 * committing counter.
	 */
	if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
	    !local_read(&cpu_buffer->committing)) {
		local_inc(&cpu_buffer->committing);
		goto again;
	}
}
static struct ring_buffer_event *
rb_reserve_next_event(struct ring_buffer *buffer,
		      struct ring_buffer_per_cpu *cpu_buffer,
		      unsigned long length)
{
	struct ring_buffer_event *event;
	u64 ts, delta;
	int nr_loops = 0;
	int add_timestamp;
	u64 diff;

	rb_start_commit(cpu_buffer);

#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	/*
	 * Due to the ability to swap a cpu buffer from a buffer
	 * it is possible it was swapped before we committed.
	 * (committing stops a swap). We check for it here and
	 * if it happened, we have to fail the write.
	 */
	barrier();
	if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
		local_dec(&cpu_buffer->committing);
		local_dec(&cpu_buffer->commits);
		return NULL;
	}
#endif

	length = rb_calculate_event_length(length);
 again:
	add_timestamp = 0;
	delta = 0;

	/*
	 * We allow for interrupts to reenter here and do a trace.
	 * If one does, it will cause this original code to loop
	 * back here. Even with heavy interrupts happening, this
	 * should only happen a few times in a row. If this happens
	 * 1000 times in a row, there must be either an interrupt
	 * storm or we have something buggy.
	 * Bail!
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
		goto out_fail;

	ts = rb_time_stamp(cpu_buffer->buffer);
	diff = ts - cpu_buffer->write_stamp;

	/* make sure this diff is calculated here */
	barrier();

	/* Did the write stamp get updated already? */
	if (likely(ts >= cpu_buffer->write_stamp)) {
		delta = diff;
		if (unlikely(test_time_stamp(delta))) {
			int local_clock_stable = 1;
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
			local_clock_stable = sched_clock_stable;
#endif
			WARN_ONCE(delta > (1ULL << 59),
				  KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s",
				  (unsigned long long)delta,
				  (unsigned long long)ts,
				  (unsigned long long)cpu_buffer->write_stamp,
				  local_clock_stable ? "" :
				  "If you just came from a suspend/resume,\n"
				  "please switch to the trace global clock:\n"
				  "  echo global > /sys/kernel/debug/tracing/trace_clock\n");
			add_timestamp = 1;
		}
	}

	event = __rb_reserve_next(cpu_buffer, length, ts,
				  delta, add_timestamp);
	if (unlikely(PTR_ERR(event) == -EAGAIN))
		goto again;

	if (!event)
		goto out_fail;

	return event;

 out_fail:
	rb_end_commit(cpu_buffer);
	return NULL;
}
#ifdef CONFIG_TRACING

#define TRACE_RECURSIVE_DEPTH 16

/* Keep this code out of the fast path cache */
static noinline void trace_recursive_fail(void)
{
	/* Disable all tracing before we do anything else */
	tracing_off_permanent();

	printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
		    "HC[%lu]:SC[%lu]:NMI[%lu]\n",
		    trace_recursion_buffer(),
		    hardirq_count() >> HARDIRQ_SHIFT,
		    softirq_count() >> SOFTIRQ_SHIFT,
		    in_nmi());

	WARN_ON_ONCE(1);
}

static inline int trace_recursive_lock(void)
{
	trace_recursion_inc();

	if (likely(trace_recursion_buffer() < TRACE_RECURSIVE_DEPTH))
		return 0;

	trace_recursive_fail();

	return -1;
}

static inline void trace_recursive_unlock(void)
{
	WARN_ON_ONCE(!trace_recursion_buffer());

	trace_recursion_dec();
}

#else

#define trace_recursive_lock()		(0)
#define trace_recursive_unlock()	do { } while (0)

#endif
/**
 * ring_buffer_lock_reserve - reserve a part of the buffer
 * @buffer: the ring buffer to reserve from
 * @length: the length of the data to reserve (excluding event header)
 *
 * Returns a reserved event on the ring buffer to copy directly to.
 * The user of this interface will need to get the body to write into
 * and can use the ring_buffer_event_data() interface.
 *
 * The length is the length of the data needed, not the event length
 * which also includes the event header.
 *
 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
 * If NULL is returned, then nothing has been allocated or locked.
 */
struct ring_buffer_event *
ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	int cpu;

	if (ring_buffer_flags != RB_BUFFERS_ON)
		return NULL;

	/* If we are tracing schedule, we don't want to recurse */
	preempt_disable_notrace();

	if (atomic_read(&buffer->record_disabled))
		goto out_nocheck;

	if (trace_recursive_lock())
		goto out_nocheck;

	cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];

	if (atomic_read(&cpu_buffer->record_disabled))
		goto out;

	if (length > BUF_MAX_DATA_SIZE)
		goto out;

	event = rb_reserve_next_event(buffer, cpu_buffer, length);
	if (!event)
		goto out;

	return event;

 out:
	trace_recursive_unlock();

 out_nocheck:
	preempt_enable_notrace();
	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
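
/*
 * Typical reserve/commit usage (a sketch; "my_buffer" and struct
 * my_entry are hypothetical, not part of this file):
 *
 *	struct ring_buffer_event *event;
 *	struct my_entry *entry;
 *
 *	event = ring_buffer_lock_reserve(my_buffer, sizeof(*entry));
 *	if (event) {
 *		entry = ring_buffer_event_data(event);
 *		entry->value = 42;
 *		ring_buffer_unlock_commit(my_buffer, event);
 *	}
 */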
static void
rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
		      struct ring_buffer_event *event)
{
	u64 delta;

	/*
	 * The event first in the commit queue updates the
	 * time stamp.
	 */
	if (rb_event_is_commit(cpu_buffer, event)) {
		/*
		 * A commit event that is first on a page
		 * updates the write timestamp with the page stamp
		 */
		if (!rb_event_index(event))
			cpu_buffer->write_stamp =
				cpu_buffer->commit_page->page->time_stamp;
		else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
			delta = event->array[0];
			delta <<= TS_SHIFT;
			delta += event->time_delta;
			cpu_buffer->write_stamp += delta;
		} else
			cpu_buffer->write_stamp += event->time_delta;
	}
}

static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
		      struct ring_buffer_event *event)
{
	local_inc(&cpu_buffer->entries);
	rb_update_write_stamp(cpu_buffer, event);
	rb_end_commit(cpu_buffer);
}
/**
 * ring_buffer_unlock_commit - commit a reserved event
 * @buffer: The buffer to commit to
 * @event: The event pointer to commit.
 *
 * This commits the data to the ring buffer, and releases any locks held.
 *
 * Must be paired with ring_buffer_lock_reserve.
 */
int ring_buffer_unlock_commit(struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu = raw_smp_processor_id();

	cpu_buffer = buffer->buffers[cpu];

	rb_commit(cpu_buffer, event);

	trace_recursive_unlock();

	preempt_enable_notrace();

	return 0;
}
EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
static inline void rb_event_discard(struct ring_buffer_event *event)
{
	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
		event = skip_time_extend(event);

	/* array[0] holds the actual length for the discarded event */
	event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
	event->type_len = RINGBUF_TYPE_PADDING;
	/* time delta must be non zero */
	if (!event->time_delta)
		event->time_delta = 1;
}
/*
 * Decrement the entries to the page that an event is on.
 * The event does not even need to exist, only the pointer
 * to the page it is on. This may only be called before the commit
 * takes place.
 */
static inline void
rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
		   struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;
	struct buffer_page *bpage = cpu_buffer->commit_page;
	struct buffer_page *start;

	addr &= PAGE_MASK;

	/* Do the likely case first */
	if (likely(bpage->page == (void *)addr)) {
		local_dec(&bpage->entries);
		return;
	}

	/*
	 * Because the commit page may be on the reader page we
	 * start with the next page and check the end loop there.
	 */
	rb_inc_page(cpu_buffer, &bpage);
	start = bpage;
	do {
		if (bpage->page == (void *)addr) {
			local_dec(&bpage->entries);
			return;
		}
		rb_inc_page(cpu_buffer, &bpage);
	} while (bpage != start);

	/* commit not part of this buffer?? */
	RB_WARN_ON(cpu_buffer, 1);
}
/**
 * ring_buffer_discard_commit - discard an event that has not been committed
 * @buffer: the ring buffer
 * @event: non committed event to discard
 *
 * Sometimes an event that is in the ring buffer needs to be ignored.
 * This function lets the user discard an event in the ring buffer
 * and then that event will not be read later.
 *
 * This function only works if it is called before the item has been
 * committed. It will try to free the event from the ring buffer
 * if another event has not been added behind it.
 *
 * If another event has been added behind it, it will set the event
 * up as discarded, and perform the commit.
 *
 * If this function is called, do not call ring_buffer_unlock_commit on
 * the event.
 */
void ring_buffer_discard_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu;

	/* The event is discarded regardless */
	rb_event_discard(event);

	cpu = smp_processor_id();
	cpu_buffer = buffer->buffers[cpu];

	/*
	 * This must only be called if the event has not been
	 * committed yet. Thus we can assume that preemption
	 * is still disabled.
	 */
	RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));

	rb_decrement_entry(cpu_buffer, event);
	if (rb_try_to_discard(cpu_buffer, event))
		goto out;

	/*
	 * The commit is still visible by the reader, so we
	 * must still update the timestamp.
	 */
	rb_update_write_stamp(cpu_buffer, event);
 out:
	rb_end_commit(cpu_buffer);

	trace_recursive_unlock();

	preempt_enable_notrace();
}
EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
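
/*
 * Reserve-then-discard usage (a sketch; "my_buffer" and the
 * fill_entry() helper are hypothetical):
 *
 *	event = ring_buffer_lock_reserve(my_buffer, sizeof(*entry));
 *	if (event) {
 *		entry = ring_buffer_event_data(event);
 *		if (fill_entry(entry) < 0)
 *			ring_buffer_discard_commit(my_buffer, event);
 *		else
 *			ring_buffer_unlock_commit(my_buffer, event);
 *	}
 */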
/**
 * ring_buffer_write - write data to the buffer without reserving
 * @buffer: The ring buffer to write to.
 * @length: The length of the data being written (excluding the event header)
 * @data: The data to write to the buffer.
 *
 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
 * one function. If you already have the data to write to the buffer, it
 * may be easier to simply call this function.
 *
 * Note, like ring_buffer_lock_reserve, the length is the length of the data
 * and not the length of the event which would hold the header.
 */
int ring_buffer_write(struct ring_buffer *buffer,
		      unsigned long length,
		      void *data)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	void *body;
	int ret = -EBUSY;
	int cpu;

	if (ring_buffer_flags != RB_BUFFERS_ON)
		return -EBUSY;

	preempt_disable_notrace();

	if (atomic_read(&buffer->record_disabled))
		goto out;

	cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];

	if (atomic_read(&cpu_buffer->record_disabled))
		goto out;

	if (length > BUF_MAX_DATA_SIZE)
		goto out;

	event = rb_reserve_next_event(buffer, cpu_buffer, length);
	if (!event)
		goto out;

	body = rb_event_data(event);

	memcpy(body, data, length);

	rb_commit(cpu_buffer, event);

	ret = 0;
 out:
	preempt_enable_notrace();

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_write);
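
/*
 * Example (sketch; "my_buffer" and struct my_entry are hypothetical):
 * copy an existing blob into the buffer in one call instead of doing
 * reserve + memcpy + commit by hand. Returns 0 on success, -EBUSY if
 * recording is disabled or the write could not complete:
 *
 *	struct my_entry entry = { .value = 42 };
 *
 *	if (ring_buffer_write(my_buffer, sizeof(entry), &entry))
 *		pr_debug("ring buffer write failed\n");
 */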
static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *reader = cpu_buffer->reader_page;
	struct buffer_page *head = rb_set_head_page(cpu_buffer);
	struct buffer_page *commit = cpu_buffer->commit_page;

	/* In case of error, head will be NULL */
	if (unlikely(!head))
		return 1;

	return reader->read == rb_page_commit(reader) &&
		(commit == reader ||
		 (commit == head &&
		  head->read == rb_page_commit(commit)));
}
/**
 * ring_buffer_record_disable - stop all writes into the buffer
 * @buffer: The ring buffer to stop writes to.
 *
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
 * The caller should call synchronize_sched() after this.
 */
void ring_buffer_record_disable(struct ring_buffer *buffer)
{
	atomic_inc(&buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_disable);

/**
 * ring_buffer_record_enable - enable writes to the buffer
 * @buffer: The ring buffer to enable writes
 *
 * Note, multiple disables will need the same number of enables
 * to truly enable the writing (much like preempt_disable).
 */
void ring_buffer_record_enable(struct ring_buffer *buffer)
{
	atomic_dec(&buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
/**
 * ring_buffer_record_off - stop all writes into the buffer
 * @buffer: The ring buffer to stop writes to.
 *
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
 * This is different than ring_buffer_record_disable() as
 * it works like an on/off switch, whereas the disable() version
 * must be paired with an enable().
 */
void ring_buffer_record_off(struct ring_buffer *buffer)
{
	unsigned int rd;
	unsigned int new_rd;

	do {
		rd = atomic_read(&buffer->record_disabled);
		new_rd = rd | RB_BUFFER_OFF;
	} while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_off);

/**
 * ring_buffer_record_on - restart writes into the buffer
 * @buffer: The ring buffer to start writes to.
 *
 * This enables all writes to the buffer that was disabled by
 * ring_buffer_record_off().
 *
 * This is different than ring_buffer_record_enable() as
 * it works like an on/off switch, whereas the enable() version
 * must be paired with a disable().
 */
void ring_buffer_record_on(struct ring_buffer *buffer)
{
	unsigned int rd;
	unsigned int new_rd;

	do {
		rd = atomic_read(&buffer->record_disabled);
		new_rd = rd & ~RB_BUFFER_OFF;
	} while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_on);
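
/*
 * The counted and the on/off interfaces compose differently (an
 * illustrative sketch; "buf" is hypothetical):
 *
 *	ring_buffer_record_disable(buf);
 *	ring_buffer_record_disable(buf);
 *	ring_buffer_record_enable(buf);   <-- still disabled, count is 1
 *	ring_buffer_record_enable(buf);   <-- enabled again
 *
 *	ring_buffer_record_off(buf);
 *	ring_buffer_record_off(buf);      <-- idempotent, same bit set
 *	ring_buffer_record_on(buf);       <-- back on with a single call
 */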
/**
 * ring_buffer_record_is_on - return true if the ring buffer can write
 * @buffer: The ring buffer to see if write is enabled
 *
 * Returns true if the ring buffer is in a state that it accepts writes.
 */
int ring_buffer_record_is_on(struct ring_buffer *buffer)
{
	return !atomic_read(&buffer->record_disabled);
}
/**
 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
 * @buffer: The ring buffer to stop writes to.
 * @cpu: The CPU buffer to stop
 *
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
 * The caller should call synchronize_sched() after this.
 */
void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	cpu_buffer = buffer->buffers[cpu];
	atomic_inc(&cpu_buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);

/**
 * ring_buffer_record_enable_cpu - enable writes to the buffer
 * @buffer: The ring buffer to enable writes
 * @cpu: The CPU to enable.
 *
 * Note, multiple disables will need the same number of enables
 * to truly enable the writing (much like preempt_disable).
 */
void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	cpu_buffer = buffer->buffers[cpu];
	atomic_dec(&cpu_buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
/*
 * The total entries in the ring buffer is the running counter
 * of entries entered into the ring buffer, minus the sum of
 * the entries read from the ring buffer and the number of
 * entries that were overwritten.
 */
static inline unsigned long
rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
{
	return local_read(&cpu_buffer->entries) -
		(local_read(&cpu_buffer->overrun) + cpu_buffer->read);
}
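
/*
 * For example (illustrative numbers): if 1000 events were written,
 * 150 were overwritten before being read and 600 have been consumed,
 * rb_num_of_entries() reports 1000 - (150 + 600) = 250 events still
 * waiting in the buffer.
 */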
/**
 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to read from.
 */
unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
{
	unsigned long flags;
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *bpage;
	unsigned long ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	/*
	 * if the tail is on reader_page, oldest time stamp is on the reader
	 * page
	 */
	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
		bpage = cpu_buffer->reader_page;
	else
		bpage = rb_set_head_page(cpu_buffer);
	ret = bpage->page->time_stamp;
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
/**
 * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to read from.
 */
unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);

/**
 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the entries from.
 */
unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];

	return rb_num_of_entries(cpu_buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);

/**
 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the number of overruns from
 */
unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	ret = local_read(&cpu_buffer->overrun);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);

/**
 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the number of overruns from
 */
unsigned long
ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	ret = local_read(&cpu_buffer->commit_overrun);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);

/**
 * ring_buffer_entries - get the number of entries in a buffer
 * @buffer: The ring buffer
 *
 * Returns the total number of entries in the ring buffer
 * (all CPU entries)
 */
unsigned long ring_buffer_entries(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long entries = 0;
	int cpu;

	/* if you care about this being correct, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		entries += rb_num_of_entries(cpu_buffer);
	}

	return entries;
}
EXPORT_SYMBOL_GPL(ring_buffer_entries);

/**
 * ring_buffer_overruns - get the number of overruns in buffer
 * @buffer: The ring buffer
 *
 * Returns the total number of overruns in the ring buffer
 * (all CPU entries)
 */
unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long overruns = 0;
	int cpu;

	/* if you care about this being correct, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		overruns += local_read(&cpu_buffer->overrun);
	}

	return overruns;
}
EXPORT_SYMBOL_GPL(ring_buffer_overruns);
static void rb_iter_reset(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/* Iterator usage is expected to have record disabled */
	if (list_empty(&cpu_buffer->reader_page->list)) {
		iter->head_page = rb_set_head_page(cpu_buffer);
		if (unlikely(!iter->head_page))
			return;
		iter->head = iter->head_page->read;
	} else {
		iter->head_page = cpu_buffer->reader_page;
		iter->head = cpu_buffer->reader_page->read;
	}
	if (iter->head)
		iter->read_stamp = cpu_buffer->read_stamp;
	else
		iter->read_stamp = iter->head_page->page->time_stamp;
	iter->cache_reader_page = cpu_buffer->reader_page;
	iter->cache_read = cpu_buffer->read;
}

/**
 * ring_buffer_iter_reset - reset an iterator
 * @iter: The iterator to reset
 *
 * Resets the iterator, so that it will start from the beginning
 * again.
 */
void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;

	if (!iter)
		return;

	cpu_buffer = iter->cpu_buffer;

	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	rb_iter_reset(iter);
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);

/**
 * ring_buffer_iter_empty - check if an iterator has no more to read
 * @iter: The iterator to check
 */
int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	cpu_buffer = iter->cpu_buffer;

	return iter->head_page == cpu_buffer->commit_page &&
		iter->head == rb_commit_index(cpu_buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
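
/*
 * Iterator usage (a sketch, assuming the ring_buffer_read_start(),
 * ring_buffer_read() and ring_buffer_read_finish() interfaces declared
 * in include/linux/ring_buffer.h; "my_buffer" and the consume()
 * callback are hypothetical):
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_start(my_buffer, cpu);
 *	if (iter) {
 *		while ((event = ring_buffer_read(iter, &ts)))
 *			consume(ring_buffer_event_data(event), ts);
 *		ring_buffer_read_finish(iter);
 *	}
 *
 * Unlike a consuming read, iterating leaves the events in place.
 */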
static void
rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
		     struct ring_buffer_event *event)
{
	u64 delta;

	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		return;

	case RINGBUF_TYPE_TIME_EXTEND:
		delta = event->array[0];
		delta <<= TS_SHIFT;
		delta += event->time_delta;
		cpu_buffer->read_stamp += delta;
		return;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		return;

	case RINGBUF_TYPE_DATA:
		cpu_buffer->read_stamp += event->time_delta;
		return;

	default:
		BUG();
	}
	return;
}

static void
rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
			  struct ring_buffer_event *event)
{
	u64 delta;

	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		return;

	case RINGBUF_TYPE_TIME_EXTEND:
		delta = event->array[0];
		delta <<= TS_SHIFT;
		delta += event->time_delta;
		iter->read_stamp += delta;
		return;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		return;

	case RINGBUF_TYPE_DATA:
		iter->read_stamp += event->time_delta;
		return;

	default:
		BUG();
	}
	return;
}
static struct buffer_page *
rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *reader = NULL;
	unsigned long overwrite;
	unsigned long flags;
	int nr_loops = 0;
	int ret;

	local_irq_save(flags);
	arch_spin_lock(&cpu_buffer->lock);

 again:
	/*
	 * This should normally only loop twice. But because the
	 * start of the reader inserts an empty page, it causes
	 * a case where we will loop three times. There should be no
	 * reason to loop four times (that I know of).
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
		reader = NULL;
		goto out;
	}

	reader = cpu_buffer->reader_page;

	/* If there's more to read, return this page */
	if (cpu_buffer->reader_page->read < rb_page_size(reader))
		goto out;

	/* Never should we have an index greater than the size */
	if (RB_WARN_ON(cpu_buffer,
		       cpu_buffer->reader_page->read > rb_page_size(reader)))
		goto out;

	/* check if we caught up to the tail */
	reader = NULL;
	if (cpu_buffer->commit_page == cpu_buffer->reader_page)
		goto out;

	/*
	 * Reset the reader page to size zero.
	 */
	local_set(&cpu_buffer->reader_page->write, 0);
	local_set(&cpu_buffer->reader_page->entries, 0);
	local_set(&cpu_buffer->reader_page->page->commit, 0);
	cpu_buffer->reader_page->real_end = 0;

 spin:
	/*
	 * Splice the empty reader page into the list around the head.
	 */
	reader = rb_set_head_page(cpu_buffer);
	cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
	cpu_buffer->reader_page->list.prev = reader->list.prev;

	/*
	 * cpu_buffer->pages just needs to point to the buffer, it
	 * has no specific buffer page to point to. Let's move it out
	 * of our way so we don't accidentally swap it.
	 */
	cpu_buffer->pages = reader->list.prev;

	/* The reader page will be pointing to the new head */
	rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);

	/*
	 * We want to make sure we read the overruns after we set up our
	 * pointers to the next object. The writer side does a
	 * cmpxchg to cross pages which acts as the mb on the writer
	 * side. Note, the reader will constantly fail the swap
	 * while the writer is updating the pointers, so this
	 * guarantees that the overwrite recorded here is the one we
	 * want to compare with the last_overrun.
	 */
	smp_mb();
	overwrite = local_read(&(cpu_buffer->overrun));

	/*
	 * Here's the tricky part.
	 *
	 * We need to move the pointer past the header page.
	 * But we can only do that if a writer is not currently
	 * moving it. The page before the header page has the
	 * flag bit '1' set if it is pointing to the page we want.
	 * But if the writer is in the process of moving it
	 * then it will be '2', or '0' if it has already moved.
	 */

	ret = rb_head_page_replace(reader, cpu_buffer->reader_page);

	/*
	 * If we did not convert it, then we must try again.
	 */
	if (!ret)
		goto spin;

	/*
	 * Yeah! We succeeded in replacing the page.
	 *
	 * Now make the new head point back to the reader page.
	 */
	rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
	rb_inc_page(cpu_buffer, &cpu_buffer->head_page);

	/* Finally update the reader page to the new head */
	cpu_buffer->reader_page = reader;
	rb_reset_reader_page(cpu_buffer);

	if (overwrite != cpu_buffer->last_overrun) {
		cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
		cpu_buffer->last_overrun = overwrite;
	}

	goto again;

 out:
	arch_spin_unlock(&cpu_buffer->lock);
	local_irq_restore(flags);

	return reader;
}
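
/*
 * A sketch of the swap performed above: the reader hands its empty
 * spare page to the writer's ring in exchange for the current head
 * page, which it can then read without the writer touching it.
 *
 *	before:	reader_page (empty, off the ring)
 *		... <-> [ A=head ] <-> [ B ] <-> ...
 *
 *	after:	reader_page = A (full, off the ring)
 *		... <-> [ empty  ] <-> [ B ] <-> ...
 *
 * Only the head page is ever swapped out, so the writer can keep
 * appending at the tail the whole time.
 */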
static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct ring_buffer_event *event;
	struct buffer_page *reader;
	unsigned length;

	reader = rb_get_reader_page(cpu_buffer);

	/* This function should not be called when buffer is empty */
	if (RB_WARN_ON(cpu_buffer, !reader))
		return;

	event = rb_reader_event(cpu_buffer);

	if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
		cpu_buffer->read++;

	rb_update_read_stamp(cpu_buffer, event);

	length = rb_event_length(event);
	cpu_buffer->reader_page->read += length;
}

static void rb_advance_iter(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	unsigned length;

	cpu_buffer = iter->cpu_buffer;

	/*
	 * Check if we are at the end of the buffer.
	 */
	if (iter->head >= rb_page_size(iter->head_page)) {
		/* discarded commits can make the page empty */
		if (iter->head_page == cpu_buffer->commit_page)
			return;
		rb_inc_iter(iter);
		return;
	}

	event = rb_iter_head_event(iter);

	length = rb_event_length(event);

	/*
	 * This should not be called to advance the header if we are
	 * at the tail of the buffer.
	 */
	if (RB_WARN_ON(cpu_buffer,
		       (iter->head_page == cpu_buffer->commit_page) &&
		       (iter->head + length > rb_commit_index(cpu_buffer))))
		return;

	rb_update_iter_read_stamp(iter, event);

	iter->head += length;

	/* check for end of page padding */
	if ((iter->head >= rb_page_size(iter->head_page)) &&
	    (iter->head_page != cpu_buffer->commit_page))
		rb_advance_iter(iter);
}

static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
{
	return cpu_buffer->lost_events;
}
static struct ring_buffer_event *
rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
	       unsigned long *lost_events)
{
	struct ring_buffer_event *event;
	struct buffer_page *reader;
	int nr_loops = 0;

 again:
	/*
	 * We repeat when a time extend is encountered.
	 * Since the time extend is always attached to a data event,
	 * we should never loop more than once.
	 * (We never hit the following condition more than twice).
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
		return NULL;

	reader = rb_get_reader_page(cpu_buffer);
	if (!reader)
		return NULL;

	event = rb_reader_event(cpu_buffer);

	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			RB_WARN_ON(cpu_buffer, 1);
		/*
		 * Because the writer could be discarding every
		 * event it creates (which would probably be bad)
		 * if we were to go back to "again" then we may never
		 * catch up, and will trigger the warn on, or lock
		 * the box. Return the padding, and we will release
		 * the current locks, and try again.
		 */
		return event;

	case RINGBUF_TYPE_TIME_EXTEND:
		/* Internal data, OK to advance */
		rb_advance_reader(cpu_buffer);
		goto again;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		rb_advance_reader(cpu_buffer);
		goto again;

	case RINGBUF_TYPE_DATA:
		if (ts) {
			*ts = cpu_buffer->read_stamp + event->time_delta;
			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
							 cpu_buffer->cpu, ts);
		}
		if (lost_events)
			*lost_events = rb_lost_events(cpu_buffer);
		return event;

	default:
		BUG();
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_peek);
static struct ring_buffer_event *
rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer *buffer;
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	int nr_loops = 0;

	cpu_buffer = iter->cpu_buffer;
	buffer = cpu_buffer->buffer;

	/*
	 * Check if someone performed a consuming read to
	 * the buffer. A consuming read invalidates the iterator
	 * and we need to reset the iterator in this case.
	 */
	if (unlikely(iter->cache_read != cpu_buffer->read ||
		     iter->cache_reader_page != cpu_buffer->reader_page))
		rb_iter_reset(iter);

 again:
	if (ring_buffer_iter_empty(iter))
		return NULL;

	/*
	 * We repeat when a time extend is encountered.
	 * Since the time extend is always attached to a data event,
	 * we should never loop more than once.
	 * (We never hit the following condition more than twice).
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
		return NULL;

	if (rb_per_cpu_empty(cpu_buffer))
		return NULL;

	if (iter->head >= local_read(&iter->head_page->page->commit)) {
		rb_inc_iter(iter);
		goto again;
	}

	event = rb_iter_head_event(iter);

	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event)) {
			rb_inc_iter(iter);
			goto again;
		}
		rb_advance_iter(iter);
		return event;

	case RINGBUF_TYPE_TIME_EXTEND:
		/* Internal data, OK to advance */
		rb_advance_iter(iter);
		goto again;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		rb_advance_iter(iter);
		goto again;

	case RINGBUF_TYPE_DATA:
		if (ts) {
			*ts = iter->read_stamp + event->time_delta;
			ring_buffer_normalize_time_stamp(buffer,
							 cpu_buffer->cpu, ts);
		}
		return event;

	default:
		BUG();
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);

static inline int rb_ok_to_lock(void)
{
	/*
	 * If an NMI die dumps out the content of the ring buffer
	 * do not grab locks. We also permanently disable the ring
	 * buffer too. A one time deal is all you get from reading
	 * the ring buffer from an NMI.
	 */
	if (likely(!in_nmi()))
		return 1;

	tracing_off_permanent();
	return 0;
}

/**
 * ring_buffer_peek - peek at the next event to be read
 * @buffer: The ring buffer to read
 * @cpu: The cpu to peek at
 * @ts: The timestamp counter of this event.
 * @lost_events: a variable to store if events were lost (may be NULL)
 *
 * This will return the event that will be read next, but does
 * not consume the data.
 */
struct ring_buffer_event *
ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
		 unsigned long *lost_events)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	struct ring_buffer_event *event;
	unsigned long flags;
	int dolock;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return NULL;

	dolock = rb_ok_to_lock();
 again:
	local_irq_save(flags);
	if (dolock)
		raw_spin_lock(&cpu_buffer->reader_lock);
	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
	if (event && event->type_len == RINGBUF_TYPE_PADDING)
		rb_advance_reader(cpu_buffer);
	if (dolock)
		raw_spin_unlock(&cpu_buffer->reader_lock);
	local_irq_restore(flags);

	if (event && event->type_len == RINGBUF_TYPE_PADDING)
		goto again;

	return event;
}

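/*
 * Usage sketch (illustrative only, not part of this file): looking at
 * the next event on a CPU without consuming it.  "my_buffer" is a
 * hypothetical buffer created elsewhere with ring_buffer_alloc(); the
 * same event will be returned again by the next peek or consume.
 *
 *	struct ring_buffer_event *event;
 *	unsigned long lost;
 *	u64 ts;
 *
 *	event = ring_buffer_peek(my_buffer, 0, &ts, &lost);
 *	if (event)
 *		pr_info("next: %u bytes, ts=%llu, lost=%lu\n",
 *			ring_buffer_event_length(event),
 *			(unsigned long long)ts, lost);
 */
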
/**
 * ring_buffer_iter_peek - peek at the next event to be read
 * @iter: The ring buffer iterator
 * @ts: The timestamp counter of this event.
 *
 * This will return the event that will be read next, but does
 * not increment the iterator.
 */
struct ring_buffer_event *
ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
	struct ring_buffer_event *event;
	unsigned long flags;

 again:
	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	event = rb_iter_peek(iter, ts);
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	if (event && event->type_len == RINGBUF_TYPE_PADDING)
		goto again;

	return event;
}

/**
 * ring_buffer_consume - return an event and consume it
 * @buffer: The ring buffer to get the next event from
 * @cpu: the cpu to read the buffer from
 * @ts: a variable to store the timestamp (may be NULL)
 * @lost_events: a variable to store if events were lost (may be NULL)
 *
 * Returns the next event in the ring buffer, and that event is consumed.
 * Meaning that sequential reads will keep returning a different event,
 * and will eventually empty the ring buffer if the producer is slower.
 */
struct ring_buffer_event *
ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
		    unsigned long *lost_events)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event = NULL;
	unsigned long flags;
	int dolock;

	dolock = rb_ok_to_lock();

 again:
	/* might be called in atomic */
	preempt_disable();

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];
	local_irq_save(flags);
	if (dolock)
		raw_spin_lock(&cpu_buffer->reader_lock);

	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
	if (event) {
		cpu_buffer->lost_events = 0;
		rb_advance_reader(cpu_buffer);
	}

	if (dolock)
		raw_spin_unlock(&cpu_buffer->reader_lock);
	local_irq_restore(flags);

 out:
	preempt_enable();

	if (event && event->type_len == RINGBUF_TYPE_PADDING)
		goto again;

	return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_consume);

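/*
 * Usage sketch (illustrative only): draining every pending event from
 * one CPU buffer.  "my_buffer" and "handle_event" are hypothetical;
 * the loop ends when ring_buffer_consume() returns NULL, i.e. the
 * buffer is empty.
 *
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(my_buffer, cpu, &ts, NULL)))
 *		handle_event(ring_buffer_event_data(event), ts);
 */
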
/**
 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
 * @buffer: The ring buffer to read from
 * @cpu: The cpu buffer to iterate over
 *
 * This performs the initial preparations necessary to iterate
 * through the buffer. Memory is allocated, buffer recording
 * is disabled, and the iterator pointer is returned to the caller.
 *
 * Disabling buffer recording prevents the reading from being
 * corrupted. This is not a consuming read, so a producer is not
 * expected.
 *
 * After a sequence of ring_buffer_read_prepare calls, the user is
 * expected to make at least one call to ring_buffer_read_prepare_sync.
 * Afterwards, ring_buffer_read_start is invoked to get things going
 * for real.
 *
 * This overall must be paired with ring_buffer_read_finish.
 */
struct ring_buffer_iter *
ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_iter *iter;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return NULL;

	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	cpu_buffer = buffer->buffers[cpu];

	iter->cpu_buffer = cpu_buffer;

	atomic_inc(&cpu_buffer->record_disabled);

	return iter;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);

/**
 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
 *
 * All previously invoked ring_buffer_read_prepare calls to prepare
 * iterators will be synchronized. Afterwards, ring_buffer_read_start
 * calls on those iterators are allowed.
 */
void
ring_buffer_read_prepare_sync(void)
{
	synchronize_sched();
}
EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);

/**
 * ring_buffer_read_start - start a non consuming read of the buffer
 * @iter: The iterator returned by ring_buffer_read_prepare
 *
 * This finalizes the startup of an iteration through the buffer.
 * The iterator comes from a call to ring_buffer_read_prepare and
 * an intervening ring_buffer_read_prepare_sync must have been
 * performed.
 *
 * Must be paired with ring_buffer_read_finish.
 */
void
ring_buffer_read_start(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;

	if (!iter)
		return;

	cpu_buffer = iter->cpu_buffer;

	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	arch_spin_lock(&cpu_buffer->lock);
	rb_iter_reset(iter);
	arch_spin_unlock(&cpu_buffer->lock);
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
EXPORT_SYMBOL_GPL(ring_buffer_read_start);

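/*
 * Usage sketch (illustrative only): the prepare/sync/start sequence for
 * a non consuming read of several CPU buffers.  "my_buffer" and "iters"
 * are hypothetical; note the single ring_buffer_read_prepare_sync()
 * call covering all of the prepared iterators.
 *
 *	struct ring_buffer_iter *iters[NR_CPUS];
 *	int cpu;
 *
 *	for_each_online_cpu(cpu)
 *		iters[cpu] = ring_buffer_read_prepare(my_buffer, cpu);
 *	ring_buffer_read_prepare_sync();
 *	for_each_online_cpu(cpu)
 *		ring_buffer_read_start(iters[cpu]);
 */
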
/**
 * ring_buffer_read_finish - finish reading the iterator of the buffer
 * @iter: The iterator retrieved by ring_buffer_read_prepare
 *
 * This re-enables the recording to the buffer, and frees the
 * iterator.
 */
void
ring_buffer_read_finish(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	atomic_dec(&cpu_buffer->record_disabled);
	kfree(iter);
}
EXPORT_SYMBOL_GPL(ring_buffer_read_finish);

/**
 * ring_buffer_read - read the next item in the ring buffer by the iterator
 * @iter: The ring buffer iterator
 * @ts: The time stamp of the event read.
 *
 * This reads the next event in the ring buffer and increments the iterator.
 */
struct ring_buffer_event *
ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer_event *event;
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
	unsigned long flags;

	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 again:
	event = rb_iter_peek(iter, ts);
	if (!event)
		goto out;

	if (event->type_len == RINGBUF_TYPE_PADDING)
		goto again;

	rb_advance_iter(iter);
 out:
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_read);

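/*
 * Usage sketch (illustrative only): a complete non consuming read of one
 * CPU buffer, pairing the prepare/sync/start sequence above with
 * ring_buffer_read() and ring_buffer_read_finish().  "my_buffer" and
 * "handle_event" are hypothetical.
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_prepare(my_buffer, cpu);
 *	if (!iter)
 *		return;
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *	while ((event = ring_buffer_read(iter, &ts)))
 *		handle_event(ring_buffer_event_data(event), ts);
 *	ring_buffer_read_finish(iter);
 */
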
/**
 * ring_buffer_size - return the size of the ring buffer (in bytes)
 * @buffer: The ring buffer.
 */
unsigned long ring_buffer_size(struct ring_buffer *buffer)
{
	return BUF_PAGE_SIZE * buffer->pages;
}
EXPORT_SYMBOL_GPL(ring_buffer_size);

static void
rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
{
	rb_head_page_deactivate(cpu_buffer);

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages, struct buffer_page, list);
	local_set(&cpu_buffer->head_page->write, 0);
	local_set(&cpu_buffer->head_page->entries, 0);
	local_set(&cpu_buffer->head_page->page->commit, 0);

	cpu_buffer->head_page->read = 0;

	cpu_buffer->tail_page = cpu_buffer->head_page;
	cpu_buffer->commit_page = cpu_buffer->head_page;

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
	local_set(&cpu_buffer->reader_page->write, 0);
	local_set(&cpu_buffer->reader_page->entries, 0);
	local_set(&cpu_buffer->reader_page->page->commit, 0);
	cpu_buffer->reader_page->read = 0;

	local_set(&cpu_buffer->commit_overrun, 0);
	local_set(&cpu_buffer->entries_bytes, 0);
	local_set(&cpu_buffer->overrun, 0);
	local_set(&cpu_buffer->entries, 0);
	local_set(&cpu_buffer->committing, 0);
	local_set(&cpu_buffer->commits, 0);
	cpu_buffer->read = 0;
	cpu_buffer->read_bytes = 0;

	cpu_buffer->write_stamp = 0;
	cpu_buffer->read_stamp = 0;

	cpu_buffer->lost_events = 0;
	cpu_buffer->last_overrun = 0;

	rb_head_page_activate(cpu_buffer);
}

/**
 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
 * @buffer: The ring buffer to reset a per cpu buffer of
 * @cpu: The CPU buffer to be reset
 */
void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	unsigned long flags;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	atomic_inc(&cpu_buffer->record_disabled);

	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
		goto out;

	arch_spin_lock(&cpu_buffer->lock);

	rb_reset_cpu(cpu_buffer);

	arch_spin_unlock(&cpu_buffer->lock);

 out:
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	atomic_dec(&cpu_buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);

/**
 * ring_buffer_reset - reset a ring buffer
 * @buffer: The ring buffer to reset all cpu buffers
 */
void ring_buffer_reset(struct ring_buffer *buffer)
{
	int cpu;

	for_each_buffer_cpu(buffer, cpu)
		ring_buffer_reset_cpu(buffer, cpu);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset);

/**
 * ring_buffer_empty - is the ring buffer empty?
 * @buffer: The ring buffer to test
 */
int ring_buffer_empty(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;
	int dolock;
	int cpu;
	int ret;

	dolock = rb_ok_to_lock();

	/* yes this is racy, but if you don't like the race, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		local_irq_save(flags);
		if (dolock)
			raw_spin_lock(&cpu_buffer->reader_lock);
		ret = rb_per_cpu_empty(cpu_buffer);
		if (dolock)
			raw_spin_unlock(&cpu_buffer->reader_lock);
		local_irq_restore(flags);

		if (!ret)
			return 0;
	}

	return 1;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty);

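/*
 * Usage sketch (illustrative only): since the check is racy by design,
 * callers typically treat the result as a hint, e.g. to decide whether
 * a reader should go to sleep.  "my_buffer" and "my_waitq" are
 * hypothetical.
 *
 *	if (ring_buffer_empty(my_buffer))
 *		wait_event_interruptible(my_waitq,
 *					 !ring_buffer_empty(my_buffer));
 */
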
/**
 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
 * @buffer: The ring buffer
 * @cpu: The CPU buffer to test
 */
int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;
	int dolock;
	int ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 1;

	dolock = rb_ok_to_lock();

	cpu_buffer = buffer->buffers[cpu];
	local_irq_save(flags);
	if (dolock)
		raw_spin_lock(&cpu_buffer->reader_lock);
	ret = rb_per_cpu_empty(cpu_buffer);
	if (dolock)
		raw_spin_unlock(&cpu_buffer->reader_lock);
	local_irq_restore(flags);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);

#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
/**
 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
 * @buffer_a: One buffer to swap with
 * @buffer_b: The other buffer to swap with
 * @cpu: the CPU of the buffers to swap
 *
 * This function is useful for tracers that want to take a "snapshot"
 * of a CPU buffer and have another backup buffer lying around.
 * It is expected that the tracer handles the cpu buffer not being
 * used at the moment.
 */
int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
			 struct ring_buffer *buffer_b, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer_a;
	struct ring_buffer_per_cpu *cpu_buffer_b;
	int ret = -EINVAL;

	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
		goto out;

	/* At least make sure the two buffers are somewhat the same */
	if (buffer_a->pages != buffer_b->pages)
		goto out;

	ret = -EAGAIN;

	if (ring_buffer_flags != RB_BUFFERS_ON)
		goto out;

	if (atomic_read(&buffer_a->record_disabled))
		goto out;

	if (atomic_read(&buffer_b->record_disabled))
		goto out;

	cpu_buffer_a = buffer_a->buffers[cpu];
	cpu_buffer_b = buffer_b->buffers[cpu];

	if (atomic_read(&cpu_buffer_a->record_disabled))
		goto out;

	if (atomic_read(&cpu_buffer_b->record_disabled))
		goto out;

	/*
	 * We can't do a synchronize_sched here because this
	 * function can be called in atomic context.
	 * Normally this will be called from the same CPU as cpu.
	 * If not it's up to the caller to protect this.
	 */
	atomic_inc(&cpu_buffer_a->record_disabled);
	atomic_inc(&cpu_buffer_b->record_disabled);

	ret = -EBUSY;
	if (local_read(&cpu_buffer_a->committing))
		goto out_dec;
	if (local_read(&cpu_buffer_b->committing))
		goto out_dec;

	buffer_a->buffers[cpu] = cpu_buffer_b;
	buffer_b->buffers[cpu] = cpu_buffer_a;

	cpu_buffer_b->buffer = buffer_a;
	cpu_buffer_a->buffer = buffer_b;

	ret = 0;

 out_dec:
	atomic_dec(&cpu_buffer_a->record_disabled);
	atomic_dec(&cpu_buffer_b->record_disabled);
 out:
	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);

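/*
 * Usage sketch (illustrative only): taking a "snapshot" by swapping the
 * live buffer's CPU buffer with a spare one.  "live" and "snap" are
 * hypothetical buffers of the same size; after a successful swap, the
 * old contents can be read out of "snap" at leisure while recording
 * continues into "live".
 *
 *	if (ring_buffer_swap_cpu(live, snap, cpu) == 0) {
 *		... consume events from "snap" with ring_buffer_consume() ...
 *	}
 */
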
#endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */

/**
 * ring_buffer_alloc_read_page - allocate a page to read from buffer
 * @buffer: the buffer to allocate for.
 * @cpu: the cpu to allocate the page for
 *
 * This function is used in conjunction with ring_buffer_read_page.
 * When reading a full page from the ring buffer, these functions
 * can be used to speed up the process. The calling function should
 * allocate a few pages first with this function. Then when it
 * needs to get pages from the ring buffer, it passes the result
 * of this function into ring_buffer_read_page, which will swap
 * the page that was allocated, with the read page of the buffer.
 *
 * Returns:
 *  The page allocated, or NULL on error.
 */
void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
{
	struct buffer_data_page *bpage;
	struct page *page;

	page = alloc_pages_node(cpu_to_node(cpu),
				GFP_KERNEL | __GFP_NORETRY, 0);
	if (!page)
		return NULL;

	bpage = page_address(page);

	rb_init_page(bpage);

	return bpage;
}
EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);

/**
 * ring_buffer_free_read_page - free an allocated read page
 * @buffer: the buffer the page was allocated for
 * @data: the page to free
 *
 * Free a page allocated from ring_buffer_alloc_read_page.
 */
void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
{
	free_page((unsigned long)data);
}
EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);

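/*
 * Usage sketch (illustrative only): the full allocate/read/free cycle.
 * "my_buffer" is hypothetical; the page is passed by reference because
 * ring_buffer_read_page() may swap it with the buffer's own reader page.
 *
 *	void *rpage;
 *	int ret;
 *
 *	rpage = ring_buffer_alloc_read_page(my_buffer, cpu);
 *	if (!rpage)
 *		return -ENOMEM;
 *	ret = ring_buffer_read_page(my_buffer, &rpage, PAGE_SIZE, cpu, 0);
 *	if (ret >= 0)
 *		... process the data starting at offset "ret" ...
 *	ring_buffer_free_read_page(my_buffer, rpage);
 */
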
/**
 * ring_buffer_read_page - extract a page from the ring buffer
 * @buffer: buffer to extract from
 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
 * @len: amount to extract
 * @cpu: the cpu of the buffer to extract
 * @full: should the extraction only happen when the page is full.
 *
 * This function will pull out a page from the ring buffer and consume it.
 * @data_page must be the address of the variable that was returned
 * from ring_buffer_alloc_read_page. This is because the page might be used
 * to swap with a page in the ring buffer.
 *
 * for example:
 *	rpage = ring_buffer_alloc_read_page(buffer, cpu);
 *	if (!rpage)
 *		return error;
 *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
 *	if (ret >= 0)
 *		process_page(rpage, ret);
 *
 * When @full is set, the extraction will only succeed if the
 * writer is off the reader page.
 *
 * Note: it is up to the calling functions to handle sleeps and wakeups.
 *  The ring buffer can be used anywhere in the kernel and can not
 *  blindly call wake_up. The layer that uses the ring buffer must be
 *  responsible for that.
 *
 * Returns:
 *  >=0 if data has been transferred, returns the offset of consumed data.
 *  <0 if no data has been transferred.
 */
int ring_buffer_read_page(struct ring_buffer *buffer,
			  void **data_page, size_t len, int cpu, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	struct ring_buffer_event *event;
	struct buffer_data_page *bpage;
	struct buffer_page *reader;
	unsigned long missed_events;
	unsigned long flags;
	unsigned int commit;
	unsigned int read;
	u64 save_timestamp;
	int ret = -1;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	/*
	 * If len is not big enough to hold the page header, then
	 * we can not copy anything.
	 */
	if (len <= BUF_PAGE_HDR_SIZE)
		goto out;

	len -= BUF_PAGE_HDR_SIZE;

	if (!data_page)
		goto out;

	bpage = *data_page;
	if (!bpage)
		goto out;

	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	reader = rb_get_reader_page(cpu_buffer);
	if (!reader)
		goto out_unlock;

	event = rb_reader_event(cpu_buffer);

	read = reader->read;
	commit = rb_page_commit(reader);

	/* Check if any events were dropped */
	missed_events = cpu_buffer->lost_events;

	/*
	 * If this page has been partially read or
	 * if len is not big enough to read the rest of the page or
	 * a writer is still on the page, then
	 * we must copy the data from the page to the buffer.
	 * Otherwise, we can simply swap the page with the one passed in.
	 */
	if (read || (len < (commit - read)) ||
	    cpu_buffer->reader_page == cpu_buffer->commit_page) {
		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
		unsigned int rpos = read;
		unsigned int pos = 0;
		unsigned int size;

		if (full)
			goto out_unlock;

		if (len > (commit - read))
			len = (commit - read);

		/* Always keep the time extend and data together */
		size = rb_event_ts_length(event);

		if (len < size)
			goto out_unlock;

		/* save the current timestamp, since the user will need it */
		save_timestamp = cpu_buffer->read_stamp;

		/* Need to copy one event at a time */
		do {
			/*
			 * We need the size of one event, because
			 * rb_advance_reader only advances by one event,
			 * whereas rb_event_ts_length may include the size of
			 * one or two events.
			 * We have already ensured there's enough space if this
			 * is a time extend.
			 */
			size = rb_event_length(event);
			memcpy(bpage->data + pos, rpage->data + rpos, size);

			len -= size;

			rb_advance_reader(cpu_buffer);
			rpos = reader->read;
			pos += size;

			if (rpos >= commit)
				break;

			event = rb_reader_event(cpu_buffer);
			/* Always keep the time extend and data together */
			size = rb_event_ts_length(event);
		} while (len >= size);

		/* update bpage */
		local_set(&bpage->commit, pos);
		bpage->time_stamp = save_timestamp;

		/* we copied everything to the beginning */
		read = 0;
	} else {
		/* update the entry counter */
		cpu_buffer->read += rb_page_entries(reader);
		cpu_buffer->read_bytes += BUF_PAGE_SIZE;

		/* swap the pages */
		rb_init_page(bpage);
		bpage = reader->page;
		reader->page = *data_page;
		local_set(&reader->write, 0);
		local_set(&reader->entries, 0);
		reader->read = 0;
		*data_page = bpage;

		/*
		 * Use the real_end for the data size,
		 * This gives us a chance to store the lost events
		 * on the page.
		 */
		if (reader->real_end)
			local_set(&bpage->commit, reader->real_end);
	}
	ret = read;

	cpu_buffer->lost_events = 0;

	commit = local_read(&bpage->commit);
	/*
	 * Set a flag in the commit field if we lost events
	 */
	if (missed_events) {
		/*
		 * If there is room at the end of the page to save the
		 * missed events, then record it there.
		 */
		if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
			memcpy(&bpage->data[commit], &missed_events,
			       sizeof(missed_events));
			local_add(RB_MISSED_STORED, &bpage->commit);
			commit += sizeof(missed_events);
		}
		local_add(RB_MISSED_EVENTS, &bpage->commit);
	}

	/*
	 * This page may be off to user land. Zero it out here.
	 */
	if (commit < BUF_PAGE_SIZE)
		memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);

 out_unlock:
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

 out:
	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_page);

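/*
 * Reader-side sketch (illustrative only): decoding the commit field of
 * a page returned by ring_buffer_read_page().  RB_MISSED_EVENTS, set
 * above, tells the consumer that events were dropped before this page,
 * and RB_MISSED_STORED tells it that the dropped-event count was
 * appended to the end of the page data.  "rpage" is the page obtained
 * from the read above.
 *
 *	struct buffer_data_page *bpage = rpage;
 *	unsigned long commit = local_read(&bpage->commit);
 *
 *	if (commit & RB_MISSED_EVENTS)
 *		pr_warn("events were lost before this page\n");
 *	if (commit & RB_MISSED_STORED)
 *		... the missed-event count sits at the end of the data ...
 */
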
#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu)
{
	struct ring_buffer *buffer =
		container_of(self, struct ring_buffer, cpu_notify);
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (cpumask_test_cpu(cpu, buffer->cpumask))
			return NOTIFY_OK;

		buffer->buffers[cpu] =
			rb_allocate_cpu_buffer(buffer, cpu);
		if (!buffer->buffers[cpu]) {
			WARN(1, "failed to allocate ring buffer on CPU %ld\n",
			     cpu);
			return NOTIFY_OK;
		}
		smp_wmb();
		cpumask_set_cpu(cpu, buffer->cpumask);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		/*
		 * Do nothing.
		 * If we were to free the buffer, then the user would
		 * lose any trace that was in the buffer.
		 */
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
#endif