/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/fs.h>

#include <asm/local.h>
#include "trace.h"

/*
 * The ring buffer header is special. We must keep it up to date manually.
 */
int ring_buffer_print_entry_header(struct trace_seq *s)
{
        int ret;

        ret = trace_seq_printf(s, "# compressed entry header\n");
        ret = trace_seq_printf(s, "\ttype_len : 5 bits\n");
        ret = trace_seq_printf(s, "\ttime_delta : 27 bits\n");
        ret = trace_seq_printf(s, "\tarray : 32 bits\n");
        ret = trace_seq_printf(s, "\n");
        ret = trace_seq_printf(s, "\tpadding : type == %d\n",
                               RINGBUF_TYPE_PADDING);
        ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
                               RINGBUF_TYPE_TIME_EXTEND);
        ret = trace_seq_printf(s, "\tdata max type_len == %d\n",
                               RINGBUF_TYPE_DATA_TYPE_LEN_MAX);

        return ret;
}
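/*
 * Illustrative note (not part of the original file): the "compressed entry
 * header" printed above is one 32-bit word, 5 bits of type_len plus 27 bits
 * of time_delta, followed by the 32-bit array[0] word. The sample values
 * below are made up; the snippet is kept out of the build.
 */
#if 0
static void example_entry_header_layout(void)
{
        struct ring_buffer_event event = {
                .type_len   = 3,        /* payload length = 3 * 4 = 12 bytes */
                .time_delta = 100,      /* delta from the previous event */
        };

        /* type_len (5 bits) + time_delta (27 bits) pack into one 32-bit word. */
        BUILD_BUG_ON(offsetof(struct ring_buffer_event, array) != sizeof(u32));
        (void)event;
}
#endif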
/*
 * The ring buffer is made up of a list of pages. A separate list of pages is
 * allocated for each CPU. A writer may only write to a buffer that is
 * associated with the CPU it is currently executing on. A reader may read
 * from any per cpu buffer.
 *
 * The reader is special. For each per cpu buffer, the reader has its own
 * reader page. When a reader has read the entire reader page, this reader
 * page is swapped with another page in the ring buffer.
 *
 * Now, as long as the writer is off the reader page, the reader can do what
 * ever it wants with that page. The writer will never write to that page
 * again (as long as it is out of the ring buffer).
 *
 * Here's some silly ASCII art.
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |-->|   |-->|   |
 *      |            +---+   +---+   +---+
 *      |                               |
 *      |                               |
 *      +------------------------------+
 *
 *
 *   +------+
 *   |buffer|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |   |   |-->|   |
 *      |   New      +---+   +---+   +---+
 *      |  Reader------^               |
 *      |   page                       |
 *      +------------------------------+
 *
 *
 * After we make this swap, the reader can hand this page off to the splice
 * code and be done with it. It can even allocate a new page if it needs to
 * and swap that into the ring buffer.
 *
 * We will be using cmpxchg soon to make all this lockless.
 *
 */

/*
 * A fast way to enable or disable all ring buffers is to
 * call tracing_on or tracing_off. Turning off the ring buffers
 * prevents all ring buffers from being recorded to.
 * Turning this switch on, makes it OK to write to the
 * ring buffer, if the ring buffer is enabled itself.
 *
 * There are three layers that must be on in order to write
 * to the ring buffer.
 *
 * 1) This global flag must be set.
 * 2) The ring buffer must be enabled for recording.
 * 3) The per cpu buffer must be enabled for recording.
 *
 * In case of an anomaly, this global flag has a bit set that
 * will permanently disable all ring buffers.
 */

/*
 * Global flag to disable all recording to ring buffers
 * This has two bits: ON, DISABLED
 *
 *   ON    DISABLED
 *  ----   ----------
 *    0        0      : ring buffers are off
 *    1        0      : ring buffers are on
 *    X        1      : ring buffers are permanently disabled
 */
enum {
        RB_BUFFERS_ON_BIT       = 0,
        RB_BUFFERS_DISABLED_BIT = 1,
};

enum {
        RB_BUFFERS_ON           = 1 << RB_BUFFERS_ON_BIT,
        RB_BUFFERS_DISABLED     = 1 << RB_BUFFERS_DISABLED_BIT,
};

static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;

#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)

/**
 * tracing_on - enable all tracing buffers
 *
 * This function enables all tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
        set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * tracing_off - turn off all tracing buffers
 *
 * This function stops all tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
        clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
}
EXPORT_SYMBOL_GPL(tracing_off);

/**
 * tracing_off_permanent - permanently disable ring buffers
 *
 * This function, once called, will disable all ring buffers
 * permanently.
 */
void tracing_off_permanent(void)
{
        set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
        return ring_buffer_flags == RB_BUFFERS_ON;
}
EXPORT_SYMBOL_GPL(tracing_is_on);
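/*
 * Illustrative sketch (not part of the original file): how a caller might
 * bracket a region with the global switch above. tracing_off() only clears
 * RB_BUFFERS_ON_BIT, so the individual ring buffers and per-cpu buffers keep
 * whatever enabled state they already had. The function name below is made
 * up for the example and the snippet is kept out of the build.
 */
#if 0
static void example_pause_all_recording(void)
{
        int was_on = tracing_is_on();

        /* Stop all ring buffer recording while we poke around. */
        tracing_off();

        /* ... inspect or dump trace data here ... */

        /* Only re-enable if recording was on when we started. */
        if (was_on)
                tracing_on();
}
#endif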
#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT            4U
#define RB_MAX_SMALL_DATA       (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
#define RB_EVNT_MIN_SIZE        8U      /* two 32bit words */

#if !defined(CONFIG_64BIT) || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
# define RB_FORCE_8BYTE_ALIGNMENT       0
# define RB_ARCH_ALIGNMENT              RB_ALIGNMENT
#else
# define RB_FORCE_8BYTE_ALIGNMENT       1
# define RB_ARCH_ALIGNMENT              8U
#endif

/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX

enum {
        RB_LEN_TIME_EXTEND = 8,
        RB_LEN_TIME_STAMP = 16,
};

#define skip_time_extend(event) \
        ((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))

static inline int rb_null_event(struct ring_buffer_event *event)
{
        return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
}

static void rb_event_set_padding(struct ring_buffer_event *event)
{
        /* padding has a NULL time_delta */
        event->type_len = RINGBUF_TYPE_PADDING;
        event->time_delta = 0;
}

static unsigned
rb_event_data_length(struct ring_buffer_event *event)
{
        unsigned length;

        if (event->type_len)
                length = event->type_len * RB_ALIGNMENT;
        else
                length = event->array[0];
        return length + RB_EVNT_HDR_SIZE;
}

/*
 * Return the length of the given event. Will return
 * the length of the time extend if the event is a
 * time extend.
 */
static inline unsigned
rb_event_length(struct ring_buffer_event *event)
{
        switch (event->type_len) {
        case RINGBUF_TYPE_PADDING:
                if (rb_null_event(event))
                        /* undefined */
                        return -1;
                return event->array[0] + RB_EVNT_HDR_SIZE;

        case RINGBUF_TYPE_TIME_EXTEND:
                return RB_LEN_TIME_EXTEND;

        case RINGBUF_TYPE_TIME_STAMP:
                return RB_LEN_TIME_STAMP;

        case RINGBUF_TYPE_DATA:
                return rb_event_data_length(event);
        default:
                BUG();
        }
        /* not hit */
        return 0;
}

/*
 * Return total length of time extend and data,
 * or just the event length for all other events.
 */
static inline unsigned
rb_event_ts_length(struct ring_buffer_event *event)
{
        unsigned len = 0;

        if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
                /* time extends include the data event after it */
                len = RB_LEN_TIME_EXTEND;
                event = skip_time_extend(event);
        }
        return len + rb_event_length(event);
}

/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 *
 * Returns the size of the data load of a data event.
 * If the event is something other than a data event, it
 * returns the size of the event itself. With the exception
 * of a TIME EXTEND, where it still returns the size of the
 * data load of the data event after it.
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
        unsigned length;

        if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
                event = skip_time_extend(event);

        length = rb_event_length(event);
        if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
                return length;
        length -= RB_EVNT_HDR_SIZE;
        if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
                length -= sizeof(event->array[0]);
        return length;
}
EXPORT_SYMBOL_GPL(ring_buffer_event_length);

/* inline for ring buffer fast paths */
static void *
rb_event_data(struct ring_buffer_event *event)
{
        if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
                event = skip_time_extend(event);
        BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
        /* If length is in len field, then array[0] has the data */
        if (event->type_len)
                return (void *)&event->array[0];
        /* Otherwise length is in array[0] and array[1] has the data */
        return (void *)&event->array[1];
}

/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
        return rb_event_data(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_data);
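/*
 * Illustrative sketch (not part of the original file): how a consumer
 * typically pairs ring_buffer_event_data() with ring_buffer_event_length().
 * ring_buffer_consume() is assumed here from linux/ring_buffer.h; the
 * payload layout (struct my_entry) is made up for the example. The snippet
 * is kept out of the build.
 */
#if 0
struct my_entry {
        unsigned long   ip;
        char            msg[32];
};

static void example_read_one_event(struct ring_buffer *buffer, int cpu)
{
        struct ring_buffer_event *event;
        struct my_entry *entry;
        unsigned long lost;
        u64 ts;

        event = ring_buffer_consume(buffer, cpu, &ts, &lost);
        if (!event)
                return;

        /* Payload size only; headers and time extends are already skipped. */
        if (ring_buffer_event_length(event) < sizeof(*entry))
                return;

        entry = ring_buffer_event_data(event);
        pr_info("event at %llu (lost %lu): ip=%pS msg=%s\n",
                (unsigned long long)ts, lost, (void *)entry->ip, entry->msg);
}
#endif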
#define for_each_buffer_cpu(buffer, cpu)                \
        for_each_cpu(cpu, buffer->cpumask)

#define TS_SHIFT        27
#define TS_MASK         ((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST   (~TS_MASK)

/* Flag when events were overwritten */
#define RB_MISSED_EVENTS        (1 << 31)
/* Missed count stored at end */
#define RB_MISSED_STORED        (1 << 30)

struct buffer_data_page {
        u64             time_stamp;     /* page time stamp */
        local_t         commit;         /* write committed index */
        unsigned char   data[];         /* data of buffer page */
};

/*
 * Note, the buffer_page list must be first. The buffer pages
 * are allocated in cache lines, which means that each buffer
 * page will be at the beginning of a cache line, and thus
 * the least significant bits will be zero. We use this to
 * add flags in the list struct pointers, to make the ring buffer
 * lockless.
 */
struct buffer_page {
        struct list_head list;          /* list of buffer pages */
        local_t          write;         /* index for next write */
        unsigned         read;          /* index for next read */
        local_t          entries;       /* entries on this page */
        unsigned long    real_end;      /* real end of data */
        struct buffer_data_page *page;  /* Actual data page */
};

/*
 * The buffer page counters, write and entries, must be reset
 * atomically when crossing page boundaries. To synchronize this
 * update, two counters are inserted into the number. One is
 * the actual counter for the write position or count on the page.
 *
 * The other is a counter of updaters. Before an update happens
 * the update partition of the counter is incremented. This will
 * allow the updater to update the counter atomically.
 *
 * The counter is 20 bits, and the state data is 12.
 */
#define RB_WRITE_MASK           0xfffff
#define RB_WRITE_INTCNT         (1 << 20)
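/*
 * Illustrative sketch (not part of the original file): how the split write
 * counter above behaves. The low 20 bits (RB_WRITE_MASK) hold the write
 * index on the page, the upper bits count nested updaters. Variable names
 * and values are made up; the snippet is kept out of the build.
 */
#if 0
static void example_split_counter(struct buffer_page *bpage)
{
        unsigned long cur, index, updaters;

        /* Bump the updater count; local_add_return() gives the new value. */
        cur = local_add_return(RB_WRITE_INTCNT, &bpage->write);

        index    = cur & RB_WRITE_MASK; /* write position, untouched by the add */
        updaters = cur >> 20;           /* nested updaters, including us */

        /*
         * e.g. cur == 0x200020 means the write index is 0x20 and two
         * updaters (including this one) have announced themselves.
         */
        (void)index;
        (void)updaters;
}
#endif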
static void rb_init_page(struct buffer_data_page *bpage)
{
        local_set(&bpage->commit, 0);
}

/**
 * ring_buffer_page_len - the size of data on the page.
 * @page: The page to read
 *
 * Returns the amount of data on the page, including buffer page header.
 */
size_t ring_buffer_page_len(void *page)
{
        return local_read(&((struct buffer_data_page *)page)->commit)
                + BUF_PAGE_HDR_SIZE;
}

/*
 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 * this issue out.
 */
static void free_buffer_page(struct buffer_page *bpage)
{
        free_page((unsigned long)bpage->page);
        kfree(bpage);
}

/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline int test_time_stamp(u64 delta)
{
        if (delta & TS_DELTA_TEST)
                return 1;
        return 0;
}
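/*
 * Illustrative sketch (not part of the original file): TS_DELTA_TEST is the
 * complement of the 27-bit mask, so test_time_stamp() reports 1 as soon as a
 * delta needs more than 27 bits and the writer must emit a time extend. The
 * values are made up; the snippet is kept out of the build.
 */
#if 0
static void example_test_time_stamp(void)
{
        /* 2^27 - 1 still fits in the header's 27-bit time_delta field. */
        WARN_ON(test_time_stamp((1ULL << 27) - 1) != 0);

        /* 2^27 does not fit, so a TIME_EXTEND event would be needed. */
        WARN_ON(test_time_stamp(1ULL << 27) != 1);
}
#endif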
#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)

/* Max payload is BUF_PAGE_SIZE - header (8bytes) */
#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))

int ring_buffer_print_page_header(struct trace_seq *s)
{
        struct buffer_data_page field;
        int ret;

        ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
                               "offset:0;\tsize:%u;\tsigned:%u;\n",
                               (unsigned int)sizeof(field.time_stamp),
                               (unsigned int)is_signed_type(u64));

        ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
                               "offset:%u;\tsize:%u;\tsigned:%u;\n",
                               (unsigned int)offsetof(typeof(field), commit),
                               (unsigned int)sizeof(field.commit),
                               (unsigned int)is_signed_type(long));

        ret = trace_seq_printf(s, "\tfield: int overwrite;\t"
                               "offset:%u;\tsize:%u;\tsigned:%u;\n",
                               (unsigned int)offsetof(typeof(field), commit),
                               1,
                               (unsigned int)is_signed_type(long));

        ret = trace_seq_printf(s, "\tfield: char data;\t"
                               "offset:%u;\tsize:%u;\tsigned:%u;\n",
                               (unsigned int)offsetof(typeof(field), data),
                               (unsigned int)BUF_PAGE_SIZE,
                               (unsigned int)is_signed_type(char));

        return ret;
}

/*
 * head_page == tail_page && head == tail then buffer is empty.
 */
struct ring_buffer_per_cpu {
        int                             cpu;
        atomic_t                        record_disabled;
        struct ring_buffer              *buffer;
        raw_spinlock_t                  reader_lock;    /* serialize readers */
        arch_spinlock_t                 lock;
        struct lock_class_key           lock_key;
        struct list_head                *pages;
        struct buffer_page              *head_page;     /* read from head */
        struct buffer_page              *tail_page;     /* write to tail */
        struct buffer_page              *commit_page;   /* committed pages */
        struct buffer_page              *reader_page;
        unsigned long                   lost_events;
        unsigned long                   last_overrun;
        local_t                         entries_bytes;
        local_t                         commit_overrun;
        local_t                         overrun;
        local_t                         entries;
        local_t                         committing;
        local_t                         commits;
        unsigned long                   read;
        unsigned long                   read_bytes;
        u64                             write_stamp;
        u64                             read_stamp;
};

struct ring_buffer {
        unsigned                        pages;
        unsigned                        flags;
        int                             cpus;
        atomic_t                        record_disabled;
        cpumask_var_t                   cpumask;

        struct lock_class_key           *reader_lock_key;

        struct mutex                    mutex;

        struct ring_buffer_per_cpu      **buffers;

#ifdef CONFIG_HOTPLUG_CPU
        struct notifier_block           cpu_notify;
#endif
        u64                             (*clock)(void);
};

struct ring_buffer_iter {
        struct ring_buffer_per_cpu      *cpu_buffer;
        unsigned long                   head;
        struct buffer_page              *head_page;
        struct buffer_page              *cache_reader_page;
        unsigned long                   cache_read;
        u64                             read_stamp;
};

/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(b, cond)                                             \
        ({                                                              \
                int _____ret = unlikely(cond);                          \
                if (_____ret) {                                         \
                        if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
                                struct ring_buffer_per_cpu *__b =       \
                                        (void *)b;                      \
                                atomic_inc(&__b->buffer->record_disabled); \
                        } else                                          \
                                atomic_inc(&b->record_disabled);        \
                        WARN_ON(1);                                     \
                }                                                       \
                _____ret;                                               \
        })

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

static inline u64 rb_time_stamp(struct ring_buffer *buffer)
{
        /* shift to debug/test normalization and TIME_EXTENTS */
        return buffer->clock() << DEBUG_SHIFT;
}

u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
{
        u64 time;

        preempt_disable_notrace();
        time = rb_time_stamp(buffer);
        preempt_enable_no_resched_notrace();

        return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);

void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
                                      int cpu, u64 *ts)
{
        /* Just stupid testing the normalize function and deltas */
        *ts >>= DEBUG_SHIFT;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);

/*
 * Making the ring buffer lockless makes things tricky.
 * Although writes only happen on the CPU that they are on,
 * and they only need to worry about interrupts. Reads can
 * happen on any CPU.
 *
 * The reader page is always off the ring buffer, but when the
 * reader finishes with a page, it needs to swap its page with
 * a new one from the buffer. The reader needs to take from
 * the head (writes go to the tail). But if a writer is in overwrite
 * mode and wraps, it must push the head page forward.
 *
 * Here lies the problem.
 *
 * The reader must be careful to replace only the head page, and
 * not another one. As described at the top of the file in the
 * ASCII art, the reader sets its old page to point to the next
 * page after head. It then sets the page after head to point to
 * the old reader page. But if the writer moves the head page
 * during this operation, the reader could end up with the tail.
 *
 * We use cmpxchg to help prevent this race. We also do something
 * special with the page before head. We set the LSB to 1.
 *
 * When the writer must push the page forward, it will clear the
 * bit that points to the head page, move the head, and then set
 * the bit that points to the new head page.
 *
 * We also don't want an interrupt coming in and moving the head
 * page on another writer. Thus we use the second LSB to catch
 * that too. Thus:
 *
 * head->list->prev->next        bit 1          bit 0
 *                              -------        -------
 * Normal page                     0              0
 * Points to head page             0              1
 * New head page                   1              0
 *
 * Note we can not trust the prev pointer of the head page, because:
 *
 * +----+       +-----+        +-----+
 * |    |------>|  T  |---X--->|  N  |
 * |    |<------|     |        |     |
 * +----+       +-----+        +-----+
 *    ^                          ^ |
 *    |          +-----+         | |
 *    +----------|  R  |----------+ |
 *               |     |<-----------+
 *               +-----+
 *
 * Key:  ---X-->  HEAD flag set in pointer
 *         T      Tail page
 *         R      Reader page
 *         N      Next page
 *
 * (see __rb_reserve_next() to see where this happens)
 *
 * What the above shows is that the reader just swapped out
 * the reader page with a page in the buffer, but before it
 * could make the new header point back to the new page added
 * it was preempted by a writer. The writer moved forward onto
 * the new page added by the reader and is about to move forward
 * again.
 *
 * You can see, it is legitimate for the previous pointer of
 * the head (or any page) not to point back to itself. But only
 * temporarily.
 */

#define RB_PAGE_NORMAL          0UL
#define RB_PAGE_HEAD            1UL
#define RB_PAGE_UPDATE          2UL

#define RB_FLAG_MASK            3UL

/* PAGE_MOVED is not part of the mask */
#define RB_PAGE_MOVED           4UL

/*
 * rb_list_head - remove any bit
 */
static struct list_head *rb_list_head(struct list_head *list)
{
        unsigned long val = (unsigned long)list;

        return (struct list_head *)(val & ~RB_FLAG_MASK);
}
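/*
 * Illustrative sketch (not part of the original file): because buffer pages
 * are cache-line aligned, the two low bits of a ->next pointer are free to
 * carry the HEAD/UPDATE flags described above, and rb_list_head() strips
 * them before the pointer is dereferenced. The local variables are made up
 * for the example and the snippet is kept out of the build.
 */
#if 0
static void example_flag_bits(struct buffer_page *prev,
                              struct buffer_page *head)
{
        unsigned long tagged;
        struct list_head *next;

        /* What rb_set_list_to_head() conceptually stores in prev->list.next. */
        tagged = (unsigned long)&head->list | RB_PAGE_HEAD;

        /* The flags must be masked off before following the pointer ... */
        next = rb_list_head((struct list_head *)tagged);
        WARN_ON(next != &head->list);

        /* ... while the low bits say that this next page is the head page. */
        WARN_ON((tagged & RB_FLAG_MASK) != RB_PAGE_HEAD);
}
#endif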
/*
 * rb_is_head_page - test if the given page is the head page
 *
 * Because the reader may move the head_page pointer, we can
 * not trust what the head page is (it may be pointing to
 * the reader page). But if the next page is a header page,
 * its flags will be non zero.
 */
static inline int
rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
                struct buffer_page *page, struct list_head *list)
{
        unsigned long val;

        val = (unsigned long)list->next;

        if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
                return RB_PAGE_MOVED;

        return val & RB_FLAG_MASK;
}

/*
 * rb_is_reader_page
 *
 * The unique thing about the reader page, is that, if the
 * writer is ever on it, the previous pointer never points
 * back to the reader page.
 */
static int rb_is_reader_page(struct buffer_page *page)
{
        struct list_head *list = page->list.prev;

        return rb_list_head(list->next) != &page->list;
}

/*
 * rb_set_list_to_head - set a list_head to be pointing to head.
 */
static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
                                struct list_head *list)
{
        unsigned long *ptr;

        ptr = (unsigned long *)&list->next;
        *ptr |= RB_PAGE_HEAD;
        *ptr &= ~RB_PAGE_UPDATE;
}

/*
 * rb_head_page_activate - sets up head page
 */
static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
{
        struct buffer_page *head;

        head = cpu_buffer->head_page;
        if (!head)
                return;

        /*
         * Set the previous list pointer to have the HEAD flag.
         */
        rb_set_list_to_head(cpu_buffer, head->list.prev);
}

static void rb_list_head_clear(struct list_head *list)
{
        unsigned long *ptr = (unsigned long *)&list->next;

        *ptr &= ~RB_FLAG_MASK;
}

/*
 * rb_head_page_deactivate - clears head page ptr (for free list)
 */
static void
rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
{
        struct list_head *hd;

        /* Go through the whole list and clear any pointers found. */
        rb_list_head_clear(cpu_buffer->pages);

        list_for_each(hd, cpu_buffer->pages)
                rb_list_head_clear(hd);
}
static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
                            struct buffer_page *head,
                            struct buffer_page *prev,
                            int old_flag, int new_flag)
{
        struct list_head *list;
        unsigned long val = (unsigned long)&head->list;
        unsigned long ret;

        list = &prev->list;

        val &= ~RB_FLAG_MASK;

        ret = cmpxchg((unsigned long *)&list->next,
                      val | old_flag, val | new_flag);

        /* check if the reader took the page */
        if ((ret & ~RB_FLAG_MASK) != val)
                return RB_PAGE_MOVED;

        return ret & RB_FLAG_MASK;
}

static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
                                   struct buffer_page *head,
                                   struct buffer_page *prev,
                                   int old_flag)
{
        return rb_head_page_set(cpu_buffer, head, prev,
                                old_flag, RB_PAGE_UPDATE);
}

static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
                                 struct buffer_page *head,
                                 struct buffer_page *prev,
                                 int old_flag)
{
        return rb_head_page_set(cpu_buffer, head, prev,
                                old_flag, RB_PAGE_HEAD);
}

static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
                                   struct buffer_page *head,
                                   struct buffer_page *prev,
                                   int old_flag)
{
        return rb_head_page_set(cpu_buffer, head, prev,
                                old_flag, RB_PAGE_NORMAL);
}

static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
                               struct buffer_page **bpage)
{
        struct list_head *p = rb_list_head((*bpage)->list.next);

        *bpage = list_entry(p, struct buffer_page, list);
}

static struct buffer_page *
rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
{
        struct buffer_page *head;
        struct buffer_page *page;
        struct list_head *list;
        int i;

        if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
                return NULL;

        /* sanity check */
        list = cpu_buffer->pages;
        if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
                return NULL;

        page = head = cpu_buffer->head_page;
        /*
         * It is possible that the writer moves the header behind
         * where we started, and we miss in one loop.
         * A second loop should grab the header, but we'll do
         * three loops just because I'm paranoid.
         */
        for (i = 0; i < 3; i++) {
                do {
                        if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
                                cpu_buffer->head_page = page;
                                return page;
                        }
                        rb_inc_page(cpu_buffer, &page);
                } while (page != head);
        }

        RB_WARN_ON(cpu_buffer, 1);

        return NULL;
}

static int rb_head_page_replace(struct buffer_page *old,
                                struct buffer_page *new)
{
        unsigned long *ptr = (unsigned long *)&old->list.prev->next;
        unsigned long val;
        unsigned long ret;

        val = *ptr & ~RB_FLAG_MASK;
        val |= RB_PAGE_HEAD;

        ret = cmpxchg(ptr, val, (unsigned long)&new->list);

        return ret == val;
}
/*
 * rb_tail_page_update - move the tail page forward
 *
 * Returns 1 if moved tail page, 0 if someone else did.
 */
static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
                               struct buffer_page *tail_page,
                               struct buffer_page *next_page)
{
        struct buffer_page *old_tail;
        unsigned long old_entries;
        unsigned long old_write;
        int ret = 0;

        /*
         * The tail page now needs to be moved forward.
         *
         * We need to reset the tail page, but without messing
         * with possible erasing of data brought in by interrupts
         * that have moved the tail page and are currently on it.
         *
         * We add a counter to the write field to denote this.
         */
        old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
        old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);

        /*
         * Just make sure we have seen our old_write and synchronize
         * with any interrupts that come in.
         */
        barrier();

        /*
         * If the tail page is still the same as what we think
         * it is, then it is up to us to update the tail
         * pointer.
         */
        if (tail_page == cpu_buffer->tail_page) {
                /* Zero the write counter */
                unsigned long val = old_write & ~RB_WRITE_MASK;
                unsigned long eval = old_entries & ~RB_WRITE_MASK;

                /*
                 * This will only succeed if an interrupt did
                 * not come in and change it. In which case, we
                 * do not want to modify it.
                 *
                 * We add (void) to let the compiler know that we do not care
                 * about the return value of these functions. We use the
                 * cmpxchg to only update if an interrupt did not already
                 * do it for us. If the cmpxchg fails, we don't care.
                 */
                (void)local_cmpxchg(&next_page->write, old_write, val);
                (void)local_cmpxchg(&next_page->entries, old_entries, eval);

                /*
                 * No need to worry about races with clearing out the commit.
                 * it only can increment when a commit takes place. But that
                 * only happens in the outer most nested commit.
                 */
                local_set(&next_page->page->commit, 0);

                old_tail = cmpxchg(&cpu_buffer->tail_page,
                                   tail_page, next_page);

                if (old_tail == tail_page)
                        ret = 1;
        }

        return ret;
}
static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
                          struct buffer_page *bpage)
{
        unsigned long val = (unsigned long)bpage;

        if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
                return 1;

        return 0;
}

/**
 * rb_check_list - make sure a pointer to a list has the last bits zero
 */
static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
                         struct list_head *list)
{
        if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
                return 1;
        if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
                return 1;
        return 0;
}

/**
 * rb_check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
 */
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
        struct list_head *head = cpu_buffer->pages;
        struct buffer_page *bpage, *tmp;

        rb_head_page_deactivate(cpu_buffer);

        if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
                return -1;
        if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
                return -1;

        if (rb_check_list(cpu_buffer, head))
                return -1;

        list_for_each_entry_safe(bpage, tmp, head, list) {
                if (RB_WARN_ON(cpu_buffer,
                               bpage->list.next->prev != &bpage->list))
                        return -1;
                if (RB_WARN_ON(cpu_buffer,
                               bpage->list.prev->next != &bpage->list))
                        return -1;
                if (rb_check_list(cpu_buffer, &bpage->list))
                        return -1;
        }

        rb_head_page_activate(cpu_buffer);

        return 0;
}
static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
                             unsigned nr_pages)
{
        struct buffer_page *bpage, *tmp;
        LIST_HEAD(pages);
        unsigned i;

        WARN_ON(!nr_pages);

        for (i = 0; i < nr_pages; i++) {
                struct page *page;
                /*
                 * __GFP_NORETRY flag makes sure that the allocation fails
                 * gracefully without invoking oom-killer and the system is
                 * not destabilized.
                 */
                bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
                                     GFP_KERNEL | __GFP_NORETRY,
                                     cpu_to_node(cpu_buffer->cpu));
                if (!bpage)
                        goto free_pages;

                rb_check_bpage(cpu_buffer, bpage);

                list_add(&bpage->list, &pages);

                page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu),
                                        GFP_KERNEL | __GFP_NORETRY, 0);
                if (!page)
                        goto free_pages;
                bpage->page = page_address(page);
                rb_init_page(bpage->page);
        }

        /*
         * The ring buffer page list is a circular list that does not
         * start and end with a list head. All page list items point to
         * other pages.
         */
        cpu_buffer->pages = pages.next;
        list_del(&pages);

        rb_check_pages(cpu_buffer);

        return 0;

 free_pages:
        list_for_each_entry_safe(bpage, tmp, &pages, list) {
                list_del_init(&bpage->list);
                free_buffer_page(bpage);
        }
        return -ENOMEM;
}
static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
{
        struct ring_buffer_per_cpu *cpu_buffer;
        struct buffer_page *bpage;
        struct page *page;
        int ret;

        cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
                                  GFP_KERNEL, cpu_to_node(cpu));
        if (!cpu_buffer)
                return NULL;

        cpu_buffer->cpu = cpu;
        cpu_buffer->buffer = buffer;
        raw_spin_lock_init(&cpu_buffer->reader_lock);
        lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
        cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

        bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
                             GFP_KERNEL, cpu_to_node(cpu));
        if (!bpage)
                goto fail_free_buffer;

        rb_check_bpage(cpu_buffer, bpage);

        cpu_buffer->reader_page = bpage;
        page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
        if (!page)
                goto fail_free_reader;
        bpage->page = page_address(page);
        rb_init_page(bpage->page);

        INIT_LIST_HEAD(&cpu_buffer->reader_page->list);

        ret = rb_allocate_pages(cpu_buffer, buffer->pages);
        if (ret < 0)
                goto fail_free_reader;

        cpu_buffer->head_page
                = list_entry(cpu_buffer->pages, struct buffer_page, list);
        cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;

        rb_head_page_activate(cpu_buffer);

        return cpu_buffer;

 fail_free_reader:
        free_buffer_page(cpu_buffer->reader_page);

 fail_free_buffer:
        kfree(cpu_buffer);
        return NULL;
}

static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
        struct list_head *head = cpu_buffer->pages;
        struct buffer_page *bpage, *tmp;

        free_buffer_page(cpu_buffer->reader_page);

        rb_head_page_deactivate(cpu_buffer);

        if (head) {
                list_for_each_entry_safe(bpage, tmp, head, list) {
                        list_del_init(&bpage->list);
                        free_buffer_page(bpage);
                }
                bpage = list_entry(head, struct buffer_page, list);
                free_buffer_page(bpage);
        }

        kfree(cpu_buffer);
}
#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
                         unsigned long action, void *hcpu);
#endif

/**
 * ring_buffer_alloc - allocate a new ring_buffer
 * @size: the size in bytes per cpu that is needed.
 * @flags: attributes to set for the ring buffer.
 *
 * Currently the only flag that is available is the RB_FL_OVERWRITE
 * flag. This flag means that the buffer will overwrite old data
 * when the buffer wraps. If this flag is not set, the buffer will
 * drop data when the tail hits the head.
 */
struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
                                        struct lock_class_key *key)
{
        struct ring_buffer *buffer;
        int bsize;
        int cpu;

        /* keep it in its own cache line */
        buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
                         GFP_KERNEL);
        if (!buffer)
                return NULL;

        if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
                goto fail_free_buffer;

        buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
        buffer->flags = flags;
        buffer->clock = trace_clock_local;
        buffer->reader_lock_key = key;

        /* need at least two pages */
        if (buffer->pages < 2)
                buffer->pages = 2;

        /*
         * In case of non-hotplug cpu, if the ring-buffer is allocated
         * in early initcall, it will not be notified of secondary cpus.
         * In that off case, we need to allocate for all possible cpus.
         */
#ifdef CONFIG_HOTPLUG_CPU
        get_online_cpus();
        cpumask_copy(buffer->cpumask, cpu_online_mask);
#else
        cpumask_copy(buffer->cpumask, cpu_possible_mask);
#endif
        buffer->cpus = nr_cpu_ids;

        bsize = sizeof(void *) * nr_cpu_ids;
        buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
                                  GFP_KERNEL);
        if (!buffer->buffers)
                goto fail_free_cpumask;

        for_each_buffer_cpu(buffer, cpu) {
                buffer->buffers[cpu] =
                        rb_allocate_cpu_buffer(buffer, cpu);
                if (!buffer->buffers[cpu])
                        goto fail_free_buffers;
        }

#ifdef CONFIG_HOTPLUG_CPU
        buffer->cpu_notify.notifier_call = rb_cpu_notify;
        buffer->cpu_notify.priority = 0;
        register_cpu_notifier(&buffer->cpu_notify);
#endif

        put_online_cpus();

        mutex_init(&buffer->mutex);

        return buffer;

 fail_free_buffers:
        for_each_buffer_cpu(buffer, cpu) {
                if (buffer->buffers[cpu])
                        rb_free_cpu_buffer(buffer->buffers[cpu]);
        }
        kfree(buffer->buffers);

 fail_free_cpumask:
        free_cpumask_var(buffer->cpumask);
        put_online_cpus();

 fail_free_buffer:
        kfree(buffer);
        return NULL;
}
EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
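/*
 * Illustrative sketch (not part of the original file): a minimal producer
 * built on this API. ring_buffer_alloc() is assumed to be the wrapper macro
 * from linux/ring_buffer.h that supplies a static lock_class_key to
 * __ring_buffer_alloc(); ring_buffer_lock_reserve() and
 * ring_buffer_unlock_commit() are defined further down in this file. The
 * size and payload type are made up; the snippet is kept out of the build.
 */
#if 0
static struct ring_buffer *example_buffer;

static int example_setup(void)
{
        /* Roughly 64KB per cpu, overwrite the oldest data when full. */
        example_buffer = ring_buffer_alloc(65536, RB_FL_OVERWRITE);
        return example_buffer ? 0 : -ENOMEM;
}

static void example_log(unsigned long value)
{
        struct ring_buffer_event *event;
        unsigned long *payload;

        event = ring_buffer_lock_reserve(example_buffer, sizeof(*payload));
        if (!event)
                return;         /* recording disabled or reserve failed */

        payload = ring_buffer_event_data(event);
        *payload = value;

        ring_buffer_unlock_commit(example_buffer, event);
}

static void example_teardown(void)
{
        ring_buffer_free(example_buffer);
}
#endif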
/**
 * ring_buffer_free - free a ring buffer.
 * @buffer: the buffer to free.
 */
void
ring_buffer_free(struct ring_buffer *buffer)
{
        int cpu;

        get_online_cpus();

#ifdef CONFIG_HOTPLUG_CPU
        unregister_cpu_notifier(&buffer->cpu_notify);
#endif

        for_each_buffer_cpu(buffer, cpu)
                rb_free_cpu_buffer(buffer->buffers[cpu]);

        put_online_cpus();

        kfree(buffer->buffers);
        free_cpumask_var(buffer->cpumask);

        kfree(buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_free);

void ring_buffer_set_clock(struct ring_buffer *buffer,
                           u64 (*clock)(void))
{
        buffer->clock = clock;
}

static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);

static void
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
{
        struct buffer_page *bpage;
        struct list_head *p;
        unsigned i;

        raw_spin_lock_irq(&cpu_buffer->reader_lock);
        rb_head_page_deactivate(cpu_buffer);

        for (i = 0; i < nr_pages; i++) {
                if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
                        goto out;
                p = cpu_buffer->pages->next;
                bpage = list_entry(p, struct buffer_page, list);
                list_del_init(&bpage->list);
                free_buffer_page(bpage);
        }
        if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
                goto out;

        rb_reset_cpu(cpu_buffer);
        rb_check_pages(cpu_buffer);

out:
        raw_spin_unlock_irq(&cpu_buffer->reader_lock);
}

static void
rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
                struct list_head *pages, unsigned nr_pages)
{
        struct buffer_page *bpage;
        struct list_head *p;
        unsigned i;

        raw_spin_lock_irq(&cpu_buffer->reader_lock);
        rb_head_page_deactivate(cpu_buffer);

        for (i = 0; i < nr_pages; i++) {
                if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
                        goto out;
                p = pages->next;
                bpage = list_entry(p, struct buffer_page, list);
                list_del_init(&bpage->list);
                list_add_tail(&bpage->list, cpu_buffer->pages);
        }
        rb_reset_cpu(cpu_buffer);
        rb_check_pages(cpu_buffer);

out:
        raw_spin_unlock_irq(&cpu_buffer->reader_lock);
}
  1118. /**
  1119. * ring_buffer_resize - resize the ring buffer
  1120. * @buffer: the buffer to resize.
  1121. * @size: the new size.
  1122. *
  1123. * Minimum size is 2 * BUF_PAGE_SIZE.
  1124. *
  1125. * Returns -1 on failure.
  1126. */
  1127. int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
  1128. {
  1129. struct ring_buffer_per_cpu *cpu_buffer;
  1130. unsigned nr_pages, rm_pages, new_pages;
  1131. struct buffer_page *bpage, *tmp;
  1132. unsigned long buffer_size;
  1133. LIST_HEAD(pages);
  1134. int i, cpu;
  1135. /*
  1136. * Always succeed at resizing a non-existent buffer:
  1137. */
  1138. if (!buffer)
  1139. return size;
  1140. size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
  1141. size *= BUF_PAGE_SIZE;
  1142. buffer_size = buffer->pages * BUF_PAGE_SIZE;
  1143. /* we need a minimum of two pages */
  1144. if (size < BUF_PAGE_SIZE * 2)
  1145. size = BUF_PAGE_SIZE * 2;
  1146. if (size == buffer_size)
  1147. return size;
  1148. atomic_inc(&buffer->record_disabled);
  1149. /* Make sure all writers are done with this buffer. */
  1150. synchronize_sched();
  1151. mutex_lock(&buffer->mutex);
  1152. get_online_cpus();
  1153. nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
  1154. if (size < buffer_size) {
  1155. /* easy case, just free pages */
  1156. if (RB_WARN_ON(buffer, nr_pages >= buffer->pages))
  1157. goto out_fail;
  1158. rm_pages = buffer->pages - nr_pages;
  1159. for_each_buffer_cpu(buffer, cpu) {
  1160. cpu_buffer = buffer->buffers[cpu];
  1161. rb_remove_pages(cpu_buffer, rm_pages);
  1162. }
  1163. goto out;
  1164. }
/*
 * This is a bit more difficult. We only want to add pages
 * when we can allocate enough for all CPUs. We do this
 * by allocating all the pages and storing them on a local
 * linked list. If we succeed in our allocation, then we
 * add these pages to the cpu_buffers. Otherwise we just free
 * them all and return -ENOMEM.
 */
  1173. if (RB_WARN_ON(buffer, nr_pages <= buffer->pages))
  1174. goto out_fail;
  1175. new_pages = nr_pages - buffer->pages;
  1176. for_each_buffer_cpu(buffer, cpu) {
  1177. for (i = 0; i < new_pages; i++) {
  1178. struct page *page;
  1179. /*
  1180. * __GFP_NORETRY flag makes sure that the allocation
  1181. * fails gracefully without invoking oom-killer and
  1182. * the system is not destabilized.
  1183. */
  1184. bpage = kzalloc_node(ALIGN(sizeof(*bpage),
  1185. cache_line_size()),
  1186. GFP_KERNEL | __GFP_NORETRY,
  1187. cpu_to_node(cpu));
  1188. if (!bpage)
  1189. goto free_pages;
  1190. list_add(&bpage->list, &pages);
  1191. page = alloc_pages_node(cpu_to_node(cpu),
  1192. GFP_KERNEL | __GFP_NORETRY, 0);
  1193. if (!page)
  1194. goto free_pages;
  1195. bpage->page = page_address(page);
  1196. rb_init_page(bpage->page);
  1197. }
  1198. }
  1199. for_each_buffer_cpu(buffer, cpu) {
  1200. cpu_buffer = buffer->buffers[cpu];
  1201. rb_insert_pages(cpu_buffer, &pages, new_pages);
  1202. }
  1203. if (RB_WARN_ON(buffer, !list_empty(&pages)))
  1204. goto out_fail;
  1205. out:
  1206. buffer->pages = nr_pages;
  1207. put_online_cpus();
  1208. mutex_unlock(&buffer->mutex);
  1209. atomic_dec(&buffer->record_disabled);
  1210. return size;
  1211. free_pages:
  1212. list_for_each_entry_safe(bpage, tmp, &pages, list) {
  1213. list_del_init(&bpage->list);
  1214. free_buffer_page(bpage);
  1215. }
  1216. put_online_cpus();
  1217. mutex_unlock(&buffer->mutex);
  1218. atomic_dec(&buffer->record_disabled);
  1219. return -ENOMEM;
  1220. /*
  1221. * Something went totally wrong, and we are too paranoid
  1222. * to even clean up the mess.
  1223. */
  1224. out_fail:
  1225. put_online_cpus();
  1226. mutex_unlock(&buffer->mutex);
  1227. atomic_dec(&buffer->record_disabled);
  1228. return -1;
  1229. }
  1230. EXPORT_SYMBOL_GPL(ring_buffer_resize);
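/*
 * Illustrative sketch, not part of the original file: one way a caller
 * might use ring_buffer_resize() and interpret its return value. The
 * helper name and the 1 MB target are hypothetical.
 */
static int rb_example_resize(struct ring_buffer *buffer)
{
        int ret;

        /* Ask for 1 MB; the request is rounded up to whole buffer pages. */
        ret = ring_buffer_resize(buffer, 1024 * 1024);
        if (ret < 0)
                return ret;     /* -ENOMEM if pages could not be allocated, -1 otherwise */

        /* On success, ret is the new size in bytes (a multiple of BUF_PAGE_SIZE). */
        return 0;
}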
  1231. void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
  1232. {
  1233. mutex_lock(&buffer->mutex);
  1234. if (val)
  1235. buffer->flags |= RB_FL_OVERWRITE;
  1236. else
  1237. buffer->flags &= ~RB_FL_OVERWRITE;
  1238. mutex_unlock(&buffer->mutex);
  1239. }
  1240. EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
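/*
 * Illustrative sketch, not part of the original file: toggling between
 * overwrite mode (the writer pushes out the oldest events when the buffer
 * is full) and produce-stop mode (new writes fail once the buffer fills).
 * The wrapper name is hypothetical; the call itself is the API above.
 */
static void rb_example_set_overwrite(struct ring_buffer *buffer, int on)
{
        /* Non-zero sets RB_FL_OVERWRITE, zero clears it. */
        ring_buffer_change_overwrite(buffer, on);
}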
  1241. static inline void *
  1242. __rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
  1243. {
  1244. return bpage->data + index;
  1245. }
  1246. static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
  1247. {
  1248. return bpage->page->data + index;
  1249. }
  1250. static inline struct ring_buffer_event *
  1251. rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
  1252. {
  1253. return __rb_page_index(cpu_buffer->reader_page,
  1254. cpu_buffer->reader_page->read);
  1255. }
  1256. static inline struct ring_buffer_event *
  1257. rb_iter_head_event(struct ring_buffer_iter *iter)
  1258. {
  1259. return __rb_page_index(iter->head_page, iter->head);
  1260. }
  1261. static inline unsigned long rb_page_write(struct buffer_page *bpage)
  1262. {
  1263. return local_read(&bpage->write) & RB_WRITE_MASK;
  1264. }
  1265. static inline unsigned rb_page_commit(struct buffer_page *bpage)
  1266. {
  1267. return local_read(&bpage->page->commit);
  1268. }
  1269. static inline unsigned long rb_page_entries(struct buffer_page *bpage)
  1270. {
  1271. return local_read(&bpage->entries) & RB_WRITE_MASK;
  1272. }
  1273. /* Size is determined by what has been committed */
  1274. static inline unsigned rb_page_size(struct buffer_page *bpage)
  1275. {
  1276. return rb_page_commit(bpage);
  1277. }
  1278. static inline unsigned
  1279. rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
  1280. {
  1281. return rb_page_commit(cpu_buffer->commit_page);
  1282. }
  1283. static inline unsigned
  1284. rb_event_index(struct ring_buffer_event *event)
  1285. {
  1286. unsigned long addr = (unsigned long)event;
  1287. return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
  1288. }
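/*
 * Illustrative worked example, not part of the original file: assuming a
 * 64-bit system with 4 KB pages and a 16-byte buffer_data_page header
 * (BUF_PAGE_HDR_SIZE == 16), an event located 0x30 bytes into its page has
 * (addr & ~PAGE_MASK) == 0x30, so rb_event_index() returns 0x30 - 16 = 32:
 * the event starts 32 bytes into the data area of that page.
 */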
  1289. static inline int
  1290. rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
  1291. struct ring_buffer_event *event)
  1292. {
  1293. unsigned long addr = (unsigned long)event;
  1294. unsigned long index;
  1295. index = rb_event_index(event);
  1296. addr &= PAGE_MASK;
  1297. return cpu_buffer->commit_page->page == (void *)addr &&
  1298. rb_commit_index(cpu_buffer) == index;
  1299. }
  1300. static void
  1301. rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
  1302. {
  1303. unsigned long max_count;
  1304. /*
  1305. * We only race with interrupts and NMIs on this CPU.
  1306. * If we own the commit event, then we can commit
  1307. * all others that interrupted us, since the interruptions
  1308. * are in stack format (they finish before they come
  1309. * back to us). This allows us to do a simple loop to
  1310. * assign the commit to the tail.
  1311. */
  1312. again:
  1313. max_count = cpu_buffer->buffer->pages * 100;
  1314. while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
  1315. if (RB_WARN_ON(cpu_buffer, !(--max_count)))
  1316. return;
  1317. if (RB_WARN_ON(cpu_buffer,
  1318. rb_is_reader_page(cpu_buffer->tail_page)))
  1319. return;
  1320. local_set(&cpu_buffer->commit_page->page->commit,
  1321. rb_page_write(cpu_buffer->commit_page));
  1322. rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
  1323. cpu_buffer->write_stamp =
  1324. cpu_buffer->commit_page->page->time_stamp;
  1325. /* add barrier to keep gcc from optimizing too much */
  1326. barrier();
  1327. }
  1328. while (rb_commit_index(cpu_buffer) !=
  1329. rb_page_write(cpu_buffer->commit_page)) {
  1330. local_set(&cpu_buffer->commit_page->page->commit,
  1331. rb_page_write(cpu_buffer->commit_page));
  1332. RB_WARN_ON(cpu_buffer,
  1333. local_read(&cpu_buffer->commit_page->page->commit) &
  1334. ~RB_WRITE_MASK);
  1335. barrier();
  1336. }
  1337. /* again, keep gcc from optimizing */
  1338. barrier();
  1339. /*
  1340. * If an interrupt came in just after the first while loop
  1341. * and pushed the tail page forward, we will be left with
  1342. * a dangling commit that will never go forward.
  1343. */
  1344. if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
  1345. goto again;
  1346. }
  1347. static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
  1348. {
  1349. cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
  1350. cpu_buffer->reader_page->read = 0;
  1351. }
  1352. static void rb_inc_iter(struct ring_buffer_iter *iter)
  1353. {
  1354. struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
  1355. /*
  1356. * The iterator could be on the reader page (it starts there).
  1357. * But the head could have moved, since the reader was
  1358. * found. Check for this case and assign the iterator
  1359. * to the head page instead of next.
  1360. */
  1361. if (iter->head_page == cpu_buffer->reader_page)
  1362. iter->head_page = rb_set_head_page(cpu_buffer);
  1363. else
  1364. rb_inc_page(cpu_buffer, &iter->head_page);
  1365. iter->read_stamp = iter->head_page->page->time_stamp;
  1366. iter->head = 0;
  1367. }
  1368. /* Slow path, do not inline */
  1369. static noinline struct ring_buffer_event *
  1370. rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
  1371. {
  1372. event->type_len = RINGBUF_TYPE_TIME_EXTEND;
  1373. /* Not the first event on the page? */
  1374. if (rb_event_index(event)) {
  1375. event->time_delta = delta & TS_MASK;
  1376. event->array[0] = delta >> TS_SHIFT;
  1377. } else {
  1378. /* nope, just zero it */
  1379. event->time_delta = 0;
  1380. event->array[0] = 0;
  1381. }
  1382. return skip_time_extend(event);
  1383. }
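/*
 * Illustrative worked example, not part of the original file: a time-extend
 * event splits the delta between the 27-bit time_delta field and array[0]
 * (assuming TS_SHIFT == 27 as defined earlier in this file). A delta of
 * 0x12345678 clock units is stored as
 *
 *      event->time_delta = 0x12345678 & TS_MASK   = 0x02345678
 *      event->array[0]   = 0x12345678 >> TS_SHIFT = 0x2
 *
 * and readers reconstruct it as (array[0] << TS_SHIFT) + time_delta, which
 * is exactly what rb_update_write_stamp() and rb_update_read_stamp() do.
 */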
/**
 * rb_update_event - update event type and data
 * @cpu_buffer: the per-CPU buffer this event belongs to
 * @event: the event to update
 * @length: the size reserved for the event in the ring buffer
 * @add_timestamp: non-zero if a time-extend event must be prepended
 * @delta: the time delta to store in the event
 *
 * Update the type and data fields of the event. The length
 * is the actual size that is written to the ring buffer,
 * and with this, we can determine what to place into the
 * data field.
 */
  1395. static void
  1396. rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
  1397. struct ring_buffer_event *event, unsigned length,
  1398. int add_timestamp, u64 delta)
  1399. {
  1400. /* Only a commit updates the timestamp */
  1401. if (unlikely(!rb_event_is_commit(cpu_buffer, event)))
  1402. delta = 0;
/*
 * If we need to add a timestamp, then we
 * add it to the start of the reserved space.
 */
  1407. if (unlikely(add_timestamp)) {
  1408. event = rb_add_time_stamp(event, delta);
  1409. length -= RB_LEN_TIME_EXTEND;
  1410. delta = 0;
  1411. }
  1412. event->time_delta = delta;
  1413. length -= RB_EVNT_HDR_SIZE;
  1414. if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
  1415. event->type_len = 0;
  1416. event->array[0] = length;
  1417. } else
  1418. event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
  1419. }
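/*
 * Illustrative worked example, not part of the original file: assuming
 * RB_ALIGNMENT == 4 and RB_EVNT_HDR_SIZE == 4 as defined earlier in this
 * file, an event whose data area is 12 bytes gets type_len =
 * DIV_ROUND_UP(12, 4) = 3 and needs no explicit length word; a reader
 * recovers the data size as type_len * RB_ALIGNMENT. A data area larger
 * than RB_MAX_SMALL_DATA (or any event when RB_FORCE_8BYTE_ALIGNMENT is
 * set) instead uses type_len == 0 with the byte count stored in array[0].
 */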
  1420. /*
  1421. * rb_handle_head_page - writer hit the head page
  1422. *
  1423. * Returns: +1 to retry page
  1424. * 0 to continue
  1425. * -1 on error
  1426. */
  1427. static int
  1428. rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
  1429. struct buffer_page *tail_page,
  1430. struct buffer_page *next_page)
  1431. {
  1432. struct buffer_page *new_head;
  1433. int entries;
  1434. int type;
  1435. int ret;
  1436. entries = rb_page_entries(next_page);
  1437. /*
  1438. * The hard part is here. We need to move the head
  1439. * forward, and protect against both readers on
  1440. * other CPUs and writers coming in via interrupts.
  1441. */
  1442. type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
  1443. RB_PAGE_HEAD);
  1444. /*
  1445. * type can be one of four:
  1446. * NORMAL - an interrupt already moved it for us
  1447. * HEAD - we are the first to get here.
  1448. * UPDATE - we are the interrupt interrupting
  1449. * a current move.
  1450. * MOVED - a reader on another CPU moved the next
  1451. * pointer to its reader page. Give up
  1452. * and try again.
  1453. */
  1454. switch (type) {
  1455. case RB_PAGE_HEAD:
  1456. /*
  1457. * We changed the head to UPDATE, thus
  1458. * it is our responsibility to update
  1459. * the counters.
  1460. */
  1461. local_add(entries, &cpu_buffer->overrun);
  1462. local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
  1463. /*
  1464. * The entries will be zeroed out when we move the
  1465. * tail page.
  1466. */
  1467. /* still more to do */
  1468. break;
  1469. case RB_PAGE_UPDATE:
/*
 * This is an interrupt that interrupted the
 * previous update. Still more to do.
 */
  1474. break;
  1475. case RB_PAGE_NORMAL:
  1476. /*
  1477. * An interrupt came in before the update
  1478. * and processed this for us.
  1479. * Nothing left to do.
  1480. */
  1481. return 1;
  1482. case RB_PAGE_MOVED:
  1483. /*
  1484. * The reader is on another CPU and just did
  1485. * a swap with our next_page.
  1486. * Try again.
  1487. */
  1488. return 1;
  1489. default:
  1490. RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
  1491. return -1;
  1492. }
/*
 * Now that we are here, the old head pointer is
 * set to UPDATE. This will keep the reader from
 * swapping the head page with the reader page.
 * The reader (on another CPU) will spin till
 * we are finished.
 *
 * We just need to protect against interrupts
 * doing the job. We will set the next pointer
 * to HEAD. After that, we set the old pointer
 * to NORMAL, but only if it was HEAD before;
 * otherwise we are an interrupt, and only
 * want the outermost commit to reset it.
 */
  1507. new_head = next_page;
  1508. rb_inc_page(cpu_buffer, &new_head);
  1509. ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
  1510. RB_PAGE_NORMAL);
  1511. /*
  1512. * Valid returns are:
  1513. * HEAD - an interrupt came in and already set it.
  1514. * NORMAL - One of two things:
  1515. * 1) We really set it.
  1516. * 2) A bunch of interrupts came in and moved
  1517. * the page forward again.
  1518. */
  1519. switch (ret) {
  1520. case RB_PAGE_HEAD:
  1521. case RB_PAGE_NORMAL:
  1522. /* OK */
  1523. break;
  1524. default:
  1525. RB_WARN_ON(cpu_buffer, 1);
  1526. return -1;
  1527. }
  1528. /*
  1529. * It is possible that an interrupt came in,
  1530. * set the head up, then more interrupts came in
  1531. * and moved it again. When we get back here,
  1532. * the page would have been set to NORMAL but we
  1533. * just set it back to HEAD.
  1534. *
  1535. * How do you detect this? Well, if that happened
  1536. * the tail page would have moved.
  1537. */
  1538. if (ret == RB_PAGE_NORMAL) {
/*
 * If the tail had moved past next, then we need
 * to reset the pointer.
 */
  1543. if (cpu_buffer->tail_page != tail_page &&
  1544. cpu_buffer->tail_page != next_page)
  1545. rb_head_page_set_normal(cpu_buffer, new_head,
  1546. next_page,
  1547. RB_PAGE_HEAD);
  1548. }
/*
 * If this was the outermost commit (the one that
 * changed the original pointer from HEAD to UPDATE),
 * then it is up to us to reset it to NORMAL.
 */
  1554. if (type == RB_PAGE_HEAD) {
  1555. ret = rb_head_page_set_normal(cpu_buffer, next_page,
  1556. tail_page,
  1557. RB_PAGE_UPDATE);
  1558. if (RB_WARN_ON(cpu_buffer,
  1559. ret != RB_PAGE_UPDATE))
  1560. return -1;
  1561. }
  1562. return 0;
  1563. }
  1564. static unsigned rb_calculate_event_length(unsigned length)
  1565. {
  1566. struct ring_buffer_event event; /* Used only for sizeof array */
  1567. /* zero length can cause confusions */
  1568. if (!length)
  1569. length = 1;
  1570. if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
  1571. length += sizeof(event.array[0]);
  1572. length += RB_EVNT_HDR_SIZE;
  1573. length = ALIGN(length, RB_ARCH_ALIGNMENT);
  1574. return length;
  1575. }
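/*
 * Illustrative worked example, not part of the original file: asked to
 * reserve 5 bytes of data, and assuming RB_EVNT_HDR_SIZE == 4 and
 * RB_ARCH_ALIGNMENT == 4 (the common x86 configuration),
 * rb_calculate_event_length() computes 5 + 4 = 9 and aligns it up to 12,
 * so the event consumes 12 bytes of the page. A zero-byte request is first
 * bumped to 1 byte so the event still occupies space and cannot be
 * confused with padding.
 */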
  1576. static inline void
  1577. rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
  1578. struct buffer_page *tail_page,
  1579. unsigned long tail, unsigned long length)
  1580. {
  1581. struct ring_buffer_event *event;
  1582. /*
  1583. * Only the event that crossed the page boundary
  1584. * must fill the old tail_page with padding.
  1585. */
  1586. if (tail >= BUF_PAGE_SIZE) {
  1587. /*
  1588. * If the page was filled, then we still need
  1589. * to update the real_end. Reset it to zero
  1590. * and the reader will ignore it.
  1591. */
  1592. if (tail == BUF_PAGE_SIZE)
  1593. tail_page->real_end = 0;
  1594. local_sub(length, &tail_page->write);
  1595. return;
  1596. }
  1597. event = __rb_page_index(tail_page, tail);
  1598. kmemcheck_annotate_bitfield(event, bitfield);
  1599. /* account for padding bytes */
  1600. local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
/*
 * Save the original length in the meta data.
 * This will be used by the reader to update the lost
 * event counter.
 */
  1606. tail_page->real_end = tail;
  1607. /*
  1608. * If this event is bigger than the minimum size, then
  1609. * we need to be careful that we don't subtract the
  1610. * write counter enough to allow another writer to slip
  1611. * in on this page.
  1612. * We put in a discarded commit instead, to make sure
  1613. * that this space is not used again.
  1614. *
  1615. * If we are less than the minimum size, we don't need to
  1616. * worry about it.
  1617. */
  1618. if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
  1619. /* No room for any events */
  1620. /* Mark the rest of the page with padding */
  1621. rb_event_set_padding(event);
  1622. /* Set the write back to the previous setting */
  1623. local_sub(length, &tail_page->write);
  1624. return;
  1625. }
  1626. /* Put in a discarded event */
  1627. event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
  1628. event->type_len = RINGBUF_TYPE_PADDING;
  1629. /* time delta must be non zero */
  1630. event->time_delta = 1;
  1631. /* Set write to end of buffer */
  1632. length = (tail + length) - BUF_PAGE_SIZE;
  1633. local_sub(length, &tail_page->write);
  1634. }
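/*
 * Illustrative worked example, not part of the original file: assuming a
 * 64-bit system with 4 KB pages (BUF_PAGE_SIZE == 4080), a writer that
 * reserved 64 bytes starting at tail == 4040 crosses the page boundary.
 * rb_reset_tail() records real_end = 4040 on the old page and, because
 * 4040 is not within RB_EVNT_MIN_SIZE of the end, fills the remaining
 * 4080 - 4040 = 40 bytes with a PADDING event whose array[0] holds
 * 40 - RB_EVNT_HDR_SIZE = 36, so readers skip straight over the gap.
 */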
  1635. /*
  1636. * This is the slow path, force gcc not to inline it.
  1637. */
  1638. static noinline struct ring_buffer_event *
  1639. rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
  1640. unsigned long length, unsigned long tail,
  1641. struct buffer_page *tail_page, u64 ts)
  1642. {
  1643. struct buffer_page *commit_page = cpu_buffer->commit_page;
  1644. struct ring_buffer *buffer = cpu_buffer->buffer;
  1645. struct buffer_page *next_page;
  1646. int ret;
  1647. next_page = tail_page;
  1648. rb_inc_page(cpu_buffer, &next_page);
  1649. /*
  1650. * If for some reason, we had an interrupt storm that made
  1651. * it all the way around the buffer, bail, and warn
  1652. * about it.
  1653. */
  1654. if (unlikely(next_page == commit_page)) {
  1655. local_inc(&cpu_buffer->commit_overrun);
  1656. goto out_reset;
  1657. }
  1658. /*
  1659. * This is where the fun begins!
  1660. *
  1661. * We are fighting against races between a reader that
  1662. * could be on another CPU trying to swap its reader
  1663. * page with the buffer head.
  1664. *
  1665. * We are also fighting against interrupts coming in and
  1666. * moving the head or tail on us as well.
  1667. *
  1668. * If the next page is the head page then we have filled
  1669. * the buffer, unless the commit page is still on the
  1670. * reader page.
  1671. */
  1672. if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {
  1673. /*
  1674. * If the commit is not on the reader page, then
  1675. * move the header page.
  1676. */
  1677. if (!rb_is_reader_page(cpu_buffer->commit_page)) {
  1678. /*
  1679. * If we are not in overwrite mode,
  1680. * this is easy, just stop here.
  1681. */
  1682. if (!(buffer->flags & RB_FL_OVERWRITE))
  1683. goto out_reset;
  1684. ret = rb_handle_head_page(cpu_buffer,
  1685. tail_page,
  1686. next_page);
  1687. if (ret < 0)
  1688. goto out_reset;
  1689. if (ret)
  1690. goto out_again;
  1691. } else {
/*
 * We need to be careful here too. The
 * commit page could still be on the reader
 * page. We could have a small buffer, and
 * have filled up the buffer with events
 * from interrupts and such, and wrapped.
 *
 * Note, if the tail page is also on the
 * reader_page, we let it move out.
 */
  1702. if (unlikely((cpu_buffer->commit_page !=
  1703. cpu_buffer->tail_page) &&
  1704. (cpu_buffer->commit_page ==
  1705. cpu_buffer->reader_page))) {
  1706. local_inc(&cpu_buffer->commit_overrun);
  1707. goto out_reset;
  1708. }
  1709. }
  1710. }
  1711. ret = rb_tail_page_update(cpu_buffer, tail_page, next_page);
  1712. if (ret) {
  1713. /*
  1714. * Nested commits always have zero deltas, so
  1715. * just reread the time stamp
  1716. */
  1717. ts = rb_time_stamp(buffer);
  1718. next_page->page->time_stamp = ts;
  1719. }
  1720. out_again:
  1721. rb_reset_tail(cpu_buffer, tail_page, tail, length);
  1722. /* fail and let the caller try again */
  1723. return ERR_PTR(-EAGAIN);
  1724. out_reset:
  1725. /* reset write */
  1726. rb_reset_tail(cpu_buffer, tail_page, tail, length);
  1727. return NULL;
  1728. }
  1729. static struct ring_buffer_event *
  1730. __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
  1731. unsigned long length, u64 ts,
  1732. u64 delta, int add_timestamp)
  1733. {
  1734. struct buffer_page *tail_page;
  1735. struct ring_buffer_event *event;
  1736. unsigned long tail, write;
  1737. /*
  1738. * If the time delta since the last event is too big to
  1739. * hold in the time field of the event, then we append a
  1740. * TIME EXTEND event ahead of the data event.
  1741. */
  1742. if (unlikely(add_timestamp))
  1743. length += RB_LEN_TIME_EXTEND;
  1744. tail_page = cpu_buffer->tail_page;
  1745. write = local_add_return(length, &tail_page->write);
  1746. /* set write to only the index of the write */
  1747. write &= RB_WRITE_MASK;
  1748. tail = write - length;
/* See if we shot past the end of this buffer page */
  1750. if (unlikely(write > BUF_PAGE_SIZE))
  1751. return rb_move_tail(cpu_buffer, length, tail,
  1752. tail_page, ts);
  1753. /* We reserved something on the buffer */
  1754. event = __rb_page_index(tail_page, tail);
  1755. kmemcheck_annotate_bitfield(event, bitfield);
  1756. rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
  1757. local_inc(&tail_page->entries);
  1758. /*
  1759. * If this is the first commit on the page, then update
  1760. * its timestamp.
  1761. */
  1762. if (!tail)
  1763. tail_page->page->time_stamp = ts;
  1764. /* account for these added bytes */
  1765. local_add(length, &cpu_buffer->entries_bytes);
  1766. return event;
  1767. }
  1768. static inline int
  1769. rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
  1770. struct ring_buffer_event *event)
  1771. {
  1772. unsigned long new_index, old_index;
  1773. struct buffer_page *bpage;
  1774. unsigned long index;
  1775. unsigned long addr;
  1776. new_index = rb_event_index(event);
  1777. old_index = new_index + rb_event_ts_length(event);
  1778. addr = (unsigned long)event;
  1779. addr &= PAGE_MASK;
  1780. bpage = cpu_buffer->tail_page;
  1781. if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
  1782. unsigned long write_mask =
  1783. local_read(&bpage->write) & ~RB_WRITE_MASK;
  1784. unsigned long event_length = rb_event_length(event);
  1785. /*
  1786. * This is on the tail page. It is possible that
  1787. * a write could come in and move the tail page
  1788. * and write to the next page. That is fine
  1789. * because we just shorten what is on this page.
  1790. */
  1791. old_index += write_mask;
  1792. new_index += write_mask;
  1793. index = local_cmpxchg(&bpage->write, old_index, new_index);
  1794. if (index == old_index) {
  1795. /* update counters */
  1796. local_sub(event_length, &cpu_buffer->entries_bytes);
  1797. return 1;
  1798. }
  1799. }
  1800. /* could not discard */
  1801. return 0;
  1802. }
  1803. static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
  1804. {
  1805. local_inc(&cpu_buffer->committing);
  1806. local_inc(&cpu_buffer->commits);
  1807. }
  1808. static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
  1809. {
  1810. unsigned long commits;
  1811. if (RB_WARN_ON(cpu_buffer,
  1812. !local_read(&cpu_buffer->committing)))
  1813. return;
  1814. again:
  1815. commits = local_read(&cpu_buffer->commits);
  1816. /* synchronize with interrupts */
  1817. barrier();
  1818. if (local_read(&cpu_buffer->committing) == 1)
  1819. rb_set_commit_to_write(cpu_buffer);
  1820. local_dec(&cpu_buffer->committing);
  1821. /* synchronize with interrupts */
  1822. barrier();
  1823. /*
  1824. * Need to account for interrupts coming in between the
  1825. * updating of the commit page and the clearing of the
  1826. * committing counter.
  1827. */
  1828. if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
  1829. !local_read(&cpu_buffer->committing)) {
  1830. local_inc(&cpu_buffer->committing);
  1831. goto again;
  1832. }
  1833. }
  1834. static struct ring_buffer_event *
  1835. rb_reserve_next_event(struct ring_buffer *buffer,
  1836. struct ring_buffer_per_cpu *cpu_buffer,
  1837. unsigned long length)
  1838. {
  1839. struct ring_buffer_event *event;
  1840. u64 ts, delta;
  1841. int nr_loops = 0;
  1842. int add_timestamp;
  1843. u64 diff;
  1844. rb_start_commit(cpu_buffer);
  1845. #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
  1846. /*
  1847. * Due to the ability to swap a cpu buffer from a buffer
  1848. * it is possible it was swapped before we committed.
  1849. * (committing stops a swap). We check for it here and
  1850. * if it happened, we have to fail the write.
  1851. */
  1852. barrier();
  1853. if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
  1854. local_dec(&cpu_buffer->committing);
  1855. local_dec(&cpu_buffer->commits);
  1856. return NULL;
  1857. }
  1858. #endif
  1859. length = rb_calculate_event_length(length);
  1860. again:
  1861. add_timestamp = 0;
  1862. delta = 0;
  1863. /*
  1864. * We allow for interrupts to reenter here and do a trace.
  1865. * If one does, it will cause this original code to loop
  1866. * back here. Even with heavy interrupts happening, this
  1867. * should only happen a few times in a row. If this happens
  1868. * 1000 times in a row, there must be either an interrupt
  1869. * storm or we have something buggy.
  1870. * Bail!
  1871. */
  1872. if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
  1873. goto out_fail;
  1874. ts = rb_time_stamp(cpu_buffer->buffer);
  1875. diff = ts - cpu_buffer->write_stamp;
  1876. /* make sure this diff is calculated here */
  1877. barrier();
  1878. /* Did the write stamp get updated already? */
  1879. if (likely(ts >= cpu_buffer->write_stamp)) {
  1880. delta = diff;
  1881. if (unlikely(test_time_stamp(delta))) {
  1882. int local_clock_stable = 1;
  1883. #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
  1884. local_clock_stable = sched_clock_stable;
  1885. #endif
  1886. WARN_ONCE(delta > (1ULL << 59),
  1887. KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s",
  1888. (unsigned long long)delta,
  1889. (unsigned long long)ts,
  1890. (unsigned long long)cpu_buffer->write_stamp,
  1891. local_clock_stable ? "" :
  1892. "If you just came from a suspend/resume,\n"
  1893. "please switch to the trace global clock:\n"
  1894. " echo global > /sys/kernel/debug/tracing/trace_clock\n");
  1895. add_timestamp = 1;
  1896. }
  1897. }
  1898. event = __rb_reserve_next(cpu_buffer, length, ts,
  1899. delta, add_timestamp);
  1900. if (unlikely(PTR_ERR(event) == -EAGAIN))
  1901. goto again;
  1902. if (!event)
  1903. goto out_fail;
  1904. return event;
  1905. out_fail:
  1906. rb_end_commit(cpu_buffer);
  1907. return NULL;
  1908. }
  1909. #ifdef CONFIG_TRACING
  1910. #define TRACE_RECURSIVE_DEPTH 16
  1911. /* Keep this code out of the fast path cache */
  1912. static noinline void trace_recursive_fail(void)
  1913. {
  1914. /* Disable all tracing before we do anything else */
  1915. tracing_off_permanent();
  1916. printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
  1917. "HC[%lu]:SC[%lu]:NMI[%lu]\n",
  1918. trace_recursion_buffer(),
  1919. hardirq_count() >> HARDIRQ_SHIFT,
  1920. softirq_count() >> SOFTIRQ_SHIFT,
  1921. in_nmi());
  1922. WARN_ON_ONCE(1);
  1923. }
  1924. static inline int trace_recursive_lock(void)
  1925. {
  1926. trace_recursion_inc();
  1927. if (likely(trace_recursion_buffer() < TRACE_RECURSIVE_DEPTH))
  1928. return 0;
  1929. trace_recursive_fail();
  1930. return -1;
  1931. }
  1932. static inline void trace_recursive_unlock(void)
  1933. {
  1934. WARN_ON_ONCE(!trace_recursion_buffer());
  1935. trace_recursion_dec();
  1936. }
  1937. #else
  1938. #define trace_recursive_lock() (0)
  1939. #define trace_recursive_unlock() do { } while (0)
  1940. #endif
/**
 * ring_buffer_lock_reserve - reserve a part of the buffer
 * @buffer: the ring buffer to reserve from
 * @length: the length of the data to reserve (excluding event header)
 *
 * Returns a reserved event on the ring buffer to copy directly to.
 * The user of this interface will need to get the body to write into
 * and can use the ring_buffer_event_data() interface.
 *
 * The length is the length of the data needed, not the event length
 * which also includes the event header.
 *
 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
 * If NULL is returned, then nothing has been allocated or locked.
 */
  1956. struct ring_buffer_event *
  1957. ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
  1958. {
  1959. struct ring_buffer_per_cpu *cpu_buffer;
  1960. struct ring_buffer_event *event;
  1961. int cpu;
  1962. if (ring_buffer_flags != RB_BUFFERS_ON)
  1963. return NULL;
  1964. /* If we are tracing schedule, we don't want to recurse */
  1965. preempt_disable_notrace();
  1966. if (atomic_read(&buffer->record_disabled))
  1967. goto out_nocheck;
  1968. if (trace_recursive_lock())
  1969. goto out_nocheck;
  1970. cpu = raw_smp_processor_id();
  1971. if (!cpumask_test_cpu(cpu, buffer->cpumask))
  1972. goto out;
  1973. cpu_buffer = buffer->buffers[cpu];
  1974. if (atomic_read(&cpu_buffer->record_disabled))
  1975. goto out;
  1976. if (length > BUF_MAX_DATA_SIZE)
  1977. goto out;
  1978. event = rb_reserve_next_event(buffer, cpu_buffer, length);
  1979. if (!event)
  1980. goto out;
  1981. return event;
  1982. out:
  1983. trace_recursive_unlock();
  1984. out_nocheck:
  1985. preempt_enable_notrace();
  1986. return NULL;
  1987. }
  1988. EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
  1989. static void
  1990. rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
  1991. struct ring_buffer_event *event)
  1992. {
  1993. u64 delta;
  1994. /*
  1995. * The event first in the commit queue updates the
  1996. * time stamp.
  1997. */
  1998. if (rb_event_is_commit(cpu_buffer, event)) {
  1999. /*
  2000. * A commit event that is first on a page
  2001. * updates the write timestamp with the page stamp
  2002. */
  2003. if (!rb_event_index(event))
  2004. cpu_buffer->write_stamp =
  2005. cpu_buffer->commit_page->page->time_stamp;
  2006. else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
  2007. delta = event->array[0];
  2008. delta <<= TS_SHIFT;
  2009. delta += event->time_delta;
  2010. cpu_buffer->write_stamp += delta;
  2011. } else
  2012. cpu_buffer->write_stamp += event->time_delta;
  2013. }
  2014. }
  2015. static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
  2016. struct ring_buffer_event *event)
  2017. {
  2018. local_inc(&cpu_buffer->entries);
  2019. rb_update_write_stamp(cpu_buffer, event);
  2020. rb_end_commit(cpu_buffer);
  2021. }
/**
 * ring_buffer_unlock_commit - commit a reserved event
 * @buffer: The buffer to commit to
 * @event: The event pointer to commit.
 *
 * This commits the data to the ring buffer, and releases any locks held.
 *
 * Must be paired with ring_buffer_lock_reserve.
 */
  2031. int ring_buffer_unlock_commit(struct ring_buffer *buffer,
  2032. struct ring_buffer_event *event)
  2033. {
  2034. struct ring_buffer_per_cpu *cpu_buffer;
  2035. int cpu = raw_smp_processor_id();
  2036. cpu_buffer = buffer->buffers[cpu];
  2037. rb_commit(cpu_buffer, event);
  2038. trace_recursive_unlock();
  2039. preempt_enable_notrace();
  2040. return 0;
  2041. }
  2042. EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
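/*
 * Illustrative sketch, not part of the original file: the reserve/commit
 * pairing described above. The event payload (a single u64) and the helper
 * name are hypothetical.
 */
static int rb_example_record_u64(struct ring_buffer *buffer, u64 val)
{
        struct ring_buffer_event *event;
        u64 *body;

        event = ring_buffer_lock_reserve(buffer, sizeof(*body));
        if (!event)
                return -EBUSY;  /* recording disabled, recursion, or no space */

        body = ring_buffer_event_data(event);
        *body = val;

        /* Every successful reserve must be committed (or discarded). */
        return ring_buffer_unlock_commit(buffer, event);
}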
  2043. static inline void rb_event_discard(struct ring_buffer_event *event)
  2044. {
  2045. if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
  2046. event = skip_time_extend(event);
  2047. /* array[0] holds the actual length for the discarded event */
  2048. event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
  2049. event->type_len = RINGBUF_TYPE_PADDING;
  2050. /* time delta must be non zero */
  2051. if (!event->time_delta)
  2052. event->time_delta = 1;
  2053. }
/*
 * Decrement the entry count of the page that an event is on.
 * The event does not even need to exist, only the pointer
 * to the page it is on. This may only be called before the commit
 * takes place.
 */
  2060. static inline void
  2061. rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
  2062. struct ring_buffer_event *event)
  2063. {
  2064. unsigned long addr = (unsigned long)event;
  2065. struct buffer_page *bpage = cpu_buffer->commit_page;
  2066. struct buffer_page *start;
  2067. addr &= PAGE_MASK;
  2068. /* Do the likely case first */
  2069. if (likely(bpage->page == (void *)addr)) {
  2070. local_dec(&bpage->entries);
  2071. return;
  2072. }
  2073. /*
  2074. * Because the commit page may be on the reader page we
  2075. * start with the next page and check the end loop there.
  2076. */
  2077. rb_inc_page(cpu_buffer, &bpage);
  2078. start = bpage;
  2079. do {
  2080. if (bpage->page == (void *)addr) {
  2081. local_dec(&bpage->entries);
  2082. return;
  2083. }
  2084. rb_inc_page(cpu_buffer, &bpage);
  2085. } while (bpage != start);
  2086. /* commit not part of this buffer?? */
  2087. RB_WARN_ON(cpu_buffer, 1);
  2088. }
/**
 * ring_buffer_discard_commit - discard an event that has not been committed
 * @buffer: the ring buffer
 * @event: non-committed event to discard
 *
 * Sometimes an event that is in the ring buffer needs to be ignored.
 * This function lets the user discard an event in the ring buffer
 * and then that event will not be read later.
 *
 * This function only works if it is called before the item has been
 * committed. It will try to free the event from the ring buffer
 * if another event has not been added behind it.
 *
 * If another event has been added behind it, it will set the event
 * up as discarded, and perform the commit.
 *
 * If this function is called, do not call ring_buffer_unlock_commit on
 * the event.
 */
  2108. void ring_buffer_discard_commit(struct ring_buffer *buffer,
  2109. struct ring_buffer_event *event)
  2110. {
  2111. struct ring_buffer_per_cpu *cpu_buffer;
  2112. int cpu;
  2113. /* The event is discarded regardless */
  2114. rb_event_discard(event);
  2115. cpu = smp_processor_id();
  2116. cpu_buffer = buffer->buffers[cpu];
  2117. /*
  2118. * This must only be called if the event has not been
  2119. * committed yet. Thus we can assume that preemption
  2120. * is still disabled.
  2121. */
  2122. RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
  2123. rb_decrement_entry(cpu_buffer, event);
  2124. if (rb_try_to_discard(cpu_buffer, event))
  2125. goto out;
  2126. /*
  2127. * The commit is still visible by the reader, so we
  2128. * must still update the timestamp.
  2129. */
  2130. rb_update_write_stamp(cpu_buffer, event);
  2131. out:
  2132. rb_end_commit(cpu_buffer);
  2133. trace_recursive_unlock();
  2134. preempt_enable_notrace();
  2135. }
  2136. EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
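/*
 * Illustrative sketch, not part of the original file: discarding a
 * reservation instead of committing it, as described above. The filter
 * condition and helper name are hypothetical; the point is that after
 * ring_buffer_discard_commit() the caller must not also call
 * ring_buffer_unlock_commit().
 */
static int rb_example_filtered_record(struct ring_buffer *buffer, u64 val)
{
        struct ring_buffer_event *event;
        u64 *body;

        event = ring_buffer_lock_reserve(buffer, sizeof(*body));
        if (!event)
                return -EBUSY;

        body = ring_buffer_event_data(event);
        *body = val;

        if (!val) {
                /* The event turned out to be uninteresting: drop it. */
                ring_buffer_discard_commit(buffer, event);
                return 0;
        }

        return ring_buffer_unlock_commit(buffer, event);
}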
  2137. /**
  2138. * ring_buffer_write - write data to the buffer without reserving
  2139. * @buffer: The ring buffer to write to.
  2140. * @length: The length of the data being written (excluding the event header)
  2141. * @data: The data to write to the buffer.
  2142. *
  2143. * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
  2144. * one function. If you already have the data to write to the buffer, it
  2145. * may be easier to simply call this function.
  2146. *
  2147. * Note, like ring_buffer_lock_reserve, the length is the length of the data
  2148. * and not the length of the event which would hold the header.
  2149. */
  2150. int ring_buffer_write(struct ring_buffer *buffer,
  2151. unsigned long length,
  2152. void *data)
  2153. {
  2154. struct ring_buffer_per_cpu *cpu_buffer;
  2155. struct ring_buffer_event *event;
  2156. void *body;
  2157. int ret = -EBUSY;
  2158. int cpu;
  2159. if (ring_buffer_flags != RB_BUFFERS_ON)
  2160. return -EBUSY;
  2161. preempt_disable_notrace();
  2162. if (atomic_read(&buffer->record_disabled))
  2163. goto out;
  2164. cpu = raw_smp_processor_id();
  2165. if (!cpumask_test_cpu(cpu, buffer->cpumask))
  2166. goto out;
  2167. cpu_buffer = buffer->buffers[cpu];
  2168. if (atomic_read(&cpu_buffer->record_disabled))
  2169. goto out;
  2170. if (length > BUF_MAX_DATA_SIZE)
  2171. goto out;
  2172. event = rb_reserve_next_event(buffer, cpu_buffer, length);
  2173. if (!event)
  2174. goto out;
  2175. body = rb_event_data(event);
  2176. memcpy(body, data, length);
  2177. rb_commit(cpu_buffer, event);
  2178. ret = 0;
  2179. out:
  2180. preempt_enable_notrace();
  2181. return ret;
  2182. }
  2183. EXPORT_SYMBOL_GPL(ring_buffer_write);
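/*
 * Illustrative sketch, not part of the original file: the one-shot write
 * path for data that is already laid out in memory, in contrast to the
 * reserve/commit pair above. The sample structure and helper name are
 * hypothetical.
 */
struct rb_example_sample {
        u64     stamp;
        u32     value;
};

static int rb_example_log_sample(struct ring_buffer *buffer,
                                 struct rb_example_sample *sample)
{
        /* Copies sizeof(*sample) bytes into a freshly reserved event. */
        return ring_buffer_write(buffer, sizeof(*sample), sample);
}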
  2184. static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
  2185. {
  2186. struct buffer_page *reader = cpu_buffer->reader_page;
  2187. struct buffer_page *head = rb_set_head_page(cpu_buffer);
  2188. struct buffer_page *commit = cpu_buffer->commit_page;
  2189. /* In case of error, head will be NULL */
  2190. if (unlikely(!head))
  2191. return 1;
  2192. return reader->read == rb_page_commit(reader) &&
  2193. (commit == reader ||
  2194. (commit == head &&
  2195. head->read == rb_page_commit(commit)));
  2196. }
  2197. /**
  2198. * ring_buffer_record_disable - stop all writes into the buffer
  2199. * @buffer: The ring buffer to stop writes to.
  2200. *
  2201. * This prevents all writes to the buffer. Any attempt to write
  2202. * to the buffer after this will fail and return NULL.
  2203. *
  2204. * The caller should call synchronize_sched() after this.
  2205. */
  2206. void ring_buffer_record_disable(struct ring_buffer *buffer)
  2207. {
  2208. atomic_inc(&buffer->record_disabled);
  2209. }
  2210. EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
  2211. /**
  2212. * ring_buffer_record_enable - enable writes to the buffer
  2213. * @buffer: The ring buffer to enable writes
  2214. *
  2215. * Note, multiple disables will need the same number of enables
  2216. * to truly enable the writing (much like preempt_disable).
  2217. */
  2218. void ring_buffer_record_enable(struct ring_buffer *buffer)
  2219. {
  2220. atomic_dec(&buffer->record_disabled);
  2221. }
  2222. EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
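/*
 * Illustrative sketch, not part of the original file: the disable /
 * synchronize / work / enable pattern suggested by the comments above.
 * The snapshot step is a hypothetical placeholder for whatever the caller
 * wants to do while writers are kept out.
 */
static void rb_example_quiesce(struct ring_buffer *buffer)
{
        ring_buffer_record_disable(buffer);
        /* Wait for writers that saw recording enabled to finish. */
        synchronize_sched();

        /* ... inspect or snapshot the buffer here ... */

        /* Disables nest, so every disable needs a matching enable. */
        ring_buffer_record_enable(buffer);
}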
  2223. /**
  2224. * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
  2225. * @buffer: The ring buffer to stop writes to.
  2226. * @cpu: The CPU buffer to stop
  2227. *
  2228. * This prevents all writes to the buffer. Any attempt to write
  2229. * to the buffer after this will fail and return NULL.
  2230. *
  2231. * The caller should call synchronize_sched() after this.
  2232. */
  2233. void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
  2234. {
  2235. struct ring_buffer_per_cpu *cpu_buffer;
  2236. if (!cpumask_test_cpu(cpu, buffer->cpumask))
  2237. return;
  2238. cpu_buffer = buffer->buffers[cpu];
  2239. atomic_inc(&cpu_buffer->record_disabled);
  2240. }
  2241. EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
  2242. /**
  2243. * ring_buffer_record_enable_cpu - enable writes to the buffer
  2244. * @buffer: The ring buffer to enable writes
  2245. * @cpu: The CPU to enable.
  2246. *
  2247. * Note, multiple disables will need the same number of enables
  2248. * to truly enable the writing (much like preempt_disable).
  2249. */
  2250. void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
  2251. {
  2252. struct ring_buffer_per_cpu *cpu_buffer;
  2253. if (!cpumask_test_cpu(cpu, buffer->cpumask))
  2254. return;
  2255. cpu_buffer = buffer->buffers[cpu];
  2256. atomic_dec(&cpu_buffer->record_disabled);
  2257. }
  2258. EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
/*
 * The total number of entries in the ring buffer is the running
 * count of entries written into the ring buffer, minus the sum of
 * the entries read from the ring buffer and the number of
 * entries that were overwritten.
 */
  2265. static inline unsigned long
  2266. rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
  2267. {
  2268. return local_read(&cpu_buffer->entries) -
  2269. (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
  2270. }
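/*
 * Illustrative worked example, not part of the original file: if a CPU
 * buffer has had 1000 events written (entries), 200 consumed by readers
 * (read) and 50 pushed out by overwrite (overrun), rb_num_of_entries()
 * reports 1000 - (50 + 200) = 750 events still waiting in the buffer.
 */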
  2271. /**
  2272. * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
  2273. * @buffer: The ring buffer
  2274. * @cpu: The per CPU buffer to read from.
  2275. */
  2276. unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
  2277. {
  2278. unsigned long flags;
  2279. struct ring_buffer_per_cpu *cpu_buffer;
  2280. struct buffer_page *bpage;
  2281. unsigned long ret;
  2282. if (!cpumask_test_cpu(cpu, buffer->cpumask))
  2283. return 0;
  2284. cpu_buffer = buffer->buffers[cpu];
  2285. raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
/*
 * If the tail is on the reader_page, the oldest time stamp is on the
 * reader page.
 */
  2290. if (cpu_buffer->tail_page == cpu_buffer->reader_page)
  2291. bpage = cpu_buffer->reader_page;
  2292. else
  2293. bpage = rb_set_head_page(cpu_buffer);
  2294. ret = bpage->page->time_stamp;
  2295. raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
  2296. return ret;
  2297. }
  2298. EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
  2299. /**
  2300. * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
  2301. * @buffer: The ring buffer
  2302. * @cpu: The per CPU buffer to read from.
  2303. */
  2304. unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu)
  2305. {
  2306. struct ring_buffer_per_cpu *cpu_buffer;
  2307. unsigned long ret;
  2308. if (!cpumask_test_cpu(cpu, buffer->cpumask))
  2309. return 0;
  2310. cpu_buffer = buffer->buffers[cpu];
  2311. ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
  2312. return ret;
  2313. }
  2314. EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
  2315. /**
  2316. * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
  2317. * @buffer: The ring buffer
  2318. * @cpu: The per CPU buffer to get the entries from.
  2319. */
  2320. unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
  2321. {
  2322. struct ring_buffer_per_cpu *cpu_buffer;
  2323. if (!cpumask_test_cpu(cpu, buffer->cpumask))
  2324. return 0;
  2325. cpu_buffer = buffer->buffers[cpu];
  2326. return rb_num_of_entries(cpu_buffer);
  2327. }
  2328. EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
  2329. /**
  2330. * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
  2331. * @buffer: The ring buffer
  2332. * @cpu: The per CPU buffer to get the number of overruns from
  2333. */
  2334. unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
  2335. {
  2336. struct ring_buffer_per_cpu *cpu_buffer;
  2337. unsigned long ret;
  2338. if (!cpumask_test_cpu(cpu, buffer->cpumask))
  2339. return 0;
  2340. cpu_buffer = buffer->buffers[cpu];
  2341. ret = local_read(&cpu_buffer->overrun);
  2342. return ret;
  2343. }
  2344. EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
  2345. /**
  2346. * ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits
  2347. * @buffer: The ring buffer
  2348. * @cpu: The per CPU buffer to get the number of overruns from
  2349. */
  2350. unsigned long
  2351. ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
  2352. {
  2353. struct ring_buffer_per_cpu *cpu_buffer;
  2354. unsigned long ret;
  2355. if (!cpumask_test_cpu(cpu, buffer->cpumask))
  2356. return 0;
  2357. cpu_buffer = buffer->buffers[cpu];
  2358. ret = local_read(&cpu_buffer->commit_overrun);
  2359. return ret;
  2360. }
  2361. EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
  2362. /**
  2363. * ring_buffer_entries - get the number of entries in a buffer
  2364. * @buffer: The ring buffer
  2365. *
  2366. * Returns the total number of entries in the ring buffer
  2367. * (all CPU entries)
  2368. */
  2369. unsigned long ring_buffer_entries(struct ring_buffer *buffer)
  2370. {
  2371. struct ring_buffer_per_cpu *cpu_buffer;
  2372. unsigned long entries = 0;
  2373. int cpu;
  2374. /* if you care about this being correct, lock the buffer */
  2375. for_each_buffer_cpu(buffer, cpu) {
  2376. cpu_buffer = buffer->buffers[cpu];
  2377. entries += rb_num_of_entries(cpu_buffer);
  2378. }
  2379. return entries;
  2380. }
  2381. EXPORT_SYMBOL_GPL(ring_buffer_entries);
  2382. /**
  2383. * ring_buffer_overruns - get the number of overruns in buffer
  2384. * @buffer: The ring buffer
  2385. *
  2386. * Returns the total number of overruns in the ring buffer
  2387. * (all CPU entries)
  2388. */
  2389. unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
  2390. {
  2391. struct ring_buffer_per_cpu *cpu_buffer;
  2392. unsigned long overruns = 0;
  2393. int cpu;
  2394. /* if you care about this being correct, lock the buffer */
  2395. for_each_buffer_cpu(buffer, cpu) {
  2396. cpu_buffer = buffer->buffers[cpu];
  2397. overruns += local_read(&cpu_buffer->overrun);
  2398. }
  2399. return overruns;
  2400. }
  2401. EXPORT_SYMBOL_GPL(ring_buffer_overruns);
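/*
 * Illustrative sketch, not part of the original file: gathering the per-CPU
 * statistics exported above into a simple report. The pr_info() format is
 * hypothetical, and as the comments note, the counts are only a consistent
 * snapshot if the caller serializes against writers.
 */
static void rb_example_dump_stats(struct ring_buffer *buffer)
{
        int cpu;

        for_each_online_cpu(cpu) {
                pr_info("cpu%d: %lu entries, %lu overruns, %lu bytes\n",
                        cpu,
                        ring_buffer_entries_cpu(buffer, cpu),
                        ring_buffer_overrun_cpu(buffer, cpu),
                        ring_buffer_bytes_cpu(buffer, cpu));
        }
}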
  2402. static void rb_iter_reset(struct ring_buffer_iter *iter)
  2403. {
  2404. struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
  2405. /* Iterator usage is expected to have record disabled */
  2406. if (list_empty(&cpu_buffer->reader_page->list)) {
  2407. iter->head_page = rb_set_head_page(cpu_buffer);
  2408. if (unlikely(!iter->head_page))
  2409. return;
  2410. iter->head = iter->head_page->read;
  2411. } else {
  2412. iter->head_page = cpu_buffer->reader_page;
  2413. iter->head = cpu_buffer->reader_page->read;
  2414. }
  2415. if (iter->head)
  2416. iter->read_stamp = cpu_buffer->read_stamp;
  2417. else
  2418. iter->read_stamp = iter->head_page->page->time_stamp;
  2419. iter->cache_reader_page = cpu_buffer->reader_page;
  2420. iter->cache_read = cpu_buffer->read;
  2421. }
  2422. /**
  2423. * ring_buffer_iter_reset - reset an iterator
  2424. * @iter: The iterator to reset
  2425. *
  2426. * Resets the iterator, so that it will start from the beginning
  2427. * again.
  2428. */
  2429. void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
  2430. {
  2431. struct ring_buffer_per_cpu *cpu_buffer;
  2432. unsigned long flags;
  2433. if (!iter)
  2434. return;
  2435. cpu_buffer = iter->cpu_buffer;
  2436. raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
  2437. rb_iter_reset(iter);
  2438. raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
  2439. }
  2440. EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
  2441. /**
  2442. * ring_buffer_iter_empty - check if an iterator has no more to read
  2443. * @iter: The iterator to check
  2444. */
  2445. int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
  2446. {
  2447. struct ring_buffer_per_cpu *cpu_buffer;
  2448. cpu_buffer = iter->cpu_buffer;
  2449. return iter->head_page == cpu_buffer->commit_page &&
  2450. iter->head == rb_commit_index(cpu_buffer);
  2451. }
  2452. EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
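/*
 * Illustrative sketch, not part of the original file: the iterator above
 * supports non-consuming reads; a consuming read, by contrast, pulls events
 * off the buffer one at a time. This minimal loop assumes the
 * ring_buffer_consume() prototype declared in <linux/ring_buffer.h> for
 * this kernel; the helper name is hypothetical.
 */
static void rb_example_drain_cpu(struct ring_buffer *buffer, int cpu)
{
        struct ring_buffer_event *event;
        unsigned long lost;
        u64 ts;

        while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost)) != NULL) {
                /* ring_buffer_event_data(event) is valid until the next consume */
                ;
        }
}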
  2453. static void
  2454. rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
  2455. struct ring_buffer_event *event)
  2456. {
  2457. u64 delta;
  2458. switch (event->type_len) {
  2459. case RINGBUF_TYPE_PADDING:
  2460. return;
  2461. case RINGBUF_TYPE_TIME_EXTEND:
  2462. delta = event->array[0];
  2463. delta <<= TS_SHIFT;
  2464. delta += event->time_delta;
  2465. cpu_buffer->read_stamp += delta;
  2466. return;
  2467. case RINGBUF_TYPE_TIME_STAMP:
  2468. /* FIXME: not implemented */
  2469. return;
  2470. case RINGBUF_TYPE_DATA:
  2471. cpu_buffer->read_stamp += event->time_delta;
  2472. return;
  2473. default:
  2474. BUG();
  2475. }
  2476. return;
  2477. }
  2478. static void
  2479. rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
  2480. struct ring_buffer_event *event)
  2481. {
  2482. u64 delta;
  2483. switch (event->type_len) {
  2484. case RINGBUF_TYPE_PADDING:
  2485. return;
  2486. case RINGBUF_TYPE_TIME_EXTEND:
  2487. delta = event->array[0];
  2488. delta <<= TS_SHIFT;
  2489. delta += event->time_delta;
  2490. iter->read_stamp += delta;
  2491. return;
  2492. case RINGBUF_TYPE_TIME_STAMP:
  2493. /* FIXME: not implemented */
  2494. return;
  2495. case RINGBUF_TYPE_DATA:
  2496. iter->read_stamp += event->time_delta;
  2497. return;
  2498. default:
  2499. BUG();
  2500. }
  2501. return;
  2502. }
  2503. static struct buffer_page *
  2504. rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
  2505. {
  2506. struct buffer_page *reader = NULL;
  2507. unsigned long overwrite;
  2508. unsigned long flags;
  2509. int nr_loops = 0;
  2510. int ret;
  2511. local_irq_save(flags);
  2512. arch_spin_lock(&cpu_buffer->lock);
  2513. again:
  2514. /*
  2515. * This should normally only loop twice. But because the
  2516. * start of the reader inserts an empty page, it causes
  2517. * a case where we will loop three times. There should be no
  2518. * reason to loop four times (that I know of).
  2519. */
  2520. if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
  2521. reader = NULL;
  2522. goto out;
  2523. }
  2524. reader = cpu_buffer->reader_page;
  2525. /* If there's more to read, return this page */
  2526. if (cpu_buffer->reader_page->read < rb_page_size(reader))
  2527. goto out;
  2528. /* Never should we have an index greater than the size */
  2529. if (RB_WARN_ON(cpu_buffer,
  2530. cpu_buffer->reader_page->read > rb_page_size(reader)))
  2531. goto out;
  2532. /* check if we caught up to the tail */
  2533. reader = NULL;
  2534. if (cpu_buffer->commit_page == cpu_buffer->reader_page)
  2535. goto out;
  2536. /*
  2537. * Reset the reader page to size zero.
  2538. */
  2539. local_set(&cpu_buffer->reader_page->write, 0);
  2540. local_set(&cpu_buffer->reader_page->entries, 0);
  2541. local_set(&cpu_buffer->reader_page->page->commit, 0);
  2542. cpu_buffer->reader_page->real_end = 0;
  2543. spin:
  2544. /*
  2545. * Splice the empty reader page into the list around the head.
  2546. */
  2547. reader = rb_set_head_page(cpu_buffer);
  2548. cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
  2549. cpu_buffer->reader_page->list.prev = reader->list.prev;
/*
 * cpu_buffer->pages just needs to point to the buffer; it
 * has no specific buffer page to point to. Let's move it out
 * of our way so we don't accidentally swap it.
 */
  2555. cpu_buffer->pages = reader->list.prev;
  2556. /* The reader page will be pointing to the new head */
  2557. rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
  2558. /*
  2559. * We want to make sure we read the overruns after we set up our
  2560. * pointers to the next object. The writer side does a
  2561. * cmpxchg to cross pages which acts as the mb on the writer
  2562. * side. Note, the reader will constantly fail the swap
  2563. * while the writer is updating the pointers, so this
  2564. * guarantees that the overwrite recorded here is the one we
  2565. * want to compare with the last_overrun.
  2566. */
  2567. smp_mb();
  2568. overwrite = local_read(&(cpu_buffer->overrun));
/*
 * Here's the tricky part.
 *
 * We need to move the pointer past the header page.
 * But we can only do that if a writer is not currently
 * moving it. The page before the header page has the
 * flag bit '1' set if it is pointing to the page we want,
 * but if the writer is in the process of moving it
 * then it will be '2', or '0' if it has already moved.
 */
  2579. ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
  2580. /*
  2581. * If we did not convert it, then we must try again.
  2582. */
  2583. if (!ret)
  2584. goto spin;
  2585. /*
  2586. * Yeah! We succeeded in replacing the page.
  2587. *
  2588. * Now make the new head point back to the reader page.
  2589. */
  2590. rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
  2591. rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
  2592. /* Finally update the reader page to the new head */
  2593. cpu_buffer->reader_page = reader;
  2594. rb_reset_reader_page(cpu_buffer);
  2595. if (overwrite != cpu_buffer->last_overrun) {
  2596. cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
  2597. cpu_buffer->last_overrun = overwrite;
  2598. }
  2599. goto again;
  2600. out:
  2601. arch_spin_unlock(&cpu_buffer->lock);
  2602. local_irq_restore(flags);
  2603. return reader;
  2604. }
  2605. static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
  2606. {
  2607. struct ring_buffer_event *event;
  2608. struct buffer_page *reader;
  2609. unsigned length;
  2610. reader = rb_get_reader_page(cpu_buffer);
  2611. /* This function should not be called when buffer is empty */
  2612. if (RB_WARN_ON(cpu_buffer, !reader))
  2613. return;
  2614. event = rb_reader_event(cpu_buffer);
  2615. if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
  2616. cpu_buffer->read++;
  2617. rb_update_read_stamp(cpu_buffer, event);
  2618. length = rb_event_length(event);
  2619. cpu_buffer->reader_page->read += length;
  2620. }
  2621. static void rb_advance_iter(struct ring_buffer_iter *iter)
  2622. {
  2623. struct ring_buffer_per_cpu *cpu_buffer;
  2624. struct ring_buffer_event *event;
  2625. unsigned length;
  2626. cpu_buffer = iter->cpu_buffer;
  2627. /*
  2628. * Check if we are at the end of the buffer.
  2629. */
  2630. if (iter->head >= rb_page_size(iter->head_page)) {
  2631. /* discarded commits can make the page empty */
  2632. if (iter->head_page == cpu_buffer->commit_page)
  2633. return;
  2634. rb_inc_iter(iter);
  2635. return;
  2636. }
  2637. event = rb_iter_head_event(iter);
  2638. length = rb_event_length(event);
  2639. /*
  2640. * This should not be called to advance the header if we are
  2641. * at the tail of the buffer.
  2642. */
  2643. if (RB_WARN_ON(cpu_buffer,
  2644. (iter->head_page == cpu_buffer->commit_page) &&
  2645. (iter->head + length > rb_commit_index(cpu_buffer))))
  2646. return;
  2647. rb_update_iter_read_stamp(iter, event);
  2648. iter->head += length;
  2649. /* check for end of page padding */
  2650. if ((iter->head >= rb_page_size(iter->head_page)) &&
  2651. (iter->head_page != cpu_buffer->commit_page))
  2652. rb_advance_iter(iter);
  2653. }
static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
{
	return cpu_buffer->lost_events;
}

static struct ring_buffer_event *
rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
	       unsigned long *lost_events)
{
	struct ring_buffer_event *event;
	struct buffer_page *reader;
	int nr_loops = 0;

 again:
	/*
	 * We repeat when a time extend is encountered.
	 * Since the time extend is always attached to a data event,
	 * we should never loop more than once.
	 * (We never hit the following condition more than twice).
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
		return NULL;

	reader = rb_get_reader_page(cpu_buffer);
	if (!reader)
		return NULL;

	event = rb_reader_event(cpu_buffer);

	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			RB_WARN_ON(cpu_buffer, 1);
		/*
		 * Because the writer could be discarding every
		 * event it creates (which would probably be bad)
		 * if we were to go back to "again" then we may never
		 * catch up, and will trigger the warn on, or lock
		 * the box. Return the padding, and we will release
		 * the current locks, and try again.
		 */
		return event;

	case RINGBUF_TYPE_TIME_EXTEND:
		/* Internal data, OK to advance */
		rb_advance_reader(cpu_buffer);
		goto again;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		rb_advance_reader(cpu_buffer);
		goto again;

	case RINGBUF_TYPE_DATA:
		if (ts) {
			*ts = cpu_buffer->read_stamp + event->time_delta;
			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
							 cpu_buffer->cpu, ts);
		}
		if (lost_events)
			*lost_events = rb_lost_events(cpu_buffer);
		return event;

	default:
		BUG();
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_peek);

static struct ring_buffer_event *
rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer *buffer;
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	int nr_loops = 0;

	cpu_buffer = iter->cpu_buffer;
	buffer = cpu_buffer->buffer;

	/*
	 * Check if someone performed a consuming read to
	 * the buffer. A consuming read invalidates the iterator
	 * and we need to reset the iterator in this case.
	 */
	if (unlikely(iter->cache_read != cpu_buffer->read ||
		     iter->cache_reader_page != cpu_buffer->reader_page))
		rb_iter_reset(iter);

 again:
	if (ring_buffer_iter_empty(iter))
		return NULL;

	/*
	 * We repeat when a time extend is encountered.
	 * Since the time extend is always attached to a data event,
	 * we should never loop more than once.
	 * (We never hit the following condition more than twice).
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
		return NULL;

	if (rb_per_cpu_empty(cpu_buffer))
		return NULL;

	if (iter->head >= local_read(&iter->head_page->page->commit)) {
		rb_inc_iter(iter);
		goto again;
	}

	event = rb_iter_head_event(iter);

	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event)) {
			rb_inc_iter(iter);
			goto again;
		}
		rb_advance_iter(iter);
		return event;

	case RINGBUF_TYPE_TIME_EXTEND:
		/* Internal data, OK to advance */
		rb_advance_iter(iter);
		goto again;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		rb_advance_iter(iter);
		goto again;

	case RINGBUF_TYPE_DATA:
		if (ts) {
			*ts = iter->read_stamp + event->time_delta;
			ring_buffer_normalize_time_stamp(buffer,
							 cpu_buffer->cpu, ts);
		}
		return event;

	default:
		BUG();
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);

static inline int rb_ok_to_lock(void)
{
	/*
	 * If an NMI die dumps out the content of the ring buffer
	 * do not grab locks. We also permanently disable the ring
	 * buffer too. A one time deal is all you get from reading
	 * the ring buffer from an NMI.
	 */
	if (likely(!in_nmi()))
		return 1;

	tracing_off_permanent();
	return 0;
}

/**
 * ring_buffer_peek - peek at the next event to be read
 * @buffer: The ring buffer to read
 * @cpu: The cpu to peek at
 * @ts: The timestamp counter of this event.
 * @lost_events: a variable to store if events were lost (may be NULL)
 *
 * This will return the event that will be read next, but does
 * not consume the data.
 */
struct ring_buffer_event *
ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
		 unsigned long *lost_events)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	struct ring_buffer_event *event;
	unsigned long flags;
	int dolock;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return NULL;

	dolock = rb_ok_to_lock();
 again:
	local_irq_save(flags);
	if (dolock)
		raw_spin_lock(&cpu_buffer->reader_lock);
	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
	if (event && event->type_len == RINGBUF_TYPE_PADDING)
		rb_advance_reader(cpu_buffer);
	if (dolock)
		raw_spin_unlock(&cpu_buffer->reader_lock);
	local_irq_restore(flags);

	if (event && event->type_len == RINGBUF_TYPE_PADDING)
		goto again;

	return event;
}

/**
 * ring_buffer_iter_peek - peek at the next event to be read
 * @iter: The ring buffer iterator
 * @ts: The timestamp counter of this event.
 *
 * This will return the event that will be read next, but does
 * not increment the iterator.
 */
struct ring_buffer_event *
ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
	struct ring_buffer_event *event;
	unsigned long flags;

 again:
	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	event = rb_iter_peek(iter, ts);
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	if (event && event->type_len == RINGBUF_TYPE_PADDING)
		goto again;

	return event;
}

/**
 * ring_buffer_consume - return an event and consume it
 * @buffer: The ring buffer to get the next event from
 * @cpu: the cpu to read the buffer from
 * @ts: a variable to store the timestamp (may be NULL)
 * @lost_events: a variable to store if events were lost (may be NULL)
 *
 * Returns the next event in the ring buffer, and that event is consumed.
 * Meaning, that sequential reads will keep returning a different event,
 * and eventually empty the ring buffer if the producer is slower.
 */
struct ring_buffer_event *
ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
		    unsigned long *lost_events)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event = NULL;
	unsigned long flags;
	int dolock;

	dolock = rb_ok_to_lock();

 again:
	/* might be called in atomic */
	preempt_disable();

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];
	local_irq_save(flags);
	if (dolock)
		raw_spin_lock(&cpu_buffer->reader_lock);

	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
	if (event) {
		cpu_buffer->lost_events = 0;
		rb_advance_reader(cpu_buffer);
	}

	if (dolock)
		raw_spin_unlock(&cpu_buffer->reader_lock);
	local_irq_restore(flags);

 out:
	preempt_enable();

	if (event && event->type_len == RINGBUF_TYPE_PADDING)
		goto again;

	return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_consume);

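/*
 * Example (illustrative sketch, not part of this file's API): a minimal
 * consuming read loop draining one CPU buffer. process_event() is a
 * hypothetical caller-side helper; wakeups and error handling are left
 * to the caller, as noted in the kernel-doc above.
 *
 *	struct ring_buffer_event *event;
 *	unsigned long lost;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost)))
 *		process_event(ring_buffer_event_data(event), ts, lost);
 */
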
/**
 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
 * @buffer: The ring buffer to read from
 * @cpu: The cpu buffer to iterate over
 *
 * This performs the initial preparations necessary to iterate
 * through the buffer. Memory is allocated, buffer recording
 * is disabled, and the iterator pointer is returned to the caller.
 *
 * Disabling buffer recording prevents the reading from being
 * corrupted. This is not a consuming read, so a producer is not
 * expected.
 *
 * After a sequence of ring_buffer_read_prepare calls, the user is
 * expected to make at least one call to ring_buffer_read_prepare_sync.
 * Afterwards, ring_buffer_read_start is invoked to get things going
 * for real.
 *
 * This overall must be paired with ring_buffer_read_finish.
 */
struct ring_buffer_iter *
ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_iter *iter;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return NULL;

	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	cpu_buffer = buffer->buffers[cpu];

	iter->cpu_buffer = cpu_buffer;

	atomic_inc(&cpu_buffer->record_disabled);

	return iter;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);

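/*
 * Example (illustrative sketch): the full non-consuming read sequence
 * described above, for a single CPU. The remaining functions used here
 * (ring_buffer_read_prepare_sync, ring_buffer_read_start, ring_buffer_read
 * and ring_buffer_read_finish) are defined below; handle() stands in for
 * whatever the caller does with each event and is not a real function.
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_prepare(buffer, cpu);
 *	if (!iter)
 *		return -ENOMEM;
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *	while ((event = ring_buffer_read(iter, &ts)))
 *		handle(ring_buffer_event_data(event), ts);
 *	ring_buffer_read_finish(iter);
 */
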
/**
 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
 *
 * All previously invoked ring_buffer_read_prepare calls to prepare
 * iterators will be synchronized. Afterwards, ring_buffer_read_start
 * calls on those iterators are allowed.
 */
void
ring_buffer_read_prepare_sync(void)
{
	synchronize_sched();
}
EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);

/**
 * ring_buffer_read_start - start a non consuming read of the buffer
 * @iter: The iterator returned by ring_buffer_read_prepare
 *
 * This finalizes the startup of an iteration through the buffer.
 * The iterator comes from a call to ring_buffer_read_prepare and
 * an intervening ring_buffer_read_prepare_sync must have been
 * performed.
 *
 * Must be paired with ring_buffer_read_finish.
 */
void
ring_buffer_read_start(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;

	if (!iter)
		return;

	cpu_buffer = iter->cpu_buffer;

	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	arch_spin_lock(&cpu_buffer->lock);
	rb_iter_reset(iter);
	arch_spin_unlock(&cpu_buffer->lock);
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
EXPORT_SYMBOL_GPL(ring_buffer_read_start);

/**
 * ring_buffer_read_finish - finish reading the iterator of the buffer
 * @iter: The iterator retrieved by ring_buffer_read_prepare
 *
 * This re-enables the recording to the buffer, and frees the
 * iterator.
 */
void
ring_buffer_read_finish(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	atomic_dec(&cpu_buffer->record_disabled);
	kfree(iter);
}
EXPORT_SYMBOL_GPL(ring_buffer_read_finish);

/**
 * ring_buffer_read - read the next item in the ring buffer by the iterator
 * @iter: The ring buffer iterator
 * @ts: The time stamp of the event read.
 *
 * This reads the next event in the ring buffer and increments the iterator.
 */
struct ring_buffer_event *
ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer_event *event;
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
	unsigned long flags;

	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 again:
	event = rb_iter_peek(iter, ts);
	if (!event)
		goto out;

	if (event->type_len == RINGBUF_TYPE_PADDING)
		goto again;

	rb_advance_iter(iter);
 out:
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_read);

/**
 * ring_buffer_size - return the size of the ring buffer (in bytes)
 * @buffer: The ring buffer.
 */
unsigned long ring_buffer_size(struct ring_buffer *buffer)
{
	return BUF_PAGE_SIZE * buffer->pages;
}
EXPORT_SYMBOL_GPL(ring_buffer_size);

static void
rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
{
	rb_head_page_deactivate(cpu_buffer);

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages, struct buffer_page, list);
	local_set(&cpu_buffer->head_page->write, 0);
	local_set(&cpu_buffer->head_page->entries, 0);
	local_set(&cpu_buffer->head_page->page->commit, 0);

	cpu_buffer->head_page->read = 0;

	cpu_buffer->tail_page = cpu_buffer->head_page;
	cpu_buffer->commit_page = cpu_buffer->head_page;

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
	local_set(&cpu_buffer->reader_page->write, 0);
	local_set(&cpu_buffer->reader_page->entries, 0);
	local_set(&cpu_buffer->reader_page->page->commit, 0);
	cpu_buffer->reader_page->read = 0;

	local_set(&cpu_buffer->commit_overrun, 0);
	local_set(&cpu_buffer->entries_bytes, 0);
	local_set(&cpu_buffer->overrun, 0);
	local_set(&cpu_buffer->entries, 0);
	local_set(&cpu_buffer->committing, 0);
	local_set(&cpu_buffer->commits, 0);
	cpu_buffer->read = 0;
	cpu_buffer->read_bytes = 0;

	cpu_buffer->write_stamp = 0;
	cpu_buffer->read_stamp = 0;

	cpu_buffer->lost_events = 0;
	cpu_buffer->last_overrun = 0;

	rb_head_page_activate(cpu_buffer);
}

/**
 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
 * @buffer: The ring buffer to reset a per cpu buffer of
 * @cpu: The CPU buffer to be reset
 */
void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	unsigned long flags;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	atomic_inc(&cpu_buffer->record_disabled);

	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
		goto out;

	arch_spin_lock(&cpu_buffer->lock);

	rb_reset_cpu(cpu_buffer);

	arch_spin_unlock(&cpu_buffer->lock);

 out:
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	atomic_dec(&cpu_buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);

/**
 * ring_buffer_reset - reset a ring buffer
 * @buffer: The ring buffer to reset all cpu buffers
 */
void ring_buffer_reset(struct ring_buffer *buffer)
{
	int cpu;

	for_each_buffer_cpu(buffer, cpu)
		ring_buffer_reset_cpu(buffer, cpu);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset);

/**
 * ring_buffer_empty - is the ring buffer empty?
 * @buffer: The ring buffer to test
 */
int ring_buffer_empty(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;
	int dolock;
	int cpu;
	int ret;

	dolock = rb_ok_to_lock();

	/* yes this is racy, but if you don't like the race, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		local_irq_save(flags);
		if (dolock)
			raw_spin_lock(&cpu_buffer->reader_lock);
		ret = rb_per_cpu_empty(cpu_buffer);
		if (dolock)
			raw_spin_unlock(&cpu_buffer->reader_lock);
		local_irq_restore(flags);

		if (!ret)
			return 0;
	}

	return 1;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty);

/**
 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
 * @buffer: The ring buffer
 * @cpu: The CPU buffer to test
 */
int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;
	int dolock;
	int ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 1;

	dolock = rb_ok_to_lock();

	cpu_buffer = buffer->buffers[cpu];
	local_irq_save(flags);
	if (dolock)
		raw_spin_lock(&cpu_buffer->reader_lock);
	ret = rb_per_cpu_empty(cpu_buffer);
	if (dolock)
		raw_spin_unlock(&cpu_buffer->reader_lock);
	local_irq_restore(flags);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);

#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
/**
 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
 * @buffer_a: One buffer to swap with
 * @buffer_b: The other buffer to swap with
 * @cpu: the CPU of the buffers to swap
 *
 * This function is useful for tracers that want to take a "snapshot"
 * of a CPU buffer and have another backup buffer lying around.
 * It is expected that the tracer handles the cpu buffer not being
 * used at the moment.
 */
int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
			 struct ring_buffer *buffer_b, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer_a;
	struct ring_buffer_per_cpu *cpu_buffer_b;
	int ret = -EINVAL;

	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
		goto out;

	/* At least make sure the two buffers are somewhat the same */
	if (buffer_a->pages != buffer_b->pages)
		goto out;

	ret = -EAGAIN;

	if (ring_buffer_flags != RB_BUFFERS_ON)
		goto out;

	if (atomic_read(&buffer_a->record_disabled))
		goto out;

	if (atomic_read(&buffer_b->record_disabled))
		goto out;

	cpu_buffer_a = buffer_a->buffers[cpu];
	cpu_buffer_b = buffer_b->buffers[cpu];

	if (atomic_read(&cpu_buffer_a->record_disabled))
		goto out;

	if (atomic_read(&cpu_buffer_b->record_disabled))
		goto out;

	/*
	 * We can't do a synchronize_sched here because this
	 * function can be called in atomic context.
	 * Normally this will be called from the same CPU as cpu.
	 * If not it's up to the caller to protect this.
	 */
	atomic_inc(&cpu_buffer_a->record_disabled);
	atomic_inc(&cpu_buffer_b->record_disabled);

	ret = -EBUSY;
	if (local_read(&cpu_buffer_a->committing))
		goto out_dec;
	if (local_read(&cpu_buffer_b->committing))
		goto out_dec;

	buffer_a->buffers[cpu] = cpu_buffer_b;
	buffer_b->buffers[cpu] = cpu_buffer_a;

	cpu_buffer_b->buffer = buffer_a;
	cpu_buffer_a->buffer = buffer_b;

	ret = 0;

 out_dec:
	atomic_dec(&cpu_buffer_a->record_disabled);
	atomic_dec(&cpu_buffer_b->record_disabled);
 out:
	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
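
/*
 * Example (illustrative sketch): taking a "snapshot" of one CPU by swapping
 * its buffer with the same CPU's buffer of a spare ring buffer of equal
 * size. snapshot_buffer and live_buffer are assumed to be caller-owned
 * ring buffers created with the same number of pages.
 *
 *	int err;
 *
 *	err = ring_buffer_swap_cpu(snapshot_buffer, live_buffer, cpu);
 *	if (err)
 *		return err;
 *
 * On success, the snapshot buffer's per-CPU buffer holds what was the live
 * data and can be read with ring_buffer_consume() or the iterator API.
 */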
#endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */

/**
 * ring_buffer_alloc_read_page - allocate a page to read from buffer
 * @buffer: the buffer to allocate for.
 * @cpu: the cpu buffer to allocate for.
 *
 * This function is used in conjunction with ring_buffer_read_page.
 * When reading a full page from the ring buffer, these functions
 * can be used to speed up the process. The calling function should
 * allocate a few pages first with this function. Then when it
 * needs to get pages from the ring buffer, it passes the result
 * of this function into ring_buffer_read_page, which will swap
 * the page that was allocated, with the read page of the buffer.
 *
 * Returns:
 *  The page allocated, or NULL on error.
 */
void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
{
	struct buffer_data_page *bpage;
	struct page *page;

	page = alloc_pages_node(cpu_to_node(cpu),
				GFP_KERNEL | __GFP_NORETRY, 0);
	if (!page)
		return NULL;

	bpage = page_address(page);

	rb_init_page(bpage);

	return bpage;
}
EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);

/**
 * ring_buffer_free_read_page - free an allocated read page
 * @buffer: the buffer the page was allocated for
 * @data: the page to free
 *
 * Free a page allocated from ring_buffer_alloc_read_page.
 */
void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
{
	free_page((unsigned long)data);
}
EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);

/**
 * ring_buffer_read_page - extract a page from the ring buffer
 * @buffer: buffer to extract from
 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
 * @len: amount to extract
 * @cpu: the cpu of the buffer to extract
 * @full: should the extraction only happen when the page is full.
 *
 * This function will pull out a page from the ring buffer and consume it.
 * @data_page must be the address of the variable that was returned
 * from ring_buffer_alloc_read_page. This is because the page might be used
 * to swap with a page in the ring buffer.
 *
 * for example:
 *	rpage = ring_buffer_alloc_read_page(buffer, cpu);
 *	if (!rpage)
 *		return error;
 *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
 *	if (ret >= 0)
 *		process_page(rpage, ret);
 *
 * When @full is set, the function will not return the data unless
 * the writer is off the reader page.
 *
 * Note: it is up to the calling functions to handle sleeps and wakeups.
 *  The ring buffer can be used anywhere in the kernel and can not
 *  blindly call wake_up. The layer that uses the ring buffer must be
 *  responsible for that.
 *
 * Returns:
 *  >=0 if data has been transferred, returns the offset of consumed data.
 *  <0 if no data has been transferred.
 */
int ring_buffer_read_page(struct ring_buffer *buffer,
			  void **data_page, size_t len, int cpu, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	struct ring_buffer_event *event;
	struct buffer_data_page *bpage;
	struct buffer_page *reader;
	unsigned long missed_events;
	unsigned long flags;
	unsigned int commit;
	unsigned int read;
	u64 save_timestamp;
	int ret = -1;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	/*
	 * If len is not big enough to hold the page header, then
	 * we can not copy anything.
	 */
	if (len <= BUF_PAGE_HDR_SIZE)
		goto out;

	len -= BUF_PAGE_HDR_SIZE;

	if (!data_page)
		goto out;

	bpage = *data_page;
	if (!bpage)
		goto out;

	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	reader = rb_get_reader_page(cpu_buffer);
	if (!reader)
		goto out_unlock;

	event = rb_reader_event(cpu_buffer);

	read = reader->read;
	commit = rb_page_commit(reader);

	/* Check if any events were dropped */
	missed_events = cpu_buffer->lost_events;

	/*
	 * If this page has been partially read or
	 * if len is not big enough to read the rest of the page or
	 * a writer is still on the page, then
	 * we must copy the data from the page to the buffer.
	 * Otherwise, we can simply swap the page with the one passed in.
	 */
	if (read || (len < (commit - read)) ||
	    cpu_buffer->reader_page == cpu_buffer->commit_page) {
		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
		unsigned int rpos = read;
		unsigned int pos = 0;
		unsigned int size;

		if (full)
			goto out_unlock;

		if (len > (commit - read))
			len = (commit - read);

		/* Always keep the time extend and data together */
		size = rb_event_ts_length(event);

		if (len < size)
			goto out_unlock;

		/* save the current timestamp, since the user will need it */
		save_timestamp = cpu_buffer->read_stamp;

		/* Need to copy one event at a time */
		do {
			/* We need the size of one event, because
			 * rb_advance_reader only advances by one event,
			 * whereas rb_event_ts_length may include the size of
			 * one or two events.
			 * We have already ensured there's enough space if this
			 * is a time extend. */
			size = rb_event_length(event);
			memcpy(bpage->data + pos, rpage->data + rpos, size);

			len -= size;

			rb_advance_reader(cpu_buffer);
			rpos = reader->read;
			pos += size;

			if (rpos >= commit)
				break;

			event = rb_reader_event(cpu_buffer);
			/* Always keep the time extend and data together */
			size = rb_event_ts_length(event);
		} while (len >= size);

		/* update bpage */
		local_set(&bpage->commit, pos);
		bpage->time_stamp = save_timestamp;

		/* we copied everything to the beginning */
		read = 0;
	} else {
		/* update the entry counter */
		cpu_buffer->read += rb_page_entries(reader);
		cpu_buffer->read_bytes += BUF_PAGE_SIZE;

		/* swap the pages */
		rb_init_page(bpage);
		bpage = reader->page;
		reader->page = *data_page;
		local_set(&reader->write, 0);
		local_set(&reader->entries, 0);
		reader->read = 0;
		*data_page = bpage;

		/*
		 * Use the real_end for the data size,
		 * This gives us a chance to store the lost events
		 * on the page.
		 */
		if (reader->real_end)
			local_set(&bpage->commit, reader->real_end);
	}
	ret = read;

	cpu_buffer->lost_events = 0;

	commit = local_read(&bpage->commit);
	/*
	 * Set a flag in the commit field if we lost events
	 */
	if (missed_events) {
		/* If there is room at the end of the page to save the
		 * missed events, then record it there.
		 */
		if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
			memcpy(&bpage->data[commit], &missed_events,
			       sizeof(missed_events));
			local_add(RB_MISSED_STORED, &bpage->commit);
			commit += sizeof(missed_events);
		}
		local_add(RB_MISSED_EVENTS, &bpage->commit);
	}

	/*
	 * This page may be off to user land. Zero it out here.
	 */
	if (commit < BUF_PAGE_SIZE)
		memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);

 out_unlock:
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

 out:
	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_page);

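/*
 * Example (illustrative sketch): draining whole pages from one CPU,
 * building on the kernel-doc example above. deliver_page() is a
 * hypothetical caller-side consumer; PAGE_SIZE is used as the read
 * length since the reader page is at most one page.
 *
 *	void *rpage;
 *	int ret;
 *
 *	rpage = ring_buffer_alloc_read_page(buffer, cpu);
 *	if (!rpage)
 *		return -ENOMEM;
 *	while ((ret = ring_buffer_read_page(buffer, &rpage, PAGE_SIZE,
 *					    cpu, 0)) >= 0)
 *		deliver_page(rpage, ret);
 *	ring_buffer_free_read_page(buffer, rpage);
 */
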
#ifdef CONFIG_TRACING
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	unsigned long *p = filp->private_data;
	char buf[64];
	int r;

	if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
		r = sprintf(buf, "permanently disabled\n");
	else
		r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	unsigned long *p = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val)
		set_bit(RB_BUFFERS_ON_BIT, p);
	else
		clear_bit(RB_BUFFERS_ON_BIT, p);

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
	.llseek		= default_llseek,
};

static __init int rb_init_debugfs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();

	trace_create_file("tracing_on", 0644, d_tracer,
			  &ring_buffer_flags, &rb_simple_fops);

	return 0;
}

fs_initcall(rb_init_debugfs);
#endif

#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu)
{
	struct ring_buffer *buffer =
		container_of(self, struct ring_buffer, cpu_notify);
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (cpumask_test_cpu(cpu, buffer->cpumask))
			return NOTIFY_OK;

		buffer->buffers[cpu] =
			rb_allocate_cpu_buffer(buffer, cpu);
		if (!buffer->buffers[cpu]) {
			WARN(1, "failed to allocate ring buffer on CPU %ld\n",
			     cpu);
			return NOTIFY_OK;
		}
		smp_wmb();
		cpumask_set_cpu(cpu, buffer->cpumask);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		/*
		 * Do nothing.
		 *  If we were to free the buffer, then the user would
		 *  lose any trace that was in the buffer.
		 */
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
#endif