ring_buffer.c
/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/ftrace_irq.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/fs.h>

#include "trace.h"

/*
 * The ring buffer header is special. We must manually keep it up to date.
 */
int ring_buffer_print_entry_header(struct trace_seq *s)
{
	int ret;

	ret = trace_seq_printf(s, "# compressed entry header\n");
	ret = trace_seq_printf(s, "\ttype_len    :    5 bits\n");
	ret = trace_seq_printf(s, "\ttime_delta  :   27 bits\n");
	ret = trace_seq_printf(s, "\tarray       :   32 bits\n");
	ret = trace_seq_printf(s, "\n");
	ret = trace_seq_printf(s, "\tpadding     : type == %d\n",
			       RINGBUF_TYPE_PADDING);
	ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
			       RINGBUF_TYPE_TIME_EXTEND);
	ret = trace_seq_printf(s, "\tdata max type_len  == %d\n",
			       RINGBUF_TYPE_DATA_TYPE_LEN_MAX);

	return ret;
}
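
/*
 * Illustrative sketch (not from the original file): the compressed entry
 * header above packs type_len and time_delta into a single 32-bit word,
 * followed by 32-bit array[] words. Assuming the bitfield layout of
 * struct ring_buffer_event on a little-endian GCC build, a decoder could
 * unpack the first word as:
 *
 *	u32 word = *(u32 *)event;
 *	unsigned type_len   = word & ((1 << 5) - 1);	// low 5 bits
 *	unsigned time_delta = word >> 5;		// remaining 27 bits
 *
 * type_len values above RINGBUF_TYPE_DATA_TYPE_LEN_MAX select the
 * padding/time_extend/time_stamp variants printed above.
 */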

/*
 * The ring buffer is made up of a list of pages. A separate list of pages is
 * allocated for each CPU. A writer may only write to a buffer that is
 * associated with the CPU it is currently executing on. A reader may read
 * from any per cpu buffer.
 *
 * The reader is special. For each per cpu buffer, the reader has its own
 * reader page. When a reader has read the entire reader page, this reader
 * page is swapped with another page in the ring buffer.
 *
 * Now, as long as the writer is off the reader page, the reader can do
 * whatever it wants with that page. The writer will never write to that
 * page again (as long as it is out of the ring buffer).
 *
 * Here's some silly ASCII art.
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |-->|   |-->|   |
 *      |            +---+   +---+   +---+
 *      |                              |
 *      |                              |
 *      +------------------------------+
 *
 *
 *   +------+
 *   |buffer|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |   |   |-->|   |
 *      |    New     +---+   +---+   +---+
 *      |   Reader------^               |
 *      |    page       |               |
 *      +-------------------------------+
 *
 *
 * After we make this swap, the reader can hand this page off to the splice
 * code and be done with it. It can even allocate a new page if it needs to
 * and swap that into the ring buffer.
 *
 * We will be using cmpxchg soon to make all this lockless.
 */

/*
 * A fast way to enable or disable all ring buffers is to
 * call tracing_on or tracing_off. Turning off the ring buffers
 * prevents all ring buffers from being recorded to.
 * Turning this switch on makes it OK to write to the
 * ring buffer, if the ring buffer is enabled itself.
 *
 * There are three layers that must be on in order to write
 * to the ring buffer.
 *
 * 1) This global flag must be set.
 * 2) The ring buffer must be enabled for recording.
 * 3) The per cpu buffer must be enabled for recording.
 *
 * In case of an anomaly, this global flag has a bit set that
 * will permanently disable all ring buffers.
 */

/*
 * Global flag to disable all recording to ring buffers
 *  This has two bits: ON, DISABLED
 *
 *  ON   DISABLED
 * ---- ----------
 *   0      0        : ring buffers are off
 *   1      0        : ring buffers are on
 *   X      1        : ring buffers are permanently disabled
 */

enum {
	RB_BUFFERS_ON_BIT	= 0,
	RB_BUFFERS_DISABLED_BIT	= 1,
};

enum {
	RB_BUFFERS_ON		= 1 << RB_BUFFERS_ON_BIT,
	RB_BUFFERS_DISABLED	= 1 << RB_BUFFERS_DISABLED_BIT,
};

static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;

#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)

/**
 * tracing_on - enable all tracing buffers
 *
 * This function enables all tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * tracing_off - turn off all tracing buffers
 *
 * This function stops all tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
}
EXPORT_SYMBOL_GPL(tracing_off);

/**
 * tracing_off_permanent - permanently disable ring buffers
 *
 * This function, once called, will disable all ring buffers
 * permanently.
 */
void tracing_off_permanent(void)
{
	set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return ring_buffer_flags == RB_BUFFERS_ON;
}
EXPORT_SYMBOL_GPL(tracing_is_on);
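
/*
 * Usage sketch (illustrative): a common pattern is to call tracing_off()
 * from a debugging hook the moment a bug condition is detected, freezing
 * the ring buffer contents for post-mortem inspection:
 *
 *	if (unlikely(data_is_corrupted(obj)))	// hypothetical check
 *		tracing_off();
 *
 * Recording can later be re-enabled with tracing_on(); tracing_is_on()
 * reports whether the buffers are currently recording.
 */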

#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT		4U
#define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)

/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX

enum {
	RB_LEN_TIME_EXTEND = 8,
	RB_LEN_TIME_STAMP = 16,
};

static inline int rb_null_event(struct ring_buffer_event *event)
{
	return event->type_len == RINGBUF_TYPE_PADDING
			&& event->time_delta == 0;
}

static inline int rb_discarded_event(struct ring_buffer_event *event)
{
	return event->type_len == RINGBUF_TYPE_PADDING && event->time_delta;
}

static void rb_event_set_padding(struct ring_buffer_event *event)
{
	event->type_len = RINGBUF_TYPE_PADDING;
	event->time_delta = 0;
}

static unsigned
rb_event_data_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->type_len)
		length = event->type_len * RB_ALIGNMENT;
	else
		length = event->array[0];
	return length + RB_EVNT_HDR_SIZE;
}

/* inline for ring buffer fast paths */
static unsigned
rb_event_length(struct ring_buffer_event *event)
{
	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			/* undefined */
			return -1;
		return event->array[0] + RB_EVNT_HDR_SIZE;

	case RINGBUF_TYPE_TIME_EXTEND:
		return RB_LEN_TIME_EXTEND;

	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;

	case RINGBUF_TYPE_DATA:
		return rb_event_data_length(event);
	default:
		BUG();
	}
	/* not hit */
	return 0;
}

/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
	unsigned length = rb_event_length(event);

	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
		return length;
	length -= RB_EVNT_HDR_SIZE;
	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
		length -= sizeof(event->array[0]);
	return length;
}
EXPORT_SYMBOL_GPL(ring_buffer_event_length);

/* inline for ring buffer fast paths */
static void *
rb_event_data(struct ring_buffer_event *event)
{
	BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
	/* If length is in len field, then array[0] has the data */
	if (event->type_len)
		return (void *)&event->array[0];
	/* Otherwise length is in array[0] and array[1] has the data */
	return (void *)&event->array[1];
}

/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
	return rb_event_data(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_data);

#define for_each_buffer_cpu(buffer, cpu)		\
	for_each_cpu(cpu, buffer->cpumask)

#define TS_SHIFT	27
#define TS_MASK		((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST	(~TS_MASK)
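
/*
 * Worked example (illustrative): a time-extend event splits an oversized
 * delta across the 27-bit time_delta field and array[0], as
 * rb_add_time_stamp() does below. A delta of 0x12345678, which has bits
 * above TS_MASK set (so test_time_stamp() fires), would be stored as:
 *
 *	event->time_delta = 0x12345678 & TS_MASK;	-> 0x02345678
 *	event->array[0]   = 0x12345678 >> TS_SHIFT;	-> 0x2
 *
 * and a reader reconstructs the full delta as
 * ((u64)array[0] << TS_SHIFT) | time_delta.
 */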

struct buffer_data_page {
	u64		 time_stamp;	/* page time stamp */
	local_t		 commit;	/* write committed index */
	unsigned char	 data[];	/* data of buffer page */
};

struct buffer_page {
	struct list_head list;		/* list of buffer pages */
	local_t		 write;		/* index for next write */
	unsigned	 read;		/* index for next read */
	local_t		 entries;	/* entries on this page */
	struct buffer_data_page *page;	/* Actual data page */
};

static void rb_init_page(struct buffer_data_page *bpage)
{
	local_set(&bpage->commit, 0);
}

/**
 * ring_buffer_page_len - the size of data on the page.
 * @page: The page to read
 *
 * Returns the amount of data on the page, including buffer page header.
 */
size_t ring_buffer_page_len(void *page)
{
	return local_read(&((struct buffer_data_page *)page)->commit)
		+ BUF_PAGE_HDR_SIZE;
}

/*
 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 * this issue out.
 */
static void free_buffer_page(struct buffer_page *bpage)
{
	free_page((unsigned long)bpage->page);
	kfree(bpage);
}

/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline int test_time_stamp(u64 delta)
{
	if (delta & TS_DELTA_TEST)
		return 1;
	return 0;
}

#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)

int ring_buffer_print_page_header(struct trace_seq *s)
{
	struct buffer_data_page field;
	int ret;

	ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
			       "offset:0;\tsize:%u;\n",
			       (unsigned int)sizeof(field.time_stamp));

	ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
			       "offset:%u;\tsize:%u;\n",
			       (unsigned int)offsetof(typeof(field), commit),
			       (unsigned int)sizeof(field.commit));

	ret = trace_seq_printf(s, "\tfield: char data;\t"
			       "offset:%u;\tsize:%u;\n",
			       (unsigned int)offsetof(typeof(field), data),
			       (unsigned int)BUF_PAGE_SIZE);

	return ret;
}

/*
 * head_page == tail_page && head == tail then buffer is empty.
 */
struct ring_buffer_per_cpu {
	int				cpu;
	struct ring_buffer		*buffer;
	spinlock_t			reader_lock; /* serialize readers */
	raw_spinlock_t			lock;
	struct lock_class_key		lock_key;
	struct list_head		pages;
	struct buffer_page		*head_page;	/* read from head */
	struct buffer_page		*tail_page;	/* write to tail */
	struct buffer_page		*commit_page;	/* committed pages */
	struct buffer_page		*reader_page;
	unsigned long			nmi_dropped;
	unsigned long			commit_overrun;
	unsigned long			overrun;
	unsigned long			read;
	local_t				entries;
	u64				write_stamp;
	u64				read_stamp;
	atomic_t			record_disabled;
};

struct ring_buffer {
	unsigned			pages;
	unsigned			flags;
	int				cpus;
	atomic_t			record_disabled;
	cpumask_var_t			cpumask;

	struct mutex			mutex;

	struct ring_buffer_per_cpu	**buffers;

#ifdef CONFIG_HOTPLUG_CPU
	struct notifier_block		cpu_notify;
#endif
	u64				(*clock)(void);
};

struct ring_buffer_iter {
	struct ring_buffer_per_cpu	*cpu_buffer;
	unsigned long			head;
	struct buffer_page		*head_page;
	u64				read_stamp;
};

/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(buffer, cond)				\
	({							\
		int _____ret = unlikely(cond);			\
		if (_____ret) {					\
			atomic_inc(&buffer->record_disabled);	\
			WARN_ON(1);				\
		}						\
		_____ret;					\
	})

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
{
	u64 time;

	preempt_disable_notrace();
	/* shift to debug/test normalization and TIME_EXTENTS */
	time = buffer->clock() << DEBUG_SHIFT;
	preempt_enable_no_resched_notrace();

	return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);

void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
				      int cpu, u64 *ts)
{
	/* Just stupid testing the normalize function and deltas */
	*ts >>= DEBUG_SHIFT;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);

/**
 * check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
 */
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
		return -1;
	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
		return -1;

	list_for_each_entry_safe(bpage, tmp, head, list) {
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.next->prev != &bpage->list))
			return -1;
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.prev->next != &bpage->list))
			return -1;
	}

	return 0;
}

static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			     unsigned nr_pages)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;
	unsigned long addr;
	LIST_HEAD(pages);
	unsigned i;

	for (i = 0; i < nr_pages; i++) {
		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
				     GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
		if (!bpage)
			goto free_pages;
		list_add(&bpage->list, &pages);

		addr = __get_free_page(GFP_KERNEL);
		if (!addr)
			goto free_pages;
		bpage->page = (void *)addr;
		rb_init_page(bpage->page);
	}

	list_splice(&pages, head);

	rb_check_pages(cpu_buffer);

	return 0;

 free_pages:
	list_for_each_entry_safe(bpage, tmp, &pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	return -ENOMEM;
}

static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *bpage;
	unsigned long addr;
	int ret;

	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
				  GFP_KERNEL, cpu_to_node(cpu));
	if (!cpu_buffer)
		return NULL;

	cpu_buffer->cpu = cpu;
	cpu_buffer->buffer = buffer;
	spin_lock_init(&cpu_buffer->reader_lock);
	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
	INIT_LIST_HEAD(&cpu_buffer->pages);

	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
			     GFP_KERNEL, cpu_to_node(cpu));
	if (!bpage)
		goto fail_free_buffer;

	cpu_buffer->reader_page = bpage;
	addr = __get_free_page(GFP_KERNEL);
	if (!addr)
		goto fail_free_reader;
	bpage->page = (void *)addr;
	rb_init_page(bpage->page);

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);

	ret = rb_allocate_pages(cpu_buffer, buffer->pages);
	if (ret < 0)
		goto fail_free_reader;

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;

	return cpu_buffer;

 fail_free_reader:
	free_buffer_page(cpu_buffer->reader_page);

 fail_free_buffer:
	kfree(cpu_buffer);
	return NULL;
}

static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	free_buffer_page(cpu_buffer->reader_page);

	list_for_each_entry_safe(bpage, tmp, head, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	kfree(cpu_buffer);
}

/*
 * Causes compile errors if the struct buffer_page gets bigger
 * than the struct page.
 */
extern int ring_buffer_page_too_big(void);

#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu);
#endif

/**
 * ring_buffer_alloc - allocate a new ring_buffer
 * @size: the size in bytes per cpu that is needed.
 * @flags: attributes to set for the ring buffer.
 *
 * Currently the only flag that is available is the RB_FL_OVERWRITE
 * flag. This flag means that the buffer will overwrite old data
 * when the buffer wraps. If this flag is not set, the buffer will
 * drop data when the tail hits the head.
 */
struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
{
	struct ring_buffer *buffer;
	int bsize;
	int cpu;

	/* Paranoid! Optimizes out when all is well */
	if (sizeof(struct buffer_page) > sizeof(struct page))
		ring_buffer_page_too_big();

	/* keep it in its own cache line */
	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
			 GFP_KERNEL);
	if (!buffer)
		return NULL;

	if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
		goto fail_free_buffer;

	buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	buffer->flags = flags;
	buffer->clock = trace_clock_local;

	/* need at least two pages */
	if (buffer->pages == 1)
		buffer->pages++;

	/*
	 * In case of non-hotplug cpu, if the ring-buffer is allocated
	 * in early initcall, it will not be notified of secondary cpus.
	 * In that case, we need to allocate for all possible cpus.
	 */
#ifdef CONFIG_HOTPLUG_CPU
	get_online_cpus();
	cpumask_copy(buffer->cpumask, cpu_online_mask);
#else
	cpumask_copy(buffer->cpumask, cpu_possible_mask);
#endif
	buffer->cpus = nr_cpu_ids;

	bsize = sizeof(void *) * nr_cpu_ids;
	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
				  GFP_KERNEL);
	if (!buffer->buffers)
		goto fail_free_cpumask;

	for_each_buffer_cpu(buffer, cpu) {
		buffer->buffers[cpu] =
			rb_allocate_cpu_buffer(buffer, cpu);
		if (!buffer->buffers[cpu])
			goto fail_free_buffers;
	}

#ifdef CONFIG_HOTPLUG_CPU
	buffer->cpu_notify.notifier_call = rb_cpu_notify;
	buffer->cpu_notify.priority = 0;
	register_cpu_notifier(&buffer->cpu_notify);
#endif

	put_online_cpus();
	mutex_init(&buffer->mutex);

	return buffer;

 fail_free_buffers:
	for_each_buffer_cpu(buffer, cpu) {
		if (buffer->buffers[cpu])
			rb_free_cpu_buffer(buffer->buffers[cpu]);
	}
	kfree(buffer->buffers);

 fail_free_cpumask:
	free_cpumask_var(buffer->cpumask);
	put_online_cpus();

 fail_free_buffer:
	kfree(buffer);
	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_alloc);

/**
 * ring_buffer_free - free a ring buffer.
 * @buffer: the buffer to free.
 */
void
ring_buffer_free(struct ring_buffer *buffer)
{
	int cpu;

	get_online_cpus();

#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu_notifier(&buffer->cpu_notify);
#endif

	for_each_buffer_cpu(buffer, cpu)
		rb_free_cpu_buffer(buffer->buffers[cpu]);

	put_online_cpus();

	free_cpumask_var(buffer->cpumask);

	kfree(buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_free);

void ring_buffer_set_clock(struct ring_buffer *buffer,
			   u64 (*clock)(void))
{
	buffer->clock = clock;
}

static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);

static void
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
{
	struct buffer_page *bpage;
	struct list_head *p;
	unsigned i;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	for (i = 0; i < nr_pages; i++) {
		if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
			return;
		p = cpu_buffer->pages.next;
		bpage = list_entry(p, struct buffer_page, list);
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
		return;

	rb_reset_cpu(cpu_buffer);

	rb_check_pages(cpu_buffer);

	atomic_dec(&cpu_buffer->record_disabled);
}

static void
rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
		struct list_head *pages, unsigned nr_pages)
{
	struct buffer_page *bpage;
	struct list_head *p;
	unsigned i;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	for (i = 0; i < nr_pages; i++) {
		if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
			return;
		p = pages->next;
		bpage = list_entry(p, struct buffer_page, list);
		list_del_init(&bpage->list);
		list_add_tail(&bpage->list, &cpu_buffer->pages);
	}
	rb_reset_cpu(cpu_buffer);

	rb_check_pages(cpu_buffer);

	atomic_dec(&cpu_buffer->record_disabled);
}

/**
 * ring_buffer_resize - resize the ring buffer
 * @buffer: the buffer to resize.
 * @size: the new size.
 *
 * The tracer is responsible for making sure that the buffer is
 * not being used while changing the size.
 * Note: We may be able to change the above requirement by using
 *  RCU synchronizations.
 *
 * Minimum size is 2 * BUF_PAGE_SIZE.
 *
 * Returns -1 on failure.
 */
int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned nr_pages, rm_pages, new_pages;
	struct buffer_page *bpage, *tmp;
	unsigned long buffer_size;
	unsigned long addr;
	LIST_HEAD(pages);
	int i, cpu;

	/*
	 * Always succeed at resizing a non-existent buffer:
	 */
	if (!buffer)
		return size;

	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	size *= BUF_PAGE_SIZE;
	buffer_size = buffer->pages * BUF_PAGE_SIZE;

	/* we need a minimum of two pages */
	if (size < BUF_PAGE_SIZE * 2)
		size = BUF_PAGE_SIZE * 2;

	if (size == buffer_size)
		return size;

	mutex_lock(&buffer->mutex);
	get_online_cpus();

	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);

	if (size < buffer_size) {

		/* easy case, just free pages */
		if (RB_WARN_ON(buffer, nr_pages >= buffer->pages))
			goto out_fail;

		rm_pages = buffer->pages - nr_pages;

		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];
			rb_remove_pages(cpu_buffer, rm_pages);
		}
		goto out;
	}

	/*
	 * This is a bit more difficult. We only want to add pages
	 * when we can allocate enough for all CPUs. We do this
	 * by allocating all the pages and storing them on a local
	 * link list. If we succeed in our allocation, then we
	 * add these pages to the cpu_buffers. Otherwise we just free
	 * them all and return -ENOMEM;
	 */
	if (RB_WARN_ON(buffer, nr_pages <= buffer->pages))
		goto out_fail;

	new_pages = nr_pages - buffer->pages;

	for_each_buffer_cpu(buffer, cpu) {
		for (i = 0; i < new_pages; i++) {
			bpage = kzalloc_node(ALIGN(sizeof(*bpage),
						   cache_line_size()),
					     GFP_KERNEL, cpu_to_node(cpu));
			if (!bpage)
				goto free_pages;
			list_add(&bpage->list, &pages);
			addr = __get_free_page(GFP_KERNEL);
			if (!addr)
				goto free_pages;
			bpage->page = (void *)addr;
			rb_init_page(bpage->page);
		}
	}

	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		rb_insert_pages(cpu_buffer, &pages, new_pages);
	}

	if (RB_WARN_ON(buffer, !list_empty(&pages)))
		goto out_fail;

 out:
	buffer->pages = nr_pages;
	put_online_cpus();
	mutex_unlock(&buffer->mutex);

	return size;

 free_pages:
	list_for_each_entry_safe(bpage, tmp, &pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	put_online_cpus();
	mutex_unlock(&buffer->mutex);
	return -ENOMEM;

	/*
	 * Something went totally wrong, and we are too paranoid
	 * to even clean up the mess.
	 */
 out_fail:
	put_online_cpus();
	mutex_unlock(&buffer->mutex);
	return -1;
}
EXPORT_SYMBOL_GPL(ring_buffer_resize);

static inline void *
__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
{
	return bpage->data + index;
}

static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
{
	return bpage->page->data + index;
}

static inline struct ring_buffer_event *
rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
{
	return __rb_page_index(cpu_buffer->reader_page,
			       cpu_buffer->reader_page->read);
}

static inline struct ring_buffer_event *
rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
{
	return __rb_page_index(cpu_buffer->head_page,
			       cpu_buffer->head_page->read);
}

static inline struct ring_buffer_event *
rb_iter_head_event(struct ring_buffer_iter *iter)
{
	return __rb_page_index(iter->head_page, iter->head);
}

static inline unsigned rb_page_write(struct buffer_page *bpage)
{
	return local_read(&bpage->write);
}

static inline unsigned rb_page_commit(struct buffer_page *bpage)
{
	return local_read(&bpage->page->commit);
}

/* Size is determined by what has been committed */
static inline unsigned rb_page_size(struct buffer_page *bpage)
{
	return rb_page_commit(bpage);
}

static inline unsigned
rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
{
	return rb_page_commit(cpu_buffer->commit_page);
}

static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
{
	return rb_page_commit(cpu_buffer->head_page);
}

static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
			       struct buffer_page **bpage)
{
	struct list_head *p = (*bpage)->list.next;

	if (p == &cpu_buffer->pages)
		p = p->next;

	*bpage = list_entry(p, struct buffer_page, list);
}

static inline unsigned
rb_event_index(struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;

	return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
}
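
/*
 * Worked example for rb_event_index() (illustrative): PAGE_SIZE -
 * BUF_PAGE_SIZE equals BUF_PAGE_HDR_SIZE by definition, so the function
 * returns the event's offset relative to the start of the page's data[]
 * area. Assuming, purely for illustration, a 4096-byte page and an
 * 8-byte page header, an event at page offset 0x28 yields:
 *
 *	(addr & ~PAGE_MASK)  = 0x28
 *	0x28 - (4096 - 4088) = 0x20
 *
 * i.e. the event starts 0x20 bytes into the data area. The real header
 * size is whatever BUF_PAGE_HDR_SIZE works out to on the build.
 */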

static int
rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
	     struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;
	unsigned long index;

	index = rb_event_index(event);
	addr &= PAGE_MASK;

	return cpu_buffer->commit_page->page == (void *)addr &&
		rb_commit_index(cpu_buffer) == index;
}

static void
rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
		    struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;
	unsigned long index;

	index = rb_event_index(event);
	addr &= PAGE_MASK;

	while (cpu_buffer->commit_page->page != (void *)addr) {
		if (RB_WARN_ON(cpu_buffer,
			       cpu_buffer->commit_page == cpu_buffer->tail_page))
			return;
		cpu_buffer->commit_page->page->commit =
			cpu_buffer->commit_page->write;
		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
		cpu_buffer->write_stamp =
			cpu_buffer->commit_page->page->time_stamp;
	}

	/* Now set the commit to the event's index */
	local_set(&cpu_buffer->commit_page->page->commit, index);
}

static void
rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
{
	/*
	 * We only race with interrupts and NMIs on this CPU.
	 * If we own the commit event, then we can commit
	 * all others that interrupted us, since the interruptions
	 * are in stack format (they finish before they come
	 * back to us). This allows us to do a simple loop to
	 * assign the commit to the tail.
	 */
 again:
	while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
		cpu_buffer->commit_page->page->commit =
			cpu_buffer->commit_page->write;
		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
		cpu_buffer->write_stamp =
			cpu_buffer->commit_page->page->time_stamp;
		/* add barrier to keep gcc from optimizing too much */
		barrier();
	}
	while (rb_commit_index(cpu_buffer) !=
	       rb_page_write(cpu_buffer->commit_page)) {
		cpu_buffer->commit_page->page->commit =
			cpu_buffer->commit_page->write;
		barrier();
	}

	/* again, keep gcc from optimizing */
	barrier();

	/*
	 * If an interrupt came in just after the first while loop
	 * and pushed the tail page forward, we will be left with
	 * a dangling commit that will never go forward.
	 */
	if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
		goto again;
}

static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
	cpu_buffer->reader_page->read = 0;
}

static void rb_inc_iter(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/*
	 * The iterator could be on the reader page (it starts there).
	 * But the head could have moved, since the reader was
	 * found. Check for this case and assign the iterator
	 * to the head page instead of next.
	 */
	if (iter->head_page == cpu_buffer->reader_page)
		iter->head_page = cpu_buffer->head_page;
	else
		rb_inc_page(cpu_buffer, &iter->head_page);

	iter->read_stamp = iter->head_page->page->time_stamp;
	iter->head = 0;
}

/**
 * ring_buffer_update_event - update event type and data
 * @event: the event to update
 * @type: the type of event
 * @length: the size of the event field in the ring buffer
 *
 * Update the type and data fields of the event. The length
 * is the actual size that is written to the ring buffer,
 * and with this, we can determine what to place into the
 * data field.
 */
static void
rb_update_event(struct ring_buffer_event *event,
		unsigned type, unsigned length)
{
	event->type_len = type;

	switch (type) {

	case RINGBUF_TYPE_PADDING:
	case RINGBUF_TYPE_TIME_EXTEND:
	case RINGBUF_TYPE_TIME_STAMP:
		break;

	case 0:
		length -= RB_EVNT_HDR_SIZE;
		if (length > RB_MAX_SMALL_DATA)
			event->array[0] = length;
		else
			event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
		break;
	default:
		BUG();
	}
}

static unsigned rb_calculate_event_length(unsigned length)
{
	struct ring_buffer_event event; /* Used only for sizeof array */

	/* zero length can cause confusions */
	if (!length)
		length = 1;

	if (length > RB_MAX_SMALL_DATA)
		length += sizeof(event.array[0]);

	length += RB_EVNT_HDR_SIZE;
	length = ALIGN(length, RB_ALIGNMENT);

	return length;
}
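
/*
 * Worked example (illustrative): with RB_ALIGNMENT == 4 and assuming the
 * usual 4-byte event header (RB_EVNT_HDR_SIZE), a request for 5 bytes of
 * data, which fits the type_len encoding since 5 <= RB_MAX_SMALL_DATA,
 * becomes:
 *
 *	length = 5 + RB_EVNT_HDR_SIZE	-> 9
 *	length = ALIGN(9, RB_ALIGNMENT)	-> 12
 *
 * Only requests larger than RB_MAX_SMALL_DATA pay the extra
 * sizeof(event.array[0]) bytes to store the length in array[0].
 */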
  977. static struct ring_buffer_event *
  978. rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
  979. unsigned long length, unsigned long tail,
  980. struct buffer_page *commit_page,
  981. struct buffer_page *tail_page, u64 *ts)
  982. {
  983. struct buffer_page *next_page, *head_page, *reader_page;
  984. struct ring_buffer *buffer = cpu_buffer->buffer;
  985. struct ring_buffer_event *event;
  986. bool lock_taken = false;
  987. unsigned long flags;
  988. next_page = tail_page;
  989. local_irq_save(flags);
  990. /*
  991. * Since the write to the buffer is still not
  992. * fully lockless, we must be careful with NMIs.
  993. * The locks in the writers are taken when a write
  994. * crosses to a new page. The locks protect against
  995. * races with the readers (this will soon be fixed
  996. * with a lockless solution).
  997. *
  998. * Because we can not protect against NMIs, and we
  999. * want to keep traces reentrant, we need to manage
  1000. * what happens when we are in an NMI.
  1001. *
  1002. * NMIs can happen after we take the lock.
  1003. * If we are in an NMI, only take the lock
  1004. * if it is not already taken. Otherwise
  1005. * simply fail.
  1006. */
  1007. if (unlikely(in_nmi())) {
  1008. if (!__raw_spin_trylock(&cpu_buffer->lock)) {
  1009. cpu_buffer->nmi_dropped++;
  1010. goto out_reset;
  1011. }
  1012. } else
  1013. __raw_spin_lock(&cpu_buffer->lock);
  1014. lock_taken = true;
  1015. rb_inc_page(cpu_buffer, &next_page);
  1016. head_page = cpu_buffer->head_page;
  1017. reader_page = cpu_buffer->reader_page;
  1018. /* we grabbed the lock before incrementing */
  1019. if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
  1020. goto out_reset;
  1021. /*
  1022. * If for some reason, we had an interrupt storm that made
  1023. * it all the way around the buffer, bail, and warn
  1024. * about it.
  1025. */
  1026. if (unlikely(next_page == commit_page)) {
  1027. cpu_buffer->commit_overrun++;
  1028. goto out_reset;
  1029. }
  1030. if (next_page == head_page) {
  1031. if (!(buffer->flags & RB_FL_OVERWRITE))
  1032. goto out_reset;
  1033. /* tail_page has not moved yet? */
  1034. if (tail_page == cpu_buffer->tail_page) {
  1035. /* count overflows */
  1036. cpu_buffer->overrun +=
  1037. local_read(&head_page->entries);
  1038. rb_inc_page(cpu_buffer, &head_page);
  1039. cpu_buffer->head_page = head_page;
  1040. cpu_buffer->head_page->read = 0;
  1041. }
  1042. }
  1043. /*
  1044. * If the tail page is still the same as what we think
  1045. * it is, then it is up to us to update the tail
  1046. * pointer.
  1047. */
  1048. if (tail_page == cpu_buffer->tail_page) {
  1049. local_set(&next_page->write, 0);
  1050. local_set(&next_page->entries, 0);
  1051. local_set(&next_page->page->commit, 0);
  1052. cpu_buffer->tail_page = next_page;
  1053. /* reread the time stamp */
  1054. *ts = ring_buffer_time_stamp(buffer, cpu_buffer->cpu);
  1055. cpu_buffer->tail_page->page->time_stamp = *ts;
  1056. }
  1057. /*
  1058. * The actual tail page has moved forward.
  1059. */
  1060. if (tail < BUF_PAGE_SIZE) {
  1061. /* Mark the rest of the page with padding */
  1062. event = __rb_page_index(tail_page, tail);
  1063. rb_event_set_padding(event);
  1064. }
  1065. /* Set the write back to the previous setting */
  1066. local_sub(length, &tail_page->write);
  1067. /*
  1068. * If this was a commit entry that failed,
  1069. * increment that too
  1070. */
  1071. if (tail_page == cpu_buffer->commit_page &&
  1072. tail == rb_commit_index(cpu_buffer)) {
  1073. rb_set_commit_to_write(cpu_buffer);
  1074. }
  1075. __raw_spin_unlock(&cpu_buffer->lock);
  1076. local_irq_restore(flags);
  1077. /* fail and let the caller try again */
  1078. return ERR_PTR(-EAGAIN);
  1079. out_reset:
  1080. /* reset write */
  1081. local_sub(length, &tail_page->write);
  1082. if (likely(lock_taken))
  1083. __raw_spin_unlock(&cpu_buffer->lock);
  1084. local_irq_restore(flags);
  1085. return NULL;
  1086. }
  1087. static struct ring_buffer_event *
  1088. __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
  1089. unsigned type, unsigned long length, u64 *ts)
  1090. {
  1091. struct buffer_page *tail_page, *commit_page;
  1092. struct ring_buffer_event *event;
  1093. unsigned long tail, write;
  1094. commit_page = cpu_buffer->commit_page;
  1095. /* we just need to protect against interrupts */
  1096. barrier();
  1097. tail_page = cpu_buffer->tail_page;
  1098. write = local_add_return(length, &tail_page->write);
  1099. tail = write - length;
  1100. /* See if we shot pass the end of this buffer page */
  1101. if (write > BUF_PAGE_SIZE)
  1102. return rb_move_tail(cpu_buffer, length, tail,
  1103. commit_page, tail_page, ts);
  1104. /* We reserved something on the buffer */
  1105. if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
  1106. return NULL;
  1107. event = __rb_page_index(tail_page, tail);
  1108. rb_update_event(event, type, length);
  1109. /* The passed in type is zero for DATA */
  1110. if (likely(!type))
  1111. local_inc(&tail_page->entries);
  1112. /*
  1113. * If this is a commit and the tail is zero, then update
  1114. * this page's time stamp.
  1115. */
  1116. if (!tail && rb_is_commit(cpu_buffer, event))
  1117. cpu_buffer->commit_page->page->time_stamp = *ts;
  1118. return event;
  1119. }
  1120. static int
  1121. rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
  1122. u64 *ts, u64 *delta)
  1123. {
  1124. struct ring_buffer_event *event;
  1125. static int once;
  1126. int ret;
  1127. if (unlikely(*delta > (1ULL << 59) && !once++)) {
  1128. printk(KERN_WARNING "Delta way too big! %llu"
  1129. " ts=%llu write stamp = %llu\n",
  1130. (unsigned long long)*delta,
  1131. (unsigned long long)*ts,
  1132. (unsigned long long)cpu_buffer->write_stamp);
  1133. WARN_ON(1);
  1134. }
  1135. /*
  1136. * The delta is too big, we to add a
  1137. * new timestamp.
  1138. */
  1139. event = __rb_reserve_next(cpu_buffer,
  1140. RINGBUF_TYPE_TIME_EXTEND,
  1141. RB_LEN_TIME_EXTEND,
  1142. ts);
  1143. if (!event)
  1144. return -EBUSY;
  1145. if (PTR_ERR(event) == -EAGAIN)
  1146. return -EAGAIN;
  1147. /* Only a commited time event can update the write stamp */
  1148. if (rb_is_commit(cpu_buffer, event)) {
  1149. /*
  1150. * If this is the first on the page, then we need to
  1151. * update the page itself, and just put in a zero.
  1152. */
  1153. if (rb_event_index(event)) {
  1154. event->time_delta = *delta & TS_MASK;
  1155. event->array[0] = *delta >> TS_SHIFT;
  1156. } else {
  1157. cpu_buffer->commit_page->page->time_stamp = *ts;
  1158. event->time_delta = 0;
  1159. event->array[0] = 0;
  1160. }
  1161. cpu_buffer->write_stamp = *ts;
  1162. /* let the caller know this was the commit */
  1163. ret = 1;
  1164. } else {
  1165. /* Darn, this is just wasted space */
  1166. event->time_delta = 0;
  1167. event->array[0] = 0;
  1168. ret = 0;
  1169. }
  1170. *delta = 0;
  1171. return ret;
  1172. }
  1173. static struct ring_buffer_event *
  1174. rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
  1175. unsigned type, unsigned long length)
  1176. {
  1177. struct ring_buffer_event *event;
  1178. u64 ts, delta;
  1179. int commit = 0;
  1180. int nr_loops = 0;
  1181. again:
  1182. /*
  1183. * We allow for interrupts to reenter here and do a trace.
  1184. * If one does, it will cause this original code to loop
  1185. * back here. Even with heavy interrupts happening, this
  1186. * should only happen a few times in a row. If this happens
  1187. * 1000 times in a row, there must be either an interrupt
  1188. * storm or we have something buggy.
  1189. * Bail!
  1190. */
  1191. if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
  1192. return NULL;
  1193. ts = ring_buffer_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu);
  1194. /*
  1195. * Only the first commit can update the timestamp.
  1196. * Yes there is a race here. If an interrupt comes in
  1197. * just after the conditional and it traces too, then it
  1198. * will also check the deltas. More than one timestamp may
  1199. * also be made. But only the entry that did the actual
  1200. * commit will be something other than zero.
  1201. */
  1202. if (cpu_buffer->tail_page == cpu_buffer->commit_page &&
  1203. rb_page_write(cpu_buffer->tail_page) ==
  1204. rb_commit_index(cpu_buffer)) {
  1205. delta = ts - cpu_buffer->write_stamp;
  1206. /* make sure this delta is calculated here */
  1207. barrier();
  1208. /* Did the write stamp get updated already? */
  1209. if (unlikely(ts < cpu_buffer->write_stamp))
  1210. delta = 0;
  1211. if (test_time_stamp(delta)) {
  1212. commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
  1213. if (commit == -EBUSY)
  1214. return NULL;
  1215. if (commit == -EAGAIN)
  1216. goto again;
  1217. RB_WARN_ON(cpu_buffer, commit < 0);
  1218. }
  1219. } else
  1220. /* Non commits have zero deltas */
  1221. delta = 0;
  1222. event = __rb_reserve_next(cpu_buffer, type, length, &ts);
  1223. if (PTR_ERR(event) == -EAGAIN)
  1224. goto again;
  1225. if (!event) {
  1226. if (unlikely(commit))
  1227. /*
  1228. * Ouch! We needed a timestamp and it was commited. But
  1229. * we didn't get our event reserved.
  1230. */
  1231. rb_set_commit_to_write(cpu_buffer);
  1232. return NULL;
  1233. }
  1234. /*
  1235. * If the timestamp was commited, make the commit our entry
  1236. * now so that we will update it when needed.
  1237. */
  1238. if (commit)
  1239. rb_set_commit_event(cpu_buffer, event);
  1240. else if (!rb_is_commit(cpu_buffer, event))
  1241. delta = 0;
  1242. event->time_delta = delta;
  1243. return event;
  1244. }
  1245. #define TRACE_RECURSIVE_DEPTH 16
  1246. static int trace_recursive_lock(void)
  1247. {
  1248. current->trace_recursion++;
  1249. if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
  1250. return 0;
  1251. /* Disable all tracing before we do anything else */
  1252. tracing_off_permanent();
  1253. printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
  1254. "HC[%lu]:SC[%lu]:NMI[%lu]\n",
  1255. current->trace_recursion,
  1256. hardirq_count() >> HARDIRQ_SHIFT,
  1257. softirq_count() >> SOFTIRQ_SHIFT,
  1258. in_nmi());
  1259. WARN_ON_ONCE(1);
  1260. return -1;
  1261. }
  1262. static void trace_recursive_unlock(void)
  1263. {
  1264. WARN_ON_ONCE(!current->trace_recursion);
  1265. current->trace_recursion--;
  1266. }
static DEFINE_PER_CPU(int, rb_need_resched);

/**
 * ring_buffer_lock_reserve - reserve a part of the buffer
 * @buffer: the ring buffer to reserve from
 * @length: the length of the data to reserve (excluding event header)
 *
 * Returns a reserved event on the ring buffer to copy directly to.
 * The user of this interface will need to get the body to write into
 * and can use the ring_buffer_event_data() interface.
 *
 * The length is the length of the data needed, not the event length
 * which also includes the event header.
 *
 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
 * If NULL is returned, then nothing has been allocated or locked.
 */
struct ring_buffer_event *
ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	int cpu, resched;

	if (ring_buffer_flags != RB_BUFFERS_ON)
		return NULL;

	if (atomic_read(&buffer->record_disabled))
		return NULL;

	/* If we are tracing schedule, we don't want to recurse */
	resched = ftrace_preempt_disable();

	if (trace_recursive_lock())
		goto out_nocheck;

	cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];

	if (atomic_read(&cpu_buffer->record_disabled))
		goto out;

	length = rb_calculate_event_length(length);
	if (length > BUF_PAGE_SIZE)
		goto out;

	event = rb_reserve_next_event(cpu_buffer, 0, length);
	if (!event)
		goto out;

	/*
	 * Need to store resched state on this cpu.
	 * Only the first needs to.
	 */
	if (preempt_count() == 1)
		per_cpu(rb_need_resched, cpu) = resched;

	return event;

 out:
	trace_recursive_unlock();

 out_nocheck:
	ftrace_preempt_enable(resched);
	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
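
/*
 * A minimal usage sketch of the reserve/commit pair (illustrative only;
 * "struct my_event" and its field are hypothetical and not part of this
 * file):
 *
 *	struct my_event { int pid; };
 *	struct ring_buffer_event *event;
 *	struct my_event *entry;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *	if (event) {
 *		entry = ring_buffer_event_data(event);
 *		entry->pid = current->pid;
 *		ring_buffer_unlock_commit(buffer, event);
 *	}
 */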
static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
		      struct ring_buffer_event *event)
{
	local_inc(&cpu_buffer->entries);

	/* Only process further if we own the commit */
	if (!rb_is_commit(cpu_buffer, event))
		return;

	cpu_buffer->write_stamp += event->time_delta;

	rb_set_commit_to_write(cpu_buffer);
}

/**
 * ring_buffer_unlock_commit - commit a reserved event
 * @buffer: The buffer to commit to
 * @event: The event pointer to commit.
 *
 * This commits the data to the ring buffer, and releases any locks held.
 *
 * Must be paired with ring_buffer_lock_reserve.
 */
int ring_buffer_unlock_commit(struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu = raw_smp_processor_id();

	cpu_buffer = buffer->buffers[cpu];

	rb_commit(cpu_buffer, event);

	trace_recursive_unlock();

	/*
	 * Only the last preempt count needs to restore preemption.
	 */
	if (preempt_count() == 1)
		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
	else
		preempt_enable_no_resched_notrace();

	return 0;
}
EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
static inline void rb_event_discard(struct ring_buffer_event *event)
{
	/* array[0] holds the actual length for the discarded event */
	event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
	event->type_len = RINGBUF_TYPE_PADDING;
	/* time delta must be non zero */
	if (!event->time_delta)
		event->time_delta = 1;
}

/**
 * ring_buffer_event_discard - discard any event in the ring buffer
 * @event: the event to discard
 *
 * Sometimes an event that is in the ring buffer needs to be ignored.
 * This function lets the user discard an event in the ring buffer
 * and then that event will not be read later.
 *
 * Note, it is up to the user to be careful with this, and protect
 * against races. If the user discards an event that has been consumed
 * it is possible that it could corrupt the ring buffer.
 */
void ring_buffer_event_discard(struct ring_buffer_event *event)
{
	rb_event_discard(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_discard);
/**
 * ring_buffer_discard_commit - discard an event that has not been committed
 * @buffer: the ring buffer
 * @event: non-committed event to discard
 *
 * This is similar to ring_buffer_event_discard but must only be
 * performed on an event that has not been committed yet. The difference
 * is that this will also try to free the event from the ring buffer
 * if another event has not been added behind it.
 *
 * If another event has been added behind it, it will set the event
 * up as discarded, and perform the commit.
 *
 * If this function is called, do not call ring_buffer_unlock_commit on
 * the event.
 */
void ring_buffer_discard_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long new_index, old_index;
	struct buffer_page *bpage;
	unsigned long index;
	unsigned long addr;
	int cpu;

	/* The event is discarded regardless */
	rb_event_discard(event);

	/*
	 * This must only be called if the event has not been
	 * committed yet. Thus we can assume that preemption
	 * is still disabled.
	 */
	RB_WARN_ON(buffer, !preempt_count());

	cpu = smp_processor_id();
	cpu_buffer = buffer->buffers[cpu];

	new_index = rb_event_index(event);
	old_index = new_index + rb_event_length(event);
	addr = (unsigned long)event;
	addr &= PAGE_MASK;

	bpage = cpu_buffer->tail_page;

	if (bpage == (void *)addr && rb_page_write(bpage) == old_index) {
		/*
		 * This is on the tail page. It is possible that
		 * a write could come in and move the tail page
		 * and write to the next page. That is fine
		 * because we just shorten what is on this page.
		 */
		index = local_cmpxchg(&bpage->write, old_index, new_index);
		if (index == old_index)
			goto out;
	}

	/*
	 * The commit is still visible to the reader, so we
	 * must increment entries.
	 */
	local_inc(&cpu_buffer->entries);
 out:
	/*
	 * If a write came in and pushed the tail page
	 * we still need to update the commit pointer
	 * if we were the commit.
	 */
	if (rb_is_commit(cpu_buffer, event))
		rb_set_commit_to_write(cpu_buffer);

	trace_recursive_unlock();

	/*
	 * Only the last preempt count needs to restore preemption.
	 */
	if (preempt_count() == 1)
		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
	else
		preempt_enable_no_resched_notrace();
}
EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
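
/*
 * A minimal sketch of the discard path (illustrative only; the filter
 * predicate "event_should_drop()" is hypothetical). The point is that a
 * reserved event ends in exactly one of discard_commit or unlock_commit:
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *	if (event) {
 *		entry = ring_buffer_event_data(event);
 *		if (event_should_drop(entry))
 *			ring_buffer_discard_commit(buffer, event);
 *		else
 *			ring_buffer_unlock_commit(buffer, event);
 *	}
 */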
/**
 * ring_buffer_write - write data to the buffer without reserving
 * @buffer: The ring buffer to write to.
 * @length: The length of the data being written (excluding the event header)
 * @data: The data to write to the buffer.
 *
 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
 * one function. If you already have the data to write to the buffer, it
 * may be easier to simply call this function.
 *
 * Note, like ring_buffer_lock_reserve, the length is the length of the data
 * and not the length of the event which would hold the header.
 */
int ring_buffer_write(struct ring_buffer *buffer,
		      unsigned long length,
		      void *data)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	unsigned long event_length;
	void *body;
	int ret = -EBUSY;
	int cpu, resched;

	if (ring_buffer_flags != RB_BUFFERS_ON)
		return -EBUSY;

	if (atomic_read(&buffer->record_disabled))
		return -EBUSY;

	resched = ftrace_preempt_disable();

	cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];

	if (atomic_read(&cpu_buffer->record_disabled))
		goto out;

	event_length = rb_calculate_event_length(length);
	event = rb_reserve_next_event(cpu_buffer, 0, event_length);
	if (!event)
		goto out;

	body = rb_event_data(event);

	memcpy(body, data, length);

	rb_commit(cpu_buffer, event);

	ret = 0;
 out:
	ftrace_preempt_enable(resched);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_write);
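
/*
 * A minimal sketch of the one-shot write path (illustrative only; the
 * payload "msg" is hypothetical). The data is copied into the buffer,
 * so the caller's copy can live on the stack:
 *
 *	char msg[] = "hello";
 *	int ret;
 *
 *	ret = ring_buffer_write(buffer, sizeof(msg), msg);
 *	if (ret)
 *		pr_debug("ring buffer write failed: %d\n", ret);
 */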
static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *reader = cpu_buffer->reader_page;
	struct buffer_page *head = cpu_buffer->head_page;
	struct buffer_page *commit = cpu_buffer->commit_page;

	return reader->read == rb_page_commit(reader) &&
		(commit == reader ||
		 (commit == head &&
		  head->read == rb_page_commit(commit)));
}

/**
 * ring_buffer_record_disable - stop all writes into the buffer
 * @buffer: The ring buffer to stop writes to.
 *
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
 * The caller should call synchronize_sched() after this.
 */
void ring_buffer_record_disable(struct ring_buffer *buffer)
{
	atomic_inc(&buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_disable);

/**
 * ring_buffer_record_enable - enable writes to the buffer
 * @buffer: The ring buffer to enable writes
 *
 * Note, multiple disables will need the same number of enables
 * to truly enable the writing (much like preempt_disable).
 */
void ring_buffer_record_enable(struct ring_buffer *buffer)
{
	atomic_dec(&buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
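
/*
 * A minimal sketch of the disable/enable pattern the kernel-doc above
 * describes (illustrative only):
 *
 *	ring_buffer_record_disable(buffer);
 *	synchronize_sched();	// wait out writers already in flight
 *	// ... inspect the buffer without new writes racing in ...
 *	ring_buffer_record_enable(buffer);
 */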
/**
 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
 * @buffer: The ring buffer to stop writes to.
 * @cpu: The CPU buffer to stop
 *
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
 * The caller should call synchronize_sched() after this.
 */
void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	cpu_buffer = buffer->buffers[cpu];
	atomic_inc(&cpu_buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);

/**
 * ring_buffer_record_enable_cpu - enable writes to the buffer
 * @buffer: The ring buffer to enable writes
 * @cpu: The CPU to enable.
 *
 * Note, multiple disables will need the same number of enables
 * to truly enable the writing (much like preempt_disable).
 */
void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	cpu_buffer = buffer->buffers[cpu];
	atomic_dec(&cpu_buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
/**
 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the entries from.
 */
unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	ret = (local_read(&cpu_buffer->entries) - cpu_buffer->overrun)
		- cpu_buffer->read;

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);

/**
 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the number of overruns from
 */
unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	ret = cpu_buffer->overrun;

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);

/**
 * ring_buffer_nmi_dropped_cpu - get the number of nmis that were dropped
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the number of dropped nmis from
 */
unsigned long ring_buffer_nmi_dropped_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	ret = cpu_buffer->nmi_dropped;

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_nmi_dropped_cpu);

/**
 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the number of overruns from
 */
unsigned long
ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	ret = cpu_buffer->commit_overrun;

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);

/**
 * ring_buffer_entries - get the number of entries in a buffer
 * @buffer: The ring buffer
 *
 * Returns the total number of entries in the ring buffer
 * (all CPU entries)
 */
unsigned long ring_buffer_entries(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long entries = 0;
	int cpu;

	/* if you care about this being correct, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		entries += (local_read(&cpu_buffer->entries) -
			    cpu_buffer->overrun) - cpu_buffer->read;
	}

	return entries;
}
EXPORT_SYMBOL_GPL(ring_buffer_entries);

/**
 * ring_buffer_overruns - get the number of overruns in the buffer
 * @buffer: The ring buffer
 *
 * Returns the total number of overruns in the ring buffer
 * (all CPU entries)
 */
unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long overruns = 0;
	int cpu;

	/* if you care about this being correct, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		overruns += cpu_buffer->overrun;
	}

	return overruns;
}
EXPORT_SYMBOL_GPL(ring_buffer_overruns);
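
/*
 * A minimal sketch of reading the per-cpu statistics above (illustrative
 * only; the counters are racy unless writers are stopped first):
 *
 *	unsigned long entries, overruns;
 *
 *	entries  = ring_buffer_entries_cpu(buffer, cpu);
 *	overruns = ring_buffer_overrun_cpu(buffer, cpu);
 *	pr_info("cpu%d: %lu entries, %lu lost\n", cpu, entries, overruns);
 */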
static void rb_iter_reset(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/* Iterator usage is expected to have record disabled */
	if (list_empty(&cpu_buffer->reader_page->list)) {
		iter->head_page = cpu_buffer->head_page;
		iter->head = cpu_buffer->head_page->read;
	} else {
		iter->head_page = cpu_buffer->reader_page;
		iter->head = cpu_buffer->reader_page->read;
	}
	if (iter->head)
		iter->read_stamp = cpu_buffer->read_stamp;
	else
		iter->read_stamp = iter->head_page->page->time_stamp;
}

/**
 * ring_buffer_iter_reset - reset an iterator
 * @iter: The iterator to reset
 *
 * Resets the iterator, so that it will start from the beginning
 * again.
 */
void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;

	if (!iter)
		return;

	cpu_buffer = iter->cpu_buffer;

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	rb_iter_reset(iter);
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);

/**
 * ring_buffer_iter_empty - check if an iterator has no more to read
 * @iter: The iterator to check
 */
int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	cpu_buffer = iter->cpu_buffer;

	return iter->head_page == cpu_buffer->commit_page &&
		iter->head == rb_commit_index(cpu_buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
static void
rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
		     struct ring_buffer_event *event)
{
	u64 delta;

	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		return;

	case RINGBUF_TYPE_TIME_EXTEND:
		delta = event->array[0];
		delta <<= TS_SHIFT;
		delta += event->time_delta;
		cpu_buffer->read_stamp += delta;
		return;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		return;

	case RINGBUF_TYPE_DATA:
		cpu_buffer->read_stamp += event->time_delta;
		return;

	default:
		BUG();
	}
	return;
}

static void
rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
			  struct ring_buffer_event *event)
{
	u64 delta;

	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		return;

	case RINGBUF_TYPE_TIME_EXTEND:
		delta = event->array[0];
		delta <<= TS_SHIFT;
		delta += event->time_delta;
		iter->read_stamp += delta;
		return;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		return;

	case RINGBUF_TYPE_DATA:
		iter->read_stamp += event->time_delta;
		return;

	default:
		BUG();
	}
	return;
}
static struct buffer_page *
rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *reader = NULL;
	unsigned long flags;
	int nr_loops = 0;

	local_irq_save(flags);
	__raw_spin_lock(&cpu_buffer->lock);

 again:
	/*
	 * This should normally only loop twice. But because the
	 * start of the reader inserts an empty page, it causes
	 * a case where we will loop three times. There should be no
	 * reason to loop four times (that I know of).
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
		reader = NULL;
		goto out;
	}

	reader = cpu_buffer->reader_page;

	/* If there's more to read, return this page */
	if (cpu_buffer->reader_page->read < rb_page_size(reader))
		goto out;

	/* Never should we have an index greater than the size */
	if (RB_WARN_ON(cpu_buffer,
		       cpu_buffer->reader_page->read > rb_page_size(reader)))
		goto out;

	/* check if we caught up to the tail */
	reader = NULL;
	if (cpu_buffer->commit_page == cpu_buffer->reader_page)
		goto out;

	/*
	 * Splice the empty reader page into the list around the head.
	 * Reset the reader page to size zero.
	 */
	reader = cpu_buffer->head_page;
	cpu_buffer->reader_page->list.next = reader->list.next;
	cpu_buffer->reader_page->list.prev = reader->list.prev;

	local_set(&cpu_buffer->reader_page->write, 0);
	local_set(&cpu_buffer->reader_page->entries, 0);
	local_set(&cpu_buffer->reader_page->page->commit, 0);

	/* Make the reader page now replace the head */
	reader->list.prev->next = &cpu_buffer->reader_page->list;
	reader->list.next->prev = &cpu_buffer->reader_page->list;

	/*
	 * If the tail is on the reader, then we must set the head
	 * to the inserted page, otherwise we set it one before.
	 */
	cpu_buffer->head_page = cpu_buffer->reader_page;

	if (cpu_buffer->commit_page != reader)
		rb_inc_page(cpu_buffer, &cpu_buffer->head_page);

	/* Finally update the reader page to the new head */
	cpu_buffer->reader_page = reader;
	rb_reset_reader_page(cpu_buffer);

	goto again;

 out:
	__raw_spin_unlock(&cpu_buffer->lock);
	local_irq_restore(flags);

	return reader;
}
static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct ring_buffer_event *event;
	struct buffer_page *reader;
	unsigned length;

	reader = rb_get_reader_page(cpu_buffer);

	/* This function should not be called when buffer is empty */
	if (RB_WARN_ON(cpu_buffer, !reader))
		return;

	event = rb_reader_event(cpu_buffer);

	if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX
			|| rb_discarded_event(event))
		cpu_buffer->read++;

	rb_update_read_stamp(cpu_buffer, event);

	length = rb_event_length(event);
	cpu_buffer->reader_page->read += length;
}

static void rb_advance_iter(struct ring_buffer_iter *iter)
{
	struct ring_buffer *buffer;
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	unsigned length;

	cpu_buffer = iter->cpu_buffer;
	buffer = cpu_buffer->buffer;

	/*
	 * Check if we are at the end of the buffer.
	 */
	if (iter->head >= rb_page_size(iter->head_page)) {
		if (RB_WARN_ON(buffer,
			       iter->head_page == cpu_buffer->commit_page))
			return;
		rb_inc_iter(iter);
		return;
	}

	event = rb_iter_head_event(iter);

	length = rb_event_length(event);

	/*
	 * This should not be called to advance the header if we are
	 * at the tail of the buffer.
	 */
	if (RB_WARN_ON(cpu_buffer,
		       (iter->head_page == cpu_buffer->commit_page) &&
		       (iter->head + length > rb_commit_index(cpu_buffer))))
		return;

	rb_update_iter_read_stamp(iter, event);

	iter->head += length;

	/* check for end of page padding */
	if ((iter->head >= rb_page_size(iter->head_page)) &&
	    (iter->head_page != cpu_buffer->commit_page))
		rb_advance_iter(iter);
}
static struct ring_buffer_event *
rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	struct buffer_page *reader;
	int nr_loops = 0;

	cpu_buffer = buffer->buffers[cpu];

 again:
	/*
	 * We repeat when a timestamp is encountered. It is possible
	 * to get multiple timestamps from an interrupt entering just
	 * as one timestamp is about to be written. The max times
	 * that this can happen is the number of nested interrupts we
	 * can have. Nesting 10 deep of interrupts is clearly
	 * an anomaly.
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
		return NULL;

	reader = rb_get_reader_page(cpu_buffer);
	if (!reader)
		return NULL;

	event = rb_reader_event(cpu_buffer);

	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			RB_WARN_ON(cpu_buffer, 1);
		/*
		 * Because the writer could be discarding every
		 * event it creates (which would probably be bad)
		 * if we were to go back to "again" then we may never
		 * catch up, and will trigger the warn on, or lock
		 * the box. Return the padding, and we will release
		 * the current locks, and try again.
		 */
		rb_advance_reader(cpu_buffer);
		return event;

	case RINGBUF_TYPE_TIME_EXTEND:
		/* Internal data, OK to advance */
		rb_advance_reader(cpu_buffer);
		goto again;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		rb_advance_reader(cpu_buffer);
		goto again;

	case RINGBUF_TYPE_DATA:
		if (ts) {
			*ts = cpu_buffer->read_stamp + event->time_delta;
			ring_buffer_normalize_time_stamp(buffer,
							 cpu_buffer->cpu, ts);
		}
		return event;

	default:
		BUG();
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_peek);
static struct ring_buffer_event *
rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer *buffer;
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	int nr_loops = 0;

	if (ring_buffer_iter_empty(iter))
		return NULL;

	cpu_buffer = iter->cpu_buffer;
	buffer = cpu_buffer->buffer;

 again:
	/*
	 * We repeat when a timestamp is encountered. It is possible
	 * to get multiple timestamps from an interrupt entering just
	 * as one timestamp is about to be written. The max times
	 * that this can happen is the number of nested interrupts we
	 * can have. Nesting 10 deep of interrupts is clearly
	 * an anomaly.
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
		return NULL;

	if (rb_per_cpu_empty(cpu_buffer))
		return NULL;

	event = rb_iter_head_event(iter);

	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event)) {
			rb_inc_iter(iter);
			goto again;
		}
		rb_advance_iter(iter);
		return event;

	case RINGBUF_TYPE_TIME_EXTEND:
		/* Internal data, OK to advance */
		rb_advance_iter(iter);
		goto again;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		rb_advance_iter(iter);
		goto again;

	case RINGBUF_TYPE_DATA:
		if (ts) {
			*ts = iter->read_stamp + event->time_delta;
			ring_buffer_normalize_time_stamp(buffer,
							 cpu_buffer->cpu, ts);
		}
		return event;

	default:
		BUG();
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
/**
 * ring_buffer_peek - peek at the next event to be read
 * @buffer: The ring buffer to read
 * @cpu: The cpu to peek at
 * @ts: The timestamp counter of this event.
 *
 * This will return the event that will be read next, but does
 * not consume the data.
 */
struct ring_buffer_event *
ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	struct ring_buffer_event *event;
	unsigned long flags;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return NULL;

 again:
	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	event = rb_buffer_peek(buffer, cpu, ts);
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	if (event && event->type_len == RINGBUF_TYPE_PADDING) {
		cpu_relax();
		goto again;
	}

	return event;
}
/**
 * ring_buffer_iter_peek - peek at the next event to be read
 * @iter: The ring buffer iterator
 * @ts: The timestamp counter of this event.
 *
 * This will return the event that will be read next, but does
 * not increment the iterator.
 */
struct ring_buffer_event *
ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
	struct ring_buffer_event *event;
	unsigned long flags;

 again:
	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	event = rb_iter_peek(iter, ts);
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	if (event && event->type_len == RINGBUF_TYPE_PADDING) {
		cpu_relax();
		goto again;
	}

	return event;
}
/**
 * ring_buffer_consume - return an event and consume it
 * @buffer: The ring buffer to get the next event from
 * @cpu: The per CPU buffer to consume from
 * @ts: Where to store the event's timestamp (may be NULL)
 *
 * Returns the next event in the ring buffer, and that event is consumed.
 * Meaning, that sequential reads will keep returning a different event,
 * and eventually empty the ring buffer if the producer is slower.
 */
struct ring_buffer_event *
ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event = NULL;
	unsigned long flags;

 again:
	/* might be called in atomic */
	preempt_disable();

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];
	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	event = rb_buffer_peek(buffer, cpu, ts);
	if (!event)
		goto out_unlock;

	rb_advance_reader(cpu_buffer);

 out_unlock:
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

 out:
	preempt_enable();

	if (event && event->type_len == RINGBUF_TYPE_PADDING) {
		cpu_relax();
		goto again;
	}

	return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_consume);
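
/*
 * A minimal sketch of a consuming-read loop over one CPU (illustrative
 * only; "dump_event()" is a hypothetical consumer):
 *
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts)))
 *		dump_event(ring_buffer_event_data(event), ts);
 */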
/**
 * ring_buffer_read_start - start a non consuming read of the buffer
 * @buffer: The ring buffer to read from
 * @cpu: The cpu buffer to iterate over
 *
 * This starts up an iteration through the buffer. It also disables
 * the recording to the buffer until the reading is finished.
 * This prevents the reading from being corrupted. This is not
 * a consuming read, so a producer is not expected.
 *
 * Must be paired with ring_buffer_read_finish.
 */
struct ring_buffer_iter *
ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_iter *iter;
	unsigned long flags;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return NULL;

	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	cpu_buffer = buffer->buffers[cpu];

	iter->cpu_buffer = cpu_buffer;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	__raw_spin_lock(&cpu_buffer->lock);
	rb_iter_reset(iter);
	__raw_spin_unlock(&cpu_buffer->lock);
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	return iter;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_start);

/**
 * ring_buffer_read_finish - finish reading the iterator of the buffer
 * @iter: The iterator retrieved by ring_buffer_read_start
 *
 * This re-enables the recording to the buffer, and frees the
 * iterator.
 */
void
ring_buffer_read_finish(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	atomic_dec(&cpu_buffer->record_disabled);
	kfree(iter);
}
EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
/**
 * ring_buffer_read - read the next item in the ring buffer by the iterator
 * @iter: The ring buffer iterator
 * @ts: The time stamp of the event read.
 *
 * This reads the next event in the ring buffer and increments the iterator.
 */
struct ring_buffer_event *
ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer_event *event;
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
	unsigned long flags;

 again:
	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	event = rb_iter_peek(iter, ts);
	if (!event)
		goto out;

	rb_advance_iter(iter);
 out:
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	if (event && event->type_len == RINGBUF_TYPE_PADDING) {
		cpu_relax();
		goto again;
	}

	return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_read);
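
/*
 * A minimal sketch of a non-consuming iteration over one CPU buffer,
 * pairing the start/read/finish calls above (illustrative only):
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_start(buffer, cpu);
 *	if (!iter)
 *		return;
 *	while ((event = ring_buffer_read(iter, &ts)))
 *		;	// inspect the event; the buffer itself is untouched
 *	ring_buffer_read_finish(iter);
 */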
/**
 * ring_buffer_size - return the size of the ring buffer (in bytes)
 * @buffer: The ring buffer.
 */
unsigned long ring_buffer_size(struct ring_buffer *buffer)
{
	return BUF_PAGE_SIZE * buffer->pages;
}
EXPORT_SYMBOL_GPL(ring_buffer_size);
static void
rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
{
	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
	local_set(&cpu_buffer->head_page->write, 0);
	local_set(&cpu_buffer->head_page->entries, 0);
	local_set(&cpu_buffer->head_page->page->commit, 0);

	cpu_buffer->head_page->read = 0;

	cpu_buffer->tail_page = cpu_buffer->head_page;
	cpu_buffer->commit_page = cpu_buffer->head_page;

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
	local_set(&cpu_buffer->reader_page->write, 0);
	local_set(&cpu_buffer->reader_page->entries, 0);
	local_set(&cpu_buffer->reader_page->page->commit, 0);
	cpu_buffer->reader_page->read = 0;

	cpu_buffer->nmi_dropped = 0;
	cpu_buffer->commit_overrun = 0;
	cpu_buffer->overrun = 0;
	cpu_buffer->read = 0;
	local_set(&cpu_buffer->entries, 0);

	cpu_buffer->write_stamp = 0;
	cpu_buffer->read_stamp = 0;
}

/**
 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
 * @buffer: The ring buffer to reset a per cpu buffer of
 * @cpu: The CPU buffer to be reset
 */
void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	unsigned long flags;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	atomic_inc(&cpu_buffer->record_disabled);

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	__raw_spin_lock(&cpu_buffer->lock);

	rb_reset_cpu(cpu_buffer);

	__raw_spin_unlock(&cpu_buffer->lock);

	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	atomic_dec(&cpu_buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);

/**
 * ring_buffer_reset - reset a ring buffer
 * @buffer: The ring buffer to reset all cpu buffers
 */
void ring_buffer_reset(struct ring_buffer *buffer)
{
	int cpu;

	for_each_buffer_cpu(buffer, cpu)
		ring_buffer_reset_cpu(buffer, cpu);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset);
/**
 * ring_buffer_empty - is the ring buffer empty?
 * @buffer: The ring buffer to test
 */
int ring_buffer_empty(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu;

	/* yes this is racy, but if you don't like the race, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		if (!rb_per_cpu_empty(cpu_buffer))
			return 0;
	}

	return 1;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty);
/**
 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
 * @buffer: The ring buffer
 * @cpu: The CPU buffer to test
 */
int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 1;

	cpu_buffer = buffer->buffers[cpu];
	ret = rb_per_cpu_empty(cpu_buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
/**
 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
 * @buffer_a: One buffer to swap with
 * @buffer_b: The other buffer to swap with
 * @cpu: The CPU buffer to swap
 *
 * This function is useful for tracers that want to take a "snapshot"
 * of a CPU buffer and have another backup buffer lying around.
 * It is expected that the tracer handles the cpu buffer not being
 * used at the moment.
 */
int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
			 struct ring_buffer *buffer_b, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer_a;
	struct ring_buffer_per_cpu *cpu_buffer_b;
	int ret = -EINVAL;

	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
		goto out;

	/* At least make sure the two buffers are somewhat the same */
	if (buffer_a->pages != buffer_b->pages)
		goto out;

	ret = -EAGAIN;

	if (ring_buffer_flags != RB_BUFFERS_ON)
		goto out;

	if (atomic_read(&buffer_a->record_disabled))
		goto out;

	if (atomic_read(&buffer_b->record_disabled))
		goto out;

	cpu_buffer_a = buffer_a->buffers[cpu];
	cpu_buffer_b = buffer_b->buffers[cpu];

	if (atomic_read(&cpu_buffer_a->record_disabled))
		goto out;

	if (atomic_read(&cpu_buffer_b->record_disabled))
		goto out;

	/*
	 * We can't do a synchronize_sched here because this
	 * function can be called in atomic context.
	 * Normally this will be called from the same CPU as cpu.
	 * If not it's up to the caller to protect this.
	 */
	atomic_inc(&cpu_buffer_a->record_disabled);
	atomic_inc(&cpu_buffer_b->record_disabled);

	buffer_a->buffers[cpu] = cpu_buffer_b;
	buffer_b->buffers[cpu] = cpu_buffer_a;

	cpu_buffer_b->buffer = buffer_a;
	cpu_buffer_a->buffer = buffer_b;

	atomic_dec(&cpu_buffer_a->record_disabled);
	atomic_dec(&cpu_buffer_b->record_disabled);

	ret = 0;
 out:
	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
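
/*
 * A minimal snapshot sketch built on the swap (illustrative only;
 * "live_buf", "spare_buf" and "read_snapshot()" are hypothetical; the
 * spare buffer must have the same number of pages as the live one):
 *
 *	// Preserve the interesting cpu buffer: tracing continues into
 *	// the spare while the snapshot is read out at leisure.
 *	if (ring_buffer_swap_cpu(live_buf, spare_buf, cpu) == 0)
 *		read_snapshot(spare_buf, cpu);
 */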
/**
 * ring_buffer_alloc_read_page - allocate a page to read from buffer
 * @buffer: the buffer to allocate for.
 *
 * This function is used in conjunction with ring_buffer_read_page.
 * When reading a full page from the ring buffer, these functions
 * can be used to speed up the process. The calling function should
 * allocate a few pages first with this function. Then when it
 * needs to get pages from the ring buffer, it passes the result
 * of this function into ring_buffer_read_page, which will swap
 * the page that was allocated, with the read page of the buffer.
 *
 * Returns:
 *  The page allocated, or NULL on error.
 */
void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
{
	struct buffer_data_page *bpage;
	unsigned long addr;

	addr = __get_free_page(GFP_KERNEL);
	if (!addr)
		return NULL;

	bpage = (void *)addr;

	rb_init_page(bpage);

	return bpage;
}
EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);

/**
 * ring_buffer_free_read_page - free an allocated read page
 * @buffer: the buffer the page was allocated for
 * @data: the page to free
 *
 * Free a page allocated from ring_buffer_alloc_read_page.
 */
void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
{
	free_page((unsigned long)data);
}
EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
/**
 * ring_buffer_read_page - extract a page from the ring buffer
 * @buffer: buffer to extract from
 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
 * @len: amount to extract
 * @cpu: the cpu of the buffer to extract
 * @full: should the extraction only happen when the page is full.
 *
 * This function will pull out a page from the ring buffer and consume it.
 * @data_page must be the address of the variable that was returned
 * from ring_buffer_alloc_read_page. This is because the page might be used
 * to swap with a page in the ring buffer.
 *
 * for example:
 *	rpage = ring_buffer_alloc_read_page(buffer);
 *	if (!rpage)
 *		return error;
 *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
 *	if (ret >= 0)
 *		process_page(rpage, ret);
 *
 * When @full is set, the function will not return a page unless
 * the writer is off the reader page.
 *
 * Note: it is up to the calling functions to handle sleeps and wakeups.
 *  The ring buffer can be used anywhere in the kernel and can not
 *  blindly call wake_up. The layer that uses the ring buffer must be
 *  responsible for that.
 *
 * Returns:
 *  >=0 if data has been transferred, returns the offset of consumed data.
 *  <0 if no data has been transferred.
 */
int ring_buffer_read_page(struct ring_buffer *buffer,
			  void **data_page, size_t len, int cpu, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	struct ring_buffer_event *event;
	struct buffer_data_page *bpage;
	struct buffer_page *reader;
	unsigned long flags;
	unsigned int commit;
	unsigned int read;
	u64 save_timestamp;
	int ret = -1;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	/*
	 * If len is not big enough to hold the page header, then
	 * we can not copy anything.
	 */
	if (len <= BUF_PAGE_HDR_SIZE)
		goto out;

	len -= BUF_PAGE_HDR_SIZE;

	if (!data_page)
		goto out;

	bpage = *data_page;
	if (!bpage)
		goto out;

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	reader = rb_get_reader_page(cpu_buffer);
	if (!reader)
		goto out_unlock;

	event = rb_reader_event(cpu_buffer);

	read = reader->read;
	commit = rb_page_commit(reader);

	/*
	 * If this page has been partially read or
	 * if len is not big enough to read the rest of the page or
	 * a writer is still on the page, then
	 * we must copy the data from the page to the buffer.
	 * Otherwise, we can simply swap the page with the one passed in.
	 */
	if (read || (len < (commit - read)) ||
	    cpu_buffer->reader_page == cpu_buffer->commit_page) {
		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
		unsigned int rpos = read;
		unsigned int pos = 0;
		unsigned int size;

		if (full)
			goto out_unlock;

		if (len > (commit - read))
			len = (commit - read);

		size = rb_event_length(event);

		if (len < size)
			goto out_unlock;

		/* save the current timestamp, since the user will need it */
		save_timestamp = cpu_buffer->read_stamp;

		/* Need to copy one event at a time */
		do {
			memcpy(bpage->data + pos, rpage->data + rpos, size);

			len -= size;

			rb_advance_reader(cpu_buffer);
			rpos = reader->read;
			pos += size;

			event = rb_reader_event(cpu_buffer);
			size = rb_event_length(event);
		} while (len > size);

		/* update bpage */
		local_set(&bpage->commit, pos);
		bpage->time_stamp = save_timestamp;

		/* we copied everything to the beginning */
		read = 0;
	} else {
		/* update the entry counter */
		cpu_buffer->read += local_read(&reader->entries);

		/* swap the pages */
		rb_init_page(bpage);
		bpage = reader->page;
		reader->page = *data_page;
		local_set(&reader->write, 0);
		local_set(&reader->entries, 0);
		reader->read = 0;
		*data_page = bpage;
	}
	ret = read;

 out_unlock:
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

 out:
	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_page);
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	unsigned long *p = filp->private_data;
	char buf[64];
	int r;

	if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
		r = sprintf(buf, "permanently disabled\n");
	else
		r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	unsigned long *p = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	if (val)
		set_bit(RB_BUFFERS_ON_BIT, p);
	else
		clear_bit(RB_BUFFERS_ON_BIT, p);

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
};

static __init int rb_init_debugfs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();

	trace_create_file("tracing_on", 0644, d_tracer,
			  &ring_buffer_flags, &rb_simple_fops);

	return 0;
}

fs_initcall(rb_init_debugfs);
#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu)
{
	struct ring_buffer *buffer =
		container_of(self, struct ring_buffer, cpu_notify);
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (cpu_isset(cpu, *buffer->cpumask))
			return NOTIFY_OK;

		buffer->buffers[cpu] =
			rb_allocate_cpu_buffer(buffer, cpu);
		if (!buffer->buffers[cpu]) {
			WARN(1, "failed to allocate ring buffer on CPU %ld\n",
			     cpu);
			return NOTIFY_OK;
		}
		smp_wmb();
		cpu_set(cpu, *buffer->cpumask);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		/*
		 * Do nothing.
		 *  If we were to free the buffer, then the user would
		 *  lose any trace that was in the buffer.
		 */
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
#endif