ring_buffer.c

  1. /*
  2. * Generic ring buffer
  3. *
  4. * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
  5. */
  6. #include <linux/ring_buffer.h>
  7. #include <linux/spinlock.h>
  8. #include <linux/debugfs.h>
  9. #include <linux/uaccess.h>
  10. #include <linux/module.h>
  11. #include <linux/percpu.h>
  12. #include <linux/mutex.h>
  13. #include <linux/sched.h> /* used for sched_clock() (for now) */
  14. #include <linux/init.h>
  15. #include <linux/hash.h>
  16. #include <linux/list.h>
  17. #include <linux/fs.h>
  18. #include "trace.h"
  19. /* Global flag to disable all recording to ring buffers */
  20. static int ring_buffers_off __read_mostly;
  21. /**
  22. * tracing_on - enable all tracing buffers
  23. *
  24. * This function enables all tracing buffers that may have been
  25. * disabled with tracing_off.
  26. */
  27. void tracing_on(void)
  28. {
  29. ring_buffers_off = 0;
  30. }
  31. /**
  32. * tracing_off - turn off all tracing buffers
  33. *
  34. * This function stops all tracing buffers from recording data.
  35. * It does not disable any overhead the tracers themselves may
  36. * be causing. This function simply causes all recording to
  37. * the ring buffers to fail.
  38. */
  39. void tracing_off(void)
  40. {
  41. ring_buffers_off = 1;
  42. }
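/*
 * Usage sketch (illustrative only, not part of the original file): a
 * caller that wants to freeze a trace around an interesting region can
 * bracket it with these helpers:
 *
 *	tracing_on();
 *	do_something_interesting();
 *	tracing_off();
 *
 * Once ring_buffers_off is set, ring_buffer_lock_reserve() and
 * ring_buffer_write() below simply refuse to record, so the data already
 * in the buffers is preserved.
 */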
  44. /* Up this if you want to test the TIME_EXTENTS and normalization */
  45. #define DEBUG_SHIFT 0
  46. /* FIXME!!! */
  47. u64 ring_buffer_time_stamp(int cpu)
  48. {
  49. u64 time;
  50. preempt_disable_notrace();
  51. /* shift to debug/test normalization and TIME_EXTENTS */
  52. time = sched_clock() << DEBUG_SHIFT;
  53. preempt_enable_notrace();
  54. return time;
  55. }
  56. void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
  57. {
  58. /* Just stupid testing the normalize function and deltas */
  59. *ts >>= DEBUG_SHIFT;
  60. }
  61. #define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
  62. #define RB_ALIGNMENT_SHIFT 2
  63. #define RB_ALIGNMENT (1 << RB_ALIGNMENT_SHIFT)
  64. #define RB_MAX_SMALL_DATA 28
  65. enum {
  66. RB_LEN_TIME_EXTEND = 8,
  67. RB_LEN_TIME_STAMP = 16,
  68. };
  69. /* inline for ring buffer fast paths */
  70. static inline unsigned
  71. rb_event_length(struct ring_buffer_event *event)
  72. {
  73. unsigned length;
  74. switch (event->type) {
  75. case RINGBUF_TYPE_PADDING:
  76. /* undefined */
  77. return -1;
  78. case RINGBUF_TYPE_TIME_EXTEND:
  79. return RB_LEN_TIME_EXTEND;
  80. case RINGBUF_TYPE_TIME_STAMP:
  81. return RB_LEN_TIME_STAMP;
  82. case RINGBUF_TYPE_DATA:
  83. if (event->len)
  84. length = event->len << RB_ALIGNMENT_SHIFT;
  85. else
  86. length = event->array[0];
  87. return length + RB_EVNT_HDR_SIZE;
  88. default:
  89. BUG();
  90. }
  91. /* not hit */
  92. return 0;
  93. }
  94. /**
  95. * ring_buffer_event_length - return the length of the event
  96. * @event: the event to get the length of
  97. */
  98. unsigned ring_buffer_event_length(struct ring_buffer_event *event)
  99. {
  100. return rb_event_length(event);
  101. }
  102. /* inline for ring buffer fast paths */
  103. static inline void *
  104. rb_event_data(struct ring_buffer_event *event)
  105. {
  106. BUG_ON(event->type != RINGBUF_TYPE_DATA);
  107. /* If length is in len field, then array[0] has the data */
  108. if (event->len)
  109. return (void *)&event->array[0];
  110. /* Otherwise length is in array[0] and array[1] has the data */
  111. return (void *)&event->array[1];
  112. }
  113. /**
  114. * ring_buffer_event_data - return the data of the event
  115. * @event: the event to get the data from
  116. */
  117. void *ring_buffer_event_data(struct ring_buffer_event *event)
  118. {
  119. return rb_event_data(event);
  120. }
  121. #define for_each_buffer_cpu(buffer, cpu) \
  122. for_each_cpu_mask(cpu, buffer->cpumask)
  123. #define TS_SHIFT 27
  124. #define TS_MASK ((1ULL << TS_SHIFT) - 1)
  125. #define TS_DELTA_TEST (~TS_MASK)
  126. /*
  127. * This hack stolen from mm/slob.c.
  128. * We can store per page timing information in the page frame of the page.
  129. * Thanks to Peter Zijlstra for suggesting this idea.
  130. */
  131. struct buffer_page {
  132. u64 time_stamp; /* page time stamp */
  133. local_t write; /* index for next write */
  134. local_t commit; /* write committed index */
  135. unsigned read; /* index for next read */
  136. struct list_head list; /* list of free pages */
  137. void *page; /* Actual data page */
  138. };
  139. /*
  140. * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
  141. * this issue out.
  142. */
  143. static inline void free_buffer_page(struct buffer_page *bpage)
  144. {
  145. if (bpage->page)
  146. free_page((unsigned long)bpage->page);
  147. kfree(bpage);
  148. }
  149. /*
  150. * We need to fit the time_stamp delta into 27 bits.
  151. */
  152. static inline int test_time_stamp(u64 delta)
  153. {
  154. if (delta & TS_DELTA_TEST)
  155. return 1;
  156. return 0;
  157. }
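/*
 * Back-of-the-envelope note (assumes sched_clock() counts nanoseconds,
 * as it does on most configurations): with TS_SHIFT = 27 the per-event
 * delta field can hold at most 2^27 - 1 ns, roughly 134 ms. Any two
 * events further apart than that force rb_add_time_stamp() to insert a
 * TIME_EXTEND event carrying the full delta.
 */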
  158. #define BUF_PAGE_SIZE PAGE_SIZE
  159. /*
  160. * When head_page == tail_page && head == tail, the buffer is empty.
  161. */
  162. struct ring_buffer_per_cpu {
  163. int cpu;
  164. struct ring_buffer *buffer;
  165. spinlock_t reader_lock; /* serialize readers */
  166. raw_spinlock_t lock;
  167. struct lock_class_key lock_key;
  168. struct list_head pages;
  169. struct buffer_page *head_page; /* read from head */
  170. struct buffer_page *tail_page; /* write to tail */
  171. struct buffer_page *commit_page; /* committed pages */
  172. struct buffer_page *reader_page;
  173. unsigned long overrun;
  174. unsigned long entries;
  175. u64 write_stamp;
  176. u64 read_stamp;
  177. atomic_t record_disabled;
  178. };
  179. struct ring_buffer {
  180. unsigned long size;
  181. unsigned pages;
  182. unsigned flags;
  183. int cpus;
  184. cpumask_t cpumask;
  185. atomic_t record_disabled;
  186. struct mutex mutex;
  187. struct ring_buffer_per_cpu **buffers;
  188. };
  189. struct ring_buffer_iter {
  190. struct ring_buffer_per_cpu *cpu_buffer;
  191. unsigned long head;
  192. struct buffer_page *head_page;
  193. u64 read_stamp;
  194. };
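/*
 * Summary of the per-cpu page roles (drawn from the comments above and
 * the code below): writers push tail_page forward, commit_page trails it
 * and only advances once an event is fully written, and readers never
 * touch those pages directly. Instead, rb_get_reader_page() swaps the
 * spare reader_page into the ring in place of head_page, so a reader can
 * consume a full page without blocking the writer.
 */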
  195. /* buffer may be either ring_buffer or ring_buffer_per_cpu */
  196. #define RB_WARN_ON(buffer, cond) \
  197. ({ \
  198. int _____ret = unlikely(cond); \
  199. if (_____ret) { \
  200. atomic_inc(&buffer->record_disabled); \
  201. WARN_ON(1); \
  202. } \
  203. _____ret; \
  204. })
  205. /**
  206. * rb_check_pages - integrity check of buffer pages
  207. * @cpu_buffer: CPU buffer with pages to test
  208. *
  209. * As a safety measure we check to make sure the data pages have not
  210. * been corrupted.
  211. */
  212. static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
  213. {
  214. struct list_head *head = &cpu_buffer->pages;
  215. struct buffer_page *page, *tmp;
  216. if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
  217. return -1;
  218. if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
  219. return -1;
  220. list_for_each_entry_safe(page, tmp, head, list) {
  221. if (RB_WARN_ON(cpu_buffer,
  222. page->list.next->prev != &page->list))
  223. return -1;
  224. if (RB_WARN_ON(cpu_buffer,
  225. page->list.prev->next != &page->list))
  226. return -1;
  227. }
  228. return 0;
  229. }
  230. static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
  231. unsigned nr_pages)
  232. {
  233. struct list_head *head = &cpu_buffer->pages;
  234. struct buffer_page *page, *tmp;
  235. unsigned long addr;
  236. LIST_HEAD(pages);
  237. unsigned i;
  238. for (i = 0; i < nr_pages; i++) {
  239. page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
  240. GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
  241. if (!page)
  242. goto free_pages;
  243. list_add(&page->list, &pages);
  244. addr = __get_free_page(GFP_KERNEL);
  245. if (!addr)
  246. goto free_pages;
  247. page->page = (void *)addr;
  248. }
  249. list_splice(&pages, head);
  250. rb_check_pages(cpu_buffer);
  251. return 0;
  252. free_pages:
  253. list_for_each_entry_safe(page, tmp, &pages, list) {
  254. list_del_init(&page->list);
  255. free_buffer_page(page);
  256. }
  257. return -ENOMEM;
  258. }
  259. static struct ring_buffer_per_cpu *
  260. rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
  261. {
  262. struct ring_buffer_per_cpu *cpu_buffer;
  263. struct buffer_page *page;
  264. unsigned long addr;
  265. int ret;
  266. cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
  267. GFP_KERNEL, cpu_to_node(cpu));
  268. if (!cpu_buffer)
  269. return NULL;
  270. cpu_buffer->cpu = cpu;
  271. cpu_buffer->buffer = buffer;
  272. spin_lock_init(&cpu_buffer->reader_lock);
  273. cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
  274. INIT_LIST_HEAD(&cpu_buffer->pages);
  275. page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
  276. GFP_KERNEL, cpu_to_node(cpu));
  277. if (!page)
  278. goto fail_free_buffer;
  279. cpu_buffer->reader_page = page;
  280. addr = __get_free_page(GFP_KERNEL);
  281. if (!addr)
  282. goto fail_free_reader;
  283. page->page = (void *)addr;
  284. INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
  285. ret = rb_allocate_pages(cpu_buffer, buffer->pages);
  286. if (ret < 0)
  287. goto fail_free_reader;
  288. cpu_buffer->head_page
  289. = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
  290. cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
  291. return cpu_buffer;
  292. fail_free_reader:
  293. free_buffer_page(cpu_buffer->reader_page);
  294. fail_free_buffer:
  295. kfree(cpu_buffer);
  296. return NULL;
  297. }
  298. static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
  299. {
  300. struct list_head *head = &cpu_buffer->pages;
  301. struct buffer_page *page, *tmp;
  302. list_del_init(&cpu_buffer->reader_page->list);
  303. free_buffer_page(cpu_buffer->reader_page);
  304. list_for_each_entry_safe(page, tmp, head, list) {
  305. list_del_init(&page->list);
  306. free_buffer_page(page);
  307. }
  308. kfree(cpu_buffer);
  309. }
  310. /*
  311. * Causes compile errors if the struct buffer_page gets bigger
  312. * than the struct page.
  313. */
  314. extern int ring_buffer_page_too_big(void);
  315. /**
  316. * ring_buffer_alloc - allocate a new ring_buffer
  317. * @size: the size in bytes that is needed.
  318. * @flags: attributes to set for the ring buffer.
  319. *
  320. * Currently the only flag that is available is the RB_FL_OVERWRITE
  321. * flag. This flag means that the buffer will overwrite old data
  322. * when the buffer wraps. If this flag is not set, the buffer will
  323. * drop data when the tail hits the head.
  324. */
  325. struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
  326. {
  327. struct ring_buffer *buffer;
  328. int bsize;
  329. int cpu;
  330. /* Paranoid! Optimizes out when all is well */
  331. if (sizeof(struct buffer_page) > sizeof(struct page))
  332. ring_buffer_page_too_big();
  333. /* keep it in its own cache line */
  334. buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
  335. GFP_KERNEL);
  336. if (!buffer)
  337. return NULL;
  338. buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
  339. buffer->flags = flags;
  340. /* need at least two pages */
  341. if (buffer->pages == 1)
  342. buffer->pages++;
  343. buffer->cpumask = cpu_possible_map;
  344. buffer->cpus = nr_cpu_ids;
  345. bsize = sizeof(void *) * nr_cpu_ids;
  346. buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
  347. GFP_KERNEL);
  348. if (!buffer->buffers)
  349. goto fail_free_buffer;
  350. for_each_buffer_cpu(buffer, cpu) {
  351. buffer->buffers[cpu] =
  352. rb_allocate_cpu_buffer(buffer, cpu);
  353. if (!buffer->buffers[cpu])
  354. goto fail_free_buffers;
  355. }
  356. mutex_init(&buffer->mutex);
  357. return buffer;
  358. fail_free_buffers:
  359. for_each_buffer_cpu(buffer, cpu) {
  360. if (buffer->buffers[cpu])
  361. rb_free_cpu_buffer(buffer->buffers[cpu]);
  362. }
  363. kfree(buffer->buffers);
  364. fail_free_buffer:
  365. kfree(buffer);
  366. return NULL;
  367. }
  368. /**
  369. * ring_buffer_free - free a ring buffer.
  370. * @buffer: the buffer to free.
  371. */
  372. void
  373. ring_buffer_free(struct ring_buffer *buffer)
  374. {
  375. int cpu;
  376. for_each_buffer_cpu(buffer, cpu)
  377. rb_free_cpu_buffer(buffer->buffers[cpu]);
  378. kfree(buffer);
  379. }
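/*
 * Allocation sketch (illustrative, error handling trimmed): a tracer
 * that wants roughly one megabyte of overwriting buffer per CPU could do
 *
 *	struct ring_buffer *rb;
 *
 *	rb = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
 *	if (!rb)
 *		return -ENOMEM;
 *	...
 *	ring_buffer_free(rb);
 *
 * The size is rounded up to whole pages and each CPU gets its own set of
 * pages, with a minimum of two pages per CPU.
 */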
  380. static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
  381. static void
  382. rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
  383. {
  384. struct buffer_page *page;
  385. struct list_head *p;
  386. unsigned i;
  387. atomic_inc(&cpu_buffer->record_disabled);
  388. synchronize_sched();
  389. for (i = 0; i < nr_pages; i++) {
  390. if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
  391. return;
  392. p = cpu_buffer->pages.next;
  393. page = list_entry(p, struct buffer_page, list);
  394. list_del_init(&page->list);
  395. free_buffer_page(page);
  396. }
  397. if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
  398. return;
  399. rb_reset_cpu(cpu_buffer);
  400. rb_check_pages(cpu_buffer);
  401. atomic_dec(&cpu_buffer->record_disabled);
  402. }
  403. static void
  404. rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
  405. struct list_head *pages, unsigned nr_pages)
  406. {
  407. struct buffer_page *page;
  408. struct list_head *p;
  409. unsigned i;
  410. atomic_inc(&cpu_buffer->record_disabled);
  411. synchronize_sched();
  412. for (i = 0; i < nr_pages; i++) {
  413. if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
  414. return;
  415. p = pages->next;
  416. page = list_entry(p, struct buffer_page, list);
  417. list_del_init(&page->list);
  418. list_add_tail(&page->list, &cpu_buffer->pages);
  419. }
  420. rb_reset_cpu(cpu_buffer);
  421. rb_check_pages(cpu_buffer);
  422. atomic_dec(&cpu_buffer->record_disabled);
  423. }
  424. /**
  425. * ring_buffer_resize - resize the ring buffer
  426. * @buffer: the buffer to resize.
  427. * @size: the new size.
  428. *
  429. * The tracer is responsible for making sure that the buffer is
  430. * not being used while changing the size.
  431. * Note: We may be able to change the above requirement by using
  432. * RCU synchronizations.
  433. *
  434. * Minimum size is 2 * BUF_PAGE_SIZE.
  435. *
  436. * Returns -1 on failure.
  437. */
  438. int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
  439. {
  440. struct ring_buffer_per_cpu *cpu_buffer;
  441. unsigned nr_pages, rm_pages, new_pages;
  442. struct buffer_page *page, *tmp;
  443. unsigned long buffer_size;
  444. unsigned long addr;
  445. LIST_HEAD(pages);
  446. int i, cpu;
  447. /*
  448. * Always succeed at resizing a non-existent buffer:
  449. */
  450. if (!buffer)
  451. return size;
  452. size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
  453. size *= BUF_PAGE_SIZE;
  454. buffer_size = buffer->pages * BUF_PAGE_SIZE;
  455. /* we need a minimum of two pages */
  456. if (size < BUF_PAGE_SIZE * 2)
  457. size = BUF_PAGE_SIZE * 2;
  458. if (size == buffer_size)
  459. return size;
  460. mutex_lock(&buffer->mutex);
  461. nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
  462. if (size < buffer_size) {
  463. /* easy case, just free pages */
  464. if (RB_WARN_ON(buffer, nr_pages >= buffer->pages)) {
  465. mutex_unlock(&buffer->mutex);
  466. return -1;
  467. }
  468. rm_pages = buffer->pages - nr_pages;
  469. for_each_buffer_cpu(buffer, cpu) {
  470. cpu_buffer = buffer->buffers[cpu];
  471. rb_remove_pages(cpu_buffer, rm_pages);
  472. }
  473. goto out;
  474. }
  475. /*
  476. * This is a bit more difficult. We only want to add pages
  477. * when we can allocate enough for all CPUs. We do this
  478. * by allocating all the pages and storing them on a local
  479. * link list. If we succeed in our allocation, then we
  480. * add these pages to the cpu_buffers. Otherwise we just free
  481. * them all and return -ENOMEM;
  482. */
  483. if (RB_WARN_ON(buffer, nr_pages <= buffer->pages)) {
  484. mutex_unlock(&buffer->mutex);
  485. return -1;
  486. }
  487. new_pages = nr_pages - buffer->pages;
  488. for_each_buffer_cpu(buffer, cpu) {
  489. for (i = 0; i < new_pages; i++) {
  490. page = kzalloc_node(ALIGN(sizeof(*page),
  491. cache_line_size()),
  492. GFP_KERNEL, cpu_to_node(cpu));
  493. if (!page)
  494. goto free_pages;
  495. list_add(&page->list, &pages);
  496. addr = __get_free_page(GFP_KERNEL);
  497. if (!addr)
  498. goto free_pages;
  499. page->page = (void *)addr;
  500. }
  501. }
  502. for_each_buffer_cpu(buffer, cpu) {
  503. cpu_buffer = buffer->buffers[cpu];
  504. rb_insert_pages(cpu_buffer, &pages, new_pages);
  505. }
  506. if (RB_WARN_ON(buffer, !list_empty(&pages))) {
  507. mutex_unlock(&buffer->mutex);
  508. return -1;
  509. }
  510. out:
  511. buffer->pages = nr_pages;
  512. mutex_unlock(&buffer->mutex);
  513. return size;
  514. free_pages:
  515. list_for_each_entry_safe(page, tmp, &pages, list) {
  516. list_del_init(&page->list);
  517. free_buffer_page(page);
  518. }
  519. return -ENOMEM;
  520. }
  521. static inline int rb_null_event(struct ring_buffer_event *event)
  522. {
  523. return event->type == RINGBUF_TYPE_PADDING;
  524. }
  525. static inline void *__rb_page_index(struct buffer_page *page, unsigned index)
  526. {
  527. return page->page + index;
  528. }
  529. static inline struct ring_buffer_event *
  530. rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
  531. {
  532. return __rb_page_index(cpu_buffer->reader_page,
  533. cpu_buffer->reader_page->read);
  534. }
  535. static inline struct ring_buffer_event *
  536. rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
  537. {
  538. return __rb_page_index(cpu_buffer->head_page,
  539. cpu_buffer->head_page->read);
  540. }
  541. static inline struct ring_buffer_event *
  542. rb_iter_head_event(struct ring_buffer_iter *iter)
  543. {
  544. return __rb_page_index(iter->head_page, iter->head);
  545. }
  546. static inline unsigned rb_page_write(struct buffer_page *bpage)
  547. {
  548. return local_read(&bpage->write);
  549. }
  550. static inline unsigned rb_page_commit(struct buffer_page *bpage)
  551. {
  552. return local_read(&bpage->commit);
  553. }
  554. /* Size is determined by what has been committed */
  555. static inline unsigned rb_page_size(struct buffer_page *bpage)
  556. {
  557. return rb_page_commit(bpage);
  558. }
  559. static inline unsigned
  560. rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
  561. {
  562. return rb_page_commit(cpu_buffer->commit_page);
  563. }
  564. static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
  565. {
  566. return rb_page_commit(cpu_buffer->head_page);
  567. }
  568. /*
  569. * When the tail hits the head and the buffer is in overwrite mode,
  570. * the head jumps to the next page and all content on the previous
  571. * page is discarded. But before doing so, we update the overrun
  572. * variable of the buffer.
  573. */
  574. static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
  575. {
  576. struct ring_buffer_event *event;
  577. unsigned long head;
  578. for (head = 0; head < rb_head_size(cpu_buffer);
  579. head += rb_event_length(event)) {
  580. event = __rb_page_index(cpu_buffer->head_page, head);
  581. if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
  582. return;
  583. /* Only count data entries */
  584. if (event->type != RINGBUF_TYPE_DATA)
  585. continue;
  586. cpu_buffer->overrun++;
  587. cpu_buffer->entries--;
  588. }
  589. }
  590. static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
  591. struct buffer_page **page)
  592. {
  593. struct list_head *p = (*page)->list.next;
  594. if (p == &cpu_buffer->pages)
  595. p = p->next;
  596. *page = list_entry(p, struct buffer_page, list);
  597. }
  598. static inline unsigned
  599. rb_event_index(struct ring_buffer_event *event)
  600. {
  601. unsigned long addr = (unsigned long)event;
  602. return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
  603. }
  604. static inline int
  605. rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
  606. struct ring_buffer_event *event)
  607. {
  608. unsigned long addr = (unsigned long)event;
  609. unsigned long index;
  610. index = rb_event_index(event);
  611. addr &= PAGE_MASK;
  612. return cpu_buffer->commit_page->page == (void *)addr &&
  613. rb_commit_index(cpu_buffer) == index;
  614. }
  615. static inline void
  616. rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
  617. struct ring_buffer_event *event)
  618. {
  619. unsigned long addr = (unsigned long)event;
  620. unsigned long index;
  621. index = rb_event_index(event);
  622. addr &= PAGE_MASK;
  623. while (cpu_buffer->commit_page->page != (void *)addr) {
  624. if (RB_WARN_ON(cpu_buffer,
  625. cpu_buffer->commit_page == cpu_buffer->tail_page))
  626. return;
  627. cpu_buffer->commit_page->commit =
  628. cpu_buffer->commit_page->write;
  629. rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
  630. cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
  631. }
  632. /* Now set the commit to the event's index */
  633. local_set(&cpu_buffer->commit_page->commit, index);
  634. }
  635. static inline void
  636. rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
  637. {
  638. /*
  639. * We only race with interrupts and NMIs on this CPU.
  640. * If we own the commit event, then we can commit
  641. * all others that interrupted us, since the interruptions
  642. * are in stack format (they finish before they come
  643. * back to us). This allows us to do a simple loop to
  644. * assign the commit to the tail.
  645. */
  646. while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
  647. cpu_buffer->commit_page->commit =
  648. cpu_buffer->commit_page->write;
  649. rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
  650. cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
  651. /* add barrier to keep gcc from optimizing too much */
  652. barrier();
  653. }
  654. while (rb_commit_index(cpu_buffer) !=
  655. rb_page_write(cpu_buffer->commit_page)) {
  656. cpu_buffer->commit_page->commit =
  657. cpu_buffer->commit_page->write;
  658. barrier();
  659. }
  660. }
  661. static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
  662. {
  663. cpu_buffer->read_stamp = cpu_buffer->reader_page->time_stamp;
  664. cpu_buffer->reader_page->read = 0;
  665. }
  666. static inline void rb_inc_iter(struct ring_buffer_iter *iter)
  667. {
  668. struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
  669. /*
  670. * The iterator could be on the reader page (it starts there).
  671. * But the head could have moved, since the reader was
  672. * found. Check for this case and assign the iterator
  673. * to the head page instead of next.
  674. */
  675. if (iter->head_page == cpu_buffer->reader_page)
  676. iter->head_page = cpu_buffer->head_page;
  677. else
  678. rb_inc_page(cpu_buffer, &iter->head_page);
  679. iter->read_stamp = iter->head_page->time_stamp;
  680. iter->head = 0;
  681. }
  682. /**
  683. * ring_buffer_update_event - update event type and data
  684. * @event: the event to update
  685. * @type: the type of event
  686. * @length: the size of the event field in the ring buffer
  687. *
  688. * Update the type and data fields of the event. The length
  689. * is the actual size that is written to the ring buffer,
  690. * and with this, we can determine what to place into the
  691. * data field.
  692. */
  693. static inline void
  694. rb_update_event(struct ring_buffer_event *event,
  695. unsigned type, unsigned length)
  696. {
  697. event->type = type;
  698. switch (type) {
  699. case RINGBUF_TYPE_PADDING:
  700. break;
  701. case RINGBUF_TYPE_TIME_EXTEND:
  702. event->len =
  703. (RB_LEN_TIME_EXTEND + (RB_ALIGNMENT-1))
  704. >> RB_ALIGNMENT_SHIFT;
  705. break;
  706. case RINGBUF_TYPE_TIME_STAMP:
  707. event->len =
  708. (RB_LEN_TIME_STAMP + (RB_ALIGNMENT-1))
  709. >> RB_ALIGNMENT_SHIFT;
  710. break;
  711. case RINGBUF_TYPE_DATA:
  712. length -= RB_EVNT_HDR_SIZE;
  713. if (length > RB_MAX_SMALL_DATA) {
  714. event->len = 0;
  715. event->array[0] = length;
  716. } else
  717. event->len =
  718. (length + (RB_ALIGNMENT-1))
  719. >> RB_ALIGNMENT_SHIFT;
  720. break;
  721. default:
  722. BUG();
  723. }
  724. }
  725. static inline unsigned rb_calculate_event_length(unsigned length)
  726. {
  727. struct ring_buffer_event event; /* Used only for sizeof array */
  729. /* zero length can cause confusion */
  729. if (!length)
  730. length = 1;
  731. if (length > RB_MAX_SMALL_DATA)
  732. length += sizeof(event.array[0]);
  733. length += RB_EVNT_HDR_SIZE;
  734. length = ALIGN(length, RB_ALIGNMENT);
  735. return length;
  736. }
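/*
 * Worked example of the length encoding above (illustrative; assumes the
 * 4-byte event header and 4-byte array[] words declared in
 * <linux/ring_buffer.h>):
 *
 *	12 bytes of data (<= RB_MAX_SMALL_DATA):
 *		reserved size  = ALIGN(12 + 4, 4)      = 16
 *		event->len     = (12 + 3) >> 2         = 3
 *		data starts at event->array[0]
 *
 *	40 bytes of data (>  RB_MAX_SMALL_DATA):
 *		reserved size  = ALIGN(40 + 4 + 4, 4)  = 48
 *		event->len     = 0, event->array[0]    = 44
 *		data starts at event->array[1]
 *
 * rb_event_length() and rb_event_data() undo this encoding on the read
 * side.
 */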
  737. static struct ring_buffer_event *
  738. __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
  739. unsigned type, unsigned long length, u64 *ts)
  740. {
  741. struct buffer_page *tail_page, *head_page, *reader_page;
  742. unsigned long tail, write;
  743. struct ring_buffer *buffer = cpu_buffer->buffer;
  744. struct ring_buffer_event *event;
  745. unsigned long flags;
  746. tail_page = cpu_buffer->tail_page;
  747. write = local_add_return(length, &tail_page->write);
  748. tail = write - length;
  749. /* See if we shot past the end of this buffer page */
  750. if (write > BUF_PAGE_SIZE) {
  751. struct buffer_page *next_page = tail_page;
  752. local_irq_save(flags);
  753. __raw_spin_lock(&cpu_buffer->lock);
  754. rb_inc_page(cpu_buffer, &next_page);
  755. head_page = cpu_buffer->head_page;
  756. reader_page = cpu_buffer->reader_page;
  757. /* we grabbed the lock before incrementing */
  758. if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
  759. goto out_unlock;
  760. /*
  761. * If for some reason, we had an interrupt storm that made
  762. * it all the way around the buffer, bail, and warn
  763. * about it.
  764. */
  765. if (unlikely(next_page == cpu_buffer->commit_page)) {
  766. WARN_ON_ONCE(1);
  767. goto out_unlock;
  768. }
  769. if (next_page == head_page) {
  770. if (!(buffer->flags & RB_FL_OVERWRITE)) {
  771. /* reset write */
  772. if (tail <= BUF_PAGE_SIZE)
  773. local_set(&tail_page->write, tail);
  774. goto out_unlock;
  775. }
  776. /* tail_page has not moved yet? */
  777. if (tail_page == cpu_buffer->tail_page) {
  778. /* count overflows */
  779. rb_update_overflow(cpu_buffer);
  780. rb_inc_page(cpu_buffer, &head_page);
  781. cpu_buffer->head_page = head_page;
  782. cpu_buffer->head_page->read = 0;
  783. }
  784. }
  785. /*
  786. * If the tail page is still the same as what we think
  787. * it is, then it is up to us to update the tail
  788. * pointer.
  789. */
  790. if (tail_page == cpu_buffer->tail_page) {
  791. local_set(&next_page->write, 0);
  792. local_set(&next_page->commit, 0);
  793. cpu_buffer->tail_page = next_page;
  794. /* reread the time stamp */
  795. *ts = ring_buffer_time_stamp(cpu_buffer->cpu);
  796. cpu_buffer->tail_page->time_stamp = *ts;
  797. }
  798. /*
  799. * The actual tail page has moved forward.
  800. */
  801. if (tail < BUF_PAGE_SIZE) {
  802. /* Mark the rest of the page with padding */
  803. event = __rb_page_index(tail_page, tail);
  804. event->type = RINGBUF_TYPE_PADDING;
  805. }
  806. if (tail <= BUF_PAGE_SIZE)
  807. /* Set the write back to the previous setting */
  808. local_set(&tail_page->write, tail);
  809. /*
  810. * If this was a commit entry that failed,
  811. * increment that too
  812. */
  813. if (tail_page == cpu_buffer->commit_page &&
  814. tail == rb_commit_index(cpu_buffer)) {
  815. rb_set_commit_to_write(cpu_buffer);
  816. }
  817. __raw_spin_unlock(&cpu_buffer->lock);
  818. local_irq_restore(flags);
  819. /* fail and let the caller try again */
  820. return ERR_PTR(-EAGAIN);
  821. }
  822. /* We reserved something on the buffer */
  823. if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
  824. return NULL;
  825. event = __rb_page_index(tail_page, tail);
  826. rb_update_event(event, type, length);
  827. /*
  828. * If this is a commit and the tail is zero, then update
  829. * this page's time stamp.
  830. */
  831. if (!tail && rb_is_commit(cpu_buffer, event))
  832. cpu_buffer->commit_page->time_stamp = *ts;
  833. return event;
  834. out_unlock:
  835. __raw_spin_unlock(&cpu_buffer->lock);
  836. local_irq_restore(flags);
  837. return NULL;
  838. }
  839. static int
  840. rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
  841. u64 *ts, u64 *delta)
  842. {
  843. struct ring_buffer_event *event;
  844. static int once;
  845. int ret;
  846. if (unlikely(*delta > (1ULL << 59) && !once++)) {
  847. printk(KERN_WARNING "Delta way too big! %llu"
  848. " ts=%llu write stamp = %llu\n",
  849. (unsigned long long)*delta,
  850. (unsigned long long)*ts,
  851. (unsigned long long)cpu_buffer->write_stamp);
  852. WARN_ON(1);
  853. }
  854. /*
  855. * The delta is too big; we need to add a
  856. * new timestamp.
  857. */
  858. event = __rb_reserve_next(cpu_buffer,
  859. RINGBUF_TYPE_TIME_EXTEND,
  860. RB_LEN_TIME_EXTEND,
  861. ts);
  862. if (!event)
  863. return -EBUSY;
  864. if (PTR_ERR(event) == -EAGAIN)
  865. return -EAGAIN;
  866. /* Only a committed time event can update the write stamp */
  867. if (rb_is_commit(cpu_buffer, event)) {
  868. /*
  869. * If this is the first on the page, then we need to
  870. * update the page itself, and just put in a zero.
  871. */
  872. if (rb_event_index(event)) {
  873. event->time_delta = *delta & TS_MASK;
  874. event->array[0] = *delta >> TS_SHIFT;
  875. } else {
  876. cpu_buffer->commit_page->time_stamp = *ts;
  877. event->time_delta = 0;
  878. event->array[0] = 0;
  879. }
  880. cpu_buffer->write_stamp = *ts;
  881. /* let the caller know this was the commit */
  882. ret = 1;
  883. } else {
  884. /* Darn, this is just wasted space */
  885. event->time_delta = 0;
  886. event->array[0] = 0;
  887. ret = 0;
  888. }
  889. *delta = 0;
  890. return ret;
  891. }
  892. static struct ring_buffer_event *
  893. rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
  894. unsigned type, unsigned long length)
  895. {
  896. struct ring_buffer_event *event;
  897. u64 ts, delta;
  898. int commit = 0;
  899. int nr_loops = 0;
  900. again:
  901. /*
  902. * We allow for interrupts to reenter here and do a trace.
  903. * If one does, it will cause this original code to loop
  904. * back here. Even with heavy interrupts happening, this
  905. * should only happen a few times in a row. If this happens
  906. * 1000 times in a row, there must be either an interrupt
  907. * storm or we have something buggy.
  908. * Bail!
  909. */
  910. if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
  911. return NULL;
  912. ts = ring_buffer_time_stamp(cpu_buffer->cpu);
  913. /*
  914. * Only the first commit can update the timestamp.
  915. * Yes there is a race here. If an interrupt comes in
  916. * just after the conditional and it traces too, then it
  917. * will also check the deltas. More than one timestamp may
  918. * also be made. But only the entry that did the actual
  919. * commit will be something other than zero.
  920. */
  921. if (cpu_buffer->tail_page == cpu_buffer->commit_page &&
  922. rb_page_write(cpu_buffer->tail_page) ==
  923. rb_commit_index(cpu_buffer)) {
  924. delta = ts - cpu_buffer->write_stamp;
  925. /* make sure this delta is calculated here */
  926. barrier();
  927. /* Did the write stamp get updated already? */
  928. if (unlikely(ts < cpu_buffer->write_stamp))
  929. delta = 0;
  930. if (test_time_stamp(delta)) {
  931. commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
  932. if (commit == -EBUSY)
  933. return NULL;
  934. if (commit == -EAGAIN)
  935. goto again;
  936. RB_WARN_ON(cpu_buffer, commit < 0);
  937. }
  938. } else
  939. /* Non-commit events have zero deltas */
  940. delta = 0;
  941. event = __rb_reserve_next(cpu_buffer, type, length, &ts);
  942. if (PTR_ERR(event) == -EAGAIN)
  943. goto again;
  944. if (!event) {
  945. if (unlikely(commit))
  946. /*
  947. * Ouch! We needed a timestamp and it was committed. But
  948. * we didn't get our event reserved.
  949. */
  950. rb_set_commit_to_write(cpu_buffer);
  951. return NULL;
  952. }
  953. /*
  954. * If the timestamp was committed, make the commit our entry
  955. * now so that we will update it when needed.
  956. */
  957. if (commit)
  958. rb_set_commit_event(cpu_buffer, event);
  959. else if (!rb_is_commit(cpu_buffer, event))
  960. delta = 0;
  961. event->time_delta = delta;
  962. return event;
  963. }
  964. static DEFINE_PER_CPU(int, rb_need_resched);
  965. /**
  966. * ring_buffer_lock_reserve - reserve a part of the buffer
  967. * @buffer: the ring buffer to reserve from
  968. * @length: the length of the data to reserve (excluding event header)
  969. * @flags: a pointer to save the interrupt flags
  970. *
  971. * Returns a reserved event on the ring buffer to copy data directly into.
  972. * The user of this interface will need to get the body to write into
  973. * and can use the ring_buffer_event_data() interface.
  974. *
  975. * The length is the length of the data needed, not the event length
  976. * which also includes the event header.
  977. *
  978. * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
  979. * If NULL is returned, then nothing has been allocated or locked.
  980. */
  981. struct ring_buffer_event *
  982. ring_buffer_lock_reserve(struct ring_buffer *buffer,
  983. unsigned long length,
  984. unsigned long *flags)
  985. {
  986. struct ring_buffer_per_cpu *cpu_buffer;
  987. struct ring_buffer_event *event;
  988. int cpu, resched;
  989. if (ring_buffers_off)
  990. return NULL;
  991. if (atomic_read(&buffer->record_disabled))
  992. return NULL;
  993. /* If we are tracing schedule, we don't want to recurse */
  994. resched = ftrace_preempt_disable();
  995. cpu = raw_smp_processor_id();
  996. if (!cpu_isset(cpu, buffer->cpumask))
  997. goto out;
  998. cpu_buffer = buffer->buffers[cpu];
  999. if (atomic_read(&cpu_buffer->record_disabled))
  1000. goto out;
  1001. length = rb_calculate_event_length(length);
  1002. if (length > BUF_PAGE_SIZE)
  1003. goto out;
  1004. event = rb_reserve_next_event(cpu_buffer, RINGBUF_TYPE_DATA, length);
  1005. if (!event)
  1006. goto out;
  1007. /*
  1008. * Need to store resched state on this cpu.
  1009. * Only the first needs to.
  1010. */
  1011. if (preempt_count() == 1)
  1012. per_cpu(rb_need_resched, cpu) = resched;
  1013. return event;
  1014. out:
  1015. ftrace_preempt_enable(resched);
  1016. return NULL;
  1017. }
  1018. static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
  1019. struct ring_buffer_event *event)
  1020. {
  1021. cpu_buffer->entries++;
  1022. /* Only process further if we own the commit */
  1023. if (!rb_is_commit(cpu_buffer, event))
  1024. return;
  1025. cpu_buffer->write_stamp += event->time_delta;
  1026. rb_set_commit_to_write(cpu_buffer);
  1027. }
  1028. /**
  1029. * ring_buffer_unlock_commit - commit a reserved event
  1030. * @buffer: The buffer to commit to
  1031. * @event: The event pointer to commit.
  1032. * @flags: the interrupt flags received from ring_buffer_lock_reserve.
  1033. *
  1034. * This commits the data to the ring buffer, and releases any locks held.
  1035. *
  1036. * Must be paired with ring_buffer_lock_reserve.
  1037. */
  1038. int ring_buffer_unlock_commit(struct ring_buffer *buffer,
  1039. struct ring_buffer_event *event,
  1040. unsigned long flags)
  1041. {
  1042. struct ring_buffer_per_cpu *cpu_buffer;
  1043. int cpu = raw_smp_processor_id();
  1044. cpu_buffer = buffer->buffers[cpu];
  1045. rb_commit(cpu_buffer, event);
  1046. /*
  1047. * Only the last preempt count needs to restore preemption.
  1048. */
  1049. if (preempt_count() == 1)
  1050. ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
  1051. else
  1052. preempt_enable_no_resched_notrace();
  1053. return 0;
  1054. }
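/*
 * Reserve/commit usage sketch (illustrative; "struct my_entry" is a
 * made-up payload type):
 *
 *	struct ring_buffer_event *event;
 *	struct my_entry *entry;
 *	unsigned long flags;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry), &flags);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->value = 42;
 *	ring_buffer_unlock_commit(buffer, event, flags);
 *
 * A NULL return from ring_buffer_lock_reserve() means nothing was
 * reserved, so there must be no matching commit.
 */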
  1055. /**
  1056. * ring_buffer_write - write data to the buffer without reserving
  1057. * @buffer: The ring buffer to write to.
  1058. * @length: The length of the data being written (excluding the event header)
  1059. * @data: The data to write to the buffer.
  1060. *
  1061. * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
  1062. * one function. If you already have the data to write to the buffer, it
  1063. * may be easier to simply call this function.
  1064. *
  1065. * Note, like ring_buffer_lock_reserve, the length is the length of the data
  1066. * and not the length of the event which would hold the header.
  1067. */
  1068. int ring_buffer_write(struct ring_buffer *buffer,
  1069. unsigned long length,
  1070. void *data)
  1071. {
  1072. struct ring_buffer_per_cpu *cpu_buffer;
  1073. struct ring_buffer_event *event;
  1074. unsigned long event_length;
  1075. void *body;
  1076. int ret = -EBUSY;
  1077. int cpu, resched;
  1078. if (ring_buffers_off)
  1079. return -EBUSY;
  1080. if (atomic_read(&buffer->record_disabled))
  1081. return -EBUSY;
  1082. resched = ftrace_preempt_disable();
  1083. cpu = raw_smp_processor_id();
  1084. if (!cpu_isset(cpu, buffer->cpumask))
  1085. goto out;
  1086. cpu_buffer = buffer->buffers[cpu];
  1087. if (atomic_read(&cpu_buffer->record_disabled))
  1088. goto out;
  1089. event_length = rb_calculate_event_length(length);
  1090. event = rb_reserve_next_event(cpu_buffer,
  1091. RINGBUF_TYPE_DATA, event_length);
  1092. if (!event)
  1093. goto out;
  1094. body = rb_event_data(event);
  1095. memcpy(body, data, length);
  1096. rb_commit(cpu_buffer, event);
  1097. ret = 0;
  1098. out:
  1099. ftrace_preempt_enable(resched);
  1100. return ret;
  1101. }
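/*
 * Example (illustrative, reusing the made-up struct my_entry from the
 * sketch above): when the payload already exists, the reserve/commit
 * pair collapses into a single call:
 *
 *	struct my_entry entry = { .value = 42 };
 *	int ret;
 *
 *	ret = ring_buffer_write(buffer, sizeof(entry), &entry);
 *
 * A return of -EBUSY means nothing was written (recording is off, the
 * per-cpu buffer is disabled, or the reservation failed); 0 means the
 * event was committed.
 */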
  1102. static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
  1103. {
  1104. struct buffer_page *reader = cpu_buffer->reader_page;
  1105. struct buffer_page *head = cpu_buffer->head_page;
  1106. struct buffer_page *commit = cpu_buffer->commit_page;
  1107. return reader->read == rb_page_commit(reader) &&
  1108. (commit == reader ||
  1109. (commit == head &&
  1110. head->read == rb_page_commit(commit)));
  1111. }
  1112. /**
  1113. * ring_buffer_record_disable - stop all writes into the buffer
  1114. * @buffer: The ring buffer to stop writes to.
  1115. *
  1116. * This prevents all writes to the buffer. Any attempt to write
  1117. * to the buffer after this will fail and return NULL.
  1118. *
  1119. * The caller should call synchronize_sched() after this.
  1120. */
  1121. void ring_buffer_record_disable(struct ring_buffer *buffer)
  1122. {
  1123. atomic_inc(&buffer->record_disabled);
  1124. }
  1125. /**
  1126. * ring_buffer_record_enable - enable writes to the buffer
  1127. * @buffer: The ring buffer to enable writes
  1128. *
  1129. * Note, multiple disables will need the same number of enables
  1130. * to truly enable the writing (much like preempt_disable).
  1131. */
  1132. void ring_buffer_record_enable(struct ring_buffer *buffer)
  1133. {
  1134. atomic_dec(&buffer->record_disabled);
  1135. }
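/*
 * Pattern sketch (illustrative): the disable count nests, so a reader
 * that needs a quiescent buffer typically does
 *
 *	ring_buffer_record_disable(buffer);
 *	synchronize_sched();
 *	... read or reset the buffer ...
 *	ring_buffer_record_enable(buffer);
 *
 * Every disable must eventually be balanced by exactly one enable before
 * writers can record again.
 */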
  1136. /**
  1137. * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
  1138. * @buffer: The ring buffer to stop writes to.
  1139. * @cpu: The CPU buffer to stop
  1140. *
  1141. * This prevents all writes to the buffer. Any attempt to write
  1142. * to the buffer after this will fail and return NULL.
  1143. *
  1144. * The caller should call synchronize_sched() after this.
  1145. */
  1146. void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
  1147. {
  1148. struct ring_buffer_per_cpu *cpu_buffer;
  1149. if (!cpu_isset(cpu, buffer->cpumask))
  1150. return;
  1151. cpu_buffer = buffer->buffers[cpu];
  1152. atomic_inc(&cpu_buffer->record_disabled);
  1153. }
  1154. /**
  1155. * ring_buffer_record_enable_cpu - enable writes to the buffer
  1156. * @buffer: The ring buffer to enable writes
  1157. * @cpu: The CPU to enable.
  1158. *
  1159. * Note, multiple disables will need the same number of enables
  1160. * to truly enable the writing (much like preempt_disable).
  1161. */
  1162. void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
  1163. {
  1164. struct ring_buffer_per_cpu *cpu_buffer;
  1165. if (!cpu_isset(cpu, buffer->cpumask))
  1166. return;
  1167. cpu_buffer = buffer->buffers[cpu];
  1168. atomic_dec(&cpu_buffer->record_disabled);
  1169. }
  1170. /**
  1171. * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
  1172. * @buffer: The ring buffer
  1173. * @cpu: The per CPU buffer to get the entries from.
  1174. */
  1175. unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
  1176. {
  1177. struct ring_buffer_per_cpu *cpu_buffer;
  1178. if (!cpu_isset(cpu, buffer->cpumask))
  1179. return 0;
  1180. cpu_buffer = buffer->buffers[cpu];
  1181. return cpu_buffer->entries;
  1182. }
  1183. /**
  1184. * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
  1185. * @buffer: The ring buffer
  1186. * @cpu: The per CPU buffer to get the number of overruns from
  1187. */
  1188. unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
  1189. {
  1190. struct ring_buffer_per_cpu *cpu_buffer;
  1191. if (!cpu_isset(cpu, buffer->cpumask))
  1192. return 0;
  1193. cpu_buffer = buffer->buffers[cpu];
  1194. return cpu_buffer->overrun;
  1195. }
  1196. /**
  1197. * ring_buffer_entries - get the number of entries in a buffer
  1198. * @buffer: The ring buffer
  1199. *
  1200. * Returns the total number of entries in the ring buffer
  1201. * (all CPU entries)
  1202. */
  1203. unsigned long ring_buffer_entries(struct ring_buffer *buffer)
  1204. {
  1205. struct ring_buffer_per_cpu *cpu_buffer;
  1206. unsigned long entries = 0;
  1207. int cpu;
  1208. /* if you care about this being correct, lock the buffer */
  1209. for_each_buffer_cpu(buffer, cpu) {
  1210. cpu_buffer = buffer->buffers[cpu];
  1211. entries += cpu_buffer->entries;
  1212. }
  1213. return entries;
  1214. }
  1215. /**
  1216. * ring_buffer_overruns - get the total number of overruns in the buffer
  1217. * @buffer: The ring buffer
  1218. *
  1219. * Returns the total number of overruns in the ring buffer
  1220. * (all CPU entries)
  1221. */
  1222. unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
  1223. {
  1224. struct ring_buffer_per_cpu *cpu_buffer;
  1225. unsigned long overruns = 0;
  1226. int cpu;
  1227. /* if you care about this being correct, lock the buffer */
  1228. for_each_buffer_cpu(buffer, cpu) {
  1229. cpu_buffer = buffer->buffers[cpu];
  1230. overruns += cpu_buffer->overrun;
  1231. }
  1232. return overruns;
  1233. }
  1234. static void rb_iter_reset(struct ring_buffer_iter *iter)
  1235. {
  1236. struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
  1237. /* Iterator usage is expected to have record disabled */
  1238. if (list_empty(&cpu_buffer->reader_page->list)) {
  1239. iter->head_page = cpu_buffer->head_page;
  1240. iter->head = cpu_buffer->head_page->read;
  1241. } else {
  1242. iter->head_page = cpu_buffer->reader_page;
  1243. iter->head = cpu_buffer->reader_page->read;
  1244. }
  1245. if (iter->head)
  1246. iter->read_stamp = cpu_buffer->read_stamp;
  1247. else
  1248. iter->read_stamp = iter->head_page->time_stamp;
  1249. }
  1250. /**
  1251. * ring_buffer_iter_reset - reset an iterator
  1252. * @iter: The iterator to reset
  1253. *
  1254. * Resets the iterator, so that it will start from the beginning
  1255. * again.
  1256. */
  1257. void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
  1258. {
  1259. struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
  1260. unsigned long flags;
  1261. spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
  1262. rb_iter_reset(iter);
  1263. spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
  1264. }
  1265. /**
  1266. * ring_buffer_iter_empty - check if an iterator has no more to read
  1267. * @iter: The iterator to check
  1268. */
  1269. int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
  1270. {
  1271. struct ring_buffer_per_cpu *cpu_buffer;
  1272. cpu_buffer = iter->cpu_buffer;
  1273. return iter->head_page == cpu_buffer->commit_page &&
  1274. iter->head == rb_commit_index(cpu_buffer);
  1275. }
  1276. static void
  1277. rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
  1278. struct ring_buffer_event *event)
  1279. {
  1280. u64 delta;
  1281. switch (event->type) {
  1282. case RINGBUF_TYPE_PADDING:
  1283. return;
  1284. case RINGBUF_TYPE_TIME_EXTEND:
  1285. delta = event->array[0];
  1286. delta <<= TS_SHIFT;
  1287. delta += event->time_delta;
  1288. cpu_buffer->read_stamp += delta;
  1289. return;
  1290. case RINGBUF_TYPE_TIME_STAMP:
  1291. /* FIXME: not implemented */
  1292. return;
  1293. case RINGBUF_TYPE_DATA:
  1294. cpu_buffer->read_stamp += event->time_delta;
  1295. return;
  1296. default:
  1297. BUG();
  1298. }
  1299. return;
  1300. }
  1301. static void
  1302. rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
  1303. struct ring_buffer_event *event)
  1304. {
  1305. u64 delta;
  1306. switch (event->type) {
  1307. case RINGBUF_TYPE_PADDING:
  1308. return;
  1309. case RINGBUF_TYPE_TIME_EXTEND:
  1310. delta = event->array[0];
  1311. delta <<= TS_SHIFT;
  1312. delta += event->time_delta;
  1313. iter->read_stamp += delta;
  1314. return;
  1315. case RINGBUF_TYPE_TIME_STAMP:
  1316. /* FIXME: not implemented */
  1317. return;
  1318. case RINGBUF_TYPE_DATA:
  1319. iter->read_stamp += event->time_delta;
  1320. return;
  1321. default:
  1322. BUG();
  1323. }
  1324. return;
  1325. }
  1326. static struct buffer_page *
  1327. rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
  1328. {
  1329. struct buffer_page *reader = NULL;
  1330. unsigned long flags;
  1331. int nr_loops = 0;
  1332. local_irq_save(flags);
  1333. __raw_spin_lock(&cpu_buffer->lock);
  1334. again:
  1335. /*
  1336. * This should normally only loop twice. But because the
  1337. * start of the reader inserts an empty page, it causes
  1338. * a case where we will loop three times. There should be no
  1339. * reason to loop four times (that I know of).
  1340. */
  1341. if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
  1342. reader = NULL;
  1343. goto out;
  1344. }
  1345. reader = cpu_buffer->reader_page;
  1346. /* If there's more to read, return this page */
  1347. if (cpu_buffer->reader_page->read < rb_page_size(reader))
  1348. goto out;
  1349. /* Never should we have an index greater than the size */
  1350. if (RB_WARN_ON(cpu_buffer,
  1351. cpu_buffer->reader_page->read > rb_page_size(reader)))
  1352. goto out;
  1353. /* check if we caught up to the tail */
  1354. reader = NULL;
  1355. if (cpu_buffer->commit_page == cpu_buffer->reader_page)
  1356. goto out;
  1357. /*
  1358. * Splice the empty reader page into the list around the head.
  1359. * Reset the reader page to size zero.
  1360. */
  1361. reader = cpu_buffer->head_page;
  1362. cpu_buffer->reader_page->list.next = reader->list.next;
  1363. cpu_buffer->reader_page->list.prev = reader->list.prev;
  1364. local_set(&cpu_buffer->reader_page->write, 0);
  1365. local_set(&cpu_buffer->reader_page->commit, 0);
  1366. /* Make the reader page now replace the head */
  1367. reader->list.prev->next = &cpu_buffer->reader_page->list;
  1368. reader->list.next->prev = &cpu_buffer->reader_page->list;
  1369. /*
  1370. * If the tail is on the reader, then we must set the head
  1371. * to the inserted page, otherwise we set it one before.
  1372. */
  1373. cpu_buffer->head_page = cpu_buffer->reader_page;
  1374. if (cpu_buffer->commit_page != reader)
  1375. rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
  1376. /* Finally update the reader page to the new head */
  1377. cpu_buffer->reader_page = reader;
  1378. rb_reset_reader_page(cpu_buffer);
  1379. goto again;
  1380. out:
  1381. __raw_spin_unlock(&cpu_buffer->lock);
  1382. local_irq_restore(flags);
  1383. return reader;
  1384. }
static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
{
        struct ring_buffer_event *event;
        struct buffer_page *reader;
        unsigned length;

        reader = rb_get_reader_page(cpu_buffer);

        /* This function should not be called when buffer is empty */
        if (RB_WARN_ON(cpu_buffer, !reader))
                return;

        event = rb_reader_event(cpu_buffer);

        if (event->type == RINGBUF_TYPE_DATA)
                cpu_buffer->entries--;

        rb_update_read_stamp(cpu_buffer, event);

        length = rb_event_length(event);
        cpu_buffer->reader_page->read += length;
}
static void rb_advance_iter(struct ring_buffer_iter *iter)
{
        struct ring_buffer *buffer;
        struct ring_buffer_per_cpu *cpu_buffer;
        struct ring_buffer_event *event;
        unsigned length;

        cpu_buffer = iter->cpu_buffer;
        buffer = cpu_buffer->buffer;

        /*
         * Check if we are at the end of the buffer.
         */
        if (iter->head >= rb_page_size(iter->head_page)) {
                if (RB_WARN_ON(buffer,
                               iter->head_page == cpu_buffer->commit_page))
                        return;
                rb_inc_iter(iter);
                return;
        }

        event = rb_iter_head_event(iter);

        length = rb_event_length(event);

        /*
         * This should not be called to advance the header if we are
         * at the tail of the buffer.
         */
        if (RB_WARN_ON(cpu_buffer,
                       (iter->head_page == cpu_buffer->commit_page) &&
                       (iter->head + length > rb_commit_index(cpu_buffer))))
                return;

        rb_update_iter_read_stamp(iter, event);

        iter->head += length;

        /* check for end of page padding */
        if ((iter->head >= rb_page_size(iter->head_page)) &&
            (iter->head_page != cpu_buffer->commit_page))
                rb_advance_iter(iter);
}
static struct ring_buffer_event *
rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
{
        struct ring_buffer_per_cpu *cpu_buffer;
        struct ring_buffer_event *event;
        struct buffer_page *reader;
        int nr_loops = 0;

        if (!cpu_isset(cpu, buffer->cpumask))
                return NULL;

        cpu_buffer = buffer->buffers[cpu];

 again:
        /*
         * We repeat when a timestamp is encountered. It is possible
         * to get multiple timestamps from an interrupt entering just
         * as one timestamp is about to be written. The max times
         * that this can happen is the number of nested interrupts we
         * can have. Nesting 10 deep of interrupts is clearly
         * an anomaly.
         */
        if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
                return NULL;

        reader = rb_get_reader_page(cpu_buffer);
        if (!reader)
                return NULL;

        event = rb_reader_event(cpu_buffer);

        switch (event->type) {
        case RINGBUF_TYPE_PADDING:
                RB_WARN_ON(cpu_buffer, 1);
                rb_advance_reader(cpu_buffer);
                return NULL;

        case RINGBUF_TYPE_TIME_EXTEND:
                /* Internal data, OK to advance */
                rb_advance_reader(cpu_buffer);
                goto again;

        case RINGBUF_TYPE_TIME_STAMP:
                /* FIXME: not implemented */
                rb_advance_reader(cpu_buffer);
                goto again;

        case RINGBUF_TYPE_DATA:
                if (ts) {
                        *ts = cpu_buffer->read_stamp + event->time_delta;
                        ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
                }
                return event;

        default:
                BUG();
        }

        return NULL;
}
static struct ring_buffer_event *
rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
{
        struct ring_buffer *buffer;
        struct ring_buffer_per_cpu *cpu_buffer;
        struct ring_buffer_event *event;
        int nr_loops = 0;

        if (ring_buffer_iter_empty(iter))
                return NULL;

        cpu_buffer = iter->cpu_buffer;
        buffer = cpu_buffer->buffer;

 again:
        /*
         * We repeat when a timestamp is encountered. It is possible
         * to get multiple timestamps from an interrupt entering just
         * as one timestamp is about to be written. The max times
         * that this can happen is the number of nested interrupts we
         * can have. Nesting 10 deep of interrupts is clearly
         * an anomaly.
         */
        if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
                return NULL;

        if (rb_per_cpu_empty(cpu_buffer))
                return NULL;

        event = rb_iter_head_event(iter);

        switch (event->type) {
        case RINGBUF_TYPE_PADDING:
                rb_inc_iter(iter);
                goto again;

        case RINGBUF_TYPE_TIME_EXTEND:
                /* Internal data, OK to advance */
                rb_advance_iter(iter);
                goto again;

        case RINGBUF_TYPE_TIME_STAMP:
                /* FIXME: not implemented */
                rb_advance_iter(iter);
                goto again;

        case RINGBUF_TYPE_DATA:
                if (ts) {
                        *ts = iter->read_stamp + event->time_delta;
                        ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
                }
                return event;

        default:
                BUG();
        }

        return NULL;
}
/**
 * ring_buffer_peek - peek at the next event to be read
 * @buffer: The ring buffer to read
 * @cpu: The cpu to peek at
 * @ts: The timestamp counter of this event.
 *
 * This will return the event that will be read next, but does
 * not consume the data.
 */
struct ring_buffer_event *
ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
{
        struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
        struct ring_buffer_event *event;
        unsigned long flags;

        spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
        event = rb_buffer_peek(buffer, cpu, ts);
        spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

        return event;
}
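
/*
 * Illustrative sketch, not part of the original file: ring_buffer_peek()
 * lets a merged reader inspect each CPU's next event without consuming it,
 * for example to pick the CPU whose pending event carries the oldest
 * timestamp. The helper name below is an assumption made for this example.
 */
#if 0
static int example_oldest_cpu(struct ring_buffer *buffer)
{
        u64 ts, best_ts = ~0ULL;
        int cpu, best_cpu = -1;

        for_each_buffer_cpu(buffer, cpu) {
                /* NULL means this CPU's buffer is currently empty */
                if (!ring_buffer_peek(buffer, cpu, &ts))
                        continue;
                if (ts < best_ts) {
                        best_ts = ts;
                        best_cpu = cpu;
                }
        }
        return best_cpu;
}
#endif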
/**
 * ring_buffer_iter_peek - peek at the next event to be read
 * @iter: The ring buffer iterator
 * @ts: The timestamp counter of this event.
 *
 * This will return the event that will be read next, but does
 * not increment the iterator.
 */
struct ring_buffer_event *
ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
{
        struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
        struct ring_buffer_event *event;
        unsigned long flags;

        spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
        event = rb_iter_peek(iter, ts);
        spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

        return event;
}
/**
 * ring_buffer_consume - return an event and consume it
 * @buffer: The ring buffer to get the next event from
 * @cpu: The per-CPU buffer to read from
 * @ts: Filled with the event's timestamp, if non-NULL
 *
 * Returns the next event in the ring buffer, and that event is consumed.
 * That is, sequential reads will keep returning different events, and
 * will eventually empty the ring buffer if the producer is slower than
 * the reader.
 */
struct ring_buffer_event *
ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
{
        struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
        struct ring_buffer_event *event;
        unsigned long flags;

        if (!cpu_isset(cpu, buffer->cpumask))
                return NULL;

        spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

        event = rb_buffer_peek(buffer, cpu, ts);
        if (!event)
                goto out;

        rb_advance_reader(cpu_buffer);

 out:
        spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

        return event;
}
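
/*
 * Illustrative sketch, not part of the original file: a consuming read
 * loop that drains one CPU's buffer. It assumes the public event
 * accessors ring_buffer_event_data() and ring_buffer_event_length();
 * the handle_record() callback is hypothetical, supplied by the caller.
 */
#if 0
static void example_drain_cpu(struct ring_buffer *buffer, int cpu,
                              void (*handle_record)(void *data, unsigned len))
{
        struct ring_buffer_event *event;
        u64 ts;

        while ((event = ring_buffer_consume(buffer, cpu, &ts)) != NULL)
                handle_record(ring_buffer_event_data(event),
                              ring_buffer_event_length(event));
}
#endif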
/**
 * ring_buffer_read_start - start a non consuming read of the buffer
 * @buffer: The ring buffer to read from
 * @cpu: The cpu buffer to iterate over
 *
 * This starts up an iteration through the buffer. It also disables
 * the recording to the buffer until the reading is finished.
 * This prevents the reading from being corrupted. This is not
 * a consuming read, so a producer is not expected.
 *
 * Must be paired with ring_buffer_read_finish.
 */
struct ring_buffer_iter *
ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
{
        struct ring_buffer_per_cpu *cpu_buffer;
        struct ring_buffer_iter *iter;
        unsigned long flags;

        if (!cpu_isset(cpu, buffer->cpumask))
                return NULL;

        iter = kmalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return NULL;

        cpu_buffer = buffer->buffers[cpu];

        iter->cpu_buffer = cpu_buffer;

        atomic_inc(&cpu_buffer->record_disabled);
        synchronize_sched();

        spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
        __raw_spin_lock(&cpu_buffer->lock);
        rb_iter_reset(iter);
        __raw_spin_unlock(&cpu_buffer->lock);
        spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

        return iter;
}
/**
 * ring_buffer_read_finish - finish reading the iterator of the buffer
 * @iter: The iterator retrieved by ring_buffer_read_start
 *
 * This re-enables the recording to the buffer, and frees the
 * iterator.
 */
void
ring_buffer_read_finish(struct ring_buffer_iter *iter)
{
        struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

        atomic_dec(&cpu_buffer->record_disabled);
        kfree(iter);
}
/**
 * ring_buffer_read - read the next item in the ring buffer by the iterator
 * @iter: The ring buffer iterator
 * @ts: The time stamp of the event read.
 *
 * This reads the next event in the ring buffer and increments the iterator.
 */
struct ring_buffer_event *
ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
{
        struct ring_buffer_event *event;
        struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
        unsigned long flags;

        spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
        event = rb_iter_peek(iter, ts);
        if (!event)
                goto out;

        rb_advance_iter(iter);
 out:
        spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

        return event;
}
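
/*
 * Illustrative sketch, not part of the original file: a non-consuming pass
 * over one CPU's buffer with the iterator API. Recording stays disabled
 * between read_start and read_finish, so this suits post-mortem style
 * dumps. The printed format is an assumption made for this example.
 */
#if 0
static void example_dump_cpu(struct ring_buffer *buffer, int cpu)
{
        struct ring_buffer_iter *iter;
        struct ring_buffer_event *event;
        u64 ts;

        iter = ring_buffer_read_start(buffer, cpu);
        if (!iter)
                return;

        while ((event = ring_buffer_read(iter, &ts)) != NULL)
                printk(KERN_INFO "cpu%d ts=%llu len=%u\n",
                       cpu, (unsigned long long)ts,
                       ring_buffer_event_length(event));

        ring_buffer_read_finish(iter);
}
#endif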
/**
 * ring_buffer_size - return the size of the ring buffer (in bytes)
 * @buffer: The ring buffer.
 */
unsigned long ring_buffer_size(struct ring_buffer *buffer)
{
        return BUF_PAGE_SIZE * buffer->pages;
}
static void
rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
{
        cpu_buffer->head_page
                = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
        local_set(&cpu_buffer->head_page->write, 0);
        local_set(&cpu_buffer->head_page->commit, 0);
        cpu_buffer->head_page->read = 0;

        cpu_buffer->tail_page = cpu_buffer->head_page;
        cpu_buffer->commit_page = cpu_buffer->head_page;

        INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
        local_set(&cpu_buffer->reader_page->write, 0);
        local_set(&cpu_buffer->reader_page->commit, 0);
        cpu_buffer->reader_page->read = 0;

        cpu_buffer->overrun = 0;
        cpu_buffer->entries = 0;
}
/**
 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
 * @buffer: The ring buffer to reset a per cpu buffer of
 * @cpu: The CPU buffer to be reset
 */
void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
{
        struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
        unsigned long flags;

        if (!cpu_isset(cpu, buffer->cpumask))
                return;

        spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

        __raw_spin_lock(&cpu_buffer->lock);

        rb_reset_cpu(cpu_buffer);

        __raw_spin_unlock(&cpu_buffer->lock);

        spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
/**
 * ring_buffer_reset - reset a ring buffer
 * @buffer: The ring buffer to reset all cpu buffers
 */
void ring_buffer_reset(struct ring_buffer *buffer)
{
        int cpu;

        for_each_buffer_cpu(buffer, cpu)
                ring_buffer_reset_cpu(buffer, cpu);
}
/**
 * ring_buffer_empty - is the ring buffer empty?
 * @buffer: The ring buffer to test
 */
int ring_buffer_empty(struct ring_buffer *buffer)
{
        struct ring_buffer_per_cpu *cpu_buffer;
        int cpu;

        /* yes this is racy, but if you don't like the race, lock the buffer */
        for_each_buffer_cpu(buffer, cpu) {
                cpu_buffer = buffer->buffers[cpu];
                if (!rb_per_cpu_empty(cpu_buffer))
                        return 0;
        }
        return 1;
}
/**
 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
 * @buffer: The ring buffer
 * @cpu: The CPU buffer to test
 */
int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
{
        struct ring_buffer_per_cpu *cpu_buffer;

        if (!cpu_isset(cpu, buffer->cpumask))
                return 1;

        cpu_buffer = buffer->buffers[cpu];
        return rb_per_cpu_empty(cpu_buffer);
}
/**
 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
 * @buffer_a: One buffer to swap with
 * @buffer_b: The other buffer to swap with
 * @cpu: The CPU buffer to swap
 *
 * This function is useful for tracers that want to take a "snapshot"
 * of a CPU buffer and have another backup buffer lying around.
 * It is expected that the tracer handles the cpu buffer not being
 * used at the moment.
 */
int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
                         struct ring_buffer *buffer_b, int cpu)
{
        struct ring_buffer_per_cpu *cpu_buffer_a;
        struct ring_buffer_per_cpu *cpu_buffer_b;

        if (!cpu_isset(cpu, buffer_a->cpumask) ||
            !cpu_isset(cpu, buffer_b->cpumask))
                return -EINVAL;

        /* At least make sure the two buffers are somewhat the same */
        if (buffer_a->size != buffer_b->size ||
            buffer_a->pages != buffer_b->pages)
                return -EINVAL;

        cpu_buffer_a = buffer_a->buffers[cpu];
        cpu_buffer_b = buffer_b->buffers[cpu];

        /*
         * We can't do a synchronize_sched here because this
         * function can be called in atomic context.
         * Normally this will be called from the same CPU as cpu.
         * If not it's up to the caller to protect this.
         */
        atomic_inc(&cpu_buffer_a->record_disabled);
        atomic_inc(&cpu_buffer_b->record_disabled);

        buffer_a->buffers[cpu] = cpu_buffer_b;
        buffer_b->buffers[cpu] = cpu_buffer_a;

        cpu_buffer_b->buffer = buffer_a;
        cpu_buffer_a->buffer = buffer_b;

        atomic_dec(&cpu_buffer_a->record_disabled);
        atomic_dec(&cpu_buffer_b->record_disabled);

        return 0;
}
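
/*
 * Illustrative sketch, not part of the original file: the "snapshot"
 * pattern described in the kernel-doc above. The caller keeps a spare
 * ring buffer of the same size, swaps the live CPU buffer with the
 * spare, and then reads the now-quiet snapshot at leisure. The spare
 * buffer and helper name are assumptions made for this example.
 */
#if 0
static int example_snapshot_cpu(struct ring_buffer *live,
                                struct ring_buffer *spare, int cpu)
{
        int ret;

        ret = ring_buffer_swap_cpu(live, spare, cpu);
        if (ret < 0)
                return ret;     /* size or cpumask mismatch */

        /*
         * "spare" now holds what this CPU recorded so far; it can be
         * read with ring_buffer_consume(spare, cpu, &ts) while "live"
         * keeps recording into the buffer it received in exchange.
         */
        return 0;
}
#endif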
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
               size_t cnt, loff_t *ppos)
{
        int *p = filp->private_data;
        char buf[64];
        int r;

        /* !ring_buffers_off == tracing_on */
        r = sprintf(buf, "%d\n", !*p);

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
                size_t cnt, loff_t *ppos)
{
        int *p = filp->private_data;
        char buf[64];
        long val;
        int ret;

        if (cnt >= sizeof(buf))
                return -EINVAL;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;

        ret = strict_strtoul(buf, 10, &val);
        if (ret < 0)
                return ret;

        /* !ring_buffers_off == tracing_on */
        *p = !val;

        (*ppos)++;

        return cnt;
}
static struct file_operations rb_simple_fops = {
        .open           = tracing_open_generic,
        .read           = rb_simple_read,
        .write          = rb_simple_write,
};

static __init int rb_init_debugfs(void)
{
        struct dentry *d_tracer;
        struct dentry *entry;

        d_tracer = tracing_init_dentry();

        entry = debugfs_create_file("tracing_on", 0644, d_tracer,
                                    &ring_buffers_off, &rb_simple_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'tracing_on' entry\n");

        return 0;
}

fs_initcall(rb_init_debugfs);
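
/*
 * Usage note, not part of the original file: with debugfs mounted in the
 * usual place, the file created above typically appears as
 * /sys/kernel/debug/tracing/tracing_on. Per the rb_simple_* handlers,
 * writing "0" sets ring_buffers_off (disabling recording), writing "1"
 * clears it, and reading reports 1 while recording is enabled.
 */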