ring_buffer.c

/*
 * Performance events ring-buffer code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include "internal.h"

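/*
 * Check whether the range [offset, head) may be written.  A buffer that
 * is not user-writable never sees data_tail updates, so old data is
 * simply overwritten and there is always space.  For a writable buffer
 * the new record must not run past the tail value last published by the
 * reader.
 */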
static bool perf_output_space(struct ring_buffer *rb, unsigned long tail,
			      unsigned long offset, unsigned long head)
{
	unsigned long mask;

	if (!rb->writable)
		return true;

	mask = perf_data_size(rb) - 1;

	offset = (offset - tail) & mask;
	head   = (head   - tail) & mask;

	if ((int)(head - offset) < 0)
		return false;

	return true;
}

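/*
 * Wake up consumers of the buffer.  The wakeup itself runs from the
 * event's pending irq_work, so this is safe to call from contexts that
 * cannot take the wait-queue lock directly, including NMI.
 */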
static void perf_output_wakeup(struct perf_output_handle *handle)
{
	atomic_set(&handle->rb->poll, POLL_IN);

	handle->event->pending_wakeup = 1;
	irq_work_queue(&handle->event->pending);
}

/*
 * We need to ensure a later event_id doesn't publish a head when a former
 * event isn't done writing. However since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
static void perf_output_get_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;

	preempt_disable();
	local_inc(&rb->nest);
	handle->wakeup = local_read(&rb->wakeup);
}

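/*
 * Note that perf_output_get_handle() leaves preemption disabled; the
 * matching perf_output_put_handle() below re-enables it once the new
 * head has been published.
 */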
static void perf_output_put_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;
	unsigned long head;

again:
	head = local_read(&rb->head);

	/*
	 * IRQ/NMI can happen here, which means we can miss a head update.
	 */

	if (!local_dec_and_test(&rb->nest))
		goto out;

	/*
	 * Publish the known good head. Rely on the full barrier implied
	 * by atomic_dec_and_test() to order the rb->head read and this
	 * write.
	 */
	rb->user_page->data_head = head;

	/*
	 * Now check if we missed an update, rely on the (compiler)
	 * barrier in atomic_dec_and_test() to re-read rb->head.
	 */
	if (unlikely(head != local_read(&rb->head))) {
		local_inc(&rb->nest);
		goto again;
	}

	if (handle->wakeup != local_read(&rb->wakeup))
		perf_output_wakeup(handle);

out:
	preempt_enable();
}

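/*
 * Reserve 'size' bytes in the buffer for a new record.  On success the
 * handle describes where to write and 0 is returned; the caller must
 * finish with perf_output_end().  If there is no room, rb->lost is
 * bumped and -ENOSPC is returned; the accumulated lost count is emitted
 * as a PERF_RECORD_LOST record in front of the next record that does
 * fit.
 */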
int perf_output_begin(struct perf_output_handle *handle,
		      struct perf_event *event, unsigned int size)
{
	struct ring_buffer *rb;
	unsigned long tail, offset, head;
	int have_lost;
	struct perf_sample_data sample_data;
	struct {
		struct perf_event_header header;
		u64			 id;
		u64			 lost;
	} lost_event;

	rcu_read_lock();
	/*
	 * For inherited events we send all the output towards the parent.
	 */
	if (event->parent)
		event = event->parent;

	rb = rcu_dereference(event->rb);
	if (!rb)
		goto out;

	handle->rb	= rb;
	handle->event	= event;

	if (!rb->nr_pages)
		goto out;

	have_lost = local_read(&rb->lost);
	if (have_lost) {
		lost_event.header.size = sizeof(lost_event);
		perf_event_header__init_id(&lost_event.header, &sample_data,
					   event);
		size += lost_event.header.size;
	}

	perf_output_get_handle(handle);

	do {
		/*
		 * Userspace could choose to issue an mb() before updating the
		 * tail pointer, so that all reads are completed before the
		 * write is issued.
		 */
		tail = ACCESS_ONCE(rb->user_page->data_tail);
		smp_rmb();
		offset = head = local_read(&rb->head);
		head += size;
		if (unlikely(!perf_output_space(rb, tail, offset, head)))
			goto fail;
	} while (local_cmpxchg(&rb->head, offset, head) != offset);

	if (head - local_read(&rb->wakeup) > rb->watermark)
		local_add(rb->watermark, &rb->wakeup);

	handle->page = offset >> (PAGE_SHIFT + page_order(rb));
	handle->page &= rb->nr_pages - 1;
	handle->size = offset & ((PAGE_SIZE << page_order(rb)) - 1);
	handle->addr = rb->data_pages[handle->page];
	handle->addr += handle->size;
	handle->size = (PAGE_SIZE << page_order(rb)) - handle->size;

	if (have_lost) {
		lost_event.header.type = PERF_RECORD_LOST;
		lost_event.header.misc = 0;
		lost_event.id          = event->id;
		lost_event.lost        = local_xchg(&rb->lost, 0);

		perf_output_put(handle, lost_event);
		perf_event__output_id_sample(event, handle, &sample_data);
	}

	return 0;

fail:
	local_inc(&rb->lost);
	perf_output_put_handle(handle);
out:
	rcu_read_unlock();

	return -ENOSPC;
}

void perf_output_copy(struct perf_output_handle *handle,
		      const void *buf, unsigned int len)
{
	__output_copy(handle, buf, len);
}

void perf_output_end(struct perf_output_handle *handle)
{
	perf_output_put_handle(handle);
	rcu_read_unlock();
}

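/*
 * Illustrative sketch only (not part of this file): a typical producer
 * drives the three functions above as
 *
 *	struct perf_output_handle handle;
 *	int err;
 *
 *	err = perf_output_begin(&handle, event, header.size);
 *	if (err)
 *		return err;
 *	perf_output_put(&handle, header);
 *	perf_output_copy(&handle, buf, len);
 *	perf_output_end(&handle);
 *
 * where 'event', 'header', 'buf' and 'len' stand in for the caller's own
 * data, and perf_output_put() is the sizeof()-sized wrapper around
 * perf_output_copy() already used for lost_event in perf_output_begin().
 * On the read side, userspace pairs with this by loading data_head,
 * issuing a read barrier, consuming records and only then storing
 * data_tail, matching the data_tail/smp_rmb() handling above.
 */
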
static void
ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
{
	long max_size = perf_data_size(rb);

	if (watermark)
		rb->watermark = min(max_size, watermark);

	if (!rb->watermark)
		rb->watermark = max_size / 2;

	if (flags & RING_BUFFER_WRITABLE)
		rb->writable = 1;

	atomic_set(&rb->refcount, 1);

	INIT_LIST_HEAD(&rb->event_list);
	spin_lock_init(&rb->event_lock);
}

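/*
 * Two backing-store implementations follow.  The default backs the
 * buffer with individually allocated GFP_KERNEL pages; when
 * CONFIG_PERF_USE_VMALLOC is set, the user page and the data pages are
 * one contiguous vmalloc area instead.
 */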
#ifndef CONFIG_PERF_USE_VMALLOC

/*
 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
 */

struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	if (pgoff > rb->nr_pages)
		return NULL;

	if (pgoff == 0)
		return virt_to_page(rb->user_page);

	return virt_to_page(rb->data_pages[pgoff - 1]);
}

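/*
 * Allocate one zeroed page, preferably on the node of the CPU the
 * buffer is tied to; cpu == -1 means no node preference.
 */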
static void *perf_mmap_alloc_page(int cpu)
{
	struct page *page;
	int node;

	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
	if (!page)
		return NULL;

	return page_address(page);
}

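/*
 * Allocate a ring_buffer with one control page (user_page, mmap'ed to
 * userspace) and nr_pages data pages.  nr_pages is expected to be a
 * power of two; perf_output_begin() relies on that when masking the
 * write offset with rb->nr_pages - 1.
 */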
struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	int i;

	size = sizeof(struct ring_buffer);
	size += nr_pages * sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	rb->user_page = perf_mmap_alloc_page(cpu);
	if (!rb->user_page)
		goto fail_user_page;

	for (i = 0; i < nr_pages; i++) {
		rb->data_pages[i] = perf_mmap_alloc_page(cpu);
		if (!rb->data_pages[i])
			goto fail_data_pages;
	}

	rb->nr_pages = nr_pages;

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_data_pages:
	for (i--; i >= 0; i--)
		free_page((unsigned long)rb->data_pages[i]);

	free_page((unsigned long)rb->user_page);

fail_user_page:
	kfree(rb);

fail:
	return NULL;
}

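/*
 * Undo perf_mmap_alloc_page().  page->mapping is cleared first: it may
 * have been set while the page was mapped into userspace and must not
 * be left dangling when the page returns to the allocator.
 */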
static void perf_mmap_free_page(unsigned long addr)
{
	struct page *page = virt_to_page((void *)addr);

	page->mapping = NULL;
	__free_page(page);
}

void rb_free(struct ring_buffer *rb)
{
	int i;

	perf_mmap_free_page((unsigned long)rb->user_page);
	for (i = 0; i < rb->nr_pages; i++)
		perf_mmap_free_page((unsigned long)rb->data_pages[i]);
	kfree(rb);
}

#else

struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	if (pgoff > (1UL << page_order(rb)))
		return NULL;

	return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
}

static void perf_mmap_unmark_page(void *addr)
{
	struct page *page = vmalloc_to_page(addr);

	page->mapping = NULL;
}

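/*
 * The actual teardown runs from a workqueue: rb_free() below may be
 * called from contexts where vfree() is not allowed, so it merely
 * schedules rb->work and rb_free_work() does the unmapping, vfree()
 * and kfree() later.
 */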
static void rb_free_work(struct work_struct *work)
{
	struct ring_buffer *rb;
	void *base;
	int i, nr;

	rb = container_of(work, struct ring_buffer, work);
	nr = 1 << page_order(rb);

	base = rb->user_page;
	for (i = 0; i < nr + 1; i++)
		perf_mmap_unmark_page(base + (i * PAGE_SIZE));

	vfree(base);
	kfree(rb);
}

void rb_free(struct ring_buffer *rb)
{
	schedule_work(&rb->work);
}

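/*
 * The vmalloc variant allocates the buffer as one area of nr_pages + 1
 * pages and presents it as a single data "page" of order
 * ilog2(nr_pages): rb->nr_pages stays 1 and page_order(rb) makes the
 * offset arithmetic in perf_output_begin() address within the one
 * contiguous allocation.
 */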
struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	void *all_buf;

	size = sizeof(struct ring_buffer);
	size += sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	INIT_WORK(&rb->work, rb_free_work);

	all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
	if (!all_buf)
		goto fail_all_buf;

	rb->user_page = all_buf;
	rb->data_pages[0] = all_buf + PAGE_SIZE;
	rb->page_order = ilog2(nr_pages);
	rb->nr_pages = 1;

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_all_buf:
	kfree(rb);

fail:
	return NULL;
}

#endif