/*
 * kernel/events/internal.h - internal definitions for the perf ring buffer.
 */
  1. #ifndef _KERNEL_EVENTS_INTERNAL_H
  2. #define _KERNEL_EVENTS_INTERNAL_H
  3. #define RING_BUFFER_WRITABLE 0x01
  4. struct ring_buffer {
  5. atomic_t refcount;
  6. struct rcu_head rcu_head;
  7. #ifdef CONFIG_PERF_USE_VMALLOC
  8. struct work_struct work;
  9. int page_order; /* allocation order */
  10. #endif
  11. int nr_pages; /* nr of data pages */
  12. int writable; /* are we writable */
  13. atomic_t poll; /* POLL_ for wakeups */
  14. local_t head; /* write position */
  15. local_t nest; /* nested writers */
  16. local_t events; /* event limit */
  17. local_t wakeup; /* wakeup stamp */
  18. local_t lost; /* nr records lost */
  19. long watermark; /* wakeup watermark */
  20. struct perf_event_mmap_page *user_page;
  21. void *data_pages[0];
  22. };
  23. extern void rb_free(struct ring_buffer *rb);
  24. extern struct ring_buffer *
  25. rb_alloc(int nr_pages, long watermark, int cpu, int flags);
  26. extern void perf_event_wakeup(struct perf_event *event);
  27. extern void
  28. perf_event_header__init_id(struct perf_event_header *header,
  29. struct perf_sample_data *data,
  30. struct perf_event *event);
  31. extern void
  32. perf_event__output_id_sample(struct perf_event *event,
  33. struct perf_output_handle *handle,
  34. struct perf_sample_data *sample);
  35. extern struct page *
  36. perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);
  37. #ifdef CONFIG_PERF_USE_VMALLOC
  38. /*
  39. * Back perf_mmap() with vmalloc memory.
  40. *
  41. * Required for architectures that have d-cache aliasing issues.
  42. */
  43. static inline int page_order(struct ring_buffer *rb)
  44. {
  45. return rb->page_order;
  46. }
  47. #else
  48. static inline int page_order(struct ring_buffer *rb)
  49. {
  50. return 0;
  51. }
  52. #endif
  53. static unsigned long perf_data_size(struct ring_buffer *rb)
  54. {
  55. return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
  56. }
  57. static inline void
  58. __output_copy(struct perf_output_handle *handle,
  59. const void *buf, unsigned int len)
  60. {
  61. do {
  62. unsigned long size = min_t(unsigned long, handle->size, len);
  63. memcpy(handle->addr, buf, size);
  64. len -= size;
  65. handle->addr += size;
  66. buf += size;
  67. handle->size -= size;
  68. if (!handle->size) {
  69. struct ring_buffer *rb = handle->rb;
  70. handle->page++;
  71. handle->page &= rb->nr_pages - 1;
  72. handle->addr = rb->data_pages[handle->page];
  73. handle->size = PAGE_SIZE << page_order(rb);
  74. }
  75. } while (len);
  76. }
  77. #endif /* _KERNEL_EVENTS_INTERNAL_H */