/* internal.h — perf events ring-buffer internals */
  1. #ifndef _KERNEL_EVENTS_INTERNAL_H
  2. #define _KERNEL_EVENTS_INTERNAL_H
  3. #define RING_BUFFER_WRITABLE 0x01
  4. struct ring_buffer {
  5. atomic_t refcount;
  6. struct rcu_head rcu_head;
  7. #ifdef CONFIG_PERF_USE_VMALLOC
  8. struct work_struct work;
  9. int page_order; /* allocation order */
  10. #endif
  11. int nr_pages; /* nr of data pages */
  12. int writable; /* are we writable */
  13. atomic_t poll; /* POLL_ for wakeups */
  14. local_t head; /* write position */
  15. local_t nest; /* nested writers */
  16. local_t events; /* event limit */
  17. local_t wakeup; /* wakeup stamp */
  18. local_t lost; /* nr records lost */
  19. long watermark; /* wakeup watermark */
  20. /* poll crap */
  21. spinlock_t event_lock;
  22. struct list_head event_list;
  23. struct perf_event_mmap_page *user_page;
  24. void *data_pages[0];
  25. };
  26. extern void rb_free(struct ring_buffer *rb);
  27. extern struct ring_buffer *
  28. rb_alloc(int nr_pages, long watermark, int cpu, int flags);
  29. extern void perf_event_wakeup(struct perf_event *event);
  30. extern void
  31. perf_event_header__init_id(struct perf_event_header *header,
  32. struct perf_sample_data *data,
  33. struct perf_event *event);
  34. extern void
  35. perf_event__output_id_sample(struct perf_event *event,
  36. struct perf_output_handle *handle,
  37. struct perf_sample_data *sample);
  38. extern struct page *
  39. perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);
  40. #ifdef CONFIG_PERF_USE_VMALLOC
  41. /*
  42. * Back perf_mmap() with vmalloc memory.
  43. *
  44. * Required for architectures that have d-cache aliasing issues.
  45. */
  46. static inline int page_order(struct ring_buffer *rb)
  47. {
  48. return rb->page_order;
  49. }
  50. #else
  51. static inline int page_order(struct ring_buffer *rb)
  52. {
  53. return 0;
  54. }
  55. #endif
  56. static unsigned long perf_data_size(struct ring_buffer *rb)
  57. {
  58. return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
  59. }
  60. static inline void
  61. __output_copy(struct perf_output_handle *handle,
  62. const void *buf, unsigned int len)
  63. {
  64. do {
  65. unsigned long size = min_t(unsigned long, handle->size, len);
  66. memcpy(handle->addr, buf, size);
  67. len -= size;
  68. handle->addr += size;
  69. buf += size;
  70. handle->size -= size;
  71. if (!handle->size) {
  72. struct ring_buffer *rb = handle->rb;
  73. handle->page++;
  74. handle->page &= rb->nr_pages - 1;
  75. handle->addr = rb->data_pages[handle->page];
  76. handle->size = PAGE_SIZE << page_order(rb);
  77. }
  78. } while (len);
  79. }
  80. #endif /* _KERNEL_EVENTS_INTERNAL_H */