internal.h

#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01

struct ring_buffer {
	atomic_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				writable;	/* are we writable   */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	local_t				nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */

	/* poll crap */
	spinlock_t			event_lock;
	struct list_head		event_list;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[0];
};

extern void rb_free(struct ring_buffer *rb);
extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
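
/*
 * rb_alloc() and rb_free() live in ring_buffer.c.  Roughly, as used from
 * perf_mmap(): nr_pages is the number of data pages (0 or a power of two),
 * watermark is the wakeup watermark in bytes (0 selects a default), cpu
 * hints which node the pages should come from, and flags is a mask of the
 * RING_BUFFER_* bits above.  A sketched call (not the literal perf_mmap()
 * code) would be:
 *
 *	struct ring_buffer *rb;
 *
 *	rb = rb_alloc(nr_pages, event->attr.wakeup_watermark,
 *		      event->cpu, RING_BUFFER_WRITABLE);
 *	if (!rb)
 *		return -ENOMEM;
 */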

extern void perf_event_wakeup(struct perf_event *event);

extern void
perf_event_header__init_id(struct perf_event_header *header,
			   struct perf_sample_data *data,
			   struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
			     struct perf_output_handle *handle,
			     struct perf_sample_data *sample);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */
static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}
#else
static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif

static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

static inline void
__output_copy(struct perf_output_handle *handle,
	      const void *buf, unsigned int len)
{
	do {
		unsigned long size = min_t(unsigned long, handle->size, len);

		memcpy(handle->addr, buf, size);

		len -= size;
		handle->addr += size;
		buf += size;
		handle->size -= size;
		if (!handle->size) {
			struct ring_buffer *rb = handle->rb;

			handle->page++;
			handle->page &= rb->nr_pages - 1;
			handle->addr = rb->data_pages[handle->page];
			handle->size = PAGE_SIZE << page_order(rb);
		}
	} while (len);
}
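
/*
 * __output_copy() is the copy loop behind perf_output_copy(): it copies len
 * bytes at the handle's current write position, stepping to the next data
 * page whenever the current one fills up (the "& (nr_pages - 1)" wrap
 * assumes nr_pages is a power of two).  A sketch of the usual output
 * sequence in the perf core (error handling trimmed):
 *
 *	struct perf_output_handle handle;
 *
 *	if (perf_output_begin(&handle, event, header.size))
 *		return;
 *	perf_output_put(&handle, header);
 *	perf_output_copy(&handle, data, len);
 *	perf_output_end(&handle);
 */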

/* Callchain handling */
extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
extern int get_callchain_buffers(void);
extern void put_callchain_buffers(void);

static inline int get_recursion_context(int *recursion)
{
	int rctx;

	if (in_nmi())
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}
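
/*
 * get_recursion_context() and put_recursion_context() keep the perf swevent
 * and callchain code from recursing within one of the four contexts they
 * distinguish (task, softirq, hardirq, NMI), e.g. when a software event
 * fires from inside the handling of another one.  The expected pattern,
 * roughly as in perf_swevent_get_recursion_context():
 *
 *	int rctx;
 *
 *	rctx = get_recursion_context(recursion);
 *	if (rctx < 0)
 *		return rctx;		(this context is already busy)
 *
 *	... emit the event ...
 *
 *	put_recursion_context(recursion, rctx);
 */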

#endif /* _KERNEL_EVENTS_INTERNAL_H */