
#ifndef _PERF_PERF_H
#define _PERF_PERF_H

struct winsize;
void get_term_dimensions(struct winsize *ws);

#include <asm/unistd.h>

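/*
 * Per-architecture primitives: rmb() is a read memory barrier,
 * cpu_relax() is a busy-wait hint, CPUINFO_PROC names the /proc/cpuinfo
 * field that identifies the processor, and __NR_perf_event_open is
 * provided as a fallback where the libc headers do not yet define the
 * syscall number.
 */
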
#if defined(__i386__)
#define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define cpu_relax() asm volatile("rep; nop" ::: "memory")
#define CPUINFO_PROC "model name"
#ifndef __NR_perf_event_open
# define __NR_perf_event_open 336
#endif
#endif

#if defined(__x86_64__)
#define rmb() asm volatile("lfence" ::: "memory")
#define cpu_relax() asm volatile("rep; nop" ::: "memory")
#define CPUINFO_PROC "model name"
#ifndef __NR_perf_event_open
# define __NR_perf_event_open 298
#endif
#endif

#ifdef __powerpc__
#include "../../arch/powerpc/include/uapi/asm/unistd.h"
#define rmb() asm volatile("sync" ::: "memory")
#define cpu_relax() asm volatile("" ::: "memory")
#define CPUINFO_PROC "cpu"
#endif

#ifdef __s390__
#define rmb() asm volatile("bcr 15,0" ::: "memory")
#define cpu_relax() asm volatile("" ::: "memory")
#endif

#ifdef __sh__
#if defined(__SH4A__) || defined(__SH5__)
# define rmb() asm volatile("synco" ::: "memory")
#else
# define rmb() asm volatile("" ::: "memory")
#endif
#define cpu_relax() asm volatile("" ::: "memory")
#define CPUINFO_PROC "cpu type"
#endif

#ifdef __hppa__
#define rmb() asm volatile("" ::: "memory")
#define cpu_relax() asm volatile("" ::: "memory")
#define CPUINFO_PROC "cpu"
#endif

#ifdef __sparc__
#define rmb() asm volatile("" ::: "memory")
#define cpu_relax() asm volatile("" ::: "memory")
#define CPUINFO_PROC "cpu"
#endif

#ifdef __alpha__
#define rmb() asm volatile("mb" ::: "memory")
#define cpu_relax() asm volatile("" ::: "memory")
#define CPUINFO_PROC "cpu model"
#endif

#ifdef __ia64__
#define rmb() asm volatile("mf" ::: "memory")
#define cpu_relax() asm volatile("hint @pause" ::: "memory")
#define CPUINFO_PROC "model name"
#endif

#ifdef __arm__
/*
 * Use the __kuser_memory_barrier helper in the CPU helper page. See
 * arch/arm/kernel/entry-armv.S in the kernel source for details.
 */
#define rmb() ((void(*)(void))0xffff0fa0)()
#define cpu_relax() asm volatile("" ::: "memory")
#define CPUINFO_PROC "Processor"
#endif

#ifdef __aarch64__
#define rmb() asm volatile("dmb ld" ::: "memory")
#define cpu_relax() asm volatile("yield" ::: "memory")
#endif

#ifdef __mips__
#define rmb() asm volatile(             \
                ".set mips2\n\t"        \
                "sync\n\t"              \
                ".set mips0"            \
                : /* no output */       \
                : /* no input */        \
                : "memory")
#define cpu_relax() asm volatile("" ::: "memory")
#define CPUINFO_PROC "cpu model"
#endif

#include <time.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>

#include <linux/perf_event.h>
#include "util/types.h"
#include <stdbool.h>

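/*
 * Ring-buffer bookkeeping: base points at the mmap'd region (which
 * begins with a struct perf_event_mmap_page), mask is the data-area
 * size minus one and is used to wrap offsets, and prev caches the last
 * head position this consumer observed.
 */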
struct perf_mmap {
        void            *base;
        int             mask;
        unsigned int    prev;
};

static inline unsigned int perf_mmap__read_head(struct perf_mmap *mm)
{
        struct perf_event_mmap_page *pc = mm->base;
        unsigned int head = pc->data_head;
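        /*
         * Ensure the data_head load completes before any ring-buffer
         * payload reads that follow (counterpart of the barrier noted
         * in perf_mmap__write_tail() below).
         */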
        rmb();
        return head;
}

static inline void perf_mmap__write_tail(struct perf_mmap *md,
                                         unsigned long tail)
{
        struct perf_event_mmap_page *pc = md->base;

        /*
         * ensure all reads are done before we write the tail out.
         */
        /* mb(); */
        pc->data_tail = tail;
}

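/*
 * Illustrative sketch (not part of this header): a consumer built on
 * the two helpers above might drain the ring buffer like this, where
 * 'mmap_read' is a hypothetical per-event handler, 'page_size' is the
 * system page size, and events wrapping past the buffer end are
 * ignored for brevity:
 *
 *      unsigned int head = perf_mmap__read_head(md);
 *      unsigned int old = md->prev;
 *
 *      while (old != head) {
 *              struct perf_event_header *event =
 *                      md->base + page_size + (old & md->mask);
 *              mmap_read(event);
 *              old += event->size;
 *      }
 *      md->prev = old;
 *      perf_mmap__write_tail(md, old);
 */
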
/*
 * prctl(PR_TASK_PERF_EVENTS_DISABLE) will (cheaply) disable all
 * counters in the current task.
 */
#define PR_TASK_PERF_EVENTS_DISABLE 31
#define PR_TASK_PERF_EVENTS_ENABLE  32

#ifndef NSEC_PER_SEC
# define NSEC_PER_SEC 1000000000ULL
#endif

static inline unsigned long long rdclock(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
}

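/*
 * Illustrative sketch: rdclock() returns monotonic nanoseconds, so
 * timing a region looks like this ('do_work' is a placeholder for the
 * code being measured):
 *
 *      unsigned long long t0 = rdclock();
 *      do_work();
 *      printf("%llu ns\n", rdclock() - t0);
 */
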
/*
 * Pick up some kernel type conventions:
 */
#define __user
#define asmlinkage

#define unlikely(x) __builtin_expect(!!(x), 0)

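/*
 * Type-safe min(): the dummy pointer comparison below makes the
 * compiler warn when x and y have incompatible types, and each
 * argument is evaluated exactly once.
 */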
#define min(x, y) ({                            \
        typeof(x) _min1 = (x);                  \
        typeof(y) _min2 = (y);                  \
        (void) (&_min1 == &_min2);              \
        _min1 < _min2 ? _min1 : _min2; })

extern bool test_attr__enabled;
void test_attr__init(void);
void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu,
                     int fd, int group_fd, unsigned long flags);

static inline int
sys_perf_event_open(struct perf_event_attr *attr,
                    pid_t pid, int cpu, int group_fd,
                    unsigned long flags)
{
        int fd;

        fd = syscall(__NR_perf_event_open, attr, pid, cpu,
                     group_fd, flags);

        if (unlikely(test_attr__enabled))
                test_attr__open(attr, pid, cpu, fd, group_fd, flags);

        return fd;
}

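/*
 * Illustrative sketch (not part of this header): counting instructions
 * retired by the calling thread (pid 0) on any CPU (-1) might look
 * like this:
 *
 *      struct perf_event_attr attr = {
 *              .type     = PERF_TYPE_HARDWARE,
 *              .config   = PERF_COUNT_HW_INSTRUCTIONS,
 *              .size     = sizeof(attr),
 *              .disabled = 1,
 *      };
 *      int fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
 *
 *      ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *      ... run the workload ...
 *      ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *      read(fd, &count, sizeof(count));        // count is a u64
 */
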
#define MAX_COUNTERS 256
#define MAX_NR_CPUS  256

struct ip_callchain {
        u64 nr;
        u64 ips[0];
};

struct branch_flags {
        u64 mispred:1;
        u64 predicted:1;
        u64 reserved:62;
};

struct branch_entry {
        u64 from;
        u64 to;
        struct branch_flags flags;
};

struct branch_stack {
        u64 nr;
        struct branch_entry entries[0];
};

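/*
 * Illustrative sketch: walking the variable-length entries of a decoded
 * branch-stack sample ('bs' points at a struct branch_stack):
 *
 *      for (u64 i = 0; i < bs->nr; i++) {
 *              struct branch_entry *be = &bs->entries[i];
 *
 *              printf("%#llx -> %#llx %s\n",
 *                     (unsigned long long)be->from,
 *                     (unsigned long long)be->to,
 *                     be->flags.mispred ? "mispredicted" : "predicted");
 *      }
 */
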
extern const char *input_name;
extern bool perf_host, perf_guest;
extern const char perf_version_string[];

void pthread__unblock_sigwinch(void);

#include "util/target.h"

enum perf_call_graph_mode {
        CALLCHAIN_NONE,
        CALLCHAIN_FP,
        CALLCHAIN_DWARF
};

struct perf_record_opts {
        struct perf_target target;
        int          call_graph;
        bool         group;
        bool         inherit_stat;
        bool         no_delay;
        bool         no_inherit;
        bool         no_samples;
        bool         pipe_output;
        bool         raw_samples;
        bool         sample_address;
        bool         sample_time;
        bool         sample_id_all_missing;
        bool         exclude_guest_missing;
        bool         period;
        unsigned int freq;
        unsigned int mmap_pages;
        unsigned int user_freq;
        u64          branch_stack;
        u64          default_interval;
        u64          user_interval;
        u16          stack_dump_size;
};

#endif /* _PERF_PERF_H */