/* perf.h */
  1. #ifndef _PERF_PERF_H
  2. #define _PERF_PERF_H
  3. struct winsize;
  4. void get_term_dimensions(struct winsize *ws);
  5. #if defined(__i386__)
  6. #include "../../arch/x86/include/asm/unistd.h"
  7. #define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
  8. #define cpu_relax() asm volatile("rep; nop" ::: "memory");
  9. #define CPUINFO_PROC "model name"
  10. #ifndef __NR_perf_event_open
  11. # define __NR_perf_event_open 336
  12. #endif
  13. #endif
  14. #if defined(__x86_64__)
  15. #include "../../arch/x86/include/asm/unistd.h"
  16. #define rmb() asm volatile("lfence" ::: "memory")
  17. #define cpu_relax() asm volatile("rep; nop" ::: "memory");
  18. #define CPUINFO_PROC "model name"
  19. #ifndef __NR_perf_event_open
  20. # define __NR_perf_event_open 298
  21. #endif
  22. #endif
  23. #ifdef __powerpc__
  24. #include "../../arch/powerpc/include/asm/unistd.h"
  25. #define rmb() asm volatile ("sync" ::: "memory")
  26. #define cpu_relax() asm volatile ("" ::: "memory");
  27. #define CPUINFO_PROC "cpu"
  28. #endif
  29. #ifdef __s390__
  30. #include "../../arch/s390/include/asm/unistd.h"
  31. #define rmb() asm volatile("bcr 15,0" ::: "memory")
  32. #define cpu_relax() asm volatile("" ::: "memory");
  33. #endif
  34. #ifdef __sh__
  35. #include "../../arch/sh/include/asm/unistd.h"
  36. #if defined(__SH4A__) || defined(__SH5__)
  37. # define rmb() asm volatile("synco" ::: "memory")
  38. #else
  39. # define rmb() asm volatile("" ::: "memory")
  40. #endif
  41. #define cpu_relax() asm volatile("" ::: "memory")
  42. #define CPUINFO_PROC "cpu type"
  43. #endif
  44. #ifdef __hppa__
  45. #include "../../arch/parisc/include/asm/unistd.h"
  46. #define rmb() asm volatile("" ::: "memory")
  47. #define cpu_relax() asm volatile("" ::: "memory");
  48. #define CPUINFO_PROC "cpu"
  49. #endif
  50. #ifdef __sparc__
  51. #include "../../arch/sparc/include/uapi/asm/unistd.h"
  52. #define rmb() asm volatile("":::"memory")
  53. #define cpu_relax() asm volatile("":::"memory")
  54. #define CPUINFO_PROC "cpu"
  55. #endif
  56. #ifdef __alpha__
  57. #include "../../arch/alpha/include/asm/unistd.h"
  58. #define rmb() asm volatile("mb" ::: "memory")
  59. #define cpu_relax() asm volatile("" ::: "memory")
  60. #define CPUINFO_PROC "cpu model"
  61. #endif
  62. #ifdef __ia64__
  63. #include "../../arch/ia64/include/asm/unistd.h"
  64. #define rmb() asm volatile ("mf" ::: "memory")
  65. #define cpu_relax() asm volatile ("hint @pause" ::: "memory")
  66. #define CPUINFO_PROC "model name"
  67. #endif
  68. #ifdef __arm__
  69. #include "../../arch/arm/include/asm/unistd.h"
  70. /*
  71. * Use the __kuser_memory_barrier helper in the CPU helper page. See
  72. * arch/arm/kernel/entry-armv.S in the kernel source for details.
  73. */
  74. #define rmb() ((void(*)(void))0xffff0fa0)()
  75. #define cpu_relax() asm volatile("":::"memory")
  76. #define CPUINFO_PROC "Processor"
  77. #endif
  78. #ifdef __aarch64__
  79. #include "../../arch/arm64/include/asm/unistd.h"
  80. #define rmb() asm volatile("dmb ld" ::: "memory")
  81. #define cpu_relax() asm volatile("yield" ::: "memory")
  82. #endif
  83. #ifdef __mips__
  84. #include "../../arch/mips/include/asm/unistd.h"
  85. #define rmb() asm volatile( \
  86. ".set mips2\n\t" \
  87. "sync\n\t" \
  88. ".set mips0" \
  89. : /* no output */ \
  90. : /* no input */ \
  91. : "memory")
  92. #define cpu_relax() asm volatile("" ::: "memory")
  93. #define CPUINFO_PROC "cpu model"
  94. #endif
  95. #include <time.h>
  96. #include <unistd.h>
  97. #include <sys/types.h>
  98. #include <sys/syscall.h>
  99. #include "../../include/uapi/linux/perf_event.h"
  100. #include "util/types.h"
  101. #include <stdbool.h>
  102. struct perf_mmap {
  103. void *base;
  104. int mask;
  105. unsigned int prev;
  106. };
  107. static inline unsigned int perf_mmap__read_head(struct perf_mmap *mm)
  108. {
  109. struct perf_event_mmap_page *pc = mm->base;
  110. int head = pc->data_head;
  111. rmb();
  112. return head;
  113. }
  114. static inline void perf_mmap__write_tail(struct perf_mmap *md,
  115. unsigned long tail)
  116. {
  117. struct perf_event_mmap_page *pc = md->base;
  118. /*
  119. * ensure all reads are done before we write the tail out.
  120. */
  121. /* mb(); */
  122. pc->data_tail = tail;
  123. }
  124. /*
  125. * prctl(PR_TASK_PERF_EVENTS_DISABLE) will (cheaply) disable all
  126. * counters in the current task.
  127. */
  128. #define PR_TASK_PERF_EVENTS_DISABLE 31
  129. #define PR_TASK_PERF_EVENTS_ENABLE 32
  130. #ifndef NSEC_PER_SEC
  131. # define NSEC_PER_SEC 1000000000ULL
  132. #endif
  133. static inline unsigned long long rdclock(void)
  134. {
  135. struct timespec ts;
  136. clock_gettime(CLOCK_MONOTONIC, &ts);
  137. return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
  138. }
  139. /*
  140. * Pick up some kernel type conventions:
  141. */
  142. #define __user
  143. #define asmlinkage
  144. #define unlikely(x) __builtin_expect(!!(x), 0)
  145. #define min(x, y) ({ \
  146. typeof(x) _min1 = (x); \
  147. typeof(y) _min2 = (y); \
  148. (void) (&_min1 == &_min2); \
  149. _min1 < _min2 ? _min1 : _min2; })
  150. extern bool test_attr__enabled;
  151. void test_attr__init(void);
  152. void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu,
  153. int fd, int group_fd, unsigned long flags);
  154. static inline int
  155. sys_perf_event_open(struct perf_event_attr *attr,
  156. pid_t pid, int cpu, int group_fd,
  157. unsigned long flags)
  158. {
  159. int fd;
  160. fd = syscall(__NR_perf_event_open, attr, pid, cpu,
  161. group_fd, flags);
  162. if (unlikely(test_attr__enabled))
  163. test_attr__open(attr, pid, cpu, fd, group_fd, flags);
  164. return fd;
  165. }
  166. #define MAX_COUNTERS 256
  167. #define MAX_NR_CPUS 256
  168. struct ip_callchain {
  169. u64 nr;
  170. u64 ips[0];
  171. };
  172. struct branch_flags {
  173. u64 mispred:1;
  174. u64 predicted:1;
  175. u64 reserved:62;
  176. };
  177. struct branch_entry {
  178. u64 from;
  179. u64 to;
  180. struct branch_flags flags;
  181. };
  182. struct branch_stack {
  183. u64 nr;
  184. struct branch_entry entries[0];
  185. };
  186. extern const char *input_name;
  187. extern bool perf_host, perf_guest;
  188. extern const char perf_version_string[];
  189. void pthread__unblock_sigwinch(void);
  190. #include "util/target.h"
  191. enum perf_call_graph_mode {
  192. CALLCHAIN_NONE,
  193. CALLCHAIN_FP,
  194. CALLCHAIN_DWARF
  195. };
  196. struct perf_record_opts {
  197. struct perf_target target;
  198. int call_graph;
  199. bool group;
  200. bool inherit_stat;
  201. bool no_delay;
  202. bool no_inherit;
  203. bool no_samples;
  204. bool pipe_output;
  205. bool raw_samples;
  206. bool sample_address;
  207. bool sample_time;
  208. bool sample_id_all_missing;
  209. bool exclude_guest_missing;
  210. bool period;
  211. unsigned int freq;
  212. unsigned int mmap_pages;
  213. unsigned int user_freq;
  214. u64 branch_stack;
  215. u64 default_interval;
  216. u64 user_interval;
  217. u16 stack_dump_size;
  218. };
  219. #endif