perf.h

#ifndef _PERF_PERF_H
#define _PERF_PERF_H

#include <asm/unistd.h>
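
/*
 * Per-architecture helpers used throughout the perf tool:
 *
 *   rmb()        - read memory barrier, needed when consuming the kernel's
 *                  mmap()ed event ring buffer so that data_head is observed
 *                  before the records it covers.
 *   cpu_relax()  - hint to the CPU inside busy-wait loops.
 *   CPUINFO_PROC - name of the /proc/cpuinfo line describing the processor
 *                  on this architecture.
 *
 * The __NR_perf_event_open fallbacks cover toolchains whose unistd.h
 * predates the perf_event_open() syscall.
 */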
#if defined(__i386__)
#define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define cpu_relax() asm volatile("rep; nop" ::: "memory");
#define CPUINFO_PROC "model name"
#ifndef __NR_perf_event_open
# define __NR_perf_event_open 336
#endif
#endif

#if defined(__x86_64__)
#define rmb() asm volatile("lfence" ::: "memory")
#define cpu_relax() asm volatile("rep; nop" ::: "memory");
#define CPUINFO_PROC "model name"
#ifndef __NR_perf_event_open
# define __NR_perf_event_open 298
#endif
#endif

#ifdef __powerpc__
#include "../../arch/powerpc/include/uapi/asm/unistd.h"
#define rmb() asm volatile ("sync" ::: "memory")
#define cpu_relax() asm volatile ("" ::: "memory");
#define CPUINFO_PROC "cpu"
#endif

#ifdef __s390__
#define rmb() asm volatile("bcr 15,0" ::: "memory")
#define cpu_relax() asm volatile("" ::: "memory");
#endif

#ifdef __sh__
#if defined(__SH4A__) || defined(__SH5__)
# define rmb() asm volatile("synco" ::: "memory")
#else
# define rmb() asm volatile("" ::: "memory")
#endif
#define cpu_relax() asm volatile("" ::: "memory")
#define CPUINFO_PROC "cpu type"
#endif

#ifdef __hppa__
#define rmb() asm volatile("" ::: "memory")
#define cpu_relax() asm volatile("" ::: "memory");
#define CPUINFO_PROC "cpu"
#endif

#ifdef __sparc__
#define rmb() asm volatile("":::"memory")
#define cpu_relax() asm volatile("":::"memory")
#define CPUINFO_PROC "cpu"
#endif

#ifdef __alpha__
#define rmb() asm volatile("mb" ::: "memory")
#define cpu_relax() asm volatile("" ::: "memory")
#define CPUINFO_PROC "cpu model"
#endif

#ifdef __ia64__
#define rmb() asm volatile ("mf" ::: "memory")
#define cpu_relax() asm volatile ("hint @pause" ::: "memory")
#define CPUINFO_PROC "model name"
#endif

#ifdef __arm__
/*
 * Use the __kuser_memory_barrier helper in the CPU helper page. See
 * arch/arm/kernel/entry-armv.S in the kernel source for details.
 */
#define rmb() ((void(*)(void))0xffff0fa0)()
#define cpu_relax() asm volatile("":::"memory")
#define CPUINFO_PROC "Processor"
#endif

#ifdef __aarch64__
#define rmb() asm volatile("dmb ld" ::: "memory")
#define cpu_relax() asm volatile("yield" ::: "memory")
#endif

#ifdef __mips__
#define rmb() asm volatile(                \
                ".set mips2\n\t"           \
                "sync\n\t"                 \
                ".set mips0"               \
                : /* no output */          \
                : /* no input */           \
                : "memory")
#define cpu_relax() asm volatile("" ::: "memory")
#define CPUINFO_PROC "cpu model"
#endif

#ifdef __arc__
#define rmb() asm volatile("" ::: "memory")
#define cpu_relax() rmb()
#define CPUINFO_PROC "Processor"
#endif

#ifdef __metag__
#define rmb() asm volatile("" ::: "memory")
#define cpu_relax() asm volatile("" ::: "memory")
#define CPUINFO_PROC "CPU"
#endif

#include <time.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>

#include <linux/perf_event.h>
#include "util/types.h"
#include <stdbool.h>

/*
 * prctl(PR_TASK_PERF_EVENTS_DISABLE) will (cheaply) disable all
 * counters in the current task.
 */
#define PR_TASK_PERF_EVENTS_DISABLE 31
#define PR_TASK_PERF_EVENTS_ENABLE  32

#ifndef NSEC_PER_SEC
# define NSEC_PER_SEC 1000000000ULL
#endif
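
/* Monotonic timestamp in nanoseconds, based on CLOCK_MONOTONIC. */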
static inline unsigned long long rdclock(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

/*
 * Pick up some kernel type conventions:
 */
#define __user
#define asmlinkage

#define unlikely(x) __builtin_expect(!!(x), 0)
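
/*
 * min() evaluates each argument only once; the (void) comparison of
 * &_min1 and &_min2 is a compile-time type check (comparing pointers
 * to incompatible types triggers a warning) and generates no code.
 */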
#define min(x, y) ({                       \
        typeof(x) _min1 = (x);             \
        typeof(y) _min2 = (y);             \
        (void) (&_min1 == &_min2);         \
        _min1 < _min2 ? _min1 : _min2; })

extern bool test_attr__enabled;
void test_attr__init(void);
void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu,
                     int fd, int group_fd, unsigned long flags);

static inline int
sys_perf_event_open(struct perf_event_attr *attr,
                    pid_t pid, int cpu, int group_fd,
                    unsigned long flags)
{
        int fd;

        fd = syscall(__NR_perf_event_open, attr, pid, cpu,
                     group_fd, flags);

        if (unlikely(test_attr__enabled))
                test_attr__open(attr, pid, cpu, fd, group_fd, flags);

        return fd;
}
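
/*
 * Illustrative sketch only (not part of the original header): counting
 * CPU cycles for the calling thread with sys_perf_event_open().
 *
 *      struct perf_event_attr attr = {
 *              .type     = PERF_TYPE_HARDWARE,
 *              .size     = sizeof(attr),
 *              .config   = PERF_COUNT_HW_CPU_CYCLES,
 *              .disabled = 1,
 *      };
 *      u64 count;
 *      int fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
 *
 *      ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *      ... workload ...
 *      ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *      read(fd, &count, sizeof(count));
 */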

#define MAX_COUNTERS 256
#define MAX_NR_CPUS  256
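
/*
 * Layouts matching the kernel's sample records: PERF_SAMPLE_CALLCHAIN
 * delivers a count followed by that many instruction pointers,
 * PERF_SAMPLE_BRANCH_STACK a count followed by that many from/to/flags
 * branch entries.
 */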
struct ip_callchain {
        u64 nr;
        u64 ips[0];
};

struct branch_flags {
        u64 mispred:1;
        u64 predicted:1;
        u64 reserved:62;
};

struct branch_entry {
        u64 from;
        u64 to;
        struct branch_flags flags;
};

struct branch_stack {
        u64 nr;
        struct branch_entry entries[0];
};

extern const char *input_name;
extern bool perf_host, perf_guest;
extern const char perf_version_string[];

void pthread__unblock_sigwinch(void);

#include "util/target.h"
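
/* How callchains are collected: frame pointers or DWARF unwind info. */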
enum perf_call_graph_mode {
        CALLCHAIN_NONE,
        CALLCHAIN_FP,
        CALLCHAIN_DWARF
};
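
/* Options collected from the command line for a recording session
 * (perf record and similar commands). */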
struct perf_record_opts {
        struct perf_target target;
        int          call_graph;
        bool         group;
        bool         inherit_stat;
        bool         no_delay;
        bool         no_inherit;
        bool         no_samples;
        bool         pipe_output;
        bool         raw_samples;
        bool         sample_address;
        bool         sample_weight;
        bool         sample_time;
        bool         period;
        unsigned int freq;
        unsigned int mmap_pages;
        unsigned int user_freq;
        u64          branch_stack;
        u64          default_interval;
        u64          user_interval;
        u16          stack_dump_size;
};

#endif