perf_event.h

/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * attr.type
 */
enum perf_type_id {
	PERF_TYPE_HARDWARE = 0,
	PERF_TYPE_SOFTWARE = 1,
	PERF_TYPE_TRACEPOINT = 2,
	PERF_TYPE_HW_CACHE = 3,
	PERF_TYPE_RAW = 4,
	PERF_TYPE_BREAKPOINT = 5,

	PERF_TYPE_MAX, /* non-ABI */
};

/*
 * Generalized performance event event_id types, used by the
 * attr.config parameter of the sys_perf_event_open()
 * syscall:
 */
enum perf_hw_id {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_HW_CPU_CYCLES = 0,
	PERF_COUNT_HW_INSTRUCTIONS = 1,
	PERF_COUNT_HW_CACHE_REFERENCES = 2,
	PERF_COUNT_HW_CACHE_MISSES = 3,
	PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4,
	PERF_COUNT_HW_BRANCH_MISSES = 5,
	PERF_COUNT_HW_BUS_CYCLES = 6,

	PERF_COUNT_HW_MAX, /* non-ABI */
};

/*
 * Generalized hardware cache events:
 *
 *	{ L1-D, L1-I, LLC, ITLB, DTLB, BPU } x
 *	{ read, write, prefetch } x
 *	{ accesses, misses }
 */
enum perf_hw_cache_id {
	PERF_COUNT_HW_CACHE_L1D = 0,
	PERF_COUNT_HW_CACHE_L1I = 1,
	PERF_COUNT_HW_CACHE_LL = 2,
	PERF_COUNT_HW_CACHE_DTLB = 3,
	PERF_COUNT_HW_CACHE_ITLB = 4,
	PERF_COUNT_HW_CACHE_BPU = 5,

	PERF_COUNT_HW_CACHE_MAX, /* non-ABI */
};

enum perf_hw_cache_op_id {
	PERF_COUNT_HW_CACHE_OP_READ = 0,
	PERF_COUNT_HW_CACHE_OP_WRITE = 1,
	PERF_COUNT_HW_CACHE_OP_PREFETCH = 2,

	PERF_COUNT_HW_CACHE_OP_MAX, /* non-ABI */
};

enum perf_hw_cache_op_result_id {
	PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0,
	PERF_COUNT_HW_CACHE_RESULT_MISS = 1,

	PERF_COUNT_HW_CACHE_RESULT_MAX, /* non-ABI */
};
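
/*
 * Example (illustrative sketch, not part of the original header): for
 * PERF_TYPE_HW_CACHE, the three enums above are combined into attr.config,
 * one byte each:
 *
 *	config = id | (op_id << 8) | (op_result_id << 16)
 *
 * e.g. to count L1 data-cache read misses:
 *
 *	attr.type   = PERF_TYPE_HW_CACHE;
 *	attr.config = PERF_COUNT_HW_CACHE_L1D |
 *		      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *		      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 */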

/*
 * Special "software" events provided by the kernel, even if the hardware
 * does not support performance events. These events measure various
 * software conditions in the kernel (and allow them to be profiled
 * as well):
 */
enum perf_sw_ids {
	PERF_COUNT_SW_CPU_CLOCK = 0,
	PERF_COUNT_SW_TASK_CLOCK = 1,
	PERF_COUNT_SW_PAGE_FAULTS = 2,
	PERF_COUNT_SW_CONTEXT_SWITCHES = 3,
	PERF_COUNT_SW_CPU_MIGRATIONS = 4,
	PERF_COUNT_SW_PAGE_FAULTS_MIN = 5,
	PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6,
	PERF_COUNT_SW_ALIGNMENT_FAULTS = 7,
	PERF_COUNT_SW_EMULATION_FAULTS = 8,

	PERF_COUNT_SW_MAX, /* non-ABI */
};

/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_event_sample_format {
	PERF_SAMPLE_IP = 1U << 0,
	PERF_SAMPLE_TID = 1U << 1,
	PERF_SAMPLE_TIME = 1U << 2,
	PERF_SAMPLE_ADDR = 1U << 3,
	PERF_SAMPLE_READ = 1U << 4,
	PERF_SAMPLE_CALLCHAIN = 1U << 5,
	PERF_SAMPLE_ID = 1U << 6,
	PERF_SAMPLE_CPU = 1U << 7,
	PERF_SAMPLE_PERIOD = 1U << 8,
	PERF_SAMPLE_STREAM_ID = 1U << 9,
	PERF_SAMPLE_RAW = 1U << 10,

	PERF_SAMPLE_MAX = 1U << 11, /* non-ABI */
};

/*
 * The format of the data returned by read() on a perf event fd,
 * as specified by attr.read_format:
 *
 * struct read_format {
 *	{ u64	value;
 *	  { u64	time_enabled; }	&& PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64	time_running; }	&& PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64	id;           }	&& PERF_FORMAT_ID
 *	} && !PERF_FORMAT_GROUP
 *
 *	{ u64	nr;
 *	  { u64	time_enabled; }	&& PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64	time_running; }	&& PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64	value;
 *	    { u64 id;         }	&& PERF_FORMAT_ID
 *	  } cntr[nr];
 *	} && PERF_FORMAT_GROUP
 * };
 */
enum perf_event_read_format {
	PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0,
	PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
	PERF_FORMAT_ID = 1U << 2,
	PERF_FORMAT_GROUP = 1U << 3,

	PERF_FORMAT_MAX = 1U << 4, /* non-ABI */
};
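
/*
 * Example (illustrative sketch, not part of the original header): a
 * user-space read of a single event opened with read_format =
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING.
 * Per the layout above, the three u64s arrive in declaration order;
 * the enabled/running times allow scaling counts of multiplexed events
 * (error handling elided):
 *
 *	struct { __u64 value, time_enabled, time_running; } rf;
 *
 *	read(fd, &rf, sizeof(rf));
 *	if (rf.time_running)
 *		scaled = rf.value * rf.time_enabled / rf.time_running;
 */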

#define PERF_ATTR_SIZE_VER0 64	/* sizeof first published struct */

/*
 * Hardware event_id to monitor via a performance monitoring event:
 */
struct perf_event_attr {

	/*
	 * Major type: hardware/software/tracepoint/etc.
	 */
	__u32 type;

	/*
	 * Size of the attr structure, for fwd/bwd compat.
	 */
	__u32 size;

	/*
	 * Type specific configuration information.
	 */
	__u64 config;

	union {
		__u64 sample_period;
		__u64 sample_freq;
	};

	__u64 sample_type;
	__u64 read_format;

	__u64 disabled : 1,	/* off by default */
	      inherit : 1,	/* children inherit it */
	      pinned : 1,	/* must always be on PMU */
	      exclusive : 1,	/* only group on PMU */
	      exclude_user : 1,	/* don't count user */
	      exclude_kernel : 1, /* ditto kernel */
	      exclude_hv : 1,	/* ditto hypervisor */
	      exclude_idle : 1,	/* don't count when idle */
	      mmap : 1,		/* include mmap data */
	      comm : 1,		/* include comm data */
	      freq : 1,		/* use freq, not period */
	      inherit_stat : 1,	/* per task counts */
	      enable_on_exec : 1, /* next exec enables */
	      task : 1,		/* trace fork/exit */
	      watermark : 1,	/* wakeup_watermark */
	      /*
	       * precise_ip:
	       *
	       *  0 - SAMPLE_IP can have arbitrary skid
	       *  1 - SAMPLE_IP must have constant skid
	       *  2 - SAMPLE_IP requested to have 0 skid
	       *  3 - SAMPLE_IP must have 0 skid
	       *
	       * See also PERF_RECORD_MISC_EXACT_IP
	       */
	      precise_ip : 2,	/* skid constraint */
	      mmap_data : 1,	/* non-exec mmap data */
	      sample_id_all : 1, /* sample_type all events */

	      __reserved_1 : 45;

	union {
		__u32 wakeup_events;	/* wakeup every n events */
		__u32 wakeup_watermark;	/* bytes before wakeup */
	};

	__u32 bp_type;
	__u64 bp_addr;
	__u64 bp_len;
};
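
/*
 * Example (illustrative sketch, not part of the original header; error
 * handling elided): filling in this structure from user-space and opening
 * the event. There is no glibc wrapper, so sys_perf_event_open() is
 * reached via syscall(2):
 *
 *	struct perf_event_attr attr;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.size           = sizeof(attr);
 *	attr.type           = PERF_TYPE_HARDWARE;
 *	attr.config         = PERF_COUNT_HW_INSTRUCTIONS;
 *	attr.disabled       = 1;
 *	attr.exclude_kernel = 1;
 *
 *	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *
 * The arguments after &attr are pid (0 = calling task), cpu (-1 = any),
 * group_fd (-1 = lead a new group) and flags (the PERF_FLAG_* bits below).
 */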

/*
 * Ioctls that can be done on a perf event fd:
 */
#define PERF_EVENT_IOC_ENABLE		_IO ('$', 0)
#define PERF_EVENT_IOC_DISABLE		_IO ('$', 1)
#define PERF_EVENT_IOC_REFRESH		_IO ('$', 2)
#define PERF_EVENT_IOC_RESET		_IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD		_IOW('$', 4, __u64)
#define PERF_EVENT_IOC_SET_OUTPUT	_IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER	_IOW('$', 6, char *)

enum perf_event_ioc_flags {
	PERF_IOC_FLAG_GROUP = 1U << 0,
};
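
/*
 * Example (illustrative, not part of the original header): typical use
 * around a measured region; with PERF_IOC_FLAG_GROUP the operation
 * applies to the whole event group led by fd:
 *
 *	ioctl(fd, PERF_EVENT_IOC_RESET,   PERF_IOC_FLAG_GROUP);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE,  PERF_IOC_FLAG_GROUP);
 *	run_workload();
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);
 *
 * (run_workload() stands in for whatever code is being measured.)
 */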

/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_event_mmap_page {
	__u32 version;		/* version number of this structure */
	__u32 compat_version;	/* lowest version this is compat with */

	/*
	 * Bits needed to read the hw events in user-space.
	 *
	 *   u32 seq;
	 *   s64 count;
	 *
	 *   do {
	 *     seq = pc->lock;
	 *
	 *     barrier()
	 *     if (pc->index) {
	 *       count = pmc_read(pc->index - 1);
	 *       count += pc->offset;
	 *     } else
	 *       goto regular_read;
	 *
	 *     barrier();
	 *   } while (pc->lock != seq);
	 *
	 * NOTE: for obvious reasons this only works on self-monitoring
	 *       processes.
	 */
	__u32 lock;		/* seqlock for synchronization */
	__u32 index;		/* hardware event identifier */
	__s64 offset;		/* add to hardware event value */
	__u64 time_enabled;	/* time event active */
	__u64 time_running;	/* time event on cpu */

	/*
	 * Hole for extension of the self monitor capabilities
	 */
	__u64 __reserved[123];	/* align to 1k */

	/*
	 * Control data for the mmap() data buffer.
	 *
	 * User-space reading the @data_head value should issue an rmb(), on
	 * SMP capable platforms, after reading this value -- see
	 * perf_event_wakeup().
	 *
	 * When the mapping is PROT_WRITE the @data_tail value should be
	 * written by user-space to reflect the last read data. In this case
	 * the kernel will not over-write unread data.
	 */
	__u64 data_head;	/* head in the data section */
	__u64 data_tail;	/* user-space written tail */
};
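
/*
 * Example (illustrative sketch of the @data_head/@data_tail protocol
 * described above, for a PROT_WRITE mapping; "pc" is the mapped page,
 * "data" the buffer following it and "data_size" its length -- names
 * chosen here for illustration only):
 *
 *	__u64 head = pc->data_head;
 *	rmb();				read barrier, see above
 *
 *	while (tail != head) {
 *		struct perf_event_header *hdr = data + (tail % data_size);
 *		... consume the record ...
 *		tail += hdr->size;
 *	}
 *
 *	pc->data_tail = tail;		kernel may now overwrite this space
 */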

#define PERF_RECORD_MISC_CPUMODE_MASK		(7 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
#define PERF_RECORD_MISC_KERNEL			(1 << 0)
#define PERF_RECORD_MISC_USER			(2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
#define PERF_RECORD_MISC_GUEST_KERNEL		(4 << 0)
#define PERF_RECORD_MISC_GUEST_USER		(5 << 0)

/*
 * Indicates that the content of PERF_SAMPLE_IP points to
 * the actual instruction that triggered the event. See also
 * perf_event_attr::precise_ip.
 */
#define PERF_RECORD_MISC_EXACT_IP		(1 << 14)

/*
 * Reserve the last bit to indicate some extended misc field
 */
#define PERF_RECORD_MISC_EXT_RESERVED		(1 << 15)

struct perf_event_header {
	__u32 type;
	__u16 misc;
	__u16 size;
};

enum perf_event_type {

	/*
	 * If perf_event_attr.sample_id_all is set then all event types will
	 * have the sample_type-selected fields related to where/when
	 * (identity) an event took place (TID, TIME, ID, CPU, STREAM_ID),
	 * described in PERF_RECORD_SAMPLE below. They will be stashed just
	 * after the perf_event_header and the fields already present for
	 * the existing record types, i.e. at the end of the payload. That
	 * way a newer perf.data file will be supported by older perf tools,
	 * with these new optional fields being ignored.
	 *
	 * The MMAP events record the PROT_EXEC mappings so that we can
	 * correlate user-space IPs to code. They have the following structure:
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	u64				addr;
	 *	u64				len;
	 *	u64				pgoff;
	 *	char				filename[];
	 * };
	 */
	PERF_RECORD_MMAP = 1,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				id;
	 *	u64				lost;
	 * };
	 */
	PERF_RECORD_LOST = 2,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	char				comm[];
	 * };
	 */
	PERF_RECORD_COMM = 3,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 *	u64				time;
	 * };
	 */
	PERF_RECORD_EXIT = 4,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				time;
	 *	u64				id;
	 *	u64				stream_id;
	 * };
	 */
	PERF_RECORD_THROTTLE = 5,
	PERF_RECORD_UNTHROTTLE = 6,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 *	u64				time;
	 * };
	 */
	PERF_RECORD_FORK = 7,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, tid;
	 *
	 *	struct read_format		values;
	 * };
	 */
	PERF_RECORD_READ = 8,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	{ u64			ip;	  } && PERF_SAMPLE_IP
	 *	{ u32			pid, tid; } && PERF_SAMPLE_TID
	 *	{ u64			time;     } && PERF_SAMPLE_TIME
	 *	{ u64			addr;     } && PERF_SAMPLE_ADDR
	 *	{ u64			id;	  } && PERF_SAMPLE_ID
	 *	{ u64			stream_id;} && PERF_SAMPLE_STREAM_ID
	 *	{ u32			cpu, res; } && PERF_SAMPLE_CPU
	 *	{ u64			period;   } && PERF_SAMPLE_PERIOD
	 *
	 *	{ struct read_format	values;	  } && PERF_SAMPLE_READ
	 *
	 *	{ u64			nr,
	 *	  u64			ips[nr];  } && PERF_SAMPLE_CALLCHAIN
	 *
	 *	#
	 *	# The RAW record below is opaque data wrt the ABI
	 *	#
	 *	# That is, the ABI doesn't make any promises wrt to
	 *	# the stability of its content, it may vary depending
	 *	# on event, hardware, kernel version and phase of
	 *	# the moon.
	 *	#
	 *	# In other words, PERF_SAMPLE_RAW contents are not an ABI.
	 *	#
	 *
	 *	{ u32			size;
	 *	  char			data[size];} && PERF_SAMPLE_RAW
	 * };
	 */
	PERF_RECORD_SAMPLE = 9,

	PERF_RECORD_MAX, /* non-ABI */
};
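
/*
 * Example (illustrative sketch, not part of the original header): since
 * every record starts with a struct perf_event_header, a reader can walk
 * the mmap data stream generically by header.size and dispatch on
 * header.type (buffer wrap-around handling elided):
 *
 *	struct perf_event_header *hdr = base;
 *
 *	while ((void *)hdr < end) {
 *		switch (hdr->type) {
 *		case PERF_RECORD_SAMPLE:
 *			... fields as selected by attr.sample_type ...
 *			break;
 *		case PERF_RECORD_MMAP:
 *			... update the address-space map ...
 *			break;
 *		}
 *		hdr = (void *)hdr + hdr->size;
 *	}
 */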

enum perf_callchain_context {
	PERF_CONTEXT_HV			= (__u64)-32,
	PERF_CONTEXT_KERNEL		= (__u64)-128,
	PERF_CONTEXT_USER		= (__u64)-512,

	PERF_CONTEXT_GUEST		= (__u64)-2048,
	PERF_CONTEXT_GUEST_KERNEL	= (__u64)-2176,
	PERF_CONTEXT_GUEST_USER		= (__u64)-2560,

	PERF_CONTEXT_MAX		= (__u64)-4095,
};
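
/*
 * Note (illustrative, not part of the original header): these values
 * appear inline in the ips[] array of a PERF_SAMPLE_CALLCHAIN record and
 * mark which context the following addresses belong to. Since they sit
 * in the top 4095 values of the u64 range, a reader can separate markers
 * from addresses with a comparison like:
 *
 *	if (ip >= (__u64)PERF_CONTEXT_MAX)
 *		context = ip;		switch context for later frames
 *	else
 *		resolve(ip, context);	an ordinary return address
 */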

#define PERF_FLAG_FD_NO_GROUP	(1U << 0)
#define PERF_FLAG_FD_OUTPUT	(1U << 1)
#define PERF_FLAG_PID_CGROUP	(1U << 2) /* pid=cgroup id, per-cpu mode only */

#ifdef __KERNEL__
/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <linux/cgroup.h>
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif

struct perf_guest_info_callbacks {
	int (*is_in_guest)(void);
	int (*is_user_mode)(void);
	unsigned long (*get_guest_ip)(void);
};

#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/jump_label_ref.h>
#include <asm/atomic.h>
#include <asm/local.h>

#define PERF_MAX_STACK_DEPTH 255

struct perf_callchain_entry {
	__u64 nr;
	__u64 ip[PERF_MAX_STACK_DEPTH];
};

struct perf_raw_record {
	u32 size;
	void *data;
};

struct perf_branch_entry {
	__u64 from;
	__u64 to;
	__u64 flags;
};

struct perf_branch_stack {
	__u64 nr;
	struct perf_branch_entry entries[0];
};

struct task_struct;

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
	union {
		struct { /* hardware */
			u64 config;
			u64 last_tag;
			unsigned long config_base;
			unsigned long event_base;
			int idx;
			int last_cpu;
		};
		struct { /* software */
			struct hrtimer hrtimer;
		};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		struct { /* breakpoint */
			struct arch_hw_breakpoint info;
			struct list_head bp_list;
			/*
			 * Crufty hack to avoid the chicken and egg
			 * problem hw_breakpoint has with context
			 * creation and event initialization.
			 */
			struct task_struct *bp_target;
		};
#endif
	};
	int state;
	local64_t prev_count;
	u64 sample_period;
	u64 last_period;
	local64_t period_left;
	u64 interrupts;

	u64 freq_time_stamp;
	u64 freq_count_stamp;
#endif
};

/*
 * hw_perf_event::state flags
 */
#define PERF_HES_STOPPED	0x01 /* the counter is stopped */
#define PERF_HES_UPTODATE	0x02 /* event->count up-to-date */
#define PERF_HES_ARCH		0x04

struct perf_event;

/*
 * Common implementation detail of pmu::{start,commit,cancel}_txn
 */
#define PERF_EVENT_TXN 0x1

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
	struct list_head entry;

	struct device *dev;
	char *name;
	int type;

	int * __percpu pmu_disable_count;
	struct perf_cpu_context * __percpu pmu_cpu_context;
	int task_ctx_nr;

	/*
	 * Fully disable/enable this PMU, can be used to protect from the PMI
	 * as well as for lazy/batch writing of the MSRs.
	 */
	void (*pmu_enable)(struct pmu *pmu);	/* optional */
	void (*pmu_disable)(struct pmu *pmu);	/* optional */

	/*
	 * Try and initialize the event for this PMU.
	 * Should return -ENOENT when the @event doesn't match this PMU.
	 */
	int (*event_init)(struct perf_event *event);

#define PERF_EF_START	0x01 /* start the counter when adding */
#define PERF_EF_RELOAD	0x02 /* reload the counter when starting */
#define PERF_EF_UPDATE	0x04 /* update the counter when stopping */

	/*
	 * Adds/Removes a counter to/from the PMU, can be done inside
	 * a transaction, see the ->*_txn() methods.
	 */
	int (*add)(struct perf_event *event, int flags);
	void (*del)(struct perf_event *event, int flags);

	/*
	 * Starts/Stops a counter present on the PMU. The PMI handler
	 * should stop the counter when perf_event_overflow() returns
	 * !0. ->start() will be used to continue.
	 */
	void (*start)(struct perf_event *event, int flags);
	void (*stop)(struct perf_event *event, int flags);

	/*
	 * Updates the counter value of the event.
	 */
	void (*read)(struct perf_event *event);

	/*
	 * Group events scheduling is treated as a transaction: add
	 * group events as a whole and perform one schedulability test.
	 * If the test fails, roll back the whole group.
	 *
	 * Start the transaction; after this, ->add() doesn't need to
	 * do schedulability tests.
	 */
	void (*start_txn)(struct pmu *pmu);	/* optional */

	/*
	 * If ->start_txn() disabled the ->add() schedulability test
	 * then ->commit_txn() is required to perform one. On success
	 * the transaction is closed. On error the transaction is kept
	 * open until ->cancel_txn() is called.
	 */
	int (*commit_txn)(struct pmu *pmu);	/* optional */

	/*
	 * Will cancel the transaction; assumes ->del() is called
	 * for each successful ->add() during the transaction.
	 */
	void (*cancel_txn)(struct pmu *pmu);	/* optional */
};
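
/*
 * Sketch (illustrative only, not the actual group scheduling code): the
 * core uses the transaction methods roughly like this when adding an
 * event group:
 *
 *	pmu->start_txn(pmu);
 *
 *	for each event in the group
 *		if (pmu->add(event, PERF_EF_START))
 *			goto error;
 *
 *	if (!pmu->commit_txn(pmu))
 *		return 0;		group is scheduled in
 * error:
 *	pmu->del() the events already added;
 *	pmu->cancel_txn(pmu);
 */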

/**
 * enum perf_event_active_state - the states of an event
 */
enum perf_event_active_state {
	PERF_EVENT_STATE_ERROR = -2,
	PERF_EVENT_STATE_OFF = -1,
	PERF_EVENT_STATE_INACTIVE = 0,
	PERF_EVENT_STATE_ACTIVE = 1,
};

struct file;

#define PERF_BUFFER_WRITABLE 0x01

struct perf_buffer {
	atomic_t refcount;
	struct rcu_head rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct work;
	int page_order;		/* allocation order */
#endif
	int nr_pages;		/* nr of data pages */
	int writable;		/* are we writable */

	atomic_t poll;		/* POLL_ for wakeups */

	local_t head;		/* write position */
	local_t nest;		/* nested writers */
	local_t events;		/* event limit */
	local_t wakeup;		/* wakeup stamp */
	local_t lost;		/* nr records lost */

	long watermark;		/* wakeup watermark */

	struct perf_event_mmap_page *user_page;
	void *data_pages[0];
};

struct perf_sample_data;

typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
					struct perf_sample_data *,
					struct pt_regs *regs);

enum perf_group_flag {
	PERF_GROUP_SOFTWARE = 0x1,
};

#define SWEVENT_HLIST_BITS 8
#define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS)

struct swevent_hlist {
	struct hlist_head heads[SWEVENT_HLIST_SIZE];
	struct rcu_head rcu_head;
};

#define PERF_ATTACH_CONTEXT	0x01
#define PERF_ATTACH_GROUP	0x02
#define PERF_ATTACH_TASK	0x04

#ifdef CONFIG_CGROUP_PERF
/*
 * perf_cgroup_info keeps track of time_enabled for a cgroup.
 * This is a per-cpu dynamically allocated data structure.
 */
struct perf_cgroup_info {
	u64 time;
	u64 timestamp;
};

struct perf_cgroup {
	struct cgroup_subsys_state css;
	struct perf_cgroup_info *info;	/* timing info, one per cpu */
};
#endif

/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
	struct list_head group_entry;
	struct list_head event_entry;
	struct list_head sibling_list;
	struct hlist_node hlist_entry;
	int nr_siblings;
	int group_flags;
	struct perf_event *group_leader;
	struct pmu *pmu;

	enum perf_event_active_state state;
	unsigned int attach_state;
	local64_t count;
	atomic64_t child_count;

	/*
	 * These are the total time in nanoseconds that the event
	 * has been enabled (i.e. eligible to run, and the task has
	 * been scheduled in, if this is a per-task event)
	 * and running (scheduled onto the CPU), respectively.
	 *
	 * They are computed from tstamp_enabled, tstamp_running and
	 * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
	 */
	u64 total_time_enabled;
	u64 total_time_running;

	/*
	 * These are timestamps used for computing total_time_enabled
	 * and total_time_running when the event is in INACTIVE or
	 * ACTIVE state, measured in nanoseconds from an arbitrary point
	 * in time.
	 * tstamp_enabled: the notional time when the event was enabled
	 * tstamp_running: the notional time when the event was scheduled on
	 * tstamp_stopped: in INACTIVE state, the notional time when the
	 *		   event was scheduled off.
	 */
	u64 tstamp_enabled;
	u64 tstamp_running;
	u64 tstamp_stopped;

	/*
	 * timestamp shadows the actual context timing but it can
	 * be safely used in NMI interrupt context. It reflects the
	 * context time as it was when the event was last scheduled in.
	 *
	 * ctx_time already accounts for ctx->timestamp. Therefore to
	 * compute ctx_time for a sample, simply add perf_clock().
	 */
	u64 shadow_ctx_time;

	struct perf_event_attr attr;
	u16 header_size;
	u16 id_header_size;
	u16 read_size;
	struct hw_perf_event hw;

	struct perf_event_context *ctx;
	struct file *filp;

	/*
	 * These accumulate total time (in nanoseconds) that children
	 * events have been enabled and running, respectively.
	 */
	atomic64_t child_total_time_enabled;
	atomic64_t child_total_time_running;

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex child_mutex;
	struct list_head child_list;
	struct perf_event *parent;

	int oncpu;
	int cpu;

	struct list_head owner_entry;
	struct task_struct *owner;

	/* mmap bits */
	struct mutex mmap_mutex;
	atomic_t mmap_count;
	int mmap_locked;
	struct user_struct *mmap_user;
	struct perf_buffer *buffer;

	/* poll related */
	wait_queue_head_t waitq;
	struct fasync_struct *fasync;

	/* delayed work for NMIs and such */
	int pending_wakeup;
	int pending_kill;
	int pending_disable;
	struct irq_work pending;

	atomic_t event_limit;

	void (*destroy)(struct perf_event *);
	struct rcu_head rcu_head;

	struct pid_namespace *ns;
	u64 id;

	perf_overflow_handler_t overflow_handler;

#ifdef CONFIG_EVENT_TRACING
	struct ftrace_event_call *tp_event;
	struct event_filter *filter;
#endif

#ifdef CONFIG_CGROUP_PERF
	struct perf_cgroup *cgrp;	/* cgroup the event is attached to */
	int cgrp_defer_enabled;
#endif

#endif /* CONFIG_PERF_EVENTS */
};

enum perf_event_context_type {
	task_context,
	cpu_context,
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
	enum perf_event_context_type type;
	struct pmu *pmu;

	/*
	 * Protect the states of the events in the list,
	 * nr_active, and the list:
	 */
	raw_spinlock_t lock;

	/*
	 * Protect the list of events. Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex mutex;

	struct list_head pinned_groups;
	struct list_head flexible_groups;
	struct list_head event_list;
	int nr_events;
	int nr_active;
	int is_active;
	int nr_stat;
	int rotate_disable;
	atomic_t refcount;
	struct task_struct *task;

	/*
	 * Context clock, runs when context enabled.
	 */
	u64 time;
	u64 timestamp;

	/*
	 * These fields let us detect when two contexts have both
	 * been cloned (inherited) from a common ancestor.
	 */
	struct perf_event_context *parent_ctx;
	u64 parent_gen;
	u64 generation;
	int pin_count;
	struct rcu_head rcu_head;
	int nr_cgroups;		/* cgroup events present */
};

/*
 * Number of contexts where an event can trigger:
 *	task, softirq, hardirq, nmi.
 */
#define PERF_NR_CONTEXTS 4

/**
 * struct perf_cpu_context - per-CPU event context structure
 */
struct perf_cpu_context {
	struct perf_event_context ctx;
	struct perf_event_context *task_ctx;
	int active_oncpu;
	int exclusive;
	struct list_head rotation_list;
	int jiffies_interval;
	struct pmu *active_pmu;
#ifdef CONFIG_CGROUP_PERF
	struct perf_cgroup *cgrp;
#endif
};

struct perf_output_handle {
	struct perf_event *event;
	struct perf_buffer *buffer;
	unsigned long wakeup;
	unsigned long size;
	void *addr;
	int page;
	int nmi;
	int sample;
};

#ifdef CONFIG_PERF_EVENTS

extern int perf_pmu_register(struct pmu *pmu, char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);

extern int perf_num_counters(void);
extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
				 int cpu,
				 struct task_struct *task,
				 perf_overflow_handler_t callback);
extern u64 perf_event_read_value(struct perf_event *event,
				 u64 *enabled, u64 *running);
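
/*
 * Example (illustrative in-kernel sketch; the overflow handler name is
 * hypothetical and IS_ERR() checking is elided): creating and releasing
 * a counter with the helpers declared above:
 *
 *	struct perf_event *event;
 *	struct perf_event_attr attr = {
 *		.type          = PERF_TYPE_HARDWARE,
 *		.config        = PERF_COUNT_HW_CPU_CYCLES,
 *		.size          = sizeof(attr),
 *		.sample_period = 100000,
 *	};
 *
 *	event = perf_event_create_kernel_counter(&attr, cpu, NULL,
 *						 my_overflow_handler);
 *	...
 *	perf_event_release_kernel(event);
 */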

struct perf_sample_data {
	u64 type;

	u64 ip;
	struct {
		u32 pid;
		u32 tid;
	} tid_entry;
	u64 time;
	u64 addr;
	u64 id;
	u64 stream_id;
	struct {
		u32 cpu;
		u32 reserved;
	} cpu_entry;
	u64 period;
	struct perf_callchain_entry *callchain;
	struct perf_raw_record *raw;
};

static inline
void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
{
	data->addr = addr;
	data->raw = NULL;
}

extern void perf_output_sample(struct perf_output_handle *handle,
			       struct perf_event_header *header,
			       struct perf_sample_data *data,
			       struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_event *event,
				struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event, int nmi,
			       struct perf_sample_data *data,
			       struct pt_regs *regs);

static inline bool is_sampling_event(struct perf_event *event)
{
	return event->attr.sample_period != 0;
}

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
	return event->pmu->task_ctx_nr == perf_sw_context;
}

extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);

#ifndef perf_arch_fetch_caller_regs
static inline void
perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif

/*
 * Take a snapshot of the regs. Skip ip and frame pointer to
 * the nth caller. We only need a few of the regs:
 * - ip for PERF_SAMPLE_IP
 * - cs for user_mode() tests
 * - bp for callchains
 * - eflags, for future purposes, just in case
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(*regs));

	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}

static __always_inline void
perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
{
	struct pt_regs hot_regs;

	JUMP_LABEL(&perf_swevent_enabled[event_id], have_event);
	return;

have_event:
	if (!regs) {
		perf_fetch_caller_regs(&hot_regs);
		regs = &hot_regs;
	}
	__perf_sw_event(event_id, nr, nmi, regs, addr);
}

extern atomic_t perf_sched_events;

static inline void perf_event_task_sched_in(struct task_struct *task)
{
	COND_STMT(&perf_sched_events, __perf_event_task_sched_in(task));
}

static inline
void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
{
	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);

	COND_STMT(&perf_sched_events, __perf_event_task_sched_out(task, next));
}

extern void perf_event_mmap(struct vm_area_struct *vma);
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);

extern void perf_event_comm(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);

/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

extern void perf_callchain_user(struct perf_callchain_entry *entry,
				struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry *entry,
				  struct pt_regs *regs);

static inline void
perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
	if (entry->nr < PERF_MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}

extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;

static inline bool perf_paranoid_tracepoint_raw(void)
{
	return sysctl_perf_event_paranoid > -1;
}

static inline bool perf_paranoid_cpu(void)
{
	return sysctl_perf_event_paranoid > 0;
}

static inline bool perf_paranoid_kernel(void)
{
	return sysctl_perf_event_paranoid > 1;
}

extern void perf_event_init(void);
extern void perf_tp_event(u64 addr, u64 count, void *record,
			  int entry_size, struct pt_regs *regs,
			  struct hlist_head *head, int rctx);
extern void perf_bp_event(struct perf_event *event, void *data);

#ifndef perf_misc_flags
#define perf_misc_flags(regs)	(user_mode(regs) ? PERF_RECORD_MISC_USER : \
				 PERF_RECORD_MISC_KERNEL)
#define perf_instruction_pointer(regs)	instruction_pointer(regs)
#endif

extern int perf_output_begin(struct perf_output_handle *handle,
			     struct perf_event *event, unsigned int size,
			     int nmi, int sample);
extern void perf_output_end(struct perf_output_handle *handle);
extern void perf_output_copy(struct perf_output_handle *handle,
			     const void *buf, unsigned int len);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern void perf_event_task_tick(void);
#else
static inline void
perf_event_task_sched_in(struct task_struct *task) { }
static inline void
perf_event_task_sched_out(struct task_struct *task,
			  struct task_struct *next) { }
static inline int perf_event_init_task(struct task_struct *child) { return 0; }
static inline void perf_event_exit_task(struct task_struct *child) { }
static inline void perf_event_free_task(struct task_struct *task) { }
static inline void perf_event_delayed_put(struct task_struct *task) { }
static inline void perf_event_print_debug(void) { }
static inline int perf_event_task_disable(void) { return -EINVAL; }
static inline int perf_event_task_enable(void) { return -EINVAL; }

static inline void
perf_sw_event(u32 event_id, u64 nr, int nmi,
	      struct pt_regs *regs, u64 addr) { }
static inline void
perf_bp_event(struct perf_event *event, void *data) { }

static inline int perf_register_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks) { return 0; }
static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks) { return 0; }

static inline void perf_event_mmap(struct vm_area_struct *vma) { }
static inline void perf_event_comm(struct task_struct *tsk) { }
static inline void perf_event_fork(struct task_struct *tsk) { }
static inline void perf_event_init(void) { }
static inline int perf_swevent_get_recursion_context(void) { return -1; }
static inline void perf_swevent_put_recursion_context(int rctx) { }
static inline void perf_event_enable(struct perf_event *event) { }
static inline void perf_event_disable(struct perf_event *event) { }
static inline void perf_event_task_tick(void) { }
#endif

#define perf_output_put(handle, x) \
	perf_output_copy((handle), &(x), sizeof(x))

/*
 * This has to have a higher priority than migration_notifier in sched.c.
 */
#define perf_cpu_notifier(fn)						\
do {									\
	static struct notifier_block fn##_nb __cpuinitdata =		\
		{ .notifier_call = fn, .priority = CPU_PRI_PERF };	\
	fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,			\
		(void *)(unsigned long)smp_processor_id());		\
	fn(&fn##_nb, (unsigned long)CPU_STARTING,			\
		(void *)(unsigned long)smp_processor_id());		\
	fn(&fn##_nb, (unsigned long)CPU_ONLINE,				\
		(void *)(unsigned long)smp_processor_id());		\
	register_cpu_notifier(&fn##_nb);				\
} while (0)
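
/*
 * Note (illustrative, not part of the original header): besides
 * registering @fn for hotplug notifications, the macro invokes it for
 * the current CPU, so the caller also gets set up on the already-online
 * boot CPU. A hypothetical user:
 *
 *	static int my_perf_notifier(struct notifier_block *nb,
 *				    unsigned long action, void *hcpu)
 *	{
 *		...
 *		return NOTIFY_OK;
 *	}
 *
 *	perf_cpu_notifier(my_perf_notifier);
 */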

#endif /* __KERNEL__ */
#endif /* _LINUX_PERF_EVENT_H */