perf_event.h

/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * attr.type
 */
enum perf_type_id {
        PERF_TYPE_HARDWARE                      = 0,
        PERF_TYPE_SOFTWARE                      = 1,
        PERF_TYPE_TRACEPOINT                    = 2,
        PERF_TYPE_HW_CACHE                      = 3,
        PERF_TYPE_RAW                           = 4,
        PERF_TYPE_BREAKPOINT                    = 5,

        PERF_TYPE_MAX,                          /* non-ABI */
};

/*
 * Generalized performance event event_id types, used by the
 * attr.config parameter of the sys_perf_event_open()
 * syscall:
 */
enum perf_hw_id {
        /*
         * Common hardware events, generalized by the kernel:
         */
        PERF_COUNT_HW_CPU_CYCLES                = 0,
        PERF_COUNT_HW_INSTRUCTIONS              = 1,
        PERF_COUNT_HW_CACHE_REFERENCES          = 2,
        PERF_COUNT_HW_CACHE_MISSES              = 3,
        PERF_COUNT_HW_BRANCH_INSTRUCTIONS       = 4,
        PERF_COUNT_HW_BRANCH_MISSES             = 5,
        PERF_COUNT_HW_BUS_CYCLES                = 6,

        PERF_COUNT_HW_MAX,                      /* non-ABI */
};

/*
 * Generalized hardware cache events:
 *
 *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x
 *       { read, write, prefetch } x
 *       { accesses, misses }
 */
enum perf_hw_cache_id {
        PERF_COUNT_HW_CACHE_L1D                 = 0,
        PERF_COUNT_HW_CACHE_L1I                 = 1,
        PERF_COUNT_HW_CACHE_LL                  = 2,
        PERF_COUNT_HW_CACHE_DTLB                = 3,
        PERF_COUNT_HW_CACHE_ITLB                = 4,
        PERF_COUNT_HW_CACHE_BPU                 = 5,

        PERF_COUNT_HW_CACHE_MAX,                /* non-ABI */
};

enum perf_hw_cache_op_id {
        PERF_COUNT_HW_CACHE_OP_READ             = 0,
        PERF_COUNT_HW_CACHE_OP_WRITE            = 1,
        PERF_COUNT_HW_CACHE_OP_PREFETCH         = 2,

        PERF_COUNT_HW_CACHE_OP_MAX,             /* non-ABI */
};

enum perf_hw_cache_op_result_id {
        PERF_COUNT_HW_CACHE_RESULT_ACCESS       = 0,
        PERF_COUNT_HW_CACHE_RESULT_MISS         = 1,

        PERF_COUNT_HW_CACHE_RESULT_MAX,         /* non-ABI */
};
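
/*
 * Example (illustrative, not itself part of this header): for
 * PERF_TYPE_HW_CACHE events, attr.config is composed from the three
 * enums above as (id) | (op_id << 8) | (op_result_id << 16), so L1-D
 * read misses would be selected with:
 *
 *      attr.type   = PERF_TYPE_HW_CACHE;
 *      attr.config = PERF_COUNT_HW_CACHE_L1D |
 *                    (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *                    (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 */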

/*
 * Special "software" events provided by the kernel, even if the hardware
 * does not support performance events. These events measure various
 * software conditions in the kernel (and allow profiling them as
 * well):
 */
enum perf_sw_ids {
        PERF_COUNT_SW_CPU_CLOCK                 = 0,
        PERF_COUNT_SW_TASK_CLOCK                = 1,
        PERF_COUNT_SW_PAGE_FAULTS               = 2,
        PERF_COUNT_SW_CONTEXT_SWITCHES          = 3,
        PERF_COUNT_SW_CPU_MIGRATIONS            = 4,
        PERF_COUNT_SW_PAGE_FAULTS_MIN           = 5,
        PERF_COUNT_SW_PAGE_FAULTS_MAJ           = 6,
        PERF_COUNT_SW_ALIGNMENT_FAULTS          = 7,
        PERF_COUNT_SW_EMULATION_FAULTS          = 8,

        PERF_COUNT_SW_MAX,                      /* non-ABI */
};
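
/*
 * Example (illustrative): software events are selected just like the
 * hardware ones, only with PERF_TYPE_SOFTWARE, e.g. to count context
 * switches:
 *
 *      attr.type   = PERF_TYPE_SOFTWARE;
 *      attr.config = PERF_COUNT_SW_CONTEXT_SWITCHES;
 */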

/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_event_sample_format {
        PERF_SAMPLE_IP                          = 1U << 0,
        PERF_SAMPLE_TID                         = 1U << 1,
        PERF_SAMPLE_TIME                        = 1U << 2,
        PERF_SAMPLE_ADDR                        = 1U << 3,
        PERF_SAMPLE_READ                        = 1U << 4,
        PERF_SAMPLE_CALLCHAIN                   = 1U << 5,
        PERF_SAMPLE_ID                          = 1U << 6,
        PERF_SAMPLE_CPU                         = 1U << 7,
        PERF_SAMPLE_PERIOD                      = 1U << 8,
        PERF_SAMPLE_STREAM_ID                   = 1U << 9,
        PERF_SAMPLE_RAW                         = 1U << 10,

        PERF_SAMPLE_MAX = 1U << 11,             /* non-ABI */
};
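
/*
 * Example (illustrative): the bits are OR-ed together; a profiler that
 * wants the instruction pointer, pid/tid and a timestamp in each
 * PERF_RECORD_SAMPLE would set:
 *
 *      attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 *                         PERF_SAMPLE_TIME;
 */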

/*
 * The format of the data returned by read() on a perf event fd,
 * as specified by attr.read_format:
 *
 * struct read_format {
 *      { u64           value;
 *        { u64         time_enabled; } && PERF_FORMAT_ENABLED
 *        { u64         time_running; } && PERF_FORMAT_RUNNING
 *        { u64         id;           } && PERF_FORMAT_ID
 *      } && !PERF_FORMAT_GROUP
 *
 *      { u64           nr;
 *        { u64         time_enabled; } && PERF_FORMAT_ENABLED
 *        { u64         time_running; } && PERF_FORMAT_RUNNING
 *        { u64         value;
 *          { u64       id;           } && PERF_FORMAT_ID
 *        }             cntr[nr];
 *      } && PERF_FORMAT_GROUP
 * };
 */
enum perf_event_read_format {
        PERF_FORMAT_TOTAL_TIME_ENABLED          = 1U << 0,
        PERF_FORMAT_TOTAL_TIME_RUNNING          = 1U << 1,
        PERF_FORMAT_ID                          = 1U << 2,
        PERF_FORMAT_GROUP                       = 1U << 3,

        PERF_FORMAT_MAX = 1U << 4,              /* non-ABI */
};
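
/*
 * Example (illustrative, userspace): reading a single, non-group
 * counter opened with PERF_FORMAT_TOTAL_TIME_ENABLED |
 * PERF_FORMAT_TOTAL_TIME_RUNNING, and scaling the raw count the usual
 * way when the event was multiplexed:
 *
 *      struct { __u64 value, time_enabled, time_running; } rf;
 *
 *      read(fd, &rf, sizeof(rf));
 *      if (rf.time_running && rf.time_running < rf.time_enabled)
 *              rf.value = (__u64)(rf.value *
 *                         ((double)rf.time_enabled / rf.time_running));
 */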

#define PERF_ATTR_SIZE_VER0     64      /* sizeof first published struct */

/*
 * Hardware event_id to monitor via a performance monitoring event:
 */
struct perf_event_attr {

        /*
         * Major type: hardware/software/tracepoint/etc.
         */
        __u32                   type;

        /*
         * Size of the attr structure, for fwd/bwd compat.
         */
        __u32                   size;

        /*
         * Type specific configuration information.
         */
        __u64                   config;

        union {
                __u64           sample_period;
                __u64           sample_freq;
        };

        __u64                   sample_type;
        __u64                   read_format;

        __u64                   disabled       :  1, /* off by default        */
                                inherit        :  1, /* children inherit it   */
                                pinned         :  1, /* must always be on PMU */
                                exclusive      :  1, /* only group on PMU     */
                                exclude_user   :  1, /* don't count user      */
                                exclude_kernel :  1, /* ditto kernel          */
                                exclude_hv     :  1, /* ditto hypervisor      */
                                exclude_idle   :  1, /* don't count when idle */
                                mmap           :  1, /* include mmap data     */
                                comm           :  1, /* include comm data     */
                                freq           :  1, /* use freq, not period  */
                                inherit_stat   :  1, /* per task counts       */
                                enable_on_exec :  1, /* next exec enables     */
                                task           :  1, /* trace fork/exit       */
                                watermark      :  1, /* wakeup_watermark      */
                                /*
                                 * precise_ip:
                                 *
                                 *  0 - SAMPLE_IP can have arbitrary skid
                                 *  1 - SAMPLE_IP must have constant skid
                                 *  2 - SAMPLE_IP requested to have 0 skid
                                 *  3 - SAMPLE_IP must have 0 skid
                                 *
                                 *  See also PERF_RECORD_MISC_EXACT_IP
                                 */
                                precise_ip     :  2, /* skid constraint       */
                                mmap_data      :  1, /* non-exec mmap data    */

                                __reserved_1   : 46;

        union {
                __u32           wakeup_events;    /* wakeup every n events */
                __u32           wakeup_watermark; /* bytes before wakeup   */
        };

        __u32                   bp_type;
        __u64                   bp_addr;
        __u64                   bp_len;
};
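
/*
 * Example (illustrative, userspace): a minimal sketch of opening a
 * counter with this attr layout. glibc provides no wrapper for this
 * syscall, so syscall(2) is used directly; error handling is omitted.
 *
 *      #include <string.h>
 *      #include <unistd.h>
 *      #include <sys/syscall.h>
 *      #include <linux/perf_event.h>
 *
 *      struct perf_event_attr attr;
 *      long long count;
 *      int fd;
 *
 *      memset(&attr, 0, sizeof(attr));
 *      attr.type           = PERF_TYPE_HARDWARE;
 *      attr.size           = sizeof(attr);
 *      attr.config         = PERF_COUNT_HW_INSTRUCTIONS;
 *      attr.disabled       = 1;
 *      attr.exclude_kernel = 1;
 *
 *      // current task, any CPU, no group leader, no flags:
 *      fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *
 * The counter starts disabled; enable it with the
 * PERF_EVENT_IOC_ENABLE ioctl defined below, run the measured code,
 * then:
 *
 *      read(fd, &count, sizeof(count));
 */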

/*
 * Ioctls that can be done on a perf event fd:
 */
#define PERF_EVENT_IOC_ENABLE           _IO ('$', 0)
#define PERF_EVENT_IOC_DISABLE          _IO ('$', 1)
#define PERF_EVENT_IOC_REFRESH          _IO ('$', 2)
#define PERF_EVENT_IOC_RESET            _IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD           _IOW('$', 4, __u64)
#define PERF_EVENT_IOC_SET_OUTPUT       _IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER       _IOW('$', 6, char *)

enum perf_event_ioc_flags {
        PERF_IOC_FLAG_GROUP             = 1U << 0,
};
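
/*
 * Example (illustrative): a counter opened with attr.disabled = 1 is
 * typically reset and enabled around the measured region, optionally
 * applying the ioctl to the whole event group:
 *
 *      ioctl(fd, PERF_EVENT_IOC_RESET,   PERF_IOC_FLAG_GROUP);
 *      ioctl(fd, PERF_EVENT_IOC_ENABLE,  PERF_IOC_FLAG_GROUP);
 *      ...measured code...
 *      ioctl(fd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);
 */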

/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_event_mmap_page {
        __u32   version;                /* version number of this structure */
        __u32   compat_version;         /* lowest version this is compat with */

        /*
         * Bits needed to read the hw events in user-space.
         *
         *   u32 seq;
         *   s64 count;
         *
         *   do {
         *     seq = pc->lock;
         *
         *     barrier()
         *     if (pc->index) {
         *       count = pmc_read(pc->index - 1);
         *       count += pc->offset;
         *     } else
         *       goto regular_read;
         *
         *     barrier();
         *   } while (pc->lock != seq);
         *
         * NOTE: for obvious reasons this only works on self-monitoring
         *       processes.
         */
        __u32   lock;                   /* seqlock for synchronization */
        __u32   index;                  /* hardware event identifier */
        __s64   offset;                 /* add to hardware event value */
        __u64   time_enabled;           /* time event active */
        __u64   time_running;           /* time event on cpu */

        /*
         * Hole for extension of the self monitor capabilities
         */

        __u64   __reserved[123];        /* align to 1k */

        /*
         * Control data for the mmap() data buffer.
         *
         * User-space reading the @data_head value should issue an rmb(), on
         * SMP capable platforms, after reading this value -- see
         * perf_event_wakeup().
         *
         * When the mapping is PROT_WRITE the @data_tail value should be
         * written by userspace to reflect the last read data. In this case
         * the kernel will not over-write unread data.
         */
        __u64   data_head;              /* head in the data section */
        __u64   data_tail;              /* user-space written tail */
};
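
/*
 * Example (illustrative, userspace): consuming the ring buffer behind
 * this control page. The mapping is 1 + 2^n pages, the first one being
 * this struct; "mmap_base", "mask" and "rmb()" (a read memory barrier
 * appropriate to the architecture) are assumptions of the sketch, not
 * part of the ABI. Records that wrap the buffer edge would need to be
 * copied out first:
 *
 *      struct perf_event_mmap_page *pc = mmap_base;
 *      char *data = (char *)mmap_base + page_size;
 *      __u64 head, tail = pc->data_tail;
 *
 *      head = pc->data_head;
 *      rmb();                          // pairs with the kernel's write
 *      while (tail < head) {
 *              struct perf_event_header *hdr =
 *                      (void *)(data + (tail & mask));
 *              ...process one record...
 *              tail += hdr->size;
 *      }
 *      pc->data_tail = tail;           // tell the kernel what we consumed
 */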

#define PERF_RECORD_MISC_CPUMODE_MASK           (7 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN        (0 << 0)
#define PERF_RECORD_MISC_KERNEL                 (1 << 0)
#define PERF_RECORD_MISC_USER                   (2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR             (3 << 0)
#define PERF_RECORD_MISC_GUEST_KERNEL           (4 << 0)
#define PERF_RECORD_MISC_GUEST_USER             (5 << 0)

/*
 * Indicates that the content of PERF_SAMPLE_IP points to
 * the actual instruction that triggered the event. See also
 * perf_event_attr::precise_ip.
 */
#define PERF_RECORD_MISC_EXACT_IP               (1 << 14)

/*
 * Reserve the last bit to indicate some extended misc field
 */
#define PERF_RECORD_MISC_EXT_RESERVED           (1 << 15)

struct perf_event_header {
        __u32   type;
        __u16   misc;
        __u16   size;
};

enum perf_event_type {

        /*
         * The MMAP events record the PROT_EXEC mappings so that we can
         * correlate userspace IPs to code. They have the following structure:
         *
         * struct {
         *      struct perf_event_header        header;
         *
         *      u32                             pid, tid;
         *      u64                             addr;
         *      u64                             len;
         *      u64                             pgoff;
         *      char                            filename[];
         * };
         */
        PERF_RECORD_MMAP                        = 1,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u64                             id;
         *      u64                             lost;
         * };
         */
        PERF_RECORD_LOST                        = 2,

        /*
         * struct {
         *      struct perf_event_header        header;
         *
         *      u32                             pid, tid;
         *      char                            comm[];
         * };
         */
        PERF_RECORD_COMM                        = 3,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u32                             pid, ppid;
         *      u32                             tid, ptid;
         *      u64                             time;
         * };
         */
        PERF_RECORD_EXIT                        = 4,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u64                             time;
         *      u64                             id;
         *      u64                             stream_id;
         * };
         */
        PERF_RECORD_THROTTLE                    = 5,
        PERF_RECORD_UNTHROTTLE                  = 6,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u32                             pid, ppid;
         *      u32                             tid, ptid;
         *      u64                             time;
         * };
         */
        PERF_RECORD_FORK                        = 7,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u32                             pid, tid;
         *
         *      struct read_format              values;
         * };
         */
        PERF_RECORD_READ                        = 8,

        /*
         * struct {
         *      struct perf_event_header        header;
         *
         *      { u64                   ip;       } && PERF_SAMPLE_IP
         *      { u32                   pid, tid; } && PERF_SAMPLE_TID
         *      { u64                   time;     } && PERF_SAMPLE_TIME
         *      { u64                   addr;     } && PERF_SAMPLE_ADDR
         *      { u64                   id;       } && PERF_SAMPLE_ID
         *      { u64                   stream_id;} && PERF_SAMPLE_STREAM_ID
         *      { u32                   cpu, res; } && PERF_SAMPLE_CPU
         *      { u64                   period;   } && PERF_SAMPLE_PERIOD
         *
         *      { struct read_format    values;   } && PERF_SAMPLE_READ
         *
         *      { u64                   nr,
         *        u64                   ips[nr];  } && PERF_SAMPLE_CALLCHAIN
         *
         *      #
         *      # The RAW record below is opaque data wrt the ABI
         *      #
         *      # That is, the ABI doesn't make any promises wrt the
         *      # stability of its content, it may vary depending
         *      # on event, hardware, kernel version and phase of
         *      # the moon.
         *      #
         *      # In other words, PERF_SAMPLE_RAW contents are not an ABI.
         *      #
         *
         *      { u32                   size;
         *        char                  data[size];} && PERF_SAMPLE_RAW
         * };
         */
        PERF_RECORD_SAMPLE                      = 9,

        PERF_RECORD_MAX,                        /* non-ABI */
};
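
/*
 * Example (illustrative): dispatching on header.type while walking the
 * ring buffer (see the sketch after struct perf_event_mmap_page). The
 * fields present in a PERF_RECORD_SAMPLE body depend on which
 * attr.sample_type bits were set, in the order listed above; here the
 * sketch assumes sample_type was PERF_SAMPLE_IP | PERF_SAMPLE_TIME:
 *
 *      struct perf_event_header *hdr = ...next record...;
 *
 *      switch (hdr->type) {
 *      case PERF_RECORD_SAMPLE: {
 *              __u64 *p   = (__u64 *)(hdr + 1);
 *              __u64 ip   = *p++;      // PERF_SAMPLE_IP
 *              __u64 time = *p++;      // PERF_SAMPLE_TIME
 *              ...
 *              break;
 *      }
 *      case PERF_RECORD_MMAP:
 *      case PERF_RECORD_COMM:
 *              ...
 *      }
 */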

enum perf_callchain_context {
        PERF_CONTEXT_HV                 = (__u64)-32,
        PERF_CONTEXT_KERNEL             = (__u64)-128,
        PERF_CONTEXT_USER               = (__u64)-512,

        PERF_CONTEXT_GUEST              = (__u64)-2048,
        PERF_CONTEXT_GUEST_KERNEL       = (__u64)-2176,
        PERF_CONTEXT_GUEST_USER         = (__u64)-2560,

        PERF_CONTEXT_MAX                = (__u64)-4095,
};
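
/*
 * Example (illustrative): these markers are interleaved with real
 * addresses in the PERF_SAMPLE_CALLCHAIN ips[] array to delimit which
 * context the frames that follow belong to, e.g.:
 *
 *      ips[] = { PERF_CONTEXT_KERNEL, k_ip0, k_ip1,
 *                PERF_CONTEXT_USER,   u_ip0, u_ip1, ... };
 */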

#define PERF_FLAG_FD_NO_GROUP   (1U << 0)
#define PERF_FLAG_FD_OUTPUT     (1U << 1)

#ifdef __KERNEL__
/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif

struct perf_guest_info_callbacks {
        int (*is_in_guest) (void);
        int (*is_user_mode) (void);
        unsigned long (*get_guest_ip) (void);
};

#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/jump_label_ref.h>
#include <asm/atomic.h>
#include <asm/local.h>

#define PERF_MAX_STACK_DEPTH    255

struct perf_callchain_entry {
        __u64                           nr;
        __u64                           ip[PERF_MAX_STACK_DEPTH];
};

struct perf_raw_record {
        u32                             size;
        void                            *data;
};

struct perf_branch_entry {
        __u64                           from;
        __u64                           to;
        __u64                           flags;
};

struct perf_branch_stack {
        __u64                           nr;
        struct perf_branch_entry        entries[0];
};

struct task_struct;

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
        union {
                struct { /* hardware */
                        u64             config;
                        u64             last_tag;
                        unsigned long   config_base;
                        unsigned long   event_base;
                        int             idx;
                        int             last_cpu;
                };
                struct { /* software */
                        struct hrtimer  hrtimer;
                };
#ifdef CONFIG_HAVE_HW_BREAKPOINT
                struct { /* breakpoint */
                        struct arch_hw_breakpoint       info;
                        struct list_head                bp_list;
                        /*
                         * Crufty hack to avoid the chicken and egg
                         * problem hw_breakpoint has with context
                         * creation and event initialization.
                         */
                        struct task_struct              *bp_target;
                };
#endif
        };
        int                             state;
        local64_t                       prev_count;
        u64                             sample_period;
        u64                             last_period;
        local64_t                       period_left;
        u64                             interrupts;

        u64                             freq_time_stamp;
        u64                             freq_count_stamp;
#endif
};

/*
 * hw_perf_event::state flags
 */
#define PERF_HES_STOPPED        0x01 /* the counter is stopped */
#define PERF_HES_UPTODATE       0x02 /* event->count up-to-date */
#define PERF_HES_ARCH           0x04

struct perf_event;

/*
 * Common implementation detail of pmu::{start,commit,cancel}_txn
 */
#define PERF_EVENT_TXN 0x1

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
        struct list_head                entry;

        int * __percpu                  pmu_disable_count;
        struct perf_cpu_context * __percpu pmu_cpu_context;
        int                             task_ctx_nr;

        /*
         * Fully disable/enable this PMU, can be used to protect from the PMI
         * as well as for lazy/batch writing of the MSRs.
         */
        void (*pmu_enable)              (struct pmu *pmu); /* optional */
        void (*pmu_disable)             (struct pmu *pmu); /* optional */

        /*
         * Try and initialize the event for this PMU.
         * Should return -ENOENT when the @event doesn't match this PMU.
         */
        int (*event_init)               (struct perf_event *event);

#define PERF_EF_START   0x01            /* start the counter when adding    */
#define PERF_EF_RELOAD  0x02            /* reload the counter when starting */
#define PERF_EF_UPDATE  0x04            /* update the counter when stopping */

        /*
         * Adds/Removes a counter to/from the PMU, can be done inside
         * a transaction, see the ->*_txn() methods.
         */
        int  (*add)                     (struct perf_event *event, int flags);
        void (*del)                     (struct perf_event *event, int flags);

        /*
         * Starts/Stops a counter present on the PMU. The PMI handler
         * should stop the counter when perf_event_overflow() returns
         * !0. ->start() will be used to continue.
         */
        void (*start)                   (struct perf_event *event, int flags);
        void (*stop)                    (struct perf_event *event, int flags);

        /*
         * Updates the counter value of the event.
         */
        void (*read)                    (struct perf_event *event);

        /*
         * Group event scheduling is treated as a transaction: add
         * group events as a whole, and perform one schedulability test.
         * If the test fails, roll back the whole group.
         *
         * Start the transaction; after this, ->add() doesn't need to
         * do schedulability tests.
         */
        void (*start_txn)               (struct pmu *pmu); /* optional */
        /*
         * If ->start_txn() disabled the ->add() schedulability test
         * then ->commit_txn() is required to perform one. On success
         * the transaction is closed. On error the transaction is kept
         * open until ->cancel_txn() is called.
         */
        int  (*commit_txn)              (struct pmu *pmu); /* optional */
        /*
         * Will cancel the transaction, assumes ->del() is called
         * for each successful ->add() during the transaction.
         */
        void (*cancel_txn)              (struct pmu *pmu); /* optional */
};
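
/*
 * Example (illustrative): the minimal shape of a PMU driver against
 * this interface; "my_pmu" and its callbacks are made-up names, and a
 * real driver would also program the hardware in add/start/stop/del:
 *
 *      static int my_event_init(struct perf_event *event)
 *      {
 *              if (event->attr.type != PERF_TYPE_RAW)
 *                      return -ENOENT; // not ours, try the next PMU
 *              return 0;
 *      }
 *
 *      static struct pmu my_pmu = {
 *              .event_init     = my_event_init,
 *              .add            = my_add,
 *              .del            = my_del,
 *              .start          = my_start,
 *              .stop           = my_stop,
 *              .read           = my_read,
 *      };
 *
 *      perf_pmu_register(&my_pmu);
 */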

/**
 * enum perf_event_active_state - the states of an event
 */
enum perf_event_active_state {
        PERF_EVENT_STATE_ERROR          = -2,
        PERF_EVENT_STATE_OFF            = -1,
        PERF_EVENT_STATE_INACTIVE       =  0,
        PERF_EVENT_STATE_ACTIVE         =  1,
};

struct file;

#define PERF_BUFFER_WRITABLE            0x01

struct perf_buffer {
        atomic_t                        refcount;
        struct rcu_head                 rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
        struct work_struct              work;
        int                             page_order;     /* allocation order  */
#endif
        int                             nr_pages;       /* nr of data pages  */
        int                             writable;       /* are we writable   */

        atomic_t                        poll;           /* POLL_ for wakeups */

        local_t                         head;           /* write position    */
        local_t                         nest;           /* nested writers    */
        local_t                         events;         /* event limit       */
        local_t                         wakeup;         /* wakeup stamp      */
        local_t                         lost;           /* nr records lost   */

        long                            watermark;      /* wakeup watermark  */

        struct perf_event_mmap_page     *user_page;
        void                            *data_pages[0];
};

struct perf_sample_data;

typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
                                        struct perf_sample_data *,
                                        struct pt_regs *regs);

enum perf_group_flag {
        PERF_GROUP_SOFTWARE = 0x1,
};

#define SWEVENT_HLIST_BITS      8
#define SWEVENT_HLIST_SIZE      (1 << SWEVENT_HLIST_BITS)

struct swevent_hlist {
        struct hlist_head       heads[SWEVENT_HLIST_SIZE];
        struct rcu_head         rcu_head;
};

#define PERF_ATTACH_CONTEXT     0x01
#define PERF_ATTACH_GROUP       0x02
#define PERF_ATTACH_TASK        0x04

/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
        struct list_head                group_entry;
        struct list_head                event_entry;
        struct list_head                sibling_list;
        struct hlist_node               hlist_entry;
        int                             nr_siblings;
        int                             group_flags;
        struct perf_event               *group_leader;
        struct pmu                      *pmu;

        enum perf_event_active_state    state;
        unsigned int                    attach_state;
        local64_t                       count;
        atomic64_t                      child_count;

        /*
         * These are the total time in nanoseconds that the event
         * has been enabled (i.e. eligible to run, and the task has
         * been scheduled in, if this is a per-task event)
         * and running (scheduled onto the CPU), respectively.
         *
         * They are computed from tstamp_enabled, tstamp_running and
         * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
         */
        u64                             total_time_enabled;
        u64                             total_time_running;

        /*
         * These are timestamps used for computing total_time_enabled
         * and total_time_running when the event is in INACTIVE or
         * ACTIVE state, measured in nanoseconds from an arbitrary point
         * in time.
         * tstamp_enabled: the notional time when the event was enabled
         * tstamp_running: the notional time when the event was scheduled on
         * tstamp_stopped: in INACTIVE state, the notional time when the
         *      event was scheduled off.
         */
        u64                             tstamp_enabled;
        u64                             tstamp_running;
        u64                             tstamp_stopped;

        struct perf_event_attr          attr;
        struct hw_perf_event            hw;

        struct perf_event_context      *ctx;
        struct file                    *filp;

        /*
         * These accumulate total time (in nanoseconds) that child
         * events have been enabled and running, respectively.
         */
        atomic64_t                      child_total_time_enabled;
        atomic64_t                      child_total_time_running;

        /*
         * Protect attach/detach and child_list:
         */
        struct mutex                    child_mutex;
        struct list_head                child_list;
        struct perf_event               *parent;

        int                             oncpu;
        int                             cpu;

        struct list_head                owner_entry;
        struct task_struct              *owner;

        /* mmap bits */
        struct mutex                    mmap_mutex;
        atomic_t                        mmap_count;
        int                             mmap_locked;
        struct user_struct              *mmap_user;
        struct perf_buffer              *buffer;

        /* poll related */
        wait_queue_head_t               waitq;
        struct fasync_struct            *fasync;

        /* delayed work for NMIs and such */
        int                             pending_wakeup;
        int                             pending_kill;
        int                             pending_disable;
        struct irq_work                 pending;

        atomic_t                        event_limit;

        void (*destroy)(struct perf_event *);
        struct rcu_head                 rcu_head;

        struct pid_namespace            *ns;
        u64                             id;

        perf_overflow_handler_t         overflow_handler;

#ifdef CONFIG_EVENT_TRACING
        struct ftrace_event_call        *tp_event;
        struct event_filter             *filter;
#endif

#endif /* CONFIG_PERF_EVENTS */
};

enum perf_event_context_type {
        task_context,
        cpu_context,
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
        enum perf_event_context_type    type;
        struct pmu                      *pmu;
        /*
         * Protect the states of the events in the list,
         * nr_active, and the list:
         */
        raw_spinlock_t                  lock;
        /*
         * Protect the list of events. Locking either mutex or lock
         * is sufficient to ensure the list doesn't change; to change
         * the list you need to lock both the mutex and the spinlock.
         */
        struct mutex                    mutex;

        struct list_head                pinned_groups;
        struct list_head                flexible_groups;
        struct list_head                event_list;
        int                             nr_events;
        int                             nr_active;
        int                             is_active;
        int                             nr_stat;
        atomic_t                        refcount;
        struct task_struct              *task;

        /*
         * Context clock, runs when context enabled.
         */
        u64                             time;
        u64                             timestamp;

        /*
         * These fields let us detect when two contexts have both
         * been cloned (inherited) from a common ancestor.
         */
        struct perf_event_context       *parent_ctx;
        u64                             parent_gen;
        u64                             generation;
        int                             pin_count;
        struct rcu_head                 rcu_head;
};

/*
 * Number of contexts where an event can trigger:
 *      task, softirq, hardirq, nmi.
 */
#define PERF_NR_CONTEXTS        4

/**
 * struct perf_cpu_context - per cpu event context structure
 */
struct perf_cpu_context {
        struct perf_event_context       ctx;
        struct perf_event_context       *task_ctx;
        int                             active_oncpu;
        int                             exclusive;
        struct list_head                rotation_list;
        int                             jiffies_interval;
};

struct perf_output_handle {
        struct perf_event               *event;
        struct perf_buffer              *buffer;
        unsigned long                   wakeup;
        unsigned long                   size;
        void                            *addr;
        int                             page;
        int                             nmi;
        int                             sample;
};

#ifdef CONFIG_PERF_EVENTS

extern int perf_pmu_register(struct pmu *pmu);
extern void perf_pmu_unregister(struct pmu *pmu);

extern int perf_num_counters(void);
extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);

extern atomic_t perf_task_events;

static inline void perf_event_task_sched_in(struct task_struct *task)
{
        COND_STMT(&perf_task_events, __perf_event_task_sched_in(task));
}

static inline
void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
{
        COND_STMT(&perf_task_events, __perf_event_task_sched_out(task, next));
}

extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
                                 int cpu,
                                 struct task_struct *task,
                                 perf_overflow_handler_t callback);
extern u64 perf_event_read_value(struct perf_event *event,
                                 u64 *enabled, u64 *running);

struct perf_sample_data {
        u64                             type;

        u64                             ip;
        struct {
                u32     pid;
                u32     tid;
        }                               tid_entry;
        u64                             time;
        u64                             addr;
        u64                             id;
        u64                             stream_id;
        struct {
                u32     cpu;
                u32     reserved;
        }                               cpu_entry;
        u64                             period;
        struct perf_callchain_entry     *callchain;
        struct perf_raw_record          *raw;
};

static inline
void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
{
        data->addr = addr;
        data->raw  = NULL;
}
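
/*
 * Example (illustrative): a PMU interrupt handler would typically fill
 * one of these on the stack before calling perf_event_overflow():
 *
 *      struct perf_sample_data data;
 *
 *      perf_sample_data_init(&data, 0);
 *      data.period = event->hw.last_period;
 *      if (perf_event_overflow(event, 1, &data, regs))
 *              ...stop the counter...
 */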

extern void perf_output_sample(struct perf_output_handle *handle,
                               struct perf_event_header *header,
                               struct perf_sample_data *data,
                               struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
                                struct perf_sample_data *data,
                                struct perf_event *event,
                                struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event, int nmi,
                               struct perf_sample_data *data,
                               struct pt_regs *regs);

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
        return event->pmu->task_ctx_nr == perf_sw_context;
}

extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);

#ifndef perf_arch_fetch_caller_regs
static inline void
perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif

/*
 * Take a snapshot of the regs. Skip ip and frame pointer to
 * the nth caller. We only need a few of the regs:
 * - ip for PERF_SAMPLE_IP
 * - cs for user_mode() tests
 * - bp for callchains
 * - eflags, for future purposes, just in case
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
        memset(regs, 0, sizeof(*regs));

        perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}

static __always_inline void
perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
{
        struct pt_regs hot_regs;

        JUMP_LABEL(&perf_swevent_enabled[event_id], have_event);
        return;

have_event:
        if (!regs) {
                perf_fetch_caller_regs(&hot_regs);
                regs = &hot_regs;
        }
        __perf_sw_event(event_id, nr, nmi, regs, addr);
}
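
/*
 * Example (illustrative): kernel code raises a software event by
 * calling the helper above with the count to add, e.g. a fault path
 * could account an alignment fault at address "addr" with:
 *
 *      perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr);
 *
 * The JUMP_LABEL() test keeps this a near-NOP while no such event is
 * being counted.
 */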

extern void perf_event_mmap(struct vm_area_struct *vma);
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);

extern void perf_event_comm(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);

/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

extern void perf_callchain_user(struct perf_callchain_entry *entry,
                                struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry *entry,
                                  struct pt_regs *regs);

static inline void
perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
        if (entry->nr < PERF_MAX_STACK_DEPTH)
                entry->ip[entry->nr++] = ip;
}
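
/*
 * Example (illustrative): architectures implement the two hooks above
 * in terms of perf_callchain_store(); a minimal sketch, with the
 * arch-specific frame walk elided:
 *
 *      void perf_callchain_kernel(struct perf_callchain_entry *entry,
 *                                 struct pt_regs *regs)
 *      {
 *              perf_callchain_store(entry, instruction_pointer(regs));
 *              ...walk stack frames, storing each return address...
 *      }
 */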

extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;

static inline bool perf_paranoid_tracepoint_raw(void)
{
        return sysctl_perf_event_paranoid > -1;
}

static inline bool perf_paranoid_cpu(void)
{
        return sysctl_perf_event_paranoid > 0;
}

static inline bool perf_paranoid_kernel(void)
{
        return sysctl_perf_event_paranoid > 1;
}

extern void perf_event_init(void);
extern void perf_tp_event(u64 addr, u64 count, void *record,
                          int entry_size, struct pt_regs *regs,
                          struct hlist_head *head, int rctx);
extern void perf_bp_event(struct perf_event *event, void *data);

#ifndef perf_misc_flags
#define perf_misc_flags(regs)   (user_mode(regs) ? PERF_RECORD_MISC_USER : \
                                 PERF_RECORD_MISC_KERNEL)
#define perf_instruction_pointer(regs)  instruction_pointer(regs)
#endif

extern int perf_output_begin(struct perf_output_handle *handle,
                             struct perf_event *event, unsigned int size,
                             int nmi, int sample);
extern void perf_output_end(struct perf_output_handle *handle);
extern void perf_output_copy(struct perf_output_handle *handle,
                             const void *buf, unsigned int len);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern void perf_event_task_tick(void);
#else
static inline void
perf_event_task_sched_in(struct task_struct *task)                      { }
static inline void
perf_event_task_sched_out(struct task_struct *task,
                          struct task_struct *next)                     { }
static inline int perf_event_init_task(struct task_struct *child)       { return 0; }
static inline void perf_event_exit_task(struct task_struct *child)      { }
static inline void perf_event_free_task(struct task_struct *task)       { }
static inline void perf_event_delayed_put(struct task_struct *task)     { }
static inline void perf_event_print_debug(void)                         { }
static inline int perf_event_task_disable(void)                         { return -EINVAL; }
static inline int perf_event_task_enable(void)                          { return -EINVAL; }

static inline void
perf_sw_event(u32 event_id, u64 nr, int nmi,
              struct pt_regs *regs, u64 addr)                           { }
static inline void
perf_bp_event(struct perf_event *event, void *data)                     { }

static inline int perf_register_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)                           { return 0; }
static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)                           { return 0; }

static inline void perf_event_mmap(struct vm_area_struct *vma)          { }
static inline void perf_event_comm(struct task_struct *tsk)             { }
static inline void perf_event_fork(struct task_struct *tsk)             { }
static inline void perf_event_init(void)                                { }
static inline int  perf_swevent_get_recursion_context(void)             { return -1; }
static inline void perf_swevent_put_recursion_context(int rctx)         { }
static inline void perf_event_enable(struct perf_event *event)          { }
static inline void perf_event_disable(struct perf_event *event)         { }
static inline void perf_event_task_tick(void)                           { }
#endif

#define perf_output_put(handle, x) \
        perf_output_copy((handle), &(x), sizeof(x))

/*
 * This has to have a higher priority than migration_notifier in sched.c.
 */
#define perf_cpu_notifier(fn)                                           \
do {                                                                    \
        static struct notifier_block fn##_nb __cpuinitdata =            \
                { .notifier_call = fn, .priority = CPU_PRI_PERF };      \
        fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,                     \
                (void *)(unsigned long)smp_processor_id());             \
        fn(&fn##_nb, (unsigned long)CPU_STARTING,                       \
                (void *)(unsigned long)smp_processor_id());             \
        fn(&fn##_nb, (unsigned long)CPU_ONLINE,                         \
                (void *)(unsigned long)smp_processor_id());             \
        register_cpu_notifier(&fn##_nb);                                \
} while (0)
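
/*
 * Example (illustrative): a subsystem would invoke this from its init
 * code with its own notifier callback ("my_cpu_notify" is a made-up
 * name). The macro replays UP_PREPARE, STARTING and ONLINE for the
 * current CPU before registering the notifier:
 *
 *      static int __cpuinit my_cpu_notify(struct notifier_block *nb,
 *                                         unsigned long action, void *hcpu)
 *      {
 *              switch (action & ~CPU_TASKS_FROZEN) {
 *              case CPU_ONLINE:
 *                      ...set up per-cpu state...
 *                      break;
 *              }
 *              return NOTIFY_OK;
 *      }
 *
 *      perf_cpu_notifier(my_cpu_notify);
 */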

#endif /* __KERNEL__ */
#endif /* _LINUX_PERF_EVENT_H */