/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * attr.type
 */
enum perf_type_id {
        PERF_TYPE_HARDWARE   = 0,
        PERF_TYPE_SOFTWARE   = 1,
        PERF_TYPE_TRACEPOINT = 2,
        PERF_TYPE_HW_CACHE   = 3,
        PERF_TYPE_RAW        = 4,
        PERF_TYPE_BREAKPOINT = 5,

        PERF_TYPE_MAX,       /* non-ABI */
};

/*
 * Generalized performance event event_id types, used by the
 * attr.config parameter of the sys_perf_event_open()
 * syscall:
 */
enum perf_hw_id {
        /*
         * Common hardware events, generalized by the kernel:
         */
        PERF_COUNT_HW_CPU_CYCLES              = 0,
        PERF_COUNT_HW_INSTRUCTIONS            = 1,
        PERF_COUNT_HW_CACHE_REFERENCES        = 2,
        PERF_COUNT_HW_CACHE_MISSES            = 3,
        PERF_COUNT_HW_BRANCH_INSTRUCTIONS     = 4,
        PERF_COUNT_HW_BRANCH_MISSES           = 5,
        PERF_COUNT_HW_BUS_CYCLES              = 6,
        PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7,
        PERF_COUNT_HW_STALLED_CYCLES_BACKEND  = 8,
        PERF_COUNT_HW_REF_CPU_CYCLES          = 9,

        PERF_COUNT_HW_MAX,                    /* non-ABI */
};

/*
 * Generalized hardware cache events:
 *
 *    { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
 *    { read, write, prefetch } x
 *    { accesses, misses }
 */
enum perf_hw_cache_id {
        PERF_COUNT_HW_CACHE_L1D  = 0,
        PERF_COUNT_HW_CACHE_L1I  = 1,
        PERF_COUNT_HW_CACHE_LL   = 2,
        PERF_COUNT_HW_CACHE_DTLB = 3,
        PERF_COUNT_HW_CACHE_ITLB = 4,
        PERF_COUNT_HW_CACHE_BPU  = 5,
        PERF_COUNT_HW_CACHE_NODE = 6,

        PERF_COUNT_HW_CACHE_MAX, /* non-ABI */
};

enum perf_hw_cache_op_id {
        PERF_COUNT_HW_CACHE_OP_READ     = 0,
        PERF_COUNT_HW_CACHE_OP_WRITE    = 1,
        PERF_COUNT_HW_CACHE_OP_PREFETCH = 2,

        PERF_COUNT_HW_CACHE_OP_MAX,     /* non-ABI */
};

enum perf_hw_cache_op_result_id {
        PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0,
        PERF_COUNT_HW_CACHE_RESULT_MISS   = 1,

        PERF_COUNT_HW_CACHE_RESULT_MAX,   /* non-ABI */
};
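
/*
 * A PERF_TYPE_HW_CACHE event is selected by packing the three enums
 * above into attr.config, per the perf_event_open(2) documentation:
 *
 *    attr.type   = PERF_TYPE_HW_CACHE;
 *    attr.config = (perf_hw_cache_id) |
 *                  (perf_hw_cache_op_id << 8) |
 *                  (perf_hw_cache_op_result_id << 16);
 *
 * e.g. counting L1-D read misses:
 *
 *    attr.config = PERF_COUNT_HW_CACHE_L1D |
 *                  (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *                  (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 */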
/*
 * Special "software" events provided by the kernel, even if the hardware
 * does not support performance events. These events measure various
 * physical and software events of the kernel (and allow profiling
 * them as well):
 */
enum perf_sw_ids {
        PERF_COUNT_SW_CPU_CLOCK        = 0,
        PERF_COUNT_SW_TASK_CLOCK       = 1,
        PERF_COUNT_SW_PAGE_FAULTS      = 2,
        PERF_COUNT_SW_CONTEXT_SWITCHES = 3,
        PERF_COUNT_SW_CPU_MIGRATIONS   = 4,
        PERF_COUNT_SW_PAGE_FAULTS_MIN  = 5,
        PERF_COUNT_SW_PAGE_FAULTS_MAJ  = 6,
        PERF_COUNT_SW_ALIGNMENT_FAULTS = 7,
        PERF_COUNT_SW_EMULATION_FAULTS = 8,

        PERF_COUNT_SW_MAX,             /* non-ABI */
};

/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_event_sample_format {
        PERF_SAMPLE_IP           = 1U << 0,
        PERF_SAMPLE_TID          = 1U << 1,
        PERF_SAMPLE_TIME         = 1U << 2,
        PERF_SAMPLE_ADDR         = 1U << 3,
        PERF_SAMPLE_READ         = 1U << 4,
        PERF_SAMPLE_CALLCHAIN    = 1U << 5,
        PERF_SAMPLE_ID           = 1U << 6,
        PERF_SAMPLE_CPU          = 1U << 7,
        PERF_SAMPLE_PERIOD       = 1U << 8,
        PERF_SAMPLE_STREAM_ID    = 1U << 9,
        PERF_SAMPLE_RAW          = 1U << 10,
        PERF_SAMPLE_BRANCH_STACK = 1U << 11,
        PERF_SAMPLE_REGS_USER    = 1U << 12,

        PERF_SAMPLE_MAX          = 1U << 13, /* non-ABI */
};

/*
 * Values to program into branch_sample_type when PERF_SAMPLE_BRANCH_STACK
 * is set.
 *
 * If the user does not pass priv level information via branch_sample_type,
 * the kernel uses the event's priv level. Branch and event priv levels do
 * not have to match. Branch priv level is checked for permissions.
 *
 * The branch types can be combined, however BRANCH_ANY covers all types
 * of branches and therefore it supersedes all the other types.
 */
enum perf_branch_sample_type {
        PERF_SAMPLE_BRANCH_USER       = 1U << 0, /* user branches */
        PERF_SAMPLE_BRANCH_KERNEL     = 1U << 1, /* kernel branches */
        PERF_SAMPLE_BRANCH_HV         = 1U << 2, /* hypervisor branches */

        PERF_SAMPLE_BRANCH_ANY        = 1U << 3, /* any branch types */
        PERF_SAMPLE_BRANCH_ANY_CALL   = 1U << 4, /* any call branch */
        PERF_SAMPLE_BRANCH_ANY_RETURN = 1U << 5, /* any return branch */
        PERF_SAMPLE_BRANCH_IND_CALL   = 1U << 6, /* indirect calls */

        PERF_SAMPLE_BRANCH_MAX        = 1U << 7, /* non-ABI */
};

#define PERF_SAMPLE_BRANCH_PLM_ALL \
        (PERF_SAMPLE_BRANCH_USER|\
         PERF_SAMPLE_BRANCH_KERNEL|\
         PERF_SAMPLE_BRANCH_HV)
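
/*
 * A minimal sketch of requesting branch records along with samples;
 * which flag combinations are honoured is PMU-dependent:
 *
 *    attr.sample_type        |= PERF_SAMPLE_BRANCH_STACK;
 *    attr.branch_sample_type  = PERF_SAMPLE_BRANCH_ANY |
 *                               PERF_SAMPLE_BRANCH_USER;
 */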
/*
 * Values to determine ABI of the registers dump.
 */
enum perf_sample_regs_abi {
        PERF_SAMPLE_REGS_ABI_NONE = 0,
        PERF_SAMPLE_REGS_ABI_32   = 1,
        PERF_SAMPLE_REGS_ABI_64   = 2,
};

/*
 * The format of the data returned by read() on a perf event fd,
 * as specified by attr.read_format:
 *
 * struct read_format {
 *    { u64 value;
 *      { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *      { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *      { u64 id;           } && PERF_FORMAT_ID
 *    } && !PERF_FORMAT_GROUP
 *
 *    { u64 nr;
 *      { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *      { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *      { u64 value;
 *        { u64 id;         } && PERF_FORMAT_ID
 *      } cntr[nr];
 *    } && PERF_FORMAT_GROUP
 * };
 */
enum perf_event_read_format {
        PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0,
        PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
        PERF_FORMAT_ID                 = 1U << 2,
        PERF_FORMAT_GROUP              = 1U << 3,

        PERF_FORMAT_MAX                = 1U << 4, /* non-ABI */
};
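
/*
 * User-space sketch of consuming the !PERF_FORMAT_GROUP layout above,
 * assuming read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
 * PERF_FORMAT_TOTAL_TIME_RUNNING was requested:
 *
 *    struct { __u64 value, time_enabled, time_running; } rf;
 *
 *    if (read(fd, &rf, sizeof(rf)) != sizeof(rf))
 *            return -1;
 *    // scale for multiplexing: the event only counted while running
 *    if (rf.time_running && rf.time_running != rf.time_enabled)
 *            rf.value = rf.value * rf.time_enabled / rf.time_running;
 */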
#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */
#define PERF_ATTR_SIZE_VER1 72 /* add: config2 */
#define PERF_ATTR_SIZE_VER2 80 /* add: branch_sample_type */
#define PERF_ATTR_SIZE_VER3 88 /* add: sample_regs_user */

/*
 * Hardware event_id to monitor via a performance monitoring event:
 */
struct perf_event_attr {

        /*
         * Major type: hardware/software/tracepoint/etc.
         */
        __u32 type;

        /*
         * Size of the attr structure, for fwd/bwd compat.
         */
        __u32 size;

        /*
         * Type specific configuration information.
         */
        __u64 config;

        union {
                __u64 sample_period;
                __u64 sample_freq;
        };

        __u64 sample_type;
        __u64 read_format;

        __u64 disabled       : 1, /* off by default */
              inherit        : 1, /* children inherit it */
              pinned         : 1, /* must always be on PMU */
              exclusive      : 1, /* only group on PMU */
              exclude_user   : 1, /* don't count user */
              exclude_kernel : 1, /* ditto kernel */
              exclude_hv     : 1, /* ditto hypervisor */
              exclude_idle   : 1, /* don't count when idle */
              mmap           : 1, /* include mmap data */
              comm           : 1, /* include comm data */
              freq           : 1, /* use freq, not period */
              inherit_stat   : 1, /* per task counts */
              enable_on_exec : 1, /* next exec enables */
              task           : 1, /* trace fork/exit */
              watermark      : 1, /* wakeup_watermark */
              /*
               * precise_ip:
               *
               *  0 - SAMPLE_IP can have arbitrary skid
               *  1 - SAMPLE_IP must have constant skid
               *  2 - SAMPLE_IP requested to have 0 skid
               *  3 - SAMPLE_IP must have 0 skid
               *
               * See also PERF_RECORD_MISC_EXACT_IP
               */
              precise_ip     : 2, /* skid constraint */
              mmap_data      : 1, /* non-exec mmap data */
              sample_id_all  : 1, /* sample_type all events */
              exclude_host   : 1, /* don't count in host */
              exclude_guest  : 1, /* don't count in guest */

              __reserved_1   : 43;

        union {
                __u32 wakeup_events;    /* wakeup every n events */
                __u32 wakeup_watermark; /* bytes before wakeup */
        };

        __u32 bp_type;
        union {
                __u64 bp_addr;
                __u64 config1; /* extension of config */
        };
        union {
                __u64 bp_len;
                __u64 config2; /* extension of config1 */
        };
        __u64 branch_sample_type; /* enum perf_branch_sample_type */

        /*
         * Defines set of user regs to dump on samples.
         * See asm/perf_regs.h for details.
         */
        __u64 sample_regs_user;
};
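
/*
 * A minimal user-space sketch of filling this struct and opening a
 * counter; there is no glibc wrapper for the syscall, so syscall(2)
 * with __NR_perf_event_open is used directly:
 *
 *    struct perf_event_attr attr;
 *    int fd;
 *
 *    memset(&attr, 0, sizeof(attr));
 *    attr.type           = PERF_TYPE_HARDWARE;
 *    attr.size           = sizeof(attr);
 *    attr.config         = PERF_COUNT_HW_INSTRUCTIONS;
 *    attr.disabled       = 1;  // enable later via ioctl
 *    attr.exclude_kernel = 1;
 *    attr.exclude_hv     = 1;
 *
 *    // pid = 0, cpu = -1: this task on any CPU; no group, no flags
 *    fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 */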
/*
 * Ioctls that can be done on a perf event fd:
 */
#define PERF_EVENT_IOC_ENABLE     _IO ('$', 0)
#define PERF_EVENT_IOC_DISABLE    _IO ('$', 1)
#define PERF_EVENT_IOC_REFRESH    _IO ('$', 2)
#define PERF_EVENT_IOC_RESET      _IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD     _IOW('$', 4, __u64)
#define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *)

enum perf_event_ioc_flags {
        PERF_IOC_FLAG_GROUP = 1U << 0,
};
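
/*
 * Typical use of the ioctls above around a measured region, assuming
 * the event was opened with attr.disabled = 1 and a plain (no
 * PERF_FORMAT_*) read_format:
 *
 *    __u64 count;
 *
 *    ioctl(fd, PERF_EVENT_IOC_RESET, 0);
 *    ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *    do_work();                            // code under measurement
 *    ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *    read(fd, &count, sizeof(count));
 */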
/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_event_mmap_page {
        __u32 version;        /* version number of this structure */
        __u32 compat_version; /* lowest version this is compat with */

        /*
         * Bits needed to read the hw events in user-space.
         *
         *    u32 seq, time_mult, time_shift, idx, width;
         *    u64 count, enabled, running;
         *    u64 cyc, time_offset;
         *    s64 pmc = 0;
         *
         *    do {
         *        seq = pc->lock;
         *        barrier()
         *
         *        enabled = pc->time_enabled;
         *        running = pc->time_running;
         *
         *        if (pc->cap_usr_time && enabled != running) {
         *            cyc = rdtsc();
         *            time_offset = pc->time_offset;
         *            time_mult   = pc->time_mult;
         *            time_shift  = pc->time_shift;
         *        }
         *
         *        idx = pc->index;
         *        count = pc->offset;
         *        if (pc->cap_usr_rdpmc && idx) {
         *            width = pc->pmc_width;
         *            pmc = rdpmc(idx - 1);
         *        }
         *
         *        barrier();
         *    } while (pc->lock != seq);
         *
         * NOTE: for obvious reasons this only works on self-monitoring
         *       processes.
         */
        __u32 lock;         /* seqlock for synchronization */
        __u32 index;        /* hardware event identifier */
        __s64 offset;       /* add to hardware event value */
        __u64 time_enabled; /* time event active */
        __u64 time_running; /* time event on cpu */
        union {
                __u64 capabilities;
                __u64 cap_usr_time  : 1,
                      cap_usr_rdpmc : 1,
                      cap_____res   : 62;
        };

        /*
         * If cap_usr_rdpmc is set, this field provides the bit-width of
         * the value read using the rdpmc() or equivalent instruction.
         * This can be used to sign extend the result like:
         *
         *    pmc <<= 64 - width;
         *    pmc >>= 64 - width; // signed shift right
         *    count += pmc;
         */
        __u16 pmc_width;

        /*
         * If cap_usr_time the below fields can be used to compute the time
         * delta since time_enabled (in ns) using rdtsc or similar.
         *
         *    u64 quot, rem;
         *    u64 delta;
         *
         *    quot = (cyc >> time_shift);
         *    rem = cyc & ((1 << time_shift) - 1);
         *    delta = time_offset + quot * time_mult +
         *            ((rem * time_mult) >> time_shift);
         *
         * Where time_offset, time_mult, time_shift and cyc are read in the
         * seqcount loop described above. This delta can then be added to
         * enabled and possibly running (if idx), improving the scaling:
         *
         *    enabled += delta;
         *    if (idx)
         *        running += delta;
         *
         *    quot = count / running;
         *    rem = count % running;
         *    count = quot * enabled + (rem * enabled) / running;
         */
        __u16 time_shift;
        __u32 time_mult;
        __u64 time_offset;

        /*
         * Hole for extension of the self monitor capabilities
         */
        __u64 __reserved[120]; /* align to 1k */

        /*
         * Control data for the mmap() data buffer.
         *
         * User-space reading the @data_head value should issue an rmb(), on
         * SMP capable platforms, after reading this value -- see
         * perf_event_wakeup().
         *
         * When the mapping is PROT_WRITE the @data_tail value should be
         * written by userspace to reflect the last read data. In this case
         * the kernel will not over-write unread data.
         */
        __u64 data_head; /* head in the data section */
        __u64 data_tail; /* user-space written tail */
};
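
/*
 * Sketch of the expected user-space use of data_head/data_tail on a
 * PROT_WRITE mapping, per the comment above; `base` is assumed to be
 * the start of the mmap()ed area:
 *
 *    struct perf_event_mmap_page *pc = base;
 *    __u64 head = pc->data_head;
 *    rmb();                              // order vs. record reads
 *
 *    while (pc->data_tail != head) {
 *        ... consume one record ...
 *        pc->data_tail += record_size;
 *    }                                   // kernel may now overwrite
 */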
#define PERF_RECORD_MISC_CPUMODE_MASK    (7 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN (0 << 0)
#define PERF_RECORD_MISC_KERNEL          (1 << 0)
#define PERF_RECORD_MISC_USER            (2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR      (3 << 0)
#define PERF_RECORD_MISC_GUEST_KERNEL    (4 << 0)
#define PERF_RECORD_MISC_GUEST_USER      (5 << 0)

/*
 * Indicates that the content of PERF_SAMPLE_IP points to
 * the actual instruction that triggered the event. See also
 * perf_event_attr::precise_ip.
 */
#define PERF_RECORD_MISC_EXACT_IP (1 << 14)

/*
 * Reserve the last bit to indicate some extended misc field
 */
#define PERF_RECORD_MISC_EXT_RESERVED (1 << 15)

struct perf_event_header {
        __u32 type;
        __u16 misc;
        __u16 size;
};

enum perf_event_type {

        /*
         * If perf_event_attr.sample_id_all is set then all event types will
         * have the sample_type selected fields related to where/when
         * (identity) an event took place (TID, TIME, ID, CPU, STREAM_ID), as
         * described in PERF_RECORD_SAMPLE below. These fields are stashed
         * just after the perf_event_header and the fields already present
         * for the record type, i.e. at the end of the payload. That way a
         * newer perf.data file will be supported by older perf tools, with
         * the new optional fields being ignored.
         *
         * The MMAP events record the PROT_EXEC mappings so that we can
         * correlate userspace IPs to code. They have the following structure:
         *
         * struct {
         *    struct perf_event_header header;
         *
         *    u32  pid, tid;
         *    u64  addr;
         *    u64  len;
         *    u64  pgoff;
         *    char filename[];
         * };
         */
        PERF_RECORD_MMAP = 1,

        /*
         * struct {
         *    struct perf_event_header header;
         *    u64 id;
         *    u64 lost;
         * };
         */
        PERF_RECORD_LOST = 2,

        /*
         * struct {
         *    struct perf_event_header header;
         *
         *    u32  pid, tid;
         *    char comm[];
         * };
         */
        PERF_RECORD_COMM = 3,

        /*
         * struct {
         *    struct perf_event_header header;
         *    u32 pid, ppid;
         *    u32 tid, ptid;
         *    u64 time;
         * };
         */
        PERF_RECORD_EXIT = 4,

        /*
         * struct {
         *    struct perf_event_header header;
         *    u64 time;
         *    u64 id;
         *    u64 stream_id;
         * };
         */
        PERF_RECORD_THROTTLE   = 5,
        PERF_RECORD_UNTHROTTLE = 6,

        /*
         * struct {
         *    struct perf_event_header header;
         *    u32 pid, ppid;
         *    u32 tid, ptid;
         *    u64 time;
         * };
         */
        PERF_RECORD_FORK = 7,

        /*
         * struct {
         *    struct perf_event_header header;
         *    u32 pid, tid;
         *
         *    struct read_format values;
         * };
         */
        PERF_RECORD_READ = 8,

        /*
         * struct {
         *    struct perf_event_header header;
         *
         *    { u64 ip;        } && PERF_SAMPLE_IP
         *    { u32 pid, tid;  } && PERF_SAMPLE_TID
         *    { u64 time;      } && PERF_SAMPLE_TIME
         *    { u64 addr;      } && PERF_SAMPLE_ADDR
         *    { u64 id;        } && PERF_SAMPLE_ID
         *    { u64 stream_id; } && PERF_SAMPLE_STREAM_ID
         *    { u32 cpu, res;  } && PERF_SAMPLE_CPU
         *    { u64 period;    } && PERF_SAMPLE_PERIOD
         *
         *    { struct read_format values; } && PERF_SAMPLE_READ
         *
         *    { u64 nr,
         *      u64 ips[nr];   } && PERF_SAMPLE_CALLCHAIN
         *
         *    #
         *    # The RAW record below is opaque data wrt the ABI
         *    #
         *    # That is, the ABI doesn't make any promises wrt to
         *    # the stability of its content, it may vary depending
         *    # on event, hardware, kernel version and phase of
         *    # the moon.
         *    #
         *    # In other words, PERF_SAMPLE_RAW contents are not an ABI.
         *    #
         *
         *    { u32 size;
         *      char data[size]; } && PERF_SAMPLE_RAW
         *
         *    { u64 nr;
         *      { u64 from, to, flags } lbr[nr]; } && PERF_SAMPLE_BRANCH_STACK
         *
         *    { u64 abi; # enum perf_sample_regs_abi
         *      u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
         * };
         */
        PERF_RECORD_SAMPLE = 9,

        PERF_RECORD_MAX, /* non-ABI */
};
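
/*
 * Sketch of walking the mmap data area record by record, assuming
 * buf/size describe the (power-of-two sized) data pages and no record
 * wraps the buffer end:
 *
 *    struct perf_event_header *hdr;
 *    __u64 off = tail;
 *
 *    while (off != head) {
 *        hdr = (struct perf_event_header *)
 *                (buf + (off & (size - 1)));
 *        switch (hdr->type) {
 *        case PERF_RECORD_SAMPLE:
 *            // decode per attr.sample_type, layout above
 *            break;
 *        case PERF_RECORD_MMAP:
 *        case PERF_RECORD_COMM:
 *            // track maps/comm for symbolization
 *            break;
 *        }
 *        off += hdr->size;
 *    }
 */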
#define PERF_MAX_STACK_DEPTH 127

enum perf_callchain_context {
        PERF_CONTEXT_HV           = (__u64)-32,
        PERF_CONTEXT_KERNEL       = (__u64)-128,
        PERF_CONTEXT_USER         = (__u64)-512,

        PERF_CONTEXT_GUEST        = (__u64)-2048,
        PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176,
        PERF_CONTEXT_GUEST_USER   = (__u64)-2560,

        PERF_CONTEXT_MAX          = (__u64)-4095,
};
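
/*
 * These markers appear inline in the PERF_SAMPLE_CALLCHAIN ips[] array
 * to flag a change of context; a consumer can split the chain like
 * this (sketch; record_frame() is hypothetical):
 *
 *    for (i = 0; i < nr; i++) {
 *        if (ips[i] >= (__u64)PERF_CONTEXT_MAX) {
 *            context = ips[i];    // e.g. PERF_CONTEXT_KERNEL
 *            continue;
 *        }
 *        record_frame(context, ips[i]);
 *    }
 */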
#define PERF_FLAG_FD_NO_GROUP (1U << 0)
#define PERF_FLAG_FD_OUTPUT   (1U << 1)
#define PERF_FLAG_PID_CGROUP  (1U << 2) /* pid=cgroup id, per-cpu mode only */

#ifdef __KERNEL__
/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <linux/cgroup.h>
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif

struct perf_guest_info_callbacks {
        int (*is_in_guest)(void);
        int (*is_user_mode)(void);
        unsigned long (*get_guest_ip)(void);
};

#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/static_key.h>
#include <linux/atomic.h>
#include <linux/sysfs.h>
#include <linux/perf_regs.h>
#include <asm/local.h>

struct perf_callchain_entry {
        __u64 nr;
        __u64 ip[PERF_MAX_STACK_DEPTH];
};

struct perf_raw_record {
        u32 size;
        void *data;
};

/*
 * single taken branch record layout:
 *
 *      from: source instruction (may not always be a branch insn)
 *        to: branch target
 *   mispred: branch target was mispredicted
 * predicted: branch target was predicted
 *
 * support for mispred, predicted is optional. In case it
 * is not supported, mispred = predicted = 0.
 */
struct perf_branch_entry {
        __u64 from;
        __u64 to;
        __u64 mispred   : 1,  /* target mispredicted */
              predicted : 1,  /* target predicted */
              reserved  : 62;
};

/*
 * branch stack layout:
 *  nr: number of taken branches stored in entries[]
 *
 * Note that nr can vary from sample to sample.
 * branches (to, from) are stored from most recent
 * to least recent, i.e., entries[0] contains the most
 * recent branch.
 */
struct perf_branch_stack {
        __u64 nr;
        struct perf_branch_entry entries[0];
};

struct perf_regs_user {
        __u64 abi;
        struct pt_regs *regs;
};

struct task_struct;

/*
 * extra PMU register associated with an event
 */
struct hw_perf_event_extra {
        u64 config;       /* register value */
        unsigned int reg; /* register address or index */
        int alloc;        /* extra register already allocated */
        int idx;          /* index in shared_regs->regs[] */
};

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
        union {
                struct { /* hardware */
                        u64 config;
                        u64 last_tag;
                        unsigned long config_base;
                        unsigned long event_base;
                        int event_base_rdpmc;
                        int idx;
                        int last_cpu;

                        struct hw_perf_event_extra extra_reg;
                        struct hw_perf_event_extra branch_reg;
                };
                struct { /* software */
                        struct hrtimer hrtimer;
                };
#ifdef CONFIG_HAVE_HW_BREAKPOINT
                struct { /* breakpoint */
                        struct arch_hw_breakpoint info;
                        struct list_head bp_list;
                        /*
                         * Crufty hack to avoid the chicken and egg
                         * problem hw_breakpoint has with context
                         * creation and event initialization.
                         */
                        struct task_struct *bp_target;
                };
#endif
        };
        int state;
        local64_t prev_count;
        u64 sample_period;
        u64 last_period;
        local64_t period_left;
        u64 interrupts_seq;
        u64 interrupts;

        u64 freq_time_stamp;
        u64 freq_count_stamp;
#endif
};

/*
 * hw_perf_event::state flags
 */
#define PERF_HES_STOPPED  0x01 /* the counter is stopped */
#define PERF_HES_UPTODATE 0x02 /* event->count up-to-date */
#define PERF_HES_ARCH     0x04

struct perf_event;

/*
 * Common implementation detail of pmu::{start,commit,cancel}_txn
 */
#define PERF_EVENT_TXN 0x1

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
        struct list_head entry;

        struct device *dev;
        const struct attribute_group **attr_groups;
        char *name;
        int type;

        int * __percpu pmu_disable_count;
        struct perf_cpu_context * __percpu pmu_cpu_context;
        int task_ctx_nr;

        /*
         * Fully disable/enable this PMU, can be used to protect from the PMI
         * as well as for lazy/batch writing of the MSRs.
         */
        void (*pmu_enable)  (struct pmu *pmu); /* optional */
        void (*pmu_disable) (struct pmu *pmu); /* optional */

        /*
         * Try and initialize the event for this PMU.
         * Should return -ENOENT when the @event doesn't match this PMU.
         */
        int (*event_init) (struct perf_event *event);

#define PERF_EF_START  0x01 /* start the counter when adding    */
#define PERF_EF_RELOAD 0x02 /* reload the counter when starting */
#define PERF_EF_UPDATE 0x04 /* update the counter when stopping */

        /*
         * Adds/Removes a counter to/from the PMU, can be done inside
         * a transaction, see the ->*_txn() methods.
         */
        int  (*add) (struct perf_event *event, int flags);
        void (*del) (struct perf_event *event, int flags);

        /*
         * Starts/Stops a counter present on the PMU. The PMI handler
         * should stop the counter when perf_event_overflow() returns
         * !0. ->start() will be used to continue.
         */
        void (*start) (struct perf_event *event, int flags);
        void (*stop)  (struct perf_event *event, int flags);

        /*
         * Updates the counter value of the event.
         */
        void (*read) (struct perf_event *event);

        /*
         * Group events scheduling is treated as a transaction: add
         * group events as a whole and perform one schedulability test.
         * If the test fails, roll back the whole group.
         *
         * Start the transaction, after this ->add() doesn't need to
         * do schedulability tests.
         */
        void (*start_txn) (struct pmu *pmu); /* optional */

        /*
         * If ->start_txn() disabled the ->add() schedulability test
         * then ->commit_txn() is required to perform one. On success
         * the transaction is closed. On error the transaction is kept
         * open until ->cancel_txn() is called.
         */
        int (*commit_txn) (struct pmu *pmu); /* optional */

        /*
         * Will cancel the transaction, assumes ->del() is called
         * for each successful ->add() during the transaction.
         */
        void (*cancel_txn) (struct pmu *pmu); /* optional */

        /*
         * Will return the value for perf_event_mmap_page::index for this
         * event; if no implementation is provided it will default to
         * event->hw.idx + 1.
         */
        int (*event_idx) (struct perf_event *event); /* optional */

        /*
         * flush branch stack on context-switches (needed in cpu-wide mode)
         */
        void (*flush_branch_stack) (void);
};
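
/*
 * A minimal registration sketch (not a complete driver): event_init()
 * plus add/del/start/stop/read form the mandatory part of the
 * contract; the my_* callbacks are hypothetical:
 *
 *    static struct pmu my_pmu = {
 *        .task_ctx_nr = perf_sw_context,
 *        .event_init  = my_event_init,  // -ENOENT if not ours
 *        .add         = my_add,         // honours PERF_EF_START
 *        .del         = my_del,
 *        .start       = my_start,       // honours PERF_EF_RELOAD
 *        .stop        = my_stop,        // honours PERF_EF_UPDATE
 *        .read        = my_read,
 *    };
 *
 *    perf_pmu_register(&my_pmu, "my_pmu", -1);  // -1: allocate a type
 */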
/**
 * enum perf_event_active_state - the states of an event
 */
enum perf_event_active_state {
        PERF_EVENT_STATE_ERROR    = -2,
        PERF_EVENT_STATE_OFF      = -1,
        PERF_EVENT_STATE_INACTIVE =  0,
        PERF_EVENT_STATE_ACTIVE   =  1,
};
struct file;
struct perf_sample_data;

typedef void (*perf_overflow_handler_t)(struct perf_event *,
                                        struct perf_sample_data *,
                                        struct pt_regs *regs);

enum perf_group_flag {
        PERF_GROUP_SOFTWARE = 0x1,
};

#define SWEVENT_HLIST_BITS 8
#define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS)

struct swevent_hlist {
        struct hlist_head heads[SWEVENT_HLIST_SIZE];
        struct rcu_head rcu_head;
};

#define PERF_ATTACH_CONTEXT 0x01
#define PERF_ATTACH_GROUP   0x02
#define PERF_ATTACH_TASK    0x04

#ifdef CONFIG_CGROUP_PERF
/*
 * perf_cgroup_info keeps track of time_enabled for a cgroup.
 * This is a per-cpu dynamically allocated data structure.
 */
struct perf_cgroup_info {
        u64 time;
        u64 timestamp;
};

struct perf_cgroup {
        struct cgroup_subsys_state css;
        struct perf_cgroup_info *info; /* timing info, one per cpu */
};
#endif

struct ring_buffer;

/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
        struct list_head group_entry;
        struct list_head event_entry;
        struct list_head sibling_list;
        struct hlist_node hlist_entry;
        int nr_siblings;
        int group_flags;
        struct perf_event *group_leader;
        struct pmu *pmu;

        enum perf_event_active_state state;
        unsigned int attach_state;
        local64_t count;
        atomic64_t child_count;

        /*
         * These are the total time in nanoseconds that the event
         * has been enabled (i.e. eligible to run, and the task has
         * been scheduled in, if this is a per-task event)
         * and running (scheduled onto the CPU), respectively.
         *
         * They are computed from tstamp_enabled, tstamp_running and
         * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
         */
        u64 total_time_enabled;
        u64 total_time_running;

        /*
         * These are timestamps used for computing total_time_enabled
         * and total_time_running when the event is in INACTIVE or
         * ACTIVE state, measured in nanoseconds from an arbitrary point
         * in time.
         * tstamp_enabled: the notional time when the event was enabled
         * tstamp_running: the notional time when the event was scheduled on
         * tstamp_stopped: in INACTIVE state, the notional time when the
         *      event was scheduled off.
         */
        u64 tstamp_enabled;
        u64 tstamp_running;
        u64 tstamp_stopped;

        /*
         * timestamp shadows the actual context timing but it can
         * be safely used in NMI interrupt context. It reflects the
         * context time as it was when the event was last scheduled in.
         *
         * ctx_time already accounts for ctx->timestamp. Therefore to
         * compute ctx_time for a sample, simply add perf_clock().
         */
        u64 shadow_ctx_time;

        struct perf_event_attr attr;
        u16 header_size;
        u16 id_header_size;
        u16 read_size;
        struct hw_perf_event hw;

        struct perf_event_context *ctx;
        struct file *filp;

        /*
         * These accumulate total time (in nanoseconds) that children
         * events have been enabled and running, respectively.
         */
        atomic64_t child_total_time_enabled;
        atomic64_t child_total_time_running;

        /*
         * Protect attach/detach and child_list:
         */
        struct mutex child_mutex;
        struct list_head child_list;
        struct perf_event *parent;

        int oncpu;
        int cpu;

        struct list_head owner_entry;
        struct task_struct *owner;

        /* mmap bits */
        struct mutex mmap_mutex;
        atomic_t mmap_count;
        int mmap_locked;
        struct user_struct *mmap_user;
        struct ring_buffer *rb;
        struct list_head rb_entry;

        /* poll related */
        wait_queue_head_t waitq;
        struct fasync_struct *fasync;

        /* delayed work for NMIs and such */
        int pending_wakeup;
        int pending_kill;
        int pending_disable;
        struct irq_work pending;

        atomic_t event_limit;

        void (*destroy)(struct perf_event *);
        struct rcu_head rcu_head;

        struct pid_namespace *ns;
        u64 id;

        perf_overflow_handler_t overflow_handler;
        void *overflow_handler_context;

#ifdef CONFIG_EVENT_TRACING
        struct ftrace_event_call *tp_event;
        struct event_filter *filter;
#ifdef CONFIG_FUNCTION_TRACER
        struct ftrace_ops ftrace_ops;
#endif
#endif

#ifdef CONFIG_CGROUP_PERF
        struct perf_cgroup *cgrp; /* cgroup the event is attached to */
        int cgrp_defer_enabled;
#endif

#endif /* CONFIG_PERF_EVENTS */
};

enum perf_event_context_type {
        task_context,
        cpu_context,
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
        struct pmu *pmu;
        enum perf_event_context_type type;
        /*
         * Protect the states of the events in the list,
         * nr_active, and the list:
         */
        raw_spinlock_t lock;
        /*
         * Protect the list of events. Locking either mutex or lock
         * is sufficient to ensure the list doesn't change; to change
         * the list you need to lock both the mutex and the spinlock.
         */
        struct mutex mutex;

        struct list_head pinned_groups;
        struct list_head flexible_groups;
        struct list_head event_list;
        int nr_events;
        int nr_active;
        int is_active;
        int nr_stat;
        int nr_freq;
        int rotate_disable;
        atomic_t refcount;
        struct task_struct *task;

        /*
         * Context clock, runs when context enabled.
         */
        u64 time;
        u64 timestamp;

        /*
         * These fields let us detect when two contexts have both
         * been cloned (inherited) from a common ancestor.
         */
        struct perf_event_context *parent_ctx;
        u64 parent_gen;
        u64 generation;
        int pin_count;
        int nr_cgroups;      /* cgroup evts */
        int nr_branch_stack; /* branch_stack evt */
        struct rcu_head rcu_head;
};

/*
 * Number of contexts where an event can trigger:
 *    task, softirq, hardirq, nmi.
 */
#define PERF_NR_CONTEXTS 4
/**
 * struct perf_cpu_context - per cpu event context structure
 */
struct perf_cpu_context {
        struct perf_event_context ctx;
        struct perf_event_context *task_ctx;
        int active_oncpu;
        int exclusive;
        struct list_head rotation_list;
        int jiffies_interval;
        struct pmu *active_pmu;
        struct perf_cgroup *cgrp;
};

struct perf_output_handle {
        struct perf_event *event;
        struct ring_buffer *rb;
        unsigned long wakeup;
        unsigned long size;
        void *addr;
        int page;
};

#ifdef CONFIG_PERF_EVENTS

extern int perf_pmu_register(struct pmu *pmu, char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);

extern int perf_num_counters(void);
extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *prev,
                                       struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *prev,
                                        struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
                                 int cpu,
                                 struct task_struct *task,
                                 perf_overflow_handler_t callback,
                                 void *context);
extern void perf_pmu_migrate_context(struct pmu *pmu,
                                     int src_cpu, int dst_cpu);
extern u64 perf_event_read_value(struct perf_event *event,
                                 u64 *enabled, u64 *running);

struct perf_sample_data {
        u64 type;

        u64 ip;
        struct {
                u32 pid;
                u32 tid;
        } tid_entry;
        u64 time;
        u64 addr;
        u64 id;
        u64 stream_id;
        struct {
                u32 cpu;
                u32 reserved;
        } cpu_entry;
        u64 period;
        struct perf_callchain_entry *callchain;
        struct perf_raw_record *raw;
        struct perf_branch_stack *br_stack;
        struct perf_regs_user regs_user;
};

static inline void perf_sample_data_init(struct perf_sample_data *data,
                                         u64 addr, u64 period)
{
        /* remaining struct members initialized in perf_prepare_sample() */
        data->addr = addr;
        data->raw = NULL;
        data->br_stack = NULL;
        data->period = period;
        data->regs_user.abi = PERF_SAMPLE_REGS_ABI_NONE;
        data->regs_user.regs = NULL;
}

extern void perf_output_sample(struct perf_output_handle *handle,
                               struct perf_event_header *header,
                               struct perf_sample_data *data,
                               struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
                                struct perf_sample_data *data,
                                struct perf_event *event,
                                struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event,
                               struct perf_sample_data *data,
                               struct pt_regs *regs);

static inline bool is_sampling_event(struct perf_event *event)
{
        return event->attr.sample_period != 0;
}

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
        return event->pmu->task_ctx_nr == perf_sw_context;
}

extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);

#ifndef perf_arch_fetch_caller_regs
static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif

/*
 * Take a snapshot of the regs. Skip ip and frame pointer to
 * the nth caller. We only need a few of the regs:
 * - ip for PERF_SAMPLE_IP
 * - cs for user_mode() tests
 * - bp for callchains
 * - eflags, for future purposes, just in case
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
        memset(regs, 0, sizeof(*regs));

        perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}

static __always_inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
        struct pt_regs hot_regs;

        if (static_key_false(&perf_swevent_enabled[event_id])) {
                if (!regs) {
                        perf_fetch_caller_regs(&hot_regs);
                        regs = &hot_regs;
                }
                __perf_sw_event(event_id, nr, regs, addr);
        }
}

extern struct static_key_deferred perf_sched_events;

static inline void perf_event_task_sched_in(struct task_struct *prev,
                                            struct task_struct *task)
{
        if (static_key_false(&perf_sched_events.key))
                __perf_event_task_sched_in(prev, task);
}

static inline void perf_event_task_sched_out(struct task_struct *prev,
                                             struct task_struct *next)
{
        perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);

        if (static_key_false(&perf_sched_events.key))
                __perf_event_task_sched_out(prev, next);
}

extern void perf_event_mmap(struct vm_area_struct *vma);
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);

extern void perf_event_comm(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);

/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);

static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
        if (entry->nr < PERF_MAX_STACK_DEPTH)
                entry->ip[entry->nr++] = ip;
}

extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;

extern int perf_proc_update_handler(struct ctl_table *table, int write,
                                    void __user *buffer, size_t *lenp,
                                    loff_t *ppos);

static inline bool perf_paranoid_tracepoint_raw(void)
{
        return sysctl_perf_event_paranoid > -1;
}

static inline bool perf_paranoid_cpu(void)
{
        return sysctl_perf_event_paranoid > 0;
}

static inline bool perf_paranoid_kernel(void)
{
        return sysctl_perf_event_paranoid > 1;
}

extern void perf_event_init(void);
extern void perf_tp_event(u64 addr, u64 count, void *record,
                          int entry_size, struct pt_regs *regs,
                          struct hlist_head *head, int rctx,
                          struct task_struct *task);
extern void perf_bp_event(struct perf_event *event, void *data);

#ifndef perf_misc_flags
# define perf_misc_flags(regs) \
                (user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
# define perf_instruction_pointer(regs) instruction_pointer(regs)
#endif

static inline bool has_branch_stack(struct perf_event *event)
{
        return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
}

extern int perf_output_begin(struct perf_output_handle *handle,
                             struct perf_event *event, unsigned int size);
extern void perf_output_end(struct perf_output_handle *handle);
extern unsigned int perf_output_copy(struct perf_output_handle *handle,
                                     const void *buf, unsigned int len);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern void perf_event_task_tick(void);
#else
static inline void
perf_event_task_sched_in(struct task_struct *prev,
                         struct task_struct *task) { }
static inline void
perf_event_task_sched_out(struct task_struct *prev,
                          struct task_struct *next) { }
static inline int perf_event_init_task(struct task_struct *child) { return 0; }
static inline void perf_event_exit_task(struct task_struct *child) { }
static inline void perf_event_free_task(struct task_struct *task) { }
static inline void perf_event_delayed_put(struct task_struct *task) { }
static inline void perf_event_print_debug(void) { }
static inline int perf_event_task_disable(void) { return -EINVAL; }
static inline int perf_event_task_enable(void) { return -EINVAL; }
static inline int perf_event_refresh(struct perf_event *event, int refresh)
{
        return -EINVAL;
}

static inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { }
static inline void
perf_bp_event(struct perf_event *event, void *data) { }

static inline int perf_register_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks) { return 0; }
static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks) { return 0; }

static inline void perf_event_mmap(struct vm_area_struct *vma) { }
static inline void perf_event_comm(struct task_struct *tsk) { }
static inline void perf_event_fork(struct task_struct *tsk) { }
static inline void perf_event_init(void) { }
static inline int perf_swevent_get_recursion_context(void) { return -1; }
static inline void perf_swevent_put_recursion_context(int rctx) { }
static inline void perf_event_enable(struct perf_event *event) { }
static inline void perf_event_disable(struct perf_event *event) { }
static inline void perf_event_task_tick(void) { }
#endif

#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))

/*
 * This has to have a higher priority than migration_notifier in sched.c.
 */
#define perf_cpu_notifier(fn) \
do { \
        static struct notifier_block fn##_nb __cpuinitdata = \
                { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
        fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \
                (void *)(unsigned long)smp_processor_id()); \
        fn(&fn##_nb, (unsigned long)CPU_STARTING, \
                (void *)(unsigned long)smp_processor_id()); \
        fn(&fn##_nb, (unsigned long)CPU_ONLINE, \
                (void *)(unsigned long)smp_processor_id()); \
        register_cpu_notifier(&fn##_nb); \
} while (0)

#define PMU_FORMAT_ATTR(_name, _format) \
static ssize_t \
_name##_show(struct device *dev, \
             struct device_attribute *attr, \
             char *page) \
{ \
        BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
        return sprintf(page, _format "\n"); \
} \
\
static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
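
/*
 * Typical use by a PMU driver, describing which attr.config bits a
 * format field occupies (the x86 core PMU defines fields along these
 * lines; the attribute array name here is hypothetical):
 *
 *    PMU_FORMAT_ATTR(event, "config:0-7");
 *    PMU_FORMAT_ATTR(umask, "config:8-15");
 *
 *    static struct attribute *my_format_attrs[] = {
 *            &format_attr_event.attr,
 *            &format_attr_umask.attr,
 *            NULL,
 *    };
 */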

#endif /* __KERNEL__ */
#endif /* _LINUX_PERF_EVENT_H */