/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * attr.type
 */
enum perf_type_id {
	PERF_TYPE_HARDWARE   = 0,
	PERF_TYPE_SOFTWARE   = 1,
	PERF_TYPE_TRACEPOINT = 2,
	PERF_TYPE_HW_CACHE   = 3,
	PERF_TYPE_RAW        = 4,
	PERF_TYPE_BREAKPOINT = 5,

	PERF_TYPE_MAX,	/* non-ABI */
};

/*
 * Generalized performance event event_id types, used by the
 * attr.event_id parameter of the sys_perf_event_open()
 * syscall:
 */
enum perf_hw_id {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_HW_CPU_CYCLES              = 0,
	PERF_COUNT_HW_INSTRUCTIONS            = 1,
	PERF_COUNT_HW_CACHE_REFERENCES        = 2,
	PERF_COUNT_HW_CACHE_MISSES            = 3,
	PERF_COUNT_HW_BRANCH_INSTRUCTIONS     = 4,
	PERF_COUNT_HW_BRANCH_MISSES           = 5,
	PERF_COUNT_HW_BUS_CYCLES              = 6,
	PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7,
	PERF_COUNT_HW_STALLED_CYCLES_BACKEND  = 8,
	PERF_COUNT_HW_REF_CPU_CYCLES          = 9,

	PERF_COUNT_HW_MAX,	/* non-ABI */
};

/*
 * Generalized hardware cache events:
 *
 *	{ L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
 *	{ read, write, prefetch } x
 *	{ accesses, misses }
 */
enum perf_hw_cache_id {
	PERF_COUNT_HW_CACHE_L1D  = 0,
	PERF_COUNT_HW_CACHE_L1I  = 1,
	PERF_COUNT_HW_CACHE_LL   = 2,
	PERF_COUNT_HW_CACHE_DTLB = 3,
	PERF_COUNT_HW_CACHE_ITLB = 4,
	PERF_COUNT_HW_CACHE_BPU  = 5,
	PERF_COUNT_HW_CACHE_NODE = 6,

	PERF_COUNT_HW_CACHE_MAX,	/* non-ABI */
};

enum perf_hw_cache_op_id {
	PERF_COUNT_HW_CACHE_OP_READ     = 0,
	PERF_COUNT_HW_CACHE_OP_WRITE    = 1,
	PERF_COUNT_HW_CACHE_OP_PREFETCH = 2,

	PERF_COUNT_HW_CACHE_OP_MAX,	/* non-ABI */
};

enum perf_hw_cache_op_result_id {
	PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0,
	PERF_COUNT_HW_CACHE_RESULT_MISS   = 1,

	PERF_COUNT_HW_CACHE_RESULT_MAX,	/* non-ABI */
};

/*
 * Special "software" events provided by the kernel, even if the hardware
 * does not support performance events. These events measure various
 * physical and sw events of the kernel (and allow the profiling of them as
 * well):
 */
enum perf_sw_ids {
	PERF_COUNT_SW_CPU_CLOCK        = 0,
	PERF_COUNT_SW_TASK_CLOCK       = 1,
	PERF_COUNT_SW_PAGE_FAULTS      = 2,
	PERF_COUNT_SW_CONTEXT_SWITCHES = 3,
	PERF_COUNT_SW_CPU_MIGRATIONS   = 4,
	PERF_COUNT_SW_PAGE_FAULTS_MIN  = 5,
	PERF_COUNT_SW_PAGE_FAULTS_MAJ  = 6,
	PERF_COUNT_SW_ALIGNMENT_FAULTS = 7,
	PERF_COUNT_SW_EMULATION_FAULTS = 8,

	PERF_COUNT_SW_MAX,	/* non-ABI */
};

/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_event_sample_format {
	PERF_SAMPLE_IP           = 1U << 0,
	PERF_SAMPLE_TID          = 1U << 1,
	PERF_SAMPLE_TIME         = 1U << 2,
	PERF_SAMPLE_ADDR         = 1U << 3,
	PERF_SAMPLE_READ         = 1U << 4,
	PERF_SAMPLE_CALLCHAIN    = 1U << 5,
	PERF_SAMPLE_ID           = 1U << 6,
	PERF_SAMPLE_CPU          = 1U << 7,
	PERF_SAMPLE_PERIOD       = 1U << 8,
	PERF_SAMPLE_STREAM_ID    = 1U << 9,
	PERF_SAMPLE_RAW          = 1U << 10,
	PERF_SAMPLE_BRANCH_STACK = 1U << 11,

	PERF_SAMPLE_MAX          = 1U << 12,	/* non-ABI */
};

/*
 * Values to program into branch_sample_type when PERF_SAMPLE_BRANCH_STACK
 * is set.
 *
 * If the user does not pass priv level information via branch_sample_type,
 * the kernel uses the event's priv level. Branch and event priv levels do
 * not have to match. Branch priv level is checked for permissions.
 *
 * The branch types can be combined, however BRANCH_ANY covers all types
 * of branches and therefore it supersedes all the other types.
 */
enum perf_branch_sample_type {
	PERF_SAMPLE_BRANCH_USER       = 1U << 0, /* user branches */
	PERF_SAMPLE_BRANCH_KERNEL     = 1U << 1, /* kernel branches */
	PERF_SAMPLE_BRANCH_HV         = 1U << 2, /* hypervisor branches */

	PERF_SAMPLE_BRANCH_ANY        = 1U << 3, /* any branch types */
	PERF_SAMPLE_BRANCH_ANY_CALL   = 1U << 4, /* any call branch */
	PERF_SAMPLE_BRANCH_ANY_RETURN = 1U << 5, /* any return branch */
	PERF_SAMPLE_BRANCH_IND_CALL   = 1U << 6, /* indirect calls */

	PERF_SAMPLE_BRANCH_MAX        = 1U << 7, /* non-ABI */
};

#define PERF_SAMPLE_BRANCH_PLM_ALL \
	(PERF_SAMPLE_BRANCH_USER|\
	 PERF_SAMPLE_BRANCH_KERNEL|\
	 PERF_SAMPLE_BRANCH_HV)

/*
 * The format of the data returned by read() on a perf event fd,
 * as specified by attr.read_format:
 *
 * struct read_format {
 *	{ u64	value;
 *	  { u64	time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64	time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64	id;           } && PERF_FORMAT_ID
 *	} && !PERF_FORMAT_GROUP
 *
 *	{ u64	nr;
 *	  { u64	time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64	time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64	value;
 *	    { u64 id;         } && PERF_FORMAT_ID
 *	  } cntr[nr];
 *	} && PERF_FORMAT_GROUP
 * };
 */
enum perf_event_read_format {
	PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0,
	PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
	PERF_FORMAT_ID                 = 1U << 2,
	PERF_FORMAT_GROUP              = 1U << 3,

	PERF_FORMAT_MAX                = 1U << 4,	/* non-ABI */
};
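
/*
 * A minimal user-space sketch of consuming this layout for a single
 * (non-group) event, assuming the fd was opened with read_format set to
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING |
 * PERF_FORMAT_ID (field order follows the struct above):
 *
 *	struct {
 *		__u64 value;		// event count
 *		__u64 time_enabled;	// ns the event was enabled
 *		__u64 time_running;	// ns the event was on the PMU
 *		__u64 id;		// kernel-assigned event id
 *	} rf;
 *
 *	if (read(fd, &rf, sizeof(rf)) == sizeof(rf) && rf.time_running)
 *		// scale for multiplexing: value * enabled / running
 *		scaled = rf.value * rf.time_enabled / rf.time_running;
 */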

#define PERF_ATTR_SIZE_VER0	64	/* sizeof first published struct */

/*
 * Hardware event_id to monitor via a performance monitoring event:
 */
struct perf_event_attr {

	/*
	 * Major type: hardware/software/tracepoint/etc.
	 */
	__u32	type;

	/*
	 * Size of the attr structure, for fwd/bwd compat.
	 */
	__u32	size;

	/*
	 * Type specific configuration information.
	 */
	__u64	config;

	union {
		__u64	sample_period;
		__u64	sample_freq;
	};

	__u64	sample_type;
	__u64	read_format;

	__u64	disabled       : 1,	/* off by default */
		inherit        : 1,	/* children inherit it */
		pinned         : 1,	/* must always be on PMU */
		exclusive      : 1,	/* only group on PMU */
		exclude_user   : 1,	/* don't count user */
		exclude_kernel : 1,	/* ditto kernel */
		exclude_hv     : 1,	/* ditto hypervisor */
		exclude_idle   : 1,	/* don't count when idle */
		mmap           : 1,	/* include mmap data */
		comm           : 1,	/* include comm data */
		freq           : 1,	/* use freq, not period */
		inherit_stat   : 1,	/* per task counts */
		enable_on_exec : 1,	/* next exec enables */
		task           : 1,	/* trace fork/exit */
		watermark      : 1,	/* wakeup_watermark */
		/*
		 * precise_ip:
		 *
		 *  0 - SAMPLE_IP can have arbitrary skid
		 *  1 - SAMPLE_IP must have constant skid
		 *  2 - SAMPLE_IP requested to have 0 skid
		 *  3 - SAMPLE_IP must have 0 skid
		 *
		 *  See also PERF_RECORD_MISC_EXACT_IP
		 */
		precise_ip     : 2,	/* skid constraint */
		mmap_data      : 1,	/* non-exec mmap data */
		sample_id_all  : 1,	/* sample_type all events */
		exclude_host   : 1,	/* don't count in host */
		exclude_guest  : 1,	/* don't count in guest */

		__reserved_1   : 43;

	union {
		__u32	wakeup_events;		/* wakeup every n events */
		__u32	wakeup_watermark;	/* bytes before wakeup */
	};

	__u32	bp_type;
	union {
		__u64	bp_addr;
		__u64	config1;	/* extension of config */
	};
	union {
		__u64	bp_len;
		__u64	config2;	/* extension of config1 */
	};
	__u64	branch_sample_type;	/* enum branch_sample_type */
};

/*
 * Ioctls that can be done on a perf event fd:
 */
#define PERF_EVENT_IOC_ENABLE		_IO ('$', 0)
#define PERF_EVENT_IOC_DISABLE		_IO ('$', 1)
#define PERF_EVENT_IOC_REFRESH		_IO ('$', 2)
#define PERF_EVENT_IOC_RESET		_IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD		_IOW('$', 4, __u64)
#define PERF_EVENT_IOC_SET_OUTPUT	_IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER	_IOW('$', 6, char *)

enum perf_event_ioc_flags {
	PERF_IOC_FLAG_GROUP	= 1U << 0,
};
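
/*
 * A minimal user-space sketch of filling perf_event_attr and using these
 * ioctls to count instructions in the current task, assuming the usual
 * user-space headers (<linux/perf_event.h>, <unistd.h>, <sys/ioctl.h>,
 * <sys/syscall.h>, <string.h>); there is no glibc wrapper for the syscall:
 *
 *	struct perf_event_attr attr;
 *	long long count;
 *	int fd;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.type           = PERF_TYPE_HARDWARE;
 *	attr.size           = sizeof(attr);
 *	attr.config         = PERF_COUNT_HW_INSTRUCTIONS;
 *	attr.disabled       = 1;
 *	attr.exclude_kernel = 1;
 *	attr.exclude_hv     = 1;
 *
 *	// pid = 0 (this task), cpu = -1 (any cpu), no group, no flags
 *	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *	...				// workload being measured
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *	read(fd, &count, sizeof(count));
 */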

/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_event_mmap_page {
	__u32	version;		/* version number of this structure */
	__u32	compat_version;		/* lowest version this is compat with */

	/*
	 * Bits needed to read the hw events in user-space.
	 *
	 *   u32 seq;
	 *   s64 count;
	 *
	 *   do {
	 *     seq = pc->lock;
	 *
	 *     barrier()
	 *     if (pc->index) {
	 *       count = pmc_read(pc->index - 1);
	 *       count += pc->offset;
	 *     } else
	 *       goto regular_read;
	 *
	 *     barrier();
	 *   } while (pc->lock != seq);
	 *
	 * NOTE: for obvious reason this only works on self-monitoring
	 *       processes.
	 */
	__u32	lock;			/* seqlock for synchronization */
	__u32	index;			/* hardware event identifier */
	__s64	offset;			/* add to hardware event value */
	__u64	time_enabled;		/* time event active */
	__u64	time_running;		/* time event on cpu */
	__u32	time_mult, time_shift;
	__u64	time_offset;

	/*
	 * Hole for extension of the self monitor capabilities
	 */
	__u64	__reserved[121];	/* align to 1k */

	/*
	 * Control data for the mmap() data buffer.
	 *
	 * User-space reading the @data_head value should issue an rmb(), on
	 * SMP capable platforms, after reading this value -- see
	 * perf_event_wakeup().
	 *
	 * When the mapping is PROT_WRITE the @data_tail value should be
	 * written by userspace to reflect the last read data. In this case
	 * the kernel will not over-write unread data.
	 */
	__u64	data_head;		/* head in the data section */
	__u64	data_tail;		/* user-space written tail */
};
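
/*
 * A minimal user-space sketch of draining the ring buffer behind this page,
 * assuming 1 + 2^n pages were mapped PROT_READ|PROT_WRITE so data_tail is
 * honoured. Here mmap_base, page_size and buf_size (the power-of-two size
 * of the data area) describe the mapping; records that wrap around the end
 * of the buffer are ignored for brevity, and the exact read barrier is
 * architecture specific:
 *
 *	struct perf_event_mmap_page *pc = mmap_base;
 *	char *data = (char *)mmap_base + page_size;
 *	__u64 head, tail = pc->data_tail;
 *
 *	head = pc->data_head;
 *	rmb();			// order the data reads after data_head
 *
 *	while (tail < head) {
 *		struct perf_event_header *hdr =
 *			(void *)(data + (tail & (buf_size - 1)));
 *		// decode hdr->type / hdr->size, see enum perf_event_type
 *		tail += hdr->size;
 *	}
 *
 *	pc->data_tail = tail;	// tell the kernel this data was consumed
 */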

#define PERF_RECORD_MISC_CPUMODE_MASK		(7 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
#define PERF_RECORD_MISC_KERNEL			(1 << 0)
#define PERF_RECORD_MISC_USER			(2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
#define PERF_RECORD_MISC_GUEST_KERNEL		(4 << 0)
#define PERF_RECORD_MISC_GUEST_USER		(5 << 0)

/*
 * Indicates that the content of PERF_SAMPLE_IP points to
 * the actual instruction that triggered the event. See also
 * perf_event_attr::precise_ip.
 */
#define PERF_RECORD_MISC_EXACT_IP		(1 << 14)

/*
 * Reserve the last bit to indicate some extended misc field
 */
#define PERF_RECORD_MISC_EXT_RESERVED		(1 << 15)

struct perf_event_header {
	__u32	type;
	__u16	misc;
	__u16	size;
};

enum perf_event_type {

	/*
	 * If perf_event_attr.sample_id_all is set then all event types will
	 * carry the sample_type-selected identity fields describing
	 * where/when the event took place (TID, TIME, ID, CPU, STREAM_ID),
	 * as described for PERF_RECORD_SAMPLE below. They are stashed just
	 * after the perf_event_header and the fields already defined for
	 * the record type, i.e. at the end of the payload. That way a newer
	 * perf.data file will be supported by older perf tools, with these
	 * new optional fields being ignored.
	 *
	 * The MMAP events record the PROT_EXEC mappings so that we can
	 * correlate userspace IPs to code. They have the following structure:
	 *
	 * struct {
	 *	struct perf_event_header header;
	 *
	 *	u32	pid, tid;
	 *	u64	addr;
	 *	u64	len;
	 *	u64	pgoff;
	 *	char	filename[];
	 * };
	 */
	PERF_RECORD_MMAP	= 1,

	/*
	 * struct {
	 *	struct perf_event_header header;
	 *	u64	id;
	 *	u64	lost;
	 * };
	 */
	PERF_RECORD_LOST	= 2,

	/*
	 * struct {
	 *	struct perf_event_header header;
	 *
	 *	u32	pid, tid;
	 *	char	comm[];
	 * };
	 */
	PERF_RECORD_COMM	= 3,

	/*
	 * struct {
	 *	struct perf_event_header header;
	 *	u32	pid, ppid;
	 *	u32	tid, ptid;
	 *	u64	time;
	 * };
	 */
	PERF_RECORD_EXIT	= 4,

	/*
	 * struct {
	 *	struct perf_event_header header;
	 *	u64	time;
	 *	u64	id;
	 *	u64	stream_id;
	 * };
	 */
	PERF_RECORD_THROTTLE	= 5,
	PERF_RECORD_UNTHROTTLE	= 6,

	/*
	 * struct {
	 *	struct perf_event_header header;
	 *	u32	pid, ppid;
	 *	u32	tid, ptid;
	 *	u64	time;
	 * };
	 */
	PERF_RECORD_FORK	= 7,

	/*
	 * struct {
	 *	struct perf_event_header header;
	 *	u32	pid, tid;
	 *
	 *	struct read_format values;
	 * };
	 */
	PERF_RECORD_READ	= 8,

	/*
	 * struct {
	 *	struct perf_event_header header;
	 *
	 *	{ u64	ip;	  } && PERF_SAMPLE_IP
	 *	{ u32	pid, tid; } && PERF_SAMPLE_TID
	 *	{ u64	time;     } && PERF_SAMPLE_TIME
	 *	{ u64	addr;     } && PERF_SAMPLE_ADDR
	 *	{ u64	id;	  } && PERF_SAMPLE_ID
	 *	{ u64	stream_id;} && PERF_SAMPLE_STREAM_ID
	 *	{ u32	cpu, res; } && PERF_SAMPLE_CPU
	 *	{ u64	period;   } && PERF_SAMPLE_PERIOD
	 *
	 *	{ struct read_format values; } && PERF_SAMPLE_READ
	 *
	 *	{ u64	nr,
	 *	  u64	ips[nr];  } && PERF_SAMPLE_CALLCHAIN
	 *
	 *	#
	 *	# The RAW record below is opaque data wrt the ABI
	 *	#
	 *	# That is, the ABI doesn't make any promises wrt to
	 *	# the stability of its content, it may vary depending
	 *	# on event, hardware, kernel version and phase of
	 *	# the moon.
	 *	#
	 *	# In other words, PERF_SAMPLE_RAW contents are not an ABI.
	 *	#
	 *
	 *	{ u32	size;
	 *	  char	data[size]; } && PERF_SAMPLE_RAW
	 *
	 *	{ u64 from, to, flags } lbr[nr]; } && PERF_SAMPLE_BRANCH_STACK
	 * };
	 */
	PERF_RECORD_SAMPLE	= 9,

	PERF_RECORD_MAX,	/* non-ABI */
};

enum perf_callchain_context {
	PERF_CONTEXT_HV			= (__u64)-32,
	PERF_CONTEXT_KERNEL		= (__u64)-128,
	PERF_CONTEXT_USER		= (__u64)-512,

	PERF_CONTEXT_GUEST		= (__u64)-2048,
	PERF_CONTEXT_GUEST_KERNEL	= (__u64)-2176,
	PERF_CONTEXT_GUEST_USER		= (__u64)-2560,

	PERF_CONTEXT_MAX		= (__u64)-4095,
};

#define PERF_FLAG_FD_NO_GROUP	(1U << 0)
#define PERF_FLAG_FD_OUTPUT	(1U << 1)
#define PERF_FLAG_PID_CGROUP	(1U << 2)	/* pid=cgroup id, per-cpu mode only */

#ifdef __KERNEL__
/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <linux/cgroup.h>
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif

struct perf_guest_info_callbacks {
	int		(*is_in_guest)(void);
	int		(*is_user_mode)(void);
	unsigned long	(*get_guest_ip)(void);
};

#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/static_key.h>
#include <linux/atomic.h>
#include <asm/local.h>

#define PERF_MAX_STACK_DEPTH	255

struct perf_callchain_entry {
	__u64	nr;
	__u64	ip[PERF_MAX_STACK_DEPTH];
};

struct perf_raw_record {
	u32	size;
	void	*data;
};

/*
 * single taken branch record layout:
 *
 *      from: source instruction (may not always be a branch insn)
 *        to: branch target
 *   mispred: branch target was mispredicted
 * predicted: branch target was predicted
 *
 * support for mispred, predicted is optional. In case it
 * is not supported mispred = predicted = 0.
 */
struct perf_branch_entry {
	__u64	from;
	__u64	to;
	__u64	mispred:1,	/* target mispredicted */
		predicted:1,	/* target predicted */
		reserved:62;
};

/*
 * branch stack layout:
 *  nr: number of taken branches stored in entries[]
 *
 * Note that nr can vary from sample to sample
 * branches (to, from) are stored from most recent
 * to least recent, i.e., entries[0] contains the most
 * recent branch.
 */
struct perf_branch_stack {
	__u64				nr;
	struct perf_branch_entry	entries[0];
};

struct task_struct;

/*
 * extra PMU register associated with an event
 */
struct hw_perf_event_extra {
	u64		config;	/* register value */
	unsigned int	reg;	/* register address or index */
	int		alloc;	/* extra register already allocated */
	int		idx;	/* index in shared_regs->regs[] */
};

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
	union {
		struct { /* hardware */
			u64		config;
			u64		last_tag;
			unsigned long	config_base;
			unsigned long	event_base;
			int		idx;
			int		last_cpu;

			struct hw_perf_event_extra extra_reg;
			struct hw_perf_event_extra branch_reg;
		};
		struct { /* software */
			struct hrtimer	hrtimer;
		};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		struct { /* breakpoint */
			struct arch_hw_breakpoint	info;
			struct list_head		bp_list;
			/*
			 * Crufty hack to avoid the chicken and egg
			 * problem hw_breakpoint has with context
			 * creation and event initialization.
			 */
			struct task_struct		*bp_target;
		};
#endif
	};
	int		state;
	local64_t	prev_count;
	u64		sample_period;
	u64		last_period;
	local64_t	period_left;
	u64		interrupts_seq;
	u64		interrupts;

	u64		freq_time_stamp;
	u64		freq_count_stamp;
#endif
};

/*
 * hw_perf_event::state flags
 */
#define PERF_HES_STOPPED	0x01	/* the counter is stopped */
#define PERF_HES_UPTODATE	0x02	/* event->count up-to-date */
#define PERF_HES_ARCH		0x04

struct perf_event;

/*
 * Common implementation detail of pmu::{start,commit,cancel}_txn
 */
#define PERF_EVENT_TXN 0x1

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
	struct list_head		entry;

	struct device			*dev;
	const struct attribute_group	**attr_groups;
	char				*name;
	int				type;

	int * __percpu			pmu_disable_count;
	struct perf_cpu_context * __percpu pmu_cpu_context;
	int				task_ctx_nr;

	/*
	 * Fully disable/enable this PMU, can be used to protect from the PMI
	 * as well as for lazy/batch writing of the MSRs.
	 */
	void (*pmu_enable)	(struct pmu *pmu);	/* optional */
	void (*pmu_disable)	(struct pmu *pmu);	/* optional */

	/*
	 * Try and initialize the event for this PMU.
	 * Should return -ENOENT when the @event doesn't match this PMU.
	 */
	int (*event_init)	(struct perf_event *event);

#define PERF_EF_START	0x01	/* start the counter when adding */
#define PERF_EF_RELOAD	0x02	/* reload the counter when starting */
#define PERF_EF_UPDATE	0x04	/* update the counter when stopping */

	/*
	 * Adds/Removes a counter to/from the PMU, can be done inside
	 * a transaction, see the ->*_txn() methods.
	 */
	int  (*add)		(struct perf_event *event, int flags);
	void (*del)		(struct perf_event *event, int flags);

	/*
	 * Starts/Stops a counter present on the PMU. The PMI handler
	 * should stop the counter when perf_event_overflow() returns
	 * !0. ->start() will be used to continue.
	 */
	void (*start)		(struct perf_event *event, int flags);
	void (*stop)		(struct perf_event *event, int flags);

	/*
	 * Updates the counter value of the event.
	 */
	void (*read)		(struct perf_event *event);

	/*
	 * Group events scheduling is treated as a transaction: add the
	 * group events as a whole and perform one schedulability test.
	 * If the test fails, roll back the whole group.
	 *
	 * Start the transaction; after this, ->add() doesn't need to
	 * do schedulability tests.
	 */
	void (*start_txn)	(struct pmu *pmu);	/* optional */
	/*
	 * If ->start_txn() disabled the ->add() schedulability test
	 * then ->commit_txn() is required to perform one. On success
	 * the transaction is closed. On error the transaction is kept
	 * open until ->cancel_txn() is called.
	 */
	int  (*commit_txn)	(struct pmu *pmu);	/* optional */
	/*
	 * Will cancel the transaction, assumes ->del() is called
	 * for each successful ->add() during the transaction.
	 */
	void (*cancel_txn)	(struct pmu *pmu);	/* optional */

	/*
	 * Will return the value for perf_event_mmap_page::index for this event,
	 * if no implementation is provided it will default to: event->hw.idx + 1.
	 */
	int (*event_idx)	(struct perf_event *event);	/* optional */
};
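
/*
 * A rough sketch of how the core uses the transaction callbacks when
 * scheduling an event group (the real logic lives in group_sched_in() in
 * kernel/events/core.c; this only illustrates the ordering):
 *
 *	pmu->start_txn(pmu);
 *
 *	for each event in the group:
 *		if (pmu->add(event, PERF_EF_START))
 *			goto error;
 *
 *	if (!pmu->commit_txn(pmu))
 *		return success;
 * error:
 *	for each event already added:
 *		pmu->del(event, 0);
 *	pmu->cancel_txn(pmu);
 */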

/**
 * enum perf_event_active_state - the states of an event
 */
enum perf_event_active_state {
	PERF_EVENT_STATE_ERROR		= -2,
	PERF_EVENT_STATE_OFF		= -1,
	PERF_EVENT_STATE_INACTIVE	=  0,
	PERF_EVENT_STATE_ACTIVE		=  1,
};

struct file;

struct perf_sample_data;

typedef void (*perf_overflow_handler_t)(struct perf_event *,
					struct perf_sample_data *,
					struct pt_regs *regs);

enum perf_group_flag {
	PERF_GROUP_SOFTWARE	= 0x1,
};

#define SWEVENT_HLIST_BITS	8
#define SWEVENT_HLIST_SIZE	(1 << SWEVENT_HLIST_BITS)

struct swevent_hlist {
	struct hlist_head	heads[SWEVENT_HLIST_SIZE];
	struct rcu_head		rcu_head;
};

#define PERF_ATTACH_CONTEXT	0x01
#define PERF_ATTACH_GROUP	0x02
#define PERF_ATTACH_TASK	0x04

#ifdef CONFIG_CGROUP_PERF
/*
 * perf_cgroup_info keeps track of time_enabled for a cgroup.
 * This is a per-cpu dynamically allocated data structure.
 */
struct perf_cgroup_info {
	u64	time;
	u64	timestamp;
};

struct perf_cgroup {
	struct cgroup_subsys_state	css;
	struct perf_cgroup_info		*info;	/* timing info, one per cpu */
};
#endif

struct ring_buffer;

/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
	struct list_head		group_entry;
	struct list_head		event_entry;
	struct list_head		sibling_list;
	struct hlist_node		hlist_entry;
	int				nr_siblings;
	int				group_flags;
	struct perf_event		*group_leader;
	struct pmu			*pmu;

	enum perf_event_active_state	state;
	unsigned int			attach_state;
	local64_t			count;
	atomic64_t			child_count;

	/*
	 * These are the total time in nanoseconds that the event
	 * has been enabled (i.e. eligible to run, and the task has
	 * been scheduled in, if this is a per-task event)
	 * and running (scheduled onto the CPU), respectively.
	 *
	 * They are computed from tstamp_enabled, tstamp_running and
	 * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
	 */
	u64				total_time_enabled;
	u64				total_time_running;

	/*
	 * These are timestamps used for computing total_time_enabled
	 * and total_time_running when the event is in INACTIVE or
	 * ACTIVE state, measured in nanoseconds from an arbitrary point
	 * in time.
	 * tstamp_enabled: the notional time when the event was enabled
	 * tstamp_running: the notional time when the event was scheduled on
	 * tstamp_stopped: in INACTIVE state, the notional time when the
	 *	event was scheduled off.
	 */
	u64				tstamp_enabled;
	u64				tstamp_running;
	u64				tstamp_stopped;

	/*
	 * timestamp shadows the actual context timing but it can
	 * be safely used in NMI interrupt context. It reflects the
	 * context time as it was when the event was last scheduled in.
	 *
	 * ctx_time already accounts for ctx->timestamp. Therefore to
	 * compute ctx_time for a sample, simply add perf_clock().
	 */
	u64				shadow_ctx_time;

	struct perf_event_attr		attr;
	u16				header_size;
	u16				id_header_size;
	u16				read_size;
	struct hw_perf_event		hw;

	struct perf_event_context	*ctx;
	struct file			*filp;

	/*
	 * These accumulate total time (in nanoseconds) that children
	 * events have been enabled and running, respectively.
	 */
	atomic64_t			child_total_time_enabled;
	atomic64_t			child_total_time_running;

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex			child_mutex;
	struct list_head		child_list;
	struct perf_event		*parent;

	int				oncpu;
	int				cpu;

	struct list_head		owner_entry;
	struct task_struct		*owner;

	/* mmap bits */
	struct mutex			mmap_mutex;
	atomic_t			mmap_count;
	int				mmap_locked;
	struct user_struct		*mmap_user;
	struct ring_buffer		*rb;
	struct list_head		rb_entry;

	/* poll related */
	wait_queue_head_t		waitq;
	struct fasync_struct		*fasync;

	/* delayed work for NMIs and such */
	int				pending_wakeup;
	int				pending_kill;
	int				pending_disable;
	struct irq_work			pending;

	atomic_t			event_limit;

	void (*destroy)(struct perf_event *);
	struct rcu_head			rcu_head;

	struct pid_namespace		*ns;
	u64				id;

	perf_overflow_handler_t		overflow_handler;
	void				*overflow_handler_context;

#ifdef CONFIG_EVENT_TRACING
	struct ftrace_event_call	*tp_event;
	struct event_filter		*filter;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops		ftrace_ops;
#endif
#endif

#ifdef CONFIG_CGROUP_PERF
	struct perf_cgroup		*cgrp;	/* cgroup event is attached to */
	int				cgrp_defer_enabled;
#endif

#endif /* CONFIG_PERF_EVENTS */
};

enum perf_event_context_type {
	task_context,
	cpu_context,
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
	struct pmu			*pmu;
	enum perf_event_context_type	type;
	/*
	 * Protect the states of the events in the list,
	 * nr_active, and the list:
	 */
	raw_spinlock_t			lock;
	/*
	 * Protect the list of events. Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex			mutex;

	struct list_head		pinned_groups;
	struct list_head		flexible_groups;
	struct list_head		event_list;
	int				nr_events;
	int				nr_active;
	int				is_active;
	int				nr_stat;
	int				nr_freq;
	int				rotate_disable;
	atomic_t			refcount;
	struct task_struct		*task;

	/*
	 * Context clock, runs when context enabled.
	 */
	u64				time;
	u64				timestamp;

	/*
	 * These fields let us detect when two contexts have both
	 * been cloned (inherited) from a common ancestor.
	 */
	struct perf_event_context	*parent_ctx;
	u64				parent_gen;
	u64				generation;
	int				pin_count;
	int				nr_cgroups;	/* cgroup events present */
	struct rcu_head			rcu_head;
};

/*
 * Number of contexts where an event can trigger:
 *	task, softirq, hardirq, nmi.
 */
#define PERF_NR_CONTEXTS	4

/**
 * struct perf_cpu_context - per cpu event context structure
 */
struct perf_cpu_context {
	struct perf_event_context	ctx;
	struct perf_event_context	*task_ctx;
	int				active_oncpu;
	int				exclusive;
	struct list_head		rotation_list;
	int				jiffies_interval;
	struct pmu			*active_pmu;
	struct perf_cgroup		*cgrp;
};

struct perf_output_handle {
	struct perf_event		*event;
	struct ring_buffer		*rb;
	unsigned long			wakeup;
	unsigned long			size;
	void				*addr;
	int				page;
};

#ifdef CONFIG_PERF_EVENTS

extern int perf_pmu_register(struct pmu *pmu, char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);

extern int perf_num_counters(void);
extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *prev,
				       struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *prev,
					struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
				 int cpu,
				 struct task_struct *task,
				 perf_overflow_handler_t callback,
				 void *context);
extern u64 perf_event_read_value(struct perf_event *event,
				 u64 *enabled, u64 *running);
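
/*
 * A minimal sketch of in-kernel usage, loosely modelled on users such as
 * the hard-lockup watchdog: create a cycles counter on a CPU, have it call
 * back on overflow, read it and release it. Names like wd_attr and
 * my_overflow_handler are purely illustrative:
 *
 *	static struct perf_event_attr wd_attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.config		= PERF_COUNT_HW_CPU_CYCLES,
 *		.size		= sizeof(struct perf_event_attr),
 *		.pinned		= 1,
 *		.disabled	= 1,
 *		.sample_period	= 1000000,
 *	};
 *
 *	event = perf_event_create_kernel_counter(&wd_attr, cpu, NULL,
 *						 my_overflow_handler, NULL);
 *	if (!IS_ERR(event)) {
 *		perf_event_enable(event);
 *		...
 *		count = perf_event_read_value(event, &enabled, &running);
 *		perf_event_release_kernel(event);
 *	}
 */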

struct perf_sample_data {
	u64				type;

	u64				ip;
	struct {
		u32	pid;
		u32	tid;
	}				tid_entry;
	u64				time;
	u64				addr;
	u64				id;
	u64				stream_id;
	struct {
		u32	cpu;
		u32	reserved;
	}				cpu_entry;
	u64				period;
	struct perf_callchain_entry	*callchain;
	struct perf_raw_record		*raw;
	struct perf_branch_stack	*br_stack;
};

static inline void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
{
	data->addr = addr;
	data->raw  = NULL;
	data->br_stack = NULL;
}

extern void perf_output_sample(struct perf_output_handle *handle,
			       struct perf_event_header *header,
			       struct perf_sample_data *data,
			       struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_event *event,
				struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event,
			       struct perf_sample_data *data,
			       struct pt_regs *regs);
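
/*
 * A rough sketch of how a PMU driver's interrupt handler typically feeds
 * a sample into the core; the details vary per driver, this only
 * illustrates the perf_sample_data_init()/perf_event_overflow() pairing:
 *
 *	struct perf_sample_data data;
 *
 *	perf_sample_data_init(&data, 0);
 *	data.period = event->hw.last_period;
 *
 *	if (perf_event_overflow(event, &data, regs))
 *		// the event hit its limit; stop it on the PMU
 *		event->pmu->stop(event, 0);
 */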

static inline bool is_sampling_event(struct perf_event *event)
{
	return event->attr.sample_period != 0;
}

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
	return event->pmu->task_ctx_nr == perf_sw_context;
}

extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);

#ifndef perf_arch_fetch_caller_regs
static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif

/*
 * Take a snapshot of the regs. Skip ip and frame pointer to
 * the nth caller. We only need a few of the regs:
 * - ip for PERF_SAMPLE_IP
 * - cs for user_mode() tests
 * - bp for callchains
 * - eflags, for future purposes, just in case
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(*regs));

	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}

static __always_inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
	struct pt_regs hot_regs;

	if (static_key_false(&perf_swevent_enabled[event_id])) {
		if (!regs) {
			perf_fetch_caller_regs(&hot_regs);
			regs = &hot_regs;
		}
		__perf_sw_event(event_id, nr, regs, addr);
	}
}
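
/*
 * Call sites elsewhere in the kernel use this to count software events;
 * for example, the alignment-fault fixup paths on several architectures
 * do roughly:
 *
 *	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address);
 *
 * i.e. one occurrence, attributed to the faulting regs and address. When
 * no events of that type exist, the static key keeps the call a NOP.
 */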

extern struct static_key_deferred perf_sched_events;

static inline void perf_event_task_sched_in(struct task_struct *prev,
					    struct task_struct *task)
{
	if (static_key_false(&perf_sched_events.key))
		__perf_event_task_sched_in(prev, task);
}

static inline void perf_event_task_sched_out(struct task_struct *prev,
					     struct task_struct *next)
{
	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);

	if (static_key_false(&perf_sched_events.key))
		__perf_event_task_sched_out(prev, next);
}

extern void perf_event_mmap(struct vm_area_struct *vma);
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);

extern void perf_event_comm(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);

/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);

static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
	if (entry->nr < PERF_MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}

extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;

extern int perf_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);

static inline bool perf_paranoid_tracepoint_raw(void)
{
	return sysctl_perf_event_paranoid > -1;
}

static inline bool perf_paranoid_cpu(void)
{
	return sysctl_perf_event_paranoid > 0;
}

static inline bool perf_paranoid_kernel(void)
{
	return sysctl_perf_event_paranoid > 1;
}

extern void perf_event_init(void);
extern void perf_tp_event(u64 addr, u64 count, void *record,
			  int entry_size, struct pt_regs *regs,
			  struct hlist_head *head, int rctx);
extern void perf_bp_event(struct perf_event *event, void *data);

#ifndef perf_misc_flags
# define perf_misc_flags(regs) \
		(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
# define perf_instruction_pointer(regs)	instruction_pointer(regs)
#endif

static inline bool has_branch_stack(struct perf_event *event)
{
	return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
}

extern int perf_output_begin(struct perf_output_handle *handle,
			     struct perf_event *event, unsigned int size);
extern void perf_output_end(struct perf_output_handle *handle);
extern void perf_output_copy(struct perf_output_handle *handle,
			     const void *buf, unsigned int len);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern void perf_event_task_tick(void);
#else
static inline void
perf_event_task_sched_in(struct task_struct *prev,
			 struct task_struct *task)			{ }
static inline void
perf_event_task_sched_out(struct task_struct *prev,
			  struct task_struct *next)			{ }
static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
static inline void perf_event_exit_task(struct task_struct *child)	{ }
static inline void perf_event_free_task(struct task_struct *task)	{ }
static inline void perf_event_delayed_put(struct task_struct *task)	{ }
static inline void perf_event_print_debug(void)			{ }
static inline int perf_event_task_disable(void)			{ return -EINVAL; }
static inline int perf_event_task_enable(void)				{ return -EINVAL; }
static inline int perf_event_refresh(struct perf_event *event, int refresh)
{
	return -EINVAL;
}

static inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)	{ }
static inline void
perf_bp_event(struct perf_event *event, void *data)			{ }

static inline int perf_register_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }
static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }

static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
static inline void perf_event_comm(struct task_struct *tsk)		{ }
static inline void perf_event_fork(struct task_struct *tsk)		{ }
static inline void perf_event_init(void)				{ }
static inline int  perf_swevent_get_recursion_context(void)		{ return -1; }
static inline void perf_swevent_put_recursion_context(int rctx)	{ }
static inline void perf_event_enable(struct perf_event *event)		{ }
static inline void perf_event_disable(struct perf_event *event)	{ }
static inline void perf_event_task_tick(void)				{ }
#endif

#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))

/*
 * This has to have a higher priority than migration_notifier in sched.c.
 */
#define perf_cpu_notifier(fn)						\
do {									\
	static struct notifier_block fn##_nb __cpuinitdata =		\
		{ .notifier_call = fn, .priority = CPU_PRI_PERF };	\
	fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,			\
		(void *)(unsigned long)smp_processor_id());		\
	fn(&fn##_nb, (unsigned long)CPU_STARTING,			\
		(void *)(unsigned long)smp_processor_id());		\
	fn(&fn##_nb, (unsigned long)CPU_ONLINE,				\
		(void *)(unsigned long)smp_processor_id());		\
	register_cpu_notifier(&fn##_nb);				\
} while (0)

#endif /* __KERNEL__ */
#endif /* _LINUX_PERF_EVENT_H */