perf_counter.c
/*
 * Performance counter x86 architecture code
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * For licencing details see kernel-base/COPYING
 */

#include <linux/perf_counter.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>

static u64 perf_counter_mask __read_mostly;

struct cpu_hw_counters {
	struct perf_counter	*counters[X86_PMC_IDX_MAX];
	unsigned long		used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		interrupts;
	int			enabled;
};

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *, int);
	void		(*disable_all)(void);
	void		(*enable_all)(void);
	void		(*enable)(struct hw_perf_counter *, int);
	void		(*disable)(struct hw_perf_counter *, int);
	unsigned	eventsel;
	unsigned	perfctr;
	u64		(*event_map)(int);
	u64		(*raw_event)(u64);
	int		max_events;
	int		num_counters;
	int		num_counters_fixed;
	int		counter_bits;
	u64		counter_mask;
	u64		max_period;
	u64		intel_ctrl;
};

static struct x86_pmu x86_pmu __read_mostly;

static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = {
	.enabled = 1,
};

/*
 * Intel PerfMon v3. Used on Core2 and later.
 */
static const u64 intel_perfmon_event_map[] =
{
	[PERF_COUNT_CPU_CYCLES]			= 0x003c,
	[PERF_COUNT_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_CACHE_REFERENCES]		= 0x4f2e,
	[PERF_COUNT_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_BUS_CYCLES]			= 0x013c,
};

static u64 intel_pmu_event_map(int event)
{
	return intel_perfmon_event_map[event];
}

static u64 intel_pmu_raw_event(u64 event)
{
#define CORE_EVNTSEL_EVENT_MASK		0x000000FFULL
#define CORE_EVNTSEL_UNIT_MASK		0x0000FF00ULL
#define CORE_EVNTSEL_COUNTER_MASK	0xFF000000ULL

#define CORE_EVNTSEL_MASK		\
	(CORE_EVNTSEL_EVENT_MASK |	\
	 CORE_EVNTSEL_UNIT_MASK  |	\
	 CORE_EVNTSEL_COUNTER_MASK)

	return event & CORE_EVNTSEL_MASK;
}
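
/*
 * Note: for raw hw_events only the event-select, unit-mask and
 * counter-mask fields of the user-supplied config pass through the
 * mask above; control bits such as USR/OS/INT/ENABLE are always
 * supplied by the kernel in __hw_perf_counter_init().
 */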
/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[] =
{
	[PERF_COUNT_CPU_CYCLES]			= 0x0076,
	[PERF_COUNT_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_CACHE_REFERENCES]		= 0x0080,
	[PERF_COUNT_CACHE_MISSES]		= 0x0081,
	[PERF_COUNT_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_BRANCH_MISSES]		= 0x00c5,
};

static u64 amd_pmu_event_map(int event)
{
	return amd_perfmon_event_map[event];
}

static u64 amd_pmu_raw_event(u64 event)
{
#define K7_EVNTSEL_EVENT_MASK	0x7000000FFULL
#define K7_EVNTSEL_UNIT_MASK	0x00000FF00ULL
#define K7_EVNTSEL_COUNTER_MASK	0x0FF000000ULL

#define K7_EVNTSEL_MASK			\
	(K7_EVNTSEL_EVENT_MASK |	\
	 K7_EVNTSEL_UNIT_MASK  |	\
	 K7_EVNTSEL_COUNTER_MASK)

	return event & K7_EVNTSEL_MASK;
}

/*
 * Propagate counter elapsed time into the generic counter.
 * Can only be executed on the CPU where the counter is active.
 * Returns the delta events processed.
 */
static u64
x86_perf_counter_update(struct perf_counter *counter,
			struct hw_perf_counter *hwc, int idx)
{
	int shift = 64 - x86_pmu.counter_bits;
	u64 prev_raw_count, new_raw_count;
	s64 delta;

	/*
	 * Careful: an NMI might modify the previous counter value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic counter atomically:
	 */
again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	rdmsrl(hwc->counter_base + idx, new_raw_count);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
					new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (counter-)time and add that to the generic counter.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
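
/*
 * Example of the shift trick above: with 48-bit counters, shift == 16.
 * Shifting both raw values into the top 48 bits of a 64-bit word makes
 * the subtraction wrap naturally, and the arithmetic shift back yields
 * a correctly signed delta even if the hardware counter wrapped or
 * does not sign-extend above its physical width.
 */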
static atomic_t active_counters;
static DEFINE_MUTEX(pmc_reserve_mutex);

static bool reserve_pmc_hardware(void)
{
	int i;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		disable_lapic_nmi_watchdog();

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
			goto perfctr_fail;
	}

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
			goto eventsel_fail;
	}

	return true;

eventsel_fail:
	for (i--; i >= 0; i--)
		release_evntsel_nmi(x86_pmu.eventsel + i);

	i = x86_pmu.num_counters;

perfctr_fail:
	for (i--; i >= 0; i--)
		release_perfctr_nmi(x86_pmu.perfctr + i);

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();

	return false;
}

static void release_pmc_hardware(void)
{
	int i;

	for (i = 0; i < x86_pmu.num_counters; i++) {
		release_perfctr_nmi(x86_pmu.perfctr + i);
		release_evntsel_nmi(x86_pmu.eventsel + i);
	}

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();
}

static void hw_perf_counter_destroy(struct perf_counter *counter)
{
	if (atomic_dec_and_mutex_lock(&active_counters, &pmc_reserve_mutex)) {
		release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

static inline int x86_pmu_initialized(void)
{
	return x86_pmu.handle_irq != NULL;
}

/*
 * Setup the hardware configuration for a given hw_event_type
 */
static int __hw_perf_counter_init(struct perf_counter *counter)
{
	struct perf_counter_hw_event *hw_event = &counter->hw_event;
	struct hw_perf_counter *hwc = &counter->hw;
	int err;

	if (!x86_pmu_initialized())
		return -ENODEV;

	err = 0;
	if (!atomic_inc_not_zero(&active_counters)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&active_counters) == 0 && !reserve_pmc_hardware())
			err = -EBUSY;
		else
			atomic_inc(&active_counters);
		mutex_unlock(&pmc_reserve_mutex);
	}
	if (err)
		return err;

	/*
	 * Generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	hwc->config = ARCH_PERFMON_EVENTSEL_INT;

	/*
	 * Count user and OS events unless requested not to.
	 */
	if (!hw_event->exclude_user)
		hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
	if (!hw_event->exclude_kernel)
		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;

	/*
	 * If privileged enough, allow NMI events:
	 */
	hwc->nmi = 0;
	if (hw_event->nmi) {
		if (sysctl_perf_counter_priv && !capable(CAP_SYS_ADMIN))
			return -EACCES;
		hwc->nmi = 1;
	}

	if (!hwc->irq_period)
		hwc->irq_period = x86_pmu.max_period;

	atomic64_set(&hwc->period_left,
			min(x86_pmu.max_period, hwc->irq_period));

	/*
	 * Raw event type provide the config in the event structure
	 */
	if (perf_event_raw(hw_event)) {
		hwc->config |= x86_pmu.raw_event(perf_event_config(hw_event));
	} else {
		if (perf_event_id(hw_event) >= x86_pmu.max_events)
			return -EINVAL;
		/*
		 * The generic map:
		 */
		hwc->config |= x86_pmu.event_map(perf_event_id(hw_event));
	}

	counter->destroy = hw_perf_counter_destroy;

	return 0;
}

static void intel_pmu_disable_all(void)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
}

static void amd_pmu_disable_all(void)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	int idx;

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	/*
	 * ensure we write the disable before we start disabling the
	 * counters proper, so that amd_pmu_enable_counter() does the
	 * right thing.
	 */
	barrier();

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
		if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
			continue;
		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
	}
}

void hw_perf_disable(void)
{
	if (!x86_pmu_initialized())
		return;
	return x86_pmu.disable_all();
}

static void intel_pmu_enable_all(void)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
}

static void amd_pmu_enable_all(void)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	int idx;

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
			continue;
		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
	}
}

void hw_perf_enable(void)
{
	if (!x86_pmu_initialized())
		return;
	x86_pmu.enable_all();
}

static inline u64 intel_pmu_get_status(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}

static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
	int err;
	err = checking_wrmsrl(hwc->config_base + idx,
			      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
}

static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
	int err;
	err = checking_wrmsrl(hwc->config_base + idx,
			      hwc->config);
}

static inline void
intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx)
{
	int idx = __idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, mask;
	int err;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	err = checking_wrmsrl(hwc->config_base, ctrl_val);
}

static inline void
intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_disable_fixed(hwc, idx);
		return;
	}

	x86_pmu_disable_counter(hwc, idx);
}

static inline void
amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
	x86_pmu_disable_counter(hwc, idx);
}
static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the counter disabled in hw:
 */
static void
x86_perf_counter_set_period(struct perf_counter *counter,
			    struct hw_perf_counter *hwc, int idx)
{
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = min(x86_pmu.max_period, hwc->irq_period);
	int err;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
	}

	/*
	 * Quirk: certain CPUs don't like it if just 1 event is left:
	 */
	if (unlikely(left < 2))
		left = 2;

	per_cpu(prev_left[idx], smp_processor_id()) = left;

	/*
	 * The hw counter starts counting from this counter offset,
	 * mark it to be able to extract future deltas:
	 */
	atomic64_set(&hwc->prev_count, (u64)-left);

	err = checking_wrmsrl(hwc->counter_base + idx,
			      (u64)(-left) & x86_pmu.counter_mask);
}
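
/*
 * Programming the counter with (u64)-left means it overflows, and
 * raises a PMI, after 'left' more events; prev_count is set to the
 * same value so the next x86_perf_counter_update() computes its
 * delta from this starting point.
 */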
static inline void
intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx)
{
	int idx = __idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;
	int err;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;
	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	err = checking_wrmsrl(hwc->config_base, ctrl_val);
}

static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_enable_fixed(hwc, idx);
		return;
	}

	x86_pmu_enable_counter(hwc, idx);
}

static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);

	if (cpuc->enabled)
		x86_pmu_enable_counter(hwc, idx);
	else
		x86_pmu_disable_counter(hwc, idx);
}

static int
fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
{
	unsigned int event;

	if (!x86_pmu.num_counters_fixed)
		return -1;

	if (unlikely(hwc->nmi))
		return -1;

	event = hwc->config & ARCH_PERFMON_EVENT_MASK;

	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_INSTRUCTIONS)))
		return X86_PMC_IDX_FIXED_INSTRUCTIONS;
	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_CPU_CYCLES)))
		return X86_PMC_IDX_FIXED_CPU_CYCLES;
	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_BUS_CYCLES)))
		return X86_PMC_IDX_FIXED_BUS_CYCLES;

	return -1;
}
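
/*
 * The three architectural fixed-function counters count retired
 * instructions, core cycles and bus (reference) cycles.  Events that
 * match one of those, and that do not request NMI delivery, are
 * steered onto a fixed counter here so the generic PMCs stay free.
 */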
/*
 * Find a PMC slot for the freshly enabled / scheduled in counter:
 */
static int x86_pmu_enable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	int idx;

	idx = fixed_mode_idx(counter, hwc);
	if (idx >= 0) {
		/*
		 * Try to get the fixed counter, if that is already taken
		 * then try to get a generic counter:
		 */
		if (test_and_set_bit(idx, cpuc->used_mask))
			goto try_generic;

		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		/*
		 * We set it so that counter_base + idx in wrmsr/rdmsr maps to
		 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
		 */
		hwc->counter_base =
			MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
		hwc->idx = idx;
	} else {
		idx = hwc->idx;
		/* Try to get the previous generic counter again */
		if (test_and_set_bit(idx, cpuc->used_mask)) {
try_generic:
			idx = find_first_zero_bit(cpuc->used_mask,
						  x86_pmu.num_counters);
			if (idx == x86_pmu.num_counters)
				return -EAGAIN;

			set_bit(idx, cpuc->used_mask);
			hwc->idx = idx;
		}
		hwc->config_base = x86_pmu.eventsel;
		hwc->counter_base = x86_pmu.perfctr;
	}

	perf_counters_lapic_init(hwc->nmi);

	x86_pmu.disable(hwc, idx);

	cpuc->counters[idx] = counter;
	set_bit(idx, cpuc->active_mask);

	x86_perf_counter_set_period(counter, hwc, idx);
	x86_pmu.enable(hwc, idx);

	return 0;
}

void perf_counter_print_debug(void)
{
	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
	struct cpu_hw_counters *cpuc;
	unsigned long flags;
	int cpu, idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_counters, cpu);

	if (x86_pmu.version >= 2) {
		rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
		rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
		rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);

		pr_info("\n");
		pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl);
		pr_info("CPU#%d: status: %016llx\n", cpu, status);
		pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
		pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
	}
	pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used_mask);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
		rdmsrl(x86_pmu.perfctr + idx, pmc_count);

		prev_left = per_cpu(prev_left[idx], cpu);

		pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
			cpu, idx, pmc_ctrl);
		pr_info("CPU#%d: gen-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
		pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
			cpu, idx, prev_left);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
	}
	local_irq_restore(flags);
}

static void x86_pmu_disable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;

	/*
	 * Must be done before we disable, otherwise the nmi handler
	 * could reenable again:
	 */
	clear_bit(idx, cpuc->active_mask);
	x86_pmu.disable(hwc, idx);

	/*
	 * Make sure the cleared pointer becomes visible before we
	 * (potentially) free the counter:
	 */
	barrier();

	/*
	 * Drain the remaining delta count out of a counter
	 * that we are disabling:
	 */
	x86_perf_counter_update(counter, hwc, idx);
	cpuc->counters[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);
}

/*
 * Save and restart an expired counter. Called by NMI contexts,
 * so it has to be careful about preempting normal counter ops:
 */
static void intel_pmu_save_and_restart(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;

	x86_perf_counter_update(counter, hwc, idx);
	x86_perf_counter_set_period(counter, hwc, idx);

	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
		intel_pmu_enable_counter(hwc, idx);
}

/*
 * Maximum interrupt frequency of 100KHz per CPU
 */
#define PERFMON_MAX_INTERRUPTS (100000/HZ)
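
/*
 * With HZ == 1000 the budget above is 100 PMU interrupts per tick and
 * per CPU.  Once a handler has used it up it stops re-enabling the
 * PMU; the count is cleared again in perf_counter_unthrottle(), which
 * the generic perf_counter code is expected to call periodically
 * (roughly once per tick).
 */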
/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
{
	struct cpu_hw_counters *cpuc;
	int bit, cpu, loops;
	u64 ack, status;

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_counters, cpu);

	perf_disable();
	status = intel_pmu_get_status();
	if (!status) {
		perf_enable();
		return 0;
	}

	loops = 0;
again:
	if (++loops > 100) {
		WARN_ONCE(1, "perfcounters: irq loop stuck!\n");
		return 1;
	}

	inc_irq_stat(apic_perf_irqs);
	ack = status;
	for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_counter *counter = cpuc->counters[bit];

		clear_bit(bit, (unsigned long *) &status);
		if (!test_bit(bit, cpuc->active_mask))
			continue;

		intel_pmu_save_and_restart(counter);
		if (perf_counter_overflow(counter, nmi, regs, 0))
			intel_pmu_disable_counter(&counter->hw, bit);
	}

	intel_pmu_ack_status(ack);

	/*
	 * Repeat if there is more work to be done:
	 */
	status = intel_pmu_get_status();
	if (status)
		goto again;

	if (++cpuc->interrupts != PERFMON_MAX_INTERRUPTS)
		perf_enable();

	return 1;
}
static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
{
	int cpu, idx, throttle = 0, handled = 0;
	struct cpu_hw_counters *cpuc;
	struct perf_counter *counter;
	struct hw_perf_counter *hwc;
	u64 val;

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_counters, cpu);

	if (++cpuc->interrupts == PERFMON_MAX_INTERRUPTS) {
		throttle = 1;
		__perf_disable();
		cpuc->enabled = 0;
		barrier();
	}

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		int disable = 0;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		counter = cpuc->counters[idx];
		hwc = &counter->hw;

		if (counter->hw_event.nmi != nmi)
			goto next;

		val = x86_perf_counter_update(counter, hwc, idx);
		if (val & (1ULL << (x86_pmu.counter_bits - 1)))
			goto next;

		/* counter overflow */
		x86_perf_counter_set_period(counter, hwc, idx);
		handled = 1;
		inc_irq_stat(apic_perf_irqs);
		disable = perf_counter_overflow(counter, nmi, regs, 0);

next:
		if (disable || throttle)
			amd_pmu_disable_counter(hwc, idx);
	}

	return handled;
}

void perf_counter_unthrottle(void)
{
	struct cpu_hw_counters *cpuc;

	if (!x86_pmu_initialized())
		return;

	cpuc = &__get_cpu_var(cpu_hw_counters);
	if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS) {
		/*
		 * Clear them before re-enabling irqs/NMIs again:
		 */
		cpuc->interrupts = 0;
		perf_enable();
	} else {
		cpuc->interrupts = 0;
	}
}

void smp_perf_counter_interrupt(struct pt_regs *regs)
{
	irq_enter();
	apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
	ack_APIC_irq();
	x86_pmu.handle_irq(regs, 0);
	irq_exit();
}

void smp_perf_pending_interrupt(struct pt_regs *regs)
{
	irq_enter();
	ack_APIC_irq();
	inc_irq_stat(apic_pending_irqs);
	perf_counter_do_pending();
	irq_exit();
}

void set_perf_counter_pending(void)
{
	apic->send_IPI_self(LOCAL_PENDING_VECTOR);
}

void perf_counters_lapic_init(int nmi)
{
	u32 apic_val;

	if (!x86_pmu_initialized())
		return;

	/*
	 * Enable the performance counter vector in the APIC LVT:
	 */
	apic_val = apic_read(APIC_LVTERR);

	apic_write(APIC_LVTERR, apic_val | APIC_LVT_MASKED);
	if (nmi)
		apic_write(APIC_LVTPC, APIC_DM_NMI);
	else
		apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
	apic_write(APIC_LVTERR, apic_val);
}
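
/*
 * Note: the error LVT entry is temporarily masked above while LVTPC is
 * reprogrammed, which appears to be there to avoid a spurious APIC
 * error interrupt while the LVT entry changes.
 */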
static int __kprobes
perf_counter_nmi_handler(struct notifier_block *self,
			 unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct pt_regs *regs;

	if (!atomic_read(&active_counters))
		return NOTIFY_DONE;

	switch (cmd) {
	case DIE_NMI:
	case DIE_NMI_IPI:
		break;

	default:
		return NOTIFY_DONE;
	}

	regs = args->regs;

	apic_write(APIC_LVTPC, APIC_DM_NMI);
	/*
	 * Can't rely on the handled return value to say it was our NMI, two
	 * counters could trigger 'simultaneously' raising two back-to-back NMIs.
	 *
	 * If the first NMI handles both, the latter will be empty and daze
	 * the CPU.
	 */
	x86_pmu.handle_irq(regs, 1);

	return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
	.notifier_call	= perf_counter_nmi_handler,
	.next		= NULL,
	.priority	= 1
};

static struct x86_pmu intel_pmu = {
	.name		= "Intel",
	.handle_irq	= intel_pmu_handle_irq,
	.disable_all	= intel_pmu_disable_all,
	.enable_all	= intel_pmu_enable_all,
	.enable		= intel_pmu_enable_counter,
	.disable	= intel_pmu_disable_counter,
	.eventsel	= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr	= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map	= intel_pmu_event_map,
	.raw_event	= intel_pmu_raw_event,
	.max_events	= ARRAY_SIZE(intel_perfmon_event_map),
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic counter period:
	 */
	.max_period	= (1ULL << 31) - 1,
};

static struct x86_pmu amd_pmu = {
	.name		= "AMD",
	.handle_irq	= amd_pmu_handle_irq,
	.disable_all	= amd_pmu_disable_all,
	.enable_all	= amd_pmu_enable_all,
	.enable		= amd_pmu_enable_counter,
	.disable	= amd_pmu_disable_counter,
	.eventsel	= MSR_K7_EVNTSEL0,
	.perfctr	= MSR_K7_PERFCTR0,
	.event_map	= amd_pmu_event_map,
	.raw_event	= amd_pmu_raw_event,
	.max_events	= ARRAY_SIZE(amd_perfmon_event_map),
	.num_counters	= 4,
	.counter_bits	= 48,
	.counter_mask	= (1ULL << 48) - 1,
	/* use highest bit to detect overflow */
	.max_period	= (1ULL << 47) - 1,
};

static int intel_pmu_init(void)
{
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	unsigned int unused;
	unsigned int ebx;
	int version;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
		return -ENODEV;

	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired Event or not.
	 */
	cpuid(10, &eax.full, &ebx, &unused, &edx.full);
	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
		return -ENODEV;

	version = eax.split.version_id;
	if (version < 2)
		return -ENODEV;

	x86_pmu = intel_pmu;
	x86_pmu.version = version;
	x86_pmu.num_counters = eax.split.num_counters;

	/*
	 * Quirk: v2 perfmon does not report fixed-purpose counters, so
	 * assume at least 3 counters:
	 */
	x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);

	x86_pmu.counter_bits = eax.split.bit_width;
	x86_pmu.counter_mask = (1ULL << eax.split.bit_width) - 1;

	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);

	return 0;
}

static int amd_pmu_init(void)
{
	x86_pmu = amd_pmu;
	return 0;
}

void __init init_hw_perf_counters(void)
{
	int err;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		err = intel_pmu_init();
		break;
	case X86_VENDOR_AMD:
		err = amd_pmu_init();
		break;
	default:
		return;
	}
	if (err != 0)
		return;

	pr_info("%s Performance Monitoring support detected.\n", x86_pmu.name);
	pr_info("... version: %d\n", x86_pmu.version);
	pr_info("... bit width: %d\n", x86_pmu.counter_bits);
	pr_info("... num counters: %d\n", x86_pmu.num_counters);

	if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
		x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
		WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
		     x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
	}
	perf_counter_mask = (1 << x86_pmu.num_counters) - 1;
	perf_max_counters = x86_pmu.num_counters;

	pr_info("... value mask: %016Lx\n", x86_pmu.counter_mask);
	pr_info("... max period: %016Lx\n", x86_pmu.max_period);

	if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
		x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
		WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!",
		     x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
	}
	pr_info("... fixed counters: %d\n", x86_pmu.num_counters_fixed);

	perf_counter_mask |=
		((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;

	pr_info("... counter mask: %016Lx\n", perf_counter_mask);

	perf_counters_lapic_init(0);
	register_die_notifier(&perf_counter_nmi_notifier);
}

static inline void x86_pmu_read(struct perf_counter *counter)
{
	x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
}

static const struct pmu pmu = {
	.enable		= x86_pmu_enable,
	.disable	= x86_pmu_disable,
	.read		= x86_pmu_read,
};

const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
{
	int err;

	err = __hw_perf_counter_init(counter);
	if (err)
		return ERR_PTR(err);

	return &pmu;
}

/*
 * callchain support
 */

static inline
void callchain_store(struct perf_callchain_entry *entry, unsigned long ip)
{
	if (entry->nr < MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}

static DEFINE_PER_CPU(struct perf_callchain_entry, irq_entry);
static DEFINE_PER_CPU(struct perf_callchain_entry, nmi_entry);

static void
backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	/* Ignore warnings */
}

static void backtrace_warning(void *data, char *msg)
{
	/* Ignore warnings */
}

static int backtrace_stack(void *data, char *name)
{
	/* Don't bother with IRQ stacks for now */
	return -1;
}

static void backtrace_address(void *data, unsigned long addr, int reliable)
{
	struct perf_callchain_entry *entry = data;

	if (reliable)
		callchain_store(entry, addr);
}

static const struct stacktrace_ops backtrace_ops = {
	.warning	= backtrace_warning,
	.warning_symbol	= backtrace_warning_symbol,
	.stack		= backtrace_stack,
	.address	= backtrace_address,
};

static void
perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	unsigned long bp;
	char *stack;
	int nr = entry->nr;

	callchain_store(entry, instruction_pointer(regs));

	stack = ((char *)regs + sizeof(struct pt_regs));
#ifdef CONFIG_FRAME_POINTER
	bp = frame_pointer(regs);
#else
	bp = 0;
#endif

	dump_trace(NULL, regs, (void *)stack, bp, &backtrace_ops, entry);

	entry->kernel = entry->nr - nr;
}

struct stack_frame {
	const void __user	*next_fp;
	unsigned long		return_address;
};

static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
{
	int ret;

	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
		return 0;

	ret = 1;
	pagefault_disable();
	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
		ret = 0;
	pagefault_enable();

	return ret;
}

static void
perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	struct stack_frame frame;
	const void __user *fp;
	int nr = entry->nr;

	regs = (struct pt_regs *)current->thread.sp0 - 1;
	fp = (void __user *)regs->bp;

	callchain_store(entry, regs->ip);

	while (entry->nr < MAX_STACK_DEPTH) {
		frame.next_fp = NULL;
		frame.return_address = 0;

		if (!copy_stack_frame(fp, &frame))
			break;

		if ((unsigned long)fp < user_stack_pointer(regs))
			break;

		callchain_store(entry, frame.return_address);
		fp = frame.next_fp;
	}

	entry->user = entry->nr - nr;
}
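
/*
 * The user-space walk above follows the saved frame-pointer chain, so
 * it only produces useful entries for binaries compiled with frame
 * pointers.  copy_stack_frame() copies each frame with
 * __copy_from_user_inatomic() under pagefault_disable(), so it never
 * sleeps in NMI context; the walk stops at the first unreadable frame
 * or when the frame pointer drops below the user stack pointer.
 */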
static void
perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	int is_user;

	if (!regs)
		return;

	is_user = user_mode(regs);

	if (!current || current->pid == 0)
		return;

	if (is_user && current->state != TASK_RUNNING)
		return;

	if (!is_user)
		perf_callchain_kernel(regs, entry);

	if (current->mm)
		perf_callchain_user(regs, entry);
}

struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
	struct perf_callchain_entry *entry;

	if (in_nmi())
		entry = &__get_cpu_var(nmi_entry);
	else
		entry = &__get_cpu_var(irq_entry);

	entry->nr = 0;
	entry->hv = 0;
	entry->kernel = 0;
	entry->user = 0;

	perf_do_callchain(regs, entry);

	return entry;
}