perf_counter.c

/*
 * Performance counter x86 architecture code
 *
 *  Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2008 Red Hat, Inc., Ingo Molnar
 *  Copyright(C) 2009 Jaswinder Singh Rajput
 *
 *  For licencing details see kernel-base/COPYING
 */

#include <linux/perf_counter.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>

static bool perf_counters_initialized __read_mostly;

/*
 * Number of (generic) HW counters:
 */
static int nr_counters_generic __read_mostly;
static u64 perf_counter_mask __read_mostly;
static u64 counter_value_mask __read_mostly;
static int counter_value_bits __read_mostly;

static int nr_counters_fixed __read_mostly;

struct cpu_hw_counters {
	struct perf_counter	*counters[X86_PMC_IDX_MAX];
	unsigned long		used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		interrupts;
	u64			throttle_ctrl;
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int			enabled;
};

/*
 * struct pmc_x86_ops - performance counter x86 ops
 */
struct pmc_x86_ops {
	u64		(*save_disable_all)(void);
	void		(*restore_all)(u64);
	u64		(*get_status)(u64);
	void		(*ack_status)(u64);
	void		(*enable)(int, u64);
	void		(*disable)(int, u64);
	unsigned	eventsel;
	unsigned	perfctr;
	u64		(*event_map)(int);
	u64		(*raw_event)(u64);
	int		max_events;
};

static struct pmc_x86_ops *pmc_ops __read_mostly;

static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = {
	.enabled = 1,
};

static __read_mostly int intel_perfmon_version;

/*
 * Intel PerfMon v3. Used on Core2 and later.
 */
static const u64 intel_perfmon_event_map[] =
{
  [PERF_COUNT_CPU_CYCLES]		= 0x003c,
  [PERF_COUNT_INSTRUCTIONS]		= 0x00c0,
  [PERF_COUNT_CACHE_REFERENCES]		= 0x4f2e,
  [PERF_COUNT_CACHE_MISSES]		= 0x412e,
  [PERF_COUNT_BRANCH_INSTRUCTIONS]	= 0x00c4,
  [PERF_COUNT_BRANCH_MISSES]		= 0x00c5,
  [PERF_COUNT_BUS_CYCLES]		= 0x013c,
};

static u64 pmc_intel_event_map(int event)
{
	return intel_perfmon_event_map[event];
}

static u64 pmc_intel_raw_event(u64 event)
{
#define CORE_EVNTSEL_EVENT_MASK		0x000000FFULL
#define CORE_EVNTSEL_UNIT_MASK		0x0000FF00ULL
#define CORE_EVNTSEL_COUNTER_MASK	0xFF000000ULL

#define CORE_EVNTSEL_MASK		\
	(CORE_EVNTSEL_EVENT_MASK |	\
	 CORE_EVNTSEL_UNIT_MASK  |	\
	 CORE_EVNTSEL_COUNTER_MASK)

	return event & CORE_EVNTSEL_MASK;
}

/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[] =
{
  [PERF_COUNT_CPU_CYCLES]		= 0x0076,
  [PERF_COUNT_INSTRUCTIONS]		= 0x00c0,
  [PERF_COUNT_CACHE_REFERENCES]		= 0x0080,
  [PERF_COUNT_CACHE_MISSES]		= 0x0081,
  [PERF_COUNT_BRANCH_INSTRUCTIONS]	= 0x00c4,
  [PERF_COUNT_BRANCH_MISSES]		= 0x00c5,
};

static u64 pmc_amd_event_map(int event)
{
	return amd_perfmon_event_map[event];
}

static u64 pmc_amd_raw_event(u64 event)
{
#define K7_EVNTSEL_EVENT_MASK	0x7000000FFULL
#define K7_EVNTSEL_UNIT_MASK	0x00000FF00ULL
#define K7_EVNTSEL_COUNTER_MASK	0x0FF000000ULL

#define K7_EVNTSEL_MASK			\
	(K7_EVNTSEL_EVENT_MASK |	\
	 K7_EVNTSEL_UNIT_MASK  |	\
	 K7_EVNTSEL_COUNTER_MASK)

	return event & K7_EVNTSEL_MASK;
}

/*
 * Propagate counter elapsed time into the generic counter.
 * Can only be executed on the CPU where the counter is active.
 * Returns the delta events processed.
 */
static void
x86_perf_counter_update(struct perf_counter *counter,
			struct hw_perf_counter *hwc, int idx)
{
	u64 prev_raw_count, new_raw_count, delta;

	/*
	 * Careful: an NMI might modify the previous counter value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic counter atomically:
	 */
again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	rdmsrl(hwc->counter_base + idx, new_raw_count);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
					new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (counter-)time and add that to the generic counter.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count, so we do that by clipping the delta to 32 bits:
	 */
	delta = (u64)(u32)((s32)new_raw_count - (s32)prev_raw_count);

	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &hwc->period_left);
}

static atomic_t num_counters;
static DEFINE_MUTEX(pmc_reserve_mutex);
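
/*
 * Claim the PERFCTR/EVNTSEL MSRs via the NMI-watchdog reservation
 * machinery (temporarily disabling the lapic NMI watchdog), rolling
 * back any partial reservation on failure:
 */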
static bool reserve_pmc_hardware(void)
{
	int i;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		disable_lapic_nmi_watchdog();

	for (i = 0; i < nr_counters_generic; i++) {
		if (!reserve_perfctr_nmi(pmc_ops->perfctr + i))
			goto perfctr_fail;
	}

	for (i = 0; i < nr_counters_generic; i++) {
		if (!reserve_evntsel_nmi(pmc_ops->eventsel + i))
			goto eventsel_fail;
	}

	return true;

eventsel_fail:
	for (i--; i >= 0; i--)
		release_evntsel_nmi(pmc_ops->eventsel + i);

	i = nr_counters_generic;

perfctr_fail:
	for (i--; i >= 0; i--)
		release_perfctr_nmi(pmc_ops->perfctr + i);

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();

	return false;
}

static void release_pmc_hardware(void)
{
	int i;

	for (i = 0; i < nr_counters_generic; i++) {
		release_perfctr_nmi(pmc_ops->perfctr + i);
		release_evntsel_nmi(pmc_ops->eventsel + i);
	}

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();
}
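
/*
 * Release the PMC hardware again when the last counter goes away:
 */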
static void hw_perf_counter_destroy(struct perf_counter *counter)
{
	if (atomic_dec_and_mutex_lock(&num_counters, &pmc_reserve_mutex)) {
		release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

/*
 * Setup the hardware configuration for a given hw_event_type
 */
static int __hw_perf_counter_init(struct perf_counter *counter)
{
	struct perf_counter_hw_event *hw_event = &counter->hw_event;
	struct hw_perf_counter *hwc = &counter->hw;
	int err;

	if (unlikely(!perf_counters_initialized))
		return -EINVAL;

	err = 0;
	if (!atomic_inc_not_zero(&num_counters)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&num_counters) == 0 && !reserve_pmc_hardware())
			err = -EBUSY;
		else
			atomic_inc(&num_counters);
		mutex_unlock(&pmc_reserve_mutex);
	}
	if (err)
		return err;

	/*
	 * Generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	hwc->config = ARCH_PERFMON_EVENTSEL_INT;

	/*
	 * Count user and OS events unless requested not to.
	 */
	if (!hw_event->exclude_user)
		hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
	if (!hw_event->exclude_kernel)
		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;

	/*
	 * If privileged enough, allow NMI events:
	 */
	hwc->nmi = 0;
	if (capable(CAP_SYS_ADMIN) && hw_event->nmi)
		hwc->nmi = 1;

	hwc->irq_period = hw_event->irq_period;
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic counter period:
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		if ((s64)hwc->irq_period <= 0 || hwc->irq_period > 0x7FFFFFFF)
			hwc->irq_period = 0x7FFFFFFF;

	atomic64_set(&hwc->period_left, hwc->irq_period);

	/*
	 * Raw event type provide the config in the event structure
	 */
	if (perf_event_raw(hw_event)) {
		hwc->config |= pmc_ops->raw_event(perf_event_config(hw_event));
	} else {
		if (perf_event_id(hw_event) >= pmc_ops->max_events)
			return -EINVAL;
		/*
		 * The generic map:
		 */
		hwc->config |= pmc_ops->event_map(perf_event_id(hw_event));
	}

	counter->destroy = hw_perf_counter_destroy;

	return 0;
}
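
/*
 * Intel: disable all counters at once by clearing the global control
 * MSR, returning its previous value so it can be restored later:
 */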
static u64 pmc_intel_save_disable_all(void)
{
	u64 ctrl;

	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	return ctrl;
}
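
/*
 * AMD has no global control MSR, so disable each counter individually
 * by clearing the enable bit in its EVNTSEL:
 */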
static u64 pmc_amd_save_disable_all(void)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	int enabled, idx;

	enabled = cpuc->enabled;
	cpuc->enabled = 0;
	/*
	 * ensure we write the disable before we start disabling the
	 * counters proper, so that pmc_amd_enable() does the right thing.
	 */
	barrier();

	for (idx = 0; idx < nr_counters_generic; idx++) {
		u64 val;

		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE) {
			val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
			wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
		}
	}

	return enabled;
}

u64 hw_perf_save_disable(void)
{
	if (unlikely(!perf_counters_initialized))
		return 0;

	return pmc_ops->save_disable_all();
}
/*
 * Exported because of ACPI idle
 */
EXPORT_SYMBOL_GPL(hw_perf_save_disable);

static void pmc_intel_restore_all(u64 ctrl)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
}

static void pmc_amd_restore_all(u64 ctrl)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	int idx;

	cpuc->enabled = ctrl;
	barrier();
	if (!ctrl)
		return;

	for (idx = 0; idx < nr_counters_generic; idx++) {
		if (test_bit(idx, cpuc->active_mask)) {
			u64 val;

			rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
			val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
			wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
		}
	}
}

void hw_perf_restore(u64 ctrl)
{
	if (unlikely(!perf_counters_initialized))
		return;

	pmc_ops->restore_all(ctrl);
}
/*
 * Exported because of ACPI idle
 */
EXPORT_SYMBOL_GPL(hw_perf_restore);

static u64 pmc_intel_get_status(u64 mask)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}
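
/*
 * AMD has no global overflow-status MSR either: a counter that has
 * overflowed wraps past zero to a small value, so shift its 48-bit
 * count into the sign bit and treat a non-negative result as overflow:
 */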
static u64 pmc_amd_get_status(u64 mask)
{
	u64 status = 0;
	int idx;

	for (idx = 0; idx < nr_counters_generic; idx++) {
		s64 val;

		if (!(mask & (1 << idx)))
			continue;

		rdmsrl(MSR_K7_PERFCTR0 + idx, val);
		val <<= (64 - counter_value_bits);
		if (val >= 0)
			status |= (1 << idx);
	}

	return status;
}

static u64 hw_perf_get_status(u64 mask)
{
	if (unlikely(!perf_counters_initialized))
		return 0;

	return pmc_ops->get_status(mask);
}

static void pmc_intel_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}

static void pmc_amd_ack_status(u64 ack)
{
}

static void hw_perf_ack_status(u64 ack)
{
	if (unlikely(!perf_counters_initialized))
		return;

	pmc_ops->ack_status(ack);
}

static void pmc_intel_enable(int idx, u64 config)
{
	wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx,
			config | ARCH_PERFMON_EVENTSEL0_ENABLE);
}

static void pmc_amd_enable(int idx, u64 config)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);

	set_bit(idx, cpuc->active_mask);
	if (cpuc->enabled)
		config |= ARCH_PERFMON_EVENTSEL0_ENABLE;

	wrmsrl(MSR_K7_EVNTSEL0 + idx, config);
}

static void hw_perf_enable(int idx, u64 config)
{
	if (unlikely(!perf_counters_initialized))
		return;

	pmc_ops->enable(idx, config);
}

static void pmc_intel_disable(int idx, u64 config)
{
	wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, config);
}

static void pmc_amd_disable(int idx, u64 config)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);

	clear_bit(idx, cpuc->active_mask);
	wrmsrl(MSR_K7_EVNTSEL0 + idx, config);
}

static void hw_perf_disable(int idx, u64 config)
{
	if (unlikely(!perf_counters_initialized))
		return;

	pmc_ops->disable(idx, config);
}

static inline void
__pmc_fixed_disable(struct perf_counter *counter,
		    struct hw_perf_counter *hwc, unsigned int __idx)
{
	int idx = __idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, mask;
	int err;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	err = checking_wrmsrl(hwc->config_base, ctrl_val);
}

static inline void
__pmc_generic_disable(struct perf_counter *counter,
		      struct hw_perf_counter *hwc, unsigned int idx)
{
	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
		__pmc_fixed_disable(counter, hwc, idx);
	else
		hw_perf_disable(idx, hwc->config);
}

static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the counter disabled in hw:
 */
static void
__hw_perf_counter_set_period(struct perf_counter *counter,
			     struct hw_perf_counter *hwc, int idx)
{
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->irq_period;
	int err;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
	}

	per_cpu(prev_left[idx], smp_processor_id()) = left;

	/*
	 * The hw counter starts counting from this counter offset,
	 * mark it to be able to extract future deltas:
	 */
	atomic64_set(&hwc->prev_count, (u64)-left);

	err = checking_wrmsrl(hwc->counter_base + idx,
			      (u64)(-left) & counter_value_mask);
}

static inline void
__pmc_fixed_enable(struct perf_counter *counter,
		   struct hw_perf_counter *hwc, unsigned int __idx)
{
	int idx = __idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;
	int err;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;
	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	err = checking_wrmsrl(hwc->config_base, ctrl_val);
}

static void
__pmc_generic_enable(struct perf_counter *counter,
		     struct hw_perf_counter *hwc, int idx)
{
	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
		__pmc_fixed_enable(counter, hwc, idx);
	else
		hw_perf_enable(idx, hwc->config);
}
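
/*
 * Map a generic counter to one of the Intel fixed-purpose counters
 * (instructions, cpu-cycles, bus-cycles) when possible, or return -1
 * so that a generic counter is used instead:
 */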
static int
fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
{
	unsigned int event;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		return -1;

	if (unlikely(hwc->nmi))
		return -1;

	event = hwc->config & ARCH_PERFMON_EVENT_MASK;

	if (unlikely(event == pmc_ops->event_map(PERF_COUNT_INSTRUCTIONS)))
		return X86_PMC_IDX_FIXED_INSTRUCTIONS;
	if (unlikely(event == pmc_ops->event_map(PERF_COUNT_CPU_CYCLES)))
		return X86_PMC_IDX_FIXED_CPU_CYCLES;
	if (unlikely(event == pmc_ops->event_map(PERF_COUNT_BUS_CYCLES)))
		return X86_PMC_IDX_FIXED_BUS_CYCLES;

	return -1;
}

/*
 * Find a PMC slot for the freshly enabled / scheduled in counter:
 */
static int pmc_generic_enable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	int idx;

	idx = fixed_mode_idx(counter, hwc);
	if (idx >= 0) {
		/*
		 * Try to get the fixed counter, if that is already taken
		 * then try to get a generic counter:
		 */
		if (test_and_set_bit(idx, cpuc->used))
			goto try_generic;

		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		/*
		 * We set it so that counter_base + idx in wrmsr/rdmsr maps to
		 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
		 */
		hwc->counter_base =
			MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
		hwc->idx = idx;
	} else {
		idx = hwc->idx;
		/* Try to get the previous generic counter again */
		if (test_and_set_bit(idx, cpuc->used)) {
try_generic:
			idx = find_first_zero_bit(cpuc->used, nr_counters_generic);
			if (idx == nr_counters_generic)
				return -EAGAIN;

			set_bit(idx, cpuc->used);
			hwc->idx = idx;
		}
		hwc->config_base  = pmc_ops->eventsel;
		hwc->counter_base = pmc_ops->perfctr;
	}

	perf_counters_lapic_init(hwc->nmi);

	__pmc_generic_disable(counter, hwc, idx);

	cpuc->counters[idx] = counter;
	/*
	 * Make it visible before enabling the hw:
	 */
	smp_wmb();

	__hw_perf_counter_set_period(counter, hwc, idx);
	__pmc_generic_enable(counter, hwc, idx);

	return 0;
}

void perf_counter_print_debug(void)
{
	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
	struct cpu_hw_counters *cpuc;
	int cpu, idx;

	if (!nr_counters_generic)
		return;

	local_irq_disable();

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_counters, cpu);

	if (intel_perfmon_version >= 2) {
		rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
		rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
		rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);

		pr_info("\n");
		pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
		pr_info("CPU#%d: status:     %016llx\n", cpu, status);
		pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
		pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
	}
	pr_info("CPU#%d: used:       %016llx\n", cpu, *(u64 *)cpuc->used);

	for (idx = 0; idx < nr_counters_generic; idx++) {
		rdmsrl(pmc_ops->eventsel + idx, pmc_ctrl);
		rdmsrl(pmc_ops->perfctr  + idx, pmc_count);

		prev_left = per_cpu(prev_left[idx], cpu);

		pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
			cpu, idx, pmc_ctrl);
		pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
		pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
			cpu, idx, prev_left);
	}
	for (idx = 0; idx < nr_counters_fixed; idx++) {
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
	}
	local_irq_enable();
}

static void pmc_generic_disable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	unsigned int idx = hwc->idx;

	__pmc_generic_disable(counter, hwc, idx);

	clear_bit(idx, cpuc->used);
	cpuc->counters[idx] = NULL;
	/*
	 * Make sure the cleared pointer becomes visible before we
	 * (potentially) free the counter:
	 */
	smp_wmb();

	/*
	 * Drain the remaining delta count out of a counter
	 * that we are disabling:
	 */
	x86_perf_counter_update(counter, hwc, idx);
}

/*
 * Save and restart an expired counter. Called by NMI contexts,
 * so it has to be careful about preempting normal counter ops:
 */
static void perf_save_and_restart(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;

	x86_perf_counter_update(counter, hwc, idx);
	__hw_perf_counter_set_period(counter, hwc, idx);

	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
		__pmc_generic_enable(counter, hwc, idx);
}

/*
 * Maximum interrupt frequency of 100KHz per CPU
 */
#define PERFMON_MAX_INTERRUPTS (100000/HZ)

/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
{
	int bit, cpu = smp_processor_id();
	u64 ack, status;
	struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu);
	int ret = 0;

	cpuc->throttle_ctrl = hw_perf_save_disable();

	status = hw_perf_get_status(cpuc->throttle_ctrl);
	if (!status)
		goto out;

	ret = 1;
again:
	inc_irq_stat(apic_perf_irqs);
	ack = status;
	for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_counter *counter = cpuc->counters[bit];

		clear_bit(bit, (unsigned long *)&status);
		if (!counter)
			continue;

		perf_save_and_restart(counter);
		perf_counter_output(counter, nmi, regs);
	}

	hw_perf_ack_status(ack);

	/*
	 * Repeat if there is more work to be done:
	 */
	status = hw_perf_get_status(cpuc->throttle_ctrl);
	if (status)
		goto again;
out:
	/*
	 * Restore - do not reenable when global enable is off or throttled:
	 */
	if (++cpuc->interrupts < PERFMON_MAX_INTERRUPTS)
		hw_perf_restore(cpuc->throttle_ctrl);

	return ret;
}
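
/*
 * Undo interrupt throttling: if this CPU hit PERFMON_MAX_INTERRUPTS,
 * re-enable its counters and reset the per-CPU interrupt count:
 */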
void perf_counter_unthrottle(void)
{
	struct cpu_hw_counters *cpuc;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
		return;

	if (unlikely(!perf_counters_initialized))
		return;

	cpuc = &__get_cpu_var(cpu_hw_counters);
	if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS) {
		if (printk_ratelimit())
			printk(KERN_WARNING "PERFMON: max interrupts exceeded!\n");
		hw_perf_restore(cpuc->throttle_ctrl);
	}
	cpuc->interrupts = 0;
}

void smp_perf_counter_interrupt(struct pt_regs *regs)
{
	irq_enter();
	apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
	ack_APIC_irq();
	__smp_perf_counter_interrupt(regs, 0);
	irq_exit();
}

void smp_perf_pending_interrupt(struct pt_regs *regs)
{
	irq_enter();
	ack_APIC_irq();
	inc_irq_stat(apic_pending_irqs);
	perf_counter_do_pending();
	irq_exit();
}

void set_perf_counter_pending(void)
{
	apic->send_IPI_self(LOCAL_PENDING_VECTOR);
}
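
/*
 * Route PMC overflow interrupts through the local APIC LVTPC entry,
 * either as NMIs or as regular interrupts on LOCAL_PERF_VECTOR:
 */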
void perf_counters_lapic_init(int nmi)
{
	u32 apic_val;

	if (!perf_counters_initialized)
		return;
	/*
	 * Enable the performance counter vector in the APIC LVT:
	 */
	apic_val = apic_read(APIC_LVTERR);

	apic_write(APIC_LVTERR, apic_val | APIC_LVT_MASKED);
	if (nmi)
		apic_write(APIC_LVTPC, APIC_DM_NMI);
	else
		apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
	apic_write(APIC_LVTERR, apic_val);
}

static int __kprobes
perf_counter_nmi_handler(struct notifier_block *self,
			 unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct pt_regs *regs;
	int ret;

	switch (cmd) {
	case DIE_NMI:
	case DIE_NMI_IPI:
		break;

	default:
		return NOTIFY_DONE;
	}

	regs = args->regs;

	apic_write(APIC_LVTPC, APIC_DM_NMI);
	ret = __smp_perf_counter_interrupt(regs, 1);

	return ret ? NOTIFY_STOP : NOTIFY_OK;
}

static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
	.notifier_call		= perf_counter_nmi_handler,
	.next			= NULL,
	.priority		= 1
};

static struct pmc_x86_ops pmc_intel_ops = {
	.save_disable_all	= pmc_intel_save_disable_all,
	.restore_all		= pmc_intel_restore_all,
	.get_status		= pmc_intel_get_status,
	.ack_status		= pmc_intel_ack_status,
	.enable			= pmc_intel_enable,
	.disable		= pmc_intel_disable,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= pmc_intel_event_map,
	.raw_event		= pmc_intel_raw_event,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
};

static struct pmc_x86_ops pmc_amd_ops = {
	.save_disable_all	= pmc_amd_save_disable_all,
	.restore_all		= pmc_amd_restore_all,
	.get_status		= pmc_amd_get_status,
	.ack_status		= pmc_amd_ack_status,
	.enable			= pmc_amd_enable,
	.disable		= pmc_amd_disable,
	.eventsel		= MSR_K7_EVNTSEL0,
	.perfctr		= MSR_K7_PERFCTR0,
	.event_map		= pmc_amd_event_map,
	.raw_event		= pmc_amd_raw_event,
	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
};
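
/*
 * Detect architectural perfmon support via CPUID leaf 0xA and read out
 * the counter geometry (number of counters, counter bit width):
 */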
static struct pmc_x86_ops *pmc_intel_init(void)
{
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	unsigned int unused;
	unsigned int ebx;

	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired Event or not.
	 */
	cpuid(10, &eax.full, &ebx, &unused, &edx.full);
	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
		return NULL;

	intel_perfmon_version = eax.split.version_id;
	if (intel_perfmon_version < 2)
		return NULL;

	pr_info("Intel Performance Monitoring support detected.\n");
	pr_info("... version:         %d\n", intel_perfmon_version);
	pr_info("... bit width:       %d\n", eax.split.bit_width);
	pr_info("... mask length:     %d\n", eax.split.mask_length);

	nr_counters_generic = eax.split.num_counters;
	nr_counters_fixed = edx.split.num_counters_fixed;
	counter_value_mask = (1ULL << eax.split.bit_width) - 1;

	return &pmc_intel_ops;
}

static struct pmc_x86_ops *pmc_amd_init(void)
{
	nr_counters_generic = 4;
	nr_counters_fixed = 0;
	counter_value_mask = 0x0000FFFFFFFFFFFFULL;
	counter_value_bits = 48;

	pr_info("AMD Performance Monitoring support detected.\n");

	return &pmc_amd_ops;
}

void __init init_hw_perf_counters(void)
{
	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
		return;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		pmc_ops = pmc_intel_init();
		break;
	case X86_VENDOR_AMD:
		pmc_ops = pmc_amd_init();
		break;
	}
	if (!pmc_ops)
		return;

	pr_info("... num counters:    %d\n", nr_counters_generic);
	if (nr_counters_generic > X86_PMC_MAX_GENERIC) {
		nr_counters_generic = X86_PMC_MAX_GENERIC;
		WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
			nr_counters_generic, X86_PMC_MAX_GENERIC);
	}
	perf_counter_mask = (1 << nr_counters_generic) - 1;
	perf_max_counters = nr_counters_generic;

	pr_info("... value mask:      %016Lx\n", counter_value_mask);

	if (nr_counters_fixed > X86_PMC_MAX_FIXED) {
		nr_counters_fixed = X86_PMC_MAX_FIXED;
		WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!",
			nr_counters_fixed, X86_PMC_MAX_FIXED);
	}
	pr_info("... fixed counters:  %d\n", nr_counters_fixed);

	perf_counter_mask |= ((1LL << nr_counters_fixed)-1) << X86_PMC_IDX_FIXED;

	pr_info("... counter mask:    %016Lx\n", perf_counter_mask);
	perf_counters_initialized = true;

	perf_counters_lapic_init(0);
	register_die_notifier(&perf_counter_nmi_notifier);
}

static void pmc_generic_read(struct perf_counter *counter)
{
	x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
}

static const struct hw_perf_counter_ops x86_perf_counter_ops = {
	.enable		= pmc_generic_enable,
	.disable	= pmc_generic_disable,
	.read		= pmc_generic_read,
};

const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter)
{
	int err;

	err = __hw_perf_counter_init(counter);
	if (err)
		return ERR_PTR(err);

	return &x86_perf_counter_ops;
}

/*
 * callchain support
 */

static inline
void callchain_store(struct perf_callchain_entry *entry, unsigned long ip)
{
	if (entry->nr < MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}

static DEFINE_PER_CPU(struct perf_callchain_entry, irq_entry);
static DEFINE_PER_CPU(struct perf_callchain_entry, nmi_entry);

static void
backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	/* Ignore warnings */
}

static void backtrace_warning(void *data, char *msg)
{
	/* Ignore warnings */
}

static int backtrace_stack(void *data, char *name)
{
	/* Don't bother with IRQ stacks for now */
	return -1;
}

static void backtrace_address(void *data, unsigned long addr, int reliable)
{
	struct perf_callchain_entry *entry = data;

	if (reliable)
		callchain_store(entry, addr);
}

static const struct stacktrace_ops backtrace_ops = {
	.warning		= backtrace_warning,
	.warning_symbol		= backtrace_warning_symbol,
	.stack			= backtrace_stack,
	.address		= backtrace_address,
};
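
/*
 * Record a kernel-mode callchain by walking the current stack with
 * dump_trace(), storing only return addresses marked reliable:
 */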
static void
perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	unsigned long bp;
	char *stack;
	int nr = entry->nr;

	callchain_store(entry, instruction_pointer(regs));

	stack = ((char *)regs + sizeof(struct pt_regs));
#ifdef CONFIG_FRAME_POINTER
	bp = frame_pointer(regs);
#else
	bp = 0;
#endif

	dump_trace(NULL, regs, (void *)stack, bp, &backtrace_ops, entry);

	entry->kernel = entry->nr - nr;
}

struct stack_frame {
	const void __user	*next_fp;
	unsigned long		return_address;
};

static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
{
	int ret;

	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
		return 0;

	ret = 1;
	pagefault_disable();
	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
		ret = 0;
	pagefault_enable();

	return ret;
}
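
/*
 * Record a user-mode callchain by following the saved frame-pointer
 * chain, copying each frame with the pagefault handler disabled:
 */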
static void
perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	struct stack_frame frame;
	const void __user *fp;
	int nr = entry->nr;

	regs = (struct pt_regs *)current->thread.sp0 - 1;
	fp   = (void __user *)regs->bp;

	callchain_store(entry, regs->ip);

	while (entry->nr < MAX_STACK_DEPTH) {
		frame.next_fp	     = NULL;
		frame.return_address = 0;

		if (!copy_stack_frame(fp, &frame))
			break;

		if ((unsigned long)fp < user_stack_pointer(regs))
			break;

		callchain_store(entry, frame.return_address);
		fp = frame.next_fp;
	}

	entry->user = entry->nr - nr;
}

static void
perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	int is_user;

	if (!regs)
		return;

	is_user = user_mode(regs);

	if (!current || current->pid == 0)
		return;

	if (is_user && current->state != TASK_RUNNING)
		return;

	if (!is_user)
		perf_callchain_kernel(regs, entry);

	if (current->mm)
		perf_callchain_user(regs, entry);
}

struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
	struct perf_callchain_entry *entry;

	if (in_nmi())
		entry = &__get_cpu_var(nmi_entry);
	else
		entry = &__get_cpu_var(irq_entry);

	entry->nr = 0;
	entry->hv = 0;
	entry->kernel = 0;
	entry->user = 0;

	perf_do_callchain(regs, entry);

	return entry;
}