perf_event_amd.c

#include <linux/perf_event.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/apicdef.h>

#include "perf_event.h"

static __initconst const u64 amd_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
		[ C(RESULT_MISS)   ] = 0x0141, /* Data Cache Misses */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts */
		[ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled */
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches */
		[ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
		[ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses :IC+DC */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
		[ C(RESULT_MISS)   ] = 0x0746, /* L1_DTLB_AND_L2_DTLB_MISS.ALL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches */
		[ C(RESULT_MISS)   ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr. */
		[ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xb8e9, /* CPU Request to Memory, l+r */
		[ C(RESULT_MISS)   ] = 0x98e9, /* CPU Request to Memory, r */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[] =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0080,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x0081,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c2,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c3,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= 0x00d0, /* "Decoder empty" event */
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= 0x00d1, /* "Dispatch stalls" event */
};

static u64 amd_pmu_event_map(int hw_event)
{
	return amd_perfmon_event_map[hw_event];
}
static int amd_pmu_hw_config(struct perf_event *event)
{
	int ret = x86_pmu_hw_config(event);

	if (ret)
		return ret;

	if (event->attr.exclude_host && event->attr.exclude_guest)
		/*
		 * When HO == GO == 1 the hardware treats that as GO == HO == 0
		 * and will count in both modes. We don't want to count in that
		 * case so we emulate no-counting by setting US = OS = 0.
		 */
		event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
				      ARCH_PERFMON_EVENTSEL_OS);
	else if (event->attr.exclude_host)
		event->hw.config |= AMD_PERFMON_EVENTSEL_GUESTONLY;
	else if (event->attr.exclude_guest)
		event->hw.config |= AMD_PERFMON_EVENTSEL_HOSTONLY;

	if (event->attr.type != PERF_TYPE_RAW)
		return 0;

	event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;

	return 0;
}
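/*
 * Editor's illustration (not from the original source): a counter
 * programmed with AMD_PERFMON_EVENTSEL_HOSTONLY ticks only while the
 * CPU runs host code, GUESTONLY only inside a guest, and the
 * HO == GO == 1 case above is muted by clearing both USR and OS so
 * the counter counts in neither privilege mode.
 */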
/*
 * AMD64 events are detected based on their event codes.
 */
static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
{
	return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
}
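/*
 * Worked example (editor's illustration): bits 11:8 of the event code
 * live in config bits 35:32, bits 7:0 in config bits 7:0. For event
 * code 0x1D6, config = 0x1000000D6 and:
 *
 *   ((0x1000000D6 >> 24) & 0x0f00) | (0x1000000D6 & 0x00ff) = 0x1D6
 */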
static inline int amd_is_nb_event(struct hw_perf_event *hwc)
{
	return (hwc->config & 0xe0) == 0xe0;
}
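/*
 * Example (editor's illustration): the NODE events in the cache table
 * above, 0xb8e9 and 0x98e9, carry event select byte 0xe9, and
 * (0xe9 & 0xe0) == 0xe0, so they are treated as NorthBridge events.
 */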
static inline int amd_has_nb(struct cpu_hw_events *cpuc)
{
	struct amd_nb *nb = cpuc->amd_nb;

	return nb && nb->nb_id != -1;
}

static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
				      struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amd_nb *nb = cpuc->amd_nb;
	int i;

	/*
	 * only care about NB events
	 */
	if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
		return;

	/*
	 * need to scan whole list because event may not have
	 * been assigned during scheduling
	 *
	 * no race condition possible because event can only
	 * be removed on one CPU at a time AND PMU is disabled
	 * when we come here
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (nb->owners[i] == event) {
			cmpxchg(nb->owners + i, event, NULL);
			break;
		}
	}
}
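/*
 * Editor's note: the slot is released with cmpxchg() rather than a
 * plain store so it is only cleared while this event still owns it;
 * a slot concurrently claimed by another core through
 * amd_get_event_constraints() is left untouched.
 */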
/*
 * AMD64 NorthBridge events need special treatment because
 * counter access needs to be synchronized across all cores
 * of a package. Refer to BKDG section 3.12
 *
 * NB events are events measuring L3 cache and HyperTransport
 * traffic. They are identified by an event code >= 0xe00.
 * They measure events on the NorthBridge, which is shared
 * by all cores on a package. NB events are counted on a
 * shared set of counters. When a NB event is programmed
 * in a counter, the data actually comes from a shared
 * counter. Thus, access to those counters needs to be
 * synchronized.
 *
 * We implement the synchronization such that no two cores
 * can be measuring NB events using the same counters. Thus,
 * we maintain a per-NB allocation table. The available slot
 * is propagated using the event_constraint structure.
 *
 * We provide only one choice for each NB event based on
 * the fact that only NB events have restrictions. Consequently,
 * if a counter is available, there is a guarantee the NB event
 * will be assigned to it. If no slot is available, an empty
 * constraint is returned and scheduling will eventually fail
 * for this event.
 *
 * Note that all cores attached to the same NB compete for the
 * same counters to host NB events; this is why we use atomic
 * ops. Some multi-chip CPUs may have more than one NB.
 *
 * Given that resources are allocated (cmpxchg), they must be
 * eventually freed for others to use. This is accomplished by
 * calling amd_put_event_constraints().
 *
 * Non NB events are not impacted by this restriction.
 */
static struct event_constraint *
amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amd_nb *nb = cpuc->amd_nb;
	struct perf_event *old = NULL;
	int max = x86_pmu.num_counters;
	int i, j, k = -1;

	/*
	 * if not NB event or no NB, then no constraints
	 */
	if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
		return &unconstrained;

	/*
	 * detect if already present, if so reuse
	 *
	 * cannot merge with actual allocation
	 * because of possible holes
	 *
	 * event can already be present yet not assigned (in hwc->idx)
	 * because of successive calls to x86_schedule_events() from
	 * hw_perf_group_sched_in() without hw_perf_enable()
	 */
	for (i = 0; i < max; i++) {
		/*
		 * keep track of first free slot
		 */
		if (k == -1 && !nb->owners[i])
			k = i;

		/* already present, reuse */
		if (nb->owners[i] == event)
			goto done;
	}

	/*
	 * not present, so grab a new slot
	 * starting either at:
	 */
	if (hwc->idx != -1) {
		/* previous assignment */
		i = hwc->idx;
	} else if (k != -1) {
		/* start from free slot found */
		i = k;
	} else {
		/*
		 * event not found, no slot found in
		 * first pass, try again from the
		 * beginning
		 */
		i = 0;
	}
	j = i;
	do {
		old = cmpxchg(nb->owners + i, NULL, event);
		if (!old)
			break;
		if (++i == max)
			i = 0;
	} while (i != j);

done:
	if (!old)
		return &nb->event_constraints[i];

	return &emptyconstraint;
}
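/*
 * Sketch of the claim logic above (editor's illustration): with four
 * NB counters and owners = { A, NULL, B, NULL }, a new NB event
 * records k = 1 as the first free slot, finds no existing claim, and
 * cmpxchg()es itself into owners[1]. If another core wins that race,
 * the do/while loop advances (wrapping at max) until a NULL slot is
 * won or every slot has been tried once, in which case the empty
 * constraint is returned and scheduling fails.
 */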
static struct amd_nb *amd_alloc_nb(int cpu)
{
	struct amd_nb *nb;
	int i;

	nb = kmalloc_node(sizeof(struct amd_nb), GFP_KERNEL | __GFP_ZERO,
			  cpu_to_node(cpu));
	if (!nb)
		return NULL;

	nb->nb_id = -1;

	/*
	 * initialize all possible NB constraints
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		__set_bit(i, nb->event_constraints[i].idxmsk);
		nb->event_constraints[i].weight = 1;
	}
	return nb;
}
static int amd_pmu_cpu_prepare(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	WARN_ON_ONCE(cpuc->amd_nb);

	if (boot_cpu_data.x86_max_cores < 2)
		return NOTIFY_OK;

	cpuc->amd_nb = amd_alloc_nb(cpu);
	if (!cpuc->amd_nb)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}

static void amd_pmu_cpu_starting(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	struct amd_nb *nb;
	int i, nb_id;

	if (boot_cpu_data.x86_max_cores < 2)
		return;

	nb_id = amd_get_nb_id(cpu);
	WARN_ON_ONCE(nb_id == BAD_APICID);

	for_each_online_cpu(i) {
		nb = per_cpu(cpu_hw_events, i).amd_nb;
		if (WARN_ON_ONCE(!nb))
			continue;

		if (nb->nb_id == nb_id) {
			cpuc->kfree_on_online = cpuc->amd_nb;
			cpuc->amd_nb = nb;
			break;
		}
	}

	cpuc->amd_nb->nb_id = nb_id;
	cpuc->amd_nb->refcnt++;
}
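/*
 * Editor's note: the first core brought up on a node keeps the amd_nb
 * it allocated in amd_pmu_cpu_prepare(); each later core on the same
 * node finds the existing structure by nb_id, hands its own duplicate
 * to kfree_on_online for disposal, shares the found one, and bumps
 * refcnt.
 */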
static void amd_pmu_cpu_dead(int cpu)
{
	struct cpu_hw_events *cpuhw;

	if (boot_cpu_data.x86_max_cores < 2)
		return;

	cpuhw = &per_cpu(cpu_hw_events, cpu);

	if (cpuhw->amd_nb) {
		struct amd_nb *nb = cpuhw->amd_nb;

		if (nb->nb_id == -1 || --nb->refcnt == 0)
			kfree(nb);

		cpuhw->amd_nb = NULL;
	}
}
static __initconst const struct x86_pmu amd_pmu = {
	.name			= "AMD",
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= x86_pmu_enable_all,
	.enable			= x86_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= amd_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_K7_EVNTSEL0,
	.perfctr		= MSR_K7_PERFCTR0,
	.event_map		= amd_pmu_event_map,
	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
	.num_counters		= AMD64_NUM_COUNTERS,
	.cntval_bits		= 48,
	.cntval_mask		= (1ULL << 48) - 1,
	.apic			= 1,
	/* use highest bit to detect overflow */
	.max_period		= (1ULL << 47) - 1,
	.get_event_constraints	= amd_get_event_constraints,
	.put_event_constraints	= amd_put_event_constraints,

	.cpu_prepare		= amd_pmu_cpu_prepare,
	.cpu_starting		= amd_pmu_cpu_starting,
	.cpu_dead		= amd_pmu_cpu_dead,
};
/* AMD Family 15h */
#define AMD_EVENT_TYPE_MASK	0x000000F0ULL

/* the trailing "..." makes these expand into GCC case ranges below */
#define AMD_EVENT_FP		0x00000000ULL ... 0x00000010ULL
#define AMD_EVENT_LS		0x00000020ULL ... 0x00000030ULL
#define AMD_EVENT_DC		0x00000040ULL ... 0x00000050ULL
#define AMD_EVENT_CU		0x00000060ULL ... 0x00000070ULL
#define AMD_EVENT_IC_DE		0x00000080ULL ... 0x00000090ULL
#define AMD_EVENT_EX_LS		0x000000C0ULL
#define AMD_EVENT_DE		0x000000D0ULL
#define AMD_EVENT_NB		0x000000E0ULL ... 0x000000F0ULL
/*
 * AMD family 15h event code/PMC mappings:
 *
 * type = event_code & 0x0F0:
 *
 * 0x000	FP	PERF_CTL[5:3]
 * 0x010	FP	PERF_CTL[5:3]
 * 0x020	LS	PERF_CTL[5:0]
 * 0x030	LS	PERF_CTL[5:0]
 * 0x040	DC	PERF_CTL[5:0]
 * 0x050	DC	PERF_CTL[5:0]
 * 0x060	CU	PERF_CTL[2:0]
 * 0x070	CU	PERF_CTL[2:0]
 * 0x080	IC/DE	PERF_CTL[2:0]
 * 0x090	IC/DE	PERF_CTL[2:0]
 * 0x0A0	---
 * 0x0B0	---
 * 0x0C0	EX/LS	PERF_CTL[5:0]
 * 0x0D0	DE	PERF_CTL[2:0]
 * 0x0E0	NB	NB_PERF_CTL[3:0]
 * 0x0F0	NB	NB_PERF_CTL[3:0]
 *
 * Exceptions:
 *
 * 0x000	FP	PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x003	FP	PERF_CTL[3]
 * 0x004	FP	PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x00B	FP	PERF_CTL[3]
 * 0x00D	FP	PERF_CTL[3]
 * 0x023	DE	PERF_CTL[2:0]
 * 0x02D	LS	PERF_CTL[3]
 * 0x02E	LS	PERF_CTL[3,0]
 * 0x043	CU	PERF_CTL[2:0]
 * 0x045	CU	PERF_CTL[2:0]
 * 0x046	CU	PERF_CTL[2:0]
 * 0x054	CU	PERF_CTL[2:0]
 * 0x055	CU	PERF_CTL[2:0]
 * 0x08F	IC	PERF_CTL[0]
 * 0x187	DE	PERF_CTL[0]
 * 0x188	DE	PERF_CTL[0]
 * 0x0DB	EX	PERF_CTL[5:0]
 * 0x0DC	LS	PERF_CTL[5:0]
 * 0x0DD	LS	PERF_CTL[5:0]
 * 0x0DE	LS	PERF_CTL[5:0]
 * 0x0DF	LS	PERF_CTL[5:0]
 * 0x1D6	EX	PERF_CTL[5:0]
 * 0x1D8	EX	PERF_CTL[5:0]
 *
 * (*) depending on the umask all FPU counters may be used
 */
static struct event_constraint amd_f15_PMC0  = EVENT_CONSTRAINT(0, 0x01, 0);
static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0);
static struct event_constraint amd_f15_PMC3  = EVENT_CONSTRAINT(0, 0x08, 0);
static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT(0, 0x09, 0);
static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
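/*
 * Editor's note: the counter masks above read as bitmaps over
 * PERF_CTL[5:0]: 0x01 -> PMC0, 0x07 -> PMC[2:0], 0x08 -> PMC3,
 * 0x09 -> PMC3 and PMC0, 0x3F -> PMC[5:0], 0x38 -> PMC[5:3].
 */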
static struct event_constraint *
amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int event_code = amd_get_event_code(hwc);

	switch (event_code & AMD_EVENT_TYPE_MASK) {
	case AMD_EVENT_FP:
		switch (event_code) {
		case 0x000:
			if (!(hwc->config & 0x0000F000ULL))
				break;
			if (!(hwc->config & 0x00000F00ULL))
				break;
			return &amd_f15_PMC3;
		case 0x004:
			if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
				break;
			return &amd_f15_PMC3;
		case 0x003:
		case 0x00B:
		case 0x00D:
			return &amd_f15_PMC3;
		}
		return &amd_f15_PMC53;
	case AMD_EVENT_LS:
	case AMD_EVENT_DC:
	case AMD_EVENT_EX_LS:
		switch (event_code) {
		case 0x023:
		case 0x043:
		case 0x045:
		case 0x046:
		case 0x054:
		case 0x055:
			return &amd_f15_PMC20;
		case 0x02D:
			return &amd_f15_PMC3;
		case 0x02E:
			return &amd_f15_PMC30;
		default:
			return &amd_f15_PMC50;
		}
	case AMD_EVENT_CU:
	case AMD_EVENT_IC_DE:
	case AMD_EVENT_DE:
		switch (event_code) {
		case 0x08F:
		case 0x187:
		case 0x188:
			return &amd_f15_PMC0;
		case 0x0DB ... 0x0DF:
		case 0x1D6:
		case 0x1D8:
			return &amd_f15_PMC50;
		default:
			return &amd_f15_PMC20;
		}
	case AMD_EVENT_NB:
		/* not yet implemented */
		return &emptyconstraint;
	default:
		return &emptyconstraint;
	}
}
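/*
 * Worked example (editor's illustration): event 0x02D has type LS
 * (0x02D & 0x0F0 == 0x020) but appears in the exception table above,
 * so the switch returns &amd_f15_PMC3 and the event can only be
 * scheduled on counter 3.
 */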
static __initconst const struct x86_pmu amd_pmu_f15h = {
	.name			= "AMD Family 15h",
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= x86_pmu_enable_all,
	.enable			= x86_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= amd_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_F15H_PERF_CTL,
	.perfctr		= MSR_F15H_PERF_CTR,
	.event_map		= amd_pmu_event_map,
	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
	.num_counters		= AMD64_NUM_COUNTERS_F15H,
	.cntval_bits		= 48,
	.cntval_mask		= (1ULL << 48) - 1,
	.apic			= 1,
	/* use highest bit to detect overflow */
	.max_period		= (1ULL << 47) - 1,
	.get_event_constraints	= amd_get_event_constraints_f15h,
	/* northbridge counters not yet implemented: */
#if 0
	.put_event_constraints	= amd_put_event_constraints,

	.cpu_prepare		= amd_pmu_cpu_prepare,
	.cpu_starting		= amd_pmu_cpu_starting,
	.cpu_dead		= amd_pmu_cpu_dead,
#endif
};
__init int amd_pmu_init(void)
{
	/* Performance monitoring is supported from K7 and later: */
	if (boot_cpu_data.x86 < 6)
		return -ENODEV;

	/*
	 * If core performance counter extensions exist, it must be
	 * family 15h, otherwise fail. See x86_pmu_addr_offset().
	 */
	switch (boot_cpu_data.x86) {
	case 0x15:
		if (!cpu_has_perfctr_core)
			return -ENODEV;
		x86_pmu = amd_pmu_f15h;
		break;
	default:
		if (cpu_has_perfctr_core)
			return -ENODEV;
		x86_pmu = amd_pmu;
		break;
	}

	/* Events are common for all AMDs */
	memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
	       sizeof(hw_cache_event_ids));

	return 0;
}