perf_event_amd_ibs.c

/*
 * Performance events - AMD IBS
 *
 * Copyright (C) 2011 Advanced Micro Devices, Inc., Robert Richter
 *
 * For licensing details see kernel-base/COPYING
 */
#include <linux/perf_event.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ptrace.h>

#include <asm/apic.h>

static u32 ibs_caps;

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)

#include <linux/kprobes.h>
#include <linux/hardirq.h>

#include <asm/nmi.h>

#define IBS_FETCH_CONFIG_MASK	(IBS_FETCH_RAND_EN | IBS_FETCH_MAX_CNT)
#define IBS_OP_CONFIG_MASK	IBS_OP_MAX_CNT
enum ibs_states {
	IBS_ENABLED	= 0,
	IBS_STARTED	= 1,
	IBS_STOPPING	= 2,

	IBS_MAX_STATES,
};

struct cpu_perf_ibs {
	struct perf_event	*event;
	unsigned long		state[BITS_TO_LONGS(IBS_MAX_STATES)];
};

struct perf_ibs {
	struct pmu			pmu;
	unsigned int			msr;
	u64				config_mask;
	u64				cnt_mask;
	u64				enable_mask;
	u64				valid_mask;
	u64				max_period;
	unsigned long			offset_mask[1];
	int				offset_max;
	struct cpu_perf_ibs __percpu	*pcpu;
	u64				(*get_count)(u64 config);
};

struct perf_ibs_data {
	u32		size;
	union {
		u32	data[0];	/* data buffer starts here */
		u32	caps;
	};
	u64		regs[MSR_AMD64_IBS_REG_COUNT_MAX];
};
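
/*
 * Set up the hw sample period: clamp the period requested by software
 * to what the hardware counter can express and report whether a
 * software counter overflow occurred. Periods larger than the hw
 * maximum are split across several hw periods.
 */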
static int
perf_event_set_period(struct hw_perf_event *hwc, u64 min, u64 max, u64 *hw_period)
{
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int overflow = 0;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	if (unlikely(left < (s64)min)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	/*
	 * If the hw period that triggers the sw overflow is too short
	 * we might hit the irq handler. This biases the results.
	 *
	 * Thus we shorten the next-to-last period and set the last
	 * period to the max period.
	 */
	if (left > max) {
		left -= max;
		if (left > max)
			left = max;
		else if (left < min)
			left = min;
	}

	*hw_period = (u64)left;

	return overflow;
}
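
/*
 * Update event->count from a new raw counter value. Returns 0 if an
 * NMI has updated hwc->prev_count in the meantime and the caller must
 * re-read the counter and retry.
 */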
static int
perf_event_try_update(struct perf_event *event, u64 new_raw_count, int width)
{
	struct hw_perf_event *hwc = &event->hw;
	int shift = 64 - width;
	u64 prev_raw_count;
	u64 delta;

	/*
	 * Careful: an NMI might modify the previous event value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic event atomically:
	 */
	prev_raw_count = local64_read(&hwc->prev_count);
	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		return 0;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return 1;
}
static struct perf_ibs perf_ibs_fetch;
static struct perf_ibs perf_ibs_op;

static struct perf_ibs *get_ibs_pmu(int type)
{
	if (perf_ibs_fetch.pmu.type == type)
		return &perf_ibs_fetch;
	if (perf_ibs_op.pmu.type == type)
		return &perf_ibs_op;
	return NULL;
}
/*
 * Use IBS for precise event sampling:
 *
 *  perf record -a -e cpu-cycles:p ...    # use ibs op counting cycle count
 *  perf record -a -e r076:p ...          # same as -e cpu-cycles:p
 *  perf record -a -e r0C1:p ...          # use ibs op counting micro-ops
 *
 * IbsOpCntCtl (bit 19) of IBS Execution Control Register (IbsOpCtl,
 * MSRC001_1033) is used to select either cycle or micro-ops counting
 * mode.
 *
 * The rip of IBS samples has skid 0. Thus, IBS supports precise
 * levels 1 and 2 and the PERF_EFLAGS_EXACT is set. In rare cases the
 * rip is invalid when IBS was not able to record the rip correctly.
 * We clear PERF_EFLAGS_EXACT and take the rip from pt_regs then.
 */
static int perf_ibs_precise_event(struct perf_event *event, u64 *config)
{
	switch (event->attr.precise_ip) {
	case 0:
		return -ENOENT;
	case 1:
	case 2:
		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		switch (event->attr.config) {
		case PERF_COUNT_HW_CPU_CYCLES:
			*config = 0;
			return 0;
		}
		break;
	case PERF_TYPE_RAW:
		switch (event->attr.config) {
		case 0x0076:
			*config = 0;
			return 0;
		case 0x00C1:
			*config = IBS_OP_CNT_CTL;
			return 0;
		}
		break;
	default:
		return -ENOENT;
	}

	return -EOPNOTSUPP;
}
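
/*
 * An event reaches this pmu either directly through the dynamically
 * assigned pmu type of ibs_fetch/ibs_op, or as a precise_ip event
 * that perf_ibs_precise_event() maps onto the ibs_op pmu.
 */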
static int perf_ibs_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs;
	u64 max_cnt, config;
	int ret;

	perf_ibs = get_ibs_pmu(event->attr.type);
	if (perf_ibs) {
		config = event->attr.config;
	} else {
		perf_ibs = &perf_ibs_op;
		ret = perf_ibs_precise_event(event, &config);
		if (ret)
			return ret;
	}

	if (event->pmu != &perf_ibs->pmu)
		return -ENOENT;

	if (config & ~perf_ibs->config_mask)
		return -EINVAL;

	if (hwc->sample_period) {
		if (config & perf_ibs->cnt_mask)
			/* raw max_cnt may not be set */
			return -EINVAL;
		if (!event->attr.sample_freq && hwc->sample_period & 0x0f)
			/*
			 * The lower 4 bits cannot be set in the ibs max
			 * count, but we allow them in case we later
			 * adjust the sample period to set a frequency.
			 */
			return -EINVAL;
		hwc->sample_period &= ~0x0FULL;
		if (!hwc->sample_period)
			hwc->sample_period = 0x10;
	} else {
		max_cnt = config & perf_ibs->cnt_mask;
		config &= ~perf_ibs->cnt_mask;
		event->attr.sample_period = max_cnt << 4;
		hwc->sample_period = event->attr.sample_period;
	}

	if (!hwc->sample_period)
		return -EINVAL;

	/*
	 * If we modify hwc->sample_period, we also need to update
	 * hwc->last_period and hwc->period_left.
	 */
	hwc->last_period = hwc->sample_period;
	local64_set(&hwc->period_left, hwc->sample_period);

	hwc->config_base = perf_ibs->msr;
	hwc->config = config;

	return 0;
}
static int perf_ibs_set_period(struct perf_ibs *perf_ibs,
			       struct hw_perf_event *hwc, u64 *period)
{
	int overflow;

	/* ignore lower 4 bits in min count: */
	overflow = perf_event_set_period(hwc, 1<<4, perf_ibs->max_period, period);
	local64_set(&hwc->prev_count, 0);

	return overflow;
}
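
/*
 * The programmed period is max_cnt << 4 (see the pmu descriptors
 * below); the shifts here bring the current-count fields of the
 * control MSRs into those same units.
 */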
static u64 get_ibs_fetch_count(u64 config)
{
	return (config & IBS_FETCH_CNT) >> 12;
}

static u64 get_ibs_op_count(u64 config)
{
	return (config & IBS_OP_CUR_CNT) >> 32;
}
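
/*
 * perf_event_try_update() fails when an NMI has changed
 * hwc->prev_count under us; in that case re-read the counter from the
 * control MSR and retry until the update goes through.
 */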
static void
perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event,
		      u64 config)
{
	u64 count = perf_ibs->get_count(config);

	while (!perf_event_try_update(event, count, 20)) {
		rdmsrl(event->hw.config_base, config);
		count = perf_ibs->get_count(config);
	}
}
/* Note: The enable mask must be encoded in the config argument. */
static inline void perf_ibs_enable_event(struct hw_perf_event *hwc, u64 config)
{
	wrmsrl(hwc->config_base, hwc->config | config);
}
/*
 * We cannot restore the ibs pmu state, so we always need to update
 * the event while stopping it and then reset the state when starting
 * again. Thus, ignoring PERF_EF_RELOAD and PERF_EF_UPDATE flags in
 * perf_ibs_start()/perf_ibs_stop() and instead always do it.
 */
static void perf_ibs_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	u64 config;

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
	hwc->state = 0;

	perf_ibs_set_period(perf_ibs, hwc, &config);
	config = (config >> 4) | perf_ibs->enable_mask;
	set_bit(IBS_STARTED, pcpu->state);
	perf_ibs_enable_event(hwc, config);
	perf_event_update_userpage(event);
}

static void perf_ibs_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	u64 val;
	int stopping;

	stopping = test_and_clear_bit(IBS_STARTED, pcpu->state);

	if (!stopping && (hwc->state & PERF_HES_UPTODATE))
		return;

	rdmsrl(hwc->config_base, val);

	if (stopping) {
		set_bit(IBS_STOPPING, pcpu->state);
		val &= ~perf_ibs->enable_mask;
		wrmsrl(hwc->config_base, val);
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;
	}

	if (hwc->state & PERF_HES_UPTODATE)
		return;

	perf_ibs_event_update(perf_ibs, event, val);
	hwc->state |= PERF_HES_UPTODATE;
}
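
/*
 * Only one IBS event can be active per pmu and cpu at a time; the
 * IBS_ENABLED bit arbitrates this.
 */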
static int perf_ibs_add(struct perf_event *event, int flags)
{
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

	if (test_and_set_bit(IBS_ENABLED, pcpu->state))
		return -ENOSPC;

	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	pcpu->event = event;

	if (flags & PERF_EF_START)
		perf_ibs_start(event, PERF_EF_RELOAD);

	return 0;
}

static void perf_ibs_del(struct perf_event *event, int flags)
{
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

	if (!test_and_clear_bit(IBS_ENABLED, pcpu->state))
		return;

	perf_ibs_stop(event, PERF_EF_UPDATE);

	pcpu->event = NULL;

	perf_event_update_userpage(event);
}

static void perf_ibs_read(struct perf_event *event) { }
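
/*
 * The two IBS flavors, fetch and op, are modeled as separate pmus.
 * The offset_mask describes which MSRs, relative to the control MSR,
 * make up the register set that is copied into raw samples.
 */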
static struct perf_ibs perf_ibs_fetch = {
	.pmu = {
		.task_ctx_nr	= perf_invalid_context,

		.event_init	= perf_ibs_init,
		.add		= perf_ibs_add,
		.del		= perf_ibs_del,
		.start		= perf_ibs_start,
		.stop		= perf_ibs_stop,
		.read		= perf_ibs_read,
	},
	.msr			= MSR_AMD64_IBSFETCHCTL,
	.config_mask		= IBS_FETCH_CONFIG_MASK,
	.cnt_mask		= IBS_FETCH_MAX_CNT,
	.enable_mask		= IBS_FETCH_ENABLE,
	.valid_mask		= IBS_FETCH_VAL,
	.max_period		= IBS_FETCH_MAX_CNT << 4,
	.offset_mask		= { MSR_AMD64_IBSFETCH_REG_MASK },
	.offset_max		= MSR_AMD64_IBSFETCH_REG_COUNT,

	.get_count		= get_ibs_fetch_count,
};

static struct perf_ibs perf_ibs_op = {
	.pmu = {
		.task_ctx_nr	= perf_invalid_context,

		.event_init	= perf_ibs_init,
		.add		= perf_ibs_add,
		.del		= perf_ibs_del,
		.start		= perf_ibs_start,
		.stop		= perf_ibs_stop,
		.read		= perf_ibs_read,
	},
	.msr			= MSR_AMD64_IBSOPCTL,
	.config_mask		= IBS_OP_CONFIG_MASK,
	.cnt_mask		= IBS_OP_MAX_CNT,
	.enable_mask		= IBS_OP_ENABLE,
	.valid_mask		= IBS_OP_VAL,
	.max_period		= IBS_OP_MAX_CNT << 4,
	.offset_mask		= { MSR_AMD64_IBSOP_REG_MASK },
	.offset_max		= MSR_AMD64_IBSOP_REG_COUNT,

	.get_count		= get_ibs_op_count,
};
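
/*
 * NMI handler for one IBS pmu: check the valid bit, update the event
 * count, collect the IBS register set for PERF_SAMPLE_RAW, fix up the
 * rip, and restart the counter. Returns 1 if the NMI was ours.
 */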
static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
{
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	struct perf_event *event = pcpu->event;
	struct hw_perf_event *hwc = &event->hw;
	struct perf_sample_data data;
	struct perf_raw_record raw;
	struct pt_regs regs;
	struct perf_ibs_data ibs_data;
	int offset, size, check_rip, offset_max, throttle = 0;
	unsigned int msr;
	u64 *buf, config;

	if (!test_bit(IBS_STARTED, pcpu->state)) {
		/* Catch spurious interrupts after stopping IBS: */
		if (!test_and_clear_bit(IBS_STOPPING, pcpu->state))
			return 0;
		rdmsrl(perf_ibs->msr, *ibs_data.regs);
		return (*ibs_data.regs & perf_ibs->valid_mask) ? 1 : 0;
	}

	msr = hwc->config_base;
	buf = ibs_data.regs;
	rdmsrl(msr, *buf);
	if (!(*buf++ & perf_ibs->valid_mask))
		return 0;
	/*
	 * Emulate IbsOpCurCnt in MSRC001_1033 (IbsOpCtl), which is not
	 * supported by all cpus. Since this interrupt fired, the
	 * current count must have reached the max count, so set it to
	 * that.
	 */
	config = ibs_data.regs[0];
	if (perf_ibs == &perf_ibs_op && !(ibs_caps & IBS_CAPS_RDWROPCNT)) {
		config &= ~IBS_OP_CUR_CNT;
		config |= (config & IBS_OP_MAX_CNT) << 36;
	}
	perf_ibs_event_update(perf_ibs, event, config);
	perf_sample_data_init(&data, 0, hwc->last_period);
	if (!perf_ibs_set_period(perf_ibs, hwc, &config))
		goto out;	/* no sw counter overflow */

	ibs_data.caps = ibs_caps;
	size = 1;
	offset = 1;
	check_rip = (perf_ibs == &perf_ibs_op && (ibs_caps & IBS_CAPS_RIPINVALIDCHK));
	if (event->attr.sample_type & PERF_SAMPLE_RAW)
		offset_max = perf_ibs->offset_max;
	else if (check_rip)
		offset_max = 2;
	else
		offset_max = 1;
	do {
		rdmsrl(msr + offset, *buf++);
		size++;
		offset = find_next_bit(perf_ibs->offset_mask,
				       perf_ibs->offset_max,
				       offset + 1);
	} while (offset < offset_max);
	ibs_data.size = sizeof(u64) * size;

	regs = *iregs;
	if (check_rip && (ibs_data.regs[2] & IBS_RIP_INVALID)) {
		regs.flags &= ~PERF_EFLAGS_EXACT;
	} else {
		instruction_pointer_set(&regs, ibs_data.regs[1]);
		regs.flags |= PERF_EFLAGS_EXACT;
	}

	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
		raw.size = sizeof(u32) + ibs_data.size;
		raw.data = ibs_data.data;
		data.raw = &raw;
	}

	throttle = perf_event_overflow(event, &data, &regs);
out:
	config = (config >> 4) | (throttle ? 0 : perf_ibs->enable_mask);
	perf_ibs_enable_event(hwc, config);

	perf_event_update_userpage(event);

	return 1;
}
static int __kprobes
perf_ibs_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
	int handled = 0;

	handled += perf_ibs_handle_irq(&perf_ibs_fetch, regs);
	handled += perf_ibs_handle_irq(&perf_ibs_op, regs);

	if (handled)
		inc_irq_stat(apic_perf_irqs);

	return handled;
}
static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
{
	struct cpu_perf_ibs __percpu *pcpu;
	int ret;

	pcpu = alloc_percpu(struct cpu_perf_ibs);
	if (!pcpu)
		return -ENOMEM;

	perf_ibs->pcpu = pcpu;

	ret = perf_pmu_register(&perf_ibs->pmu, name, -1);
	if (ret) {
		perf_ibs->pcpu = NULL;
		free_percpu(pcpu);
	}

	return ret;
}
static __init int perf_event_ibs_init(void)
{
	if (!ibs_caps)
		return -ENODEV;	/* ibs not supported by the cpu */

	perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");

	if (ibs_caps & IBS_CAPS_OPCNT)
		perf_ibs_op.config_mask |= IBS_OP_CNT_CTL;
	perf_ibs_pmu_init(&perf_ibs_op, "ibs_op");

	register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs");
	printk(KERN_INFO "perf: AMD IBS detected (0x%08x)\n", ibs_caps);

	return 0;
}

#else /* defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) */

static __init int perf_event_ibs_init(void) { return 0; }

#endif
/* IBS - apic initialization, for perf and oprofile */

static __init u32 __get_ibs_caps(void)
{
	u32 caps;
	unsigned int max_level;

	if (!boot_cpu_has(X86_FEATURE_IBS))
		return 0;

	/* check IBS cpuid feature flags */
	max_level = cpuid_eax(0x80000000);
	if (max_level < IBS_CPUID_FEATURES)
		return IBS_CAPS_DEFAULT;

	caps = cpuid_eax(IBS_CPUID_FEATURES);
	if (!(caps & IBS_CAPS_AVAIL))
		/* cpuid flags not valid */
		return IBS_CAPS_DEFAULT;

	return caps;
}

u32 get_ibs_caps(void)
{
	return ibs_caps;
}

EXPORT_SYMBOL(get_ibs_caps);
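
/*
 * Try to reserve (get) or release (put) an extended interrupt LVT
 * entry for NMI delivery; a nonzero return means success.
 */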
static inline int get_eilvt(int offset)
{
	return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1);
}

static inline int put_eilvt(int offset)
{
	return !setup_APIC_eilvt(offset, 0, 0, 1);
}
/*
 * Check and reserve APIC extended interrupt LVT offset for IBS if available.
 */
static inline int ibs_eilvt_valid(void)
{
	int offset;
	u64 val;
	int valid = 0;

	preempt_disable();

	rdmsrl(MSR_AMD64_IBSCTL, val);
	offset = val & IBSCTL_LVT_OFFSET_MASK;

	if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
		pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
		       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
		goto out;
	}

	if (!get_eilvt(offset)) {
		pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
		       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
		goto out;
	}

	valid = 1;
out:
	preempt_enable();

	return valid;
}
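
/*
 * Write the LVT offset into the IBSCTL register of every node's
 * northbridge (one PCI device per node) and read it back to verify.
 */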
static int setup_ibs_ctl(int ibs_eilvt_off)
{
	struct pci_dev *cpu_cfg;
	int nodes;
	u32 value = 0;

	nodes = 0;
	cpu_cfg = NULL;
	do {
		cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD,
					 PCI_DEVICE_ID_AMD_10H_NB_MISC,
					 cpu_cfg);
		if (!cpu_cfg)
			break;
		++nodes;
		pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
				       | IBSCTL_LVT_OFFSET_VALID);
		pci_read_config_dword(cpu_cfg, IBSCTL, &value);
		if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) {
			pci_dev_put(cpu_cfg);
			printk(KERN_DEBUG "Failed to setup IBS LVT offset, "
			       "IBSCTL = 0x%08x\n", value);
			return -EINVAL;
		}
	} while (1);

	if (!nodes) {
		printk(KERN_DEBUG "No CPU node configured for IBS\n");
		return -ENODEV;
	}

	return 0;
}
/*
 * This runs only on the current cpu. We try to find an LVT offset and
 * set up the local APIC. For this we must disable preemption. On
 * success we initialize all nodes with this offset, which then
 * updates the offset in the per-node IBS_CTL msr. The per-core APIC
 * setup of the IBS interrupt vector is handled by
 * perf_ibs_cpu_notifier, which uses the new offset.
 */
static int force_ibs_eilvt_setup(void)
{
	int offset;
	int ret;

	preempt_disable();
	/* find the next free available EILVT entry, skip offset 0 */
	for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
		if (get_eilvt(offset))
			break;
	}
	preempt_enable();

	if (offset == APIC_EILVT_NR_MAX) {
		printk(KERN_DEBUG "No EILVT entry available\n");
		return -EBUSY;
	}

	ret = setup_ibs_ctl(offset);
	if (ret)
		goto out;

	if (!ibs_eilvt_valid()) {
		ret = -EFAULT;
		goto out;
	}

	pr_info("IBS: LVT offset %d assigned\n", offset);

	return 0;
out:
	preempt_disable();
	put_eilvt(offset);
	preempt_enable();
	return ret;
}
static inline int get_ibs_lvt_offset(void)
{
	u64 val;

	rdmsrl(MSR_AMD64_IBSCTL, val);
	if (!(val & IBSCTL_LVT_OFFSET_VALID))
		return -EINVAL;

	return val & IBSCTL_LVT_OFFSET_MASK;
}

static void setup_APIC_ibs(void *dummy)
{
	int offset;

	offset = get_ibs_lvt_offset();
	if (offset < 0)
		goto failed;

	if (!setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 0))
		return;
failed:
	pr_warn("perf: IBS APIC setup failed on cpu #%d\n",
		smp_processor_id());
}

static void clear_APIC_ibs(void *dummy)
{
	int offset;

	offset = get_ibs_lvt_offset();
	if (offset >= 0)
		setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
}
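
/*
 * Hotplug callback: program the IBS LVT entry when a cpu comes online
 * and mask it again when the cpu goes down.
 */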
static int __cpuinit
perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_STARTING:
		setup_APIC_ibs(NULL);
		break;
	case CPU_DYING:
		clear_APIC_ibs(NULL);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}
static __init int amd_ibs_init(void)
{
	u32 caps;
	int ret = -EINVAL;

	caps = __get_ibs_caps();
	if (!caps)
		return -ENODEV;	/* ibs not supported by the cpu */

	/*
	 * Force LVT offset assignment for family 10h: The offsets are
	 * not assigned by the BIOS for this family, so the OS is
	 * responsible for doing it. If the OS assignment fails, fall
	 * back to whatever the BIOS set up and try to use that.
	 */
	if (boot_cpu_data.x86 == 0x10)
		force_ibs_eilvt_setup();

	if (!ibs_eilvt_valid())
		goto out;

	get_online_cpus();
	ibs_caps = caps;
	/* make ibs_caps visible to other cpus: */
	smp_mb();
	perf_cpu_notifier(perf_ibs_cpu_notifier);
	smp_call_function(setup_APIC_ibs, NULL, 1);
	put_online_cpus();

	ret = perf_event_ibs_init();
out:
	if (ret)
		pr_err("Failed to setup IBS, %d\n", ret);
	return ret;
}

/* Since we need the pci subsystem to init ibs we can't do this earlier: */
device_initcall(amd_ibs_init);