/*
 * @file op_model_amd.c
 * athlon / K7 / K8 / Family 10h model-specific MSR operations
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon
 * @author Philippe Elie
 * @author Graydon Hoare
 * @author Robert Richter <robert.richter@amd.com>
 * @author Barry Kasindorf
 */

#include <linux/oprofile.h>
#include <linux/device.h>
#include <linux/pci.h>

#include <asm/ptrace.h>
#include <asm/msr.h>
#include <asm/nmi.h>

#include "op_x86_model.h"
#include "op_counter.h"

#define NUM_COUNTERS		4
#define NUM_CONTROLS		4

#define OP_EVENT_MASK		0x0FFF
#define OP_CTR_OVERFLOW		(1ULL<<31)
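
/*
 * Reserved bits of the event select MSRs: op_amd_setup_ctrs() ANDs the
 * current register value with this mask, so only these bits survive
 * before the new control value is merged in.
 */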
#define MSR_AMD_EVENTSEL_RESERVED	((0xFFFFFCF0ULL<<32)|(1ULL<<21))

static unsigned long reset_value[NUM_COUNTERS];

#ifdef CONFIG_OPROFILE_IBS

/* IbsFetchCtl bits/masks */
#define IBS_FETCH_RAND_EN	(1ULL<<57)
#define IBS_FETCH_VAL		(1ULL<<49)
#define IBS_FETCH_ENABLE	(1ULL<<48)
#define IBS_FETCH_CNT_MASK	0xFFFF0000ULL

/* IbsOpCtl bits */
#define IBS_OP_CNT_CTL		(1ULL<<19)
#define IBS_OP_VAL		(1ULL<<18)
#define IBS_OP_ENABLE		(1ULL<<17)
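
/*
 * Size of one IBS sample in buffer words: two words per 64-bit value,
 * matching the three values logged per fetch sample and the six logged
 * per op sample in op_amd_handle_ibs() below.
 */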
#define IBS_FETCH_SIZE		6
#define IBS_OP_SIZE		12

static int has_ibs;	/* AMD Family10h and later */

struct op_ibs_config {
	unsigned long op_enabled;
	unsigned long fetch_enabled;
	unsigned long max_cnt_fetch;
	unsigned long max_cnt_op;
	unsigned long rand_en;
	unsigned long dispatched_ops;
};

static struct op_ibs_config ibs_config;

#endif

/* functions for op_amd_spec */

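/*
 * MSRs that cannot be reserved against the NMI watchdog get addr == 0
 * here; all later routines test for that and skip the slot.
 */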
static void op_amd_fill_in_addresses(struct op_msrs * const msrs)
{
	int i;

	for (i = 0; i < NUM_COUNTERS; i++) {
		if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
			msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
		else
			msrs->counters[i].addr = 0;
	}

	for (i = 0; i < NUM_CONTROLS; i++) {
		if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i))
			msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
		else
			msrs->controls[i].addr = 0;
	}
}

static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
			      struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	/* clear all counters */
	for (i = 0; i < NUM_CONTROLS; ++i) {
		if (unlikely(!msrs->controls[i].addr))
			continue;
		rdmsrl(msrs->controls[i].addr, val);
		val &= model->reserved;
		wrmsrl(msrs->controls[i].addr, val);
	}

	/* avoid a false detection of ctr overflows in NMI handler */
	for (i = 0; i < NUM_COUNTERS; ++i) {
		if (unlikely(!msrs->counters[i].addr))
			continue;
		wrmsrl(msrs->counters[i].addr, -1LL);
	}

	/* enable active counters */
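	/*
	 * counters count upwards, so writing -count makes the hardware
	 * overflow (and raise the NMI) after "count" events;
	 * op_amd_check_ctrs() rearms with the same reset_value
	 */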
	for (i = 0; i < NUM_COUNTERS; ++i) {
		if (counter_config[i].enabled && msrs->counters[i].addr) {
			reset_value[i] = counter_config[i].count;
			wrmsrl(msrs->counters[i].addr,
			       -(s64)counter_config[i].count);
			rdmsrl(msrs->controls[i].addr, val);
			val &= model->reserved;
			val |= op_x86_get_ctrl(model, &counter_config[i]);
			wrmsrl(msrs->controls[i].addr, val);
		} else {
			reset_value[i] = 0;
		}
	}
}

#ifdef CONFIG_OPROFILE_IBS

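/*
 * Check the IBS fetch and op units for a completed sample; if one is
 * valid, copy it into the oprofile buffer as a reserve/add/commit
 * sequence of 64-bit values, then rearm the unit.
 */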
static inline int
op_amd_handle_ibs(struct pt_regs * const regs,
		  struct op_msrs const * const msrs)
{
	u64 val, ctl;
	struct op_entry entry;

	if (!has_ibs)
		return 1;

	if (ibs_config.fetch_enabled) {
		rdmsrl(MSR_AMD64_IBSFETCHCTL, ctl);
		if (ctl & IBS_FETCH_VAL) {
			rdmsrl(MSR_AMD64_IBSFETCHLINAD, val);
			oprofile_write_reserve(&entry, regs, val,
					       IBS_FETCH_CODE, IBS_FETCH_SIZE);
			oprofile_add_data64(&entry, val);
			oprofile_add_data64(&entry, ctl);
			rdmsrl(MSR_AMD64_IBSFETCHPHYSAD, val);
			oprofile_add_data64(&entry, val);
			oprofile_write_commit(&entry);

			/* reenable the IRQ */
			ctl &= ~(IBS_FETCH_VAL | IBS_FETCH_CNT_MASK);
			ctl |= IBS_FETCH_ENABLE;
			wrmsrl(MSR_AMD64_IBSFETCHCTL, ctl);
		}
	}

	if (ibs_config.op_enabled) {
		rdmsrl(MSR_AMD64_IBSOPCTL, ctl);
		if (ctl & IBS_OP_VAL) {
			rdmsrl(MSR_AMD64_IBSOPRIP, val);
			oprofile_write_reserve(&entry, regs, val,
					       IBS_OP_CODE, IBS_OP_SIZE);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSOPDATA, val);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSOPDATA2, val);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSOPDATA3, val);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSDCLINAD, val);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSDCPHYSAD, val);
			oprofile_add_data64(&entry, val);
			oprofile_write_commit(&entry);

			/* reenable the IRQ */
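			/*
			 * the 0xFFFFFFFF mask also zeroes the upper dword,
			 * which holds the current op count on Family 10h,
			 * so the new sampling period starts from zero
			 */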
			ctl &= ~IBS_OP_VAL & 0xFFFFFFFF;
			ctl |= IBS_OP_ENABLE;
			wrmsrl(MSR_AMD64_IBSOPCTL, ctl);
		}
	}

	return 1;
}
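
/*
 * The hardware max-count fields occupy bits 15:0 of IbsFetchCtl and
 * IbsOpCtl and count in units of 16 events, hence the ">> 4" applied
 * to the user-supplied max_cnt values below.
 */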
static inline void op_amd_start_ibs(void)
{
	u64 val;

	if (has_ibs && ibs_config.fetch_enabled) {
		val = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF;
		val |= ibs_config.rand_en ? IBS_FETCH_RAND_EN : 0;
		val |= IBS_FETCH_ENABLE;
		wrmsrl(MSR_AMD64_IBSFETCHCTL, val);
	}

	if (has_ibs && ibs_config.op_enabled) {
		val = (ibs_config.max_cnt_op >> 4) & 0xFFFF;
		val |= ibs_config.dispatched_ops ? IBS_OP_CNT_CTL : 0;
		val |= IBS_OP_ENABLE;
		wrmsrl(MSR_AMD64_IBSOPCTL, val);
	}
}

static void op_amd_stop_ibs(void)
{
	if (has_ibs && ibs_config.fetch_enabled)
		/* clear max count and enable */
		wrmsrl(MSR_AMD64_IBSFETCHCTL, 0);

	if (has_ibs && ibs_config.op_enabled)
		/* clear max count and enable */
		wrmsrl(MSR_AMD64_IBSOPCTL, 0);
}

#else

static inline int op_amd_handle_ibs(struct pt_regs * const regs,
				    struct op_msrs const * const msrs)
{
	/* match the IBS-enabled version, which always returns 1 */
	return 1;
}
static inline void op_amd_start_ibs(void) { }
static inline void op_amd_stop_ibs(void) { }

#endif

static int op_amd_check_ctrs(struct pt_regs * const regs,
			     struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	for (i = 0; i < NUM_COUNTERS; ++i) {
		if (!reset_value[i])
			continue;
		rdmsrl(msrs->counters[i].addr, val);
		/* bit is clear if overflowed: */
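		/*
		 * the counter was programmed with a negative value and
		 * counts up, so bit 31 stays set until it crosses zero
		 */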
		if (val & OP_CTR_OVERFLOW)
			continue;
		oprofile_add_sample(regs, i);
		wrmsrl(msrs->counters[i].addr, -(s64)reset_value[i]);
	}

	op_amd_handle_ibs(regs, msrs);

	/* See op_model_ppro.c */
	return 1;
}

static void op_amd_start(struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	for (i = 0; i < NUM_COUNTERS; ++i) {
		if (reset_value[i]) {
			rdmsrl(msrs->controls[i].addr, val);
			val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
			wrmsrl(msrs->controls[i].addr, val);
		}
	}

	op_amd_start_ibs();
}

static void op_amd_stop(struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	/*
	 * Subtle: stop on all counters to avoid race with setting our
	 * pm callback
	 */
	for (i = 0; i < NUM_COUNTERS; ++i) {
		if (!reset_value[i])
			continue;
		rdmsrl(msrs->controls[i].addr, val);
		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(msrs->controls[i].addr, val);
	}

	op_amd_stop_ibs();
}

static void op_amd_shutdown(struct op_msrs const * const msrs)
{
	int i;

	for (i = 0; i < NUM_COUNTERS; ++i) {
		if (msrs->counters[i].addr)
			release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
	}
	for (i = 0; i < NUM_CONTROLS; ++i) {
		if (msrs->controls[i].addr)
			release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
	}
}

#ifdef CONFIG_OPROFILE_IBS

static u8 ibs_eilvt_off;

static inline void apic_init_ibs_nmi_per_cpu(void *arg)
{
	ibs_eilvt_off = setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_NMI, 0);
}

static inline void apic_clear_ibs_nmi_per_cpu(void *arg)
{
	setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1);
}

static int init_ibs_nmi(void)
{
#define IBSCTL_LVTOFFSETVAL	(1 << 8)
#define IBSCTL			0x1cc
	struct pci_dev *cpu_cfg;
	int nodes;
	u32 value = 0;

	/* per CPU setup */
	on_each_cpu(apic_init_ibs_nmi_per_cpu, NULL, 1);

	nodes = 0;
	cpu_cfg = NULL;
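
	/*
	 * Program the IBS control register of each node's northbridge
	 * (one 10h NB misc PCI function per node) with the extended-LVT
	 * offset; the read-back checks that the write took effect.
	 */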
	do {
		cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD,
					 PCI_DEVICE_ID_AMD_10H_NB_MISC,
					 cpu_cfg);
		if (!cpu_cfg)
			break;
		++nodes;
		pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
				       | IBSCTL_LVTOFFSETVAL);
		pci_read_config_dword(cpu_cfg, IBSCTL, &value);
		if (value != (ibs_eilvt_off | IBSCTL_LVTOFFSETVAL)) {
			pci_dev_put(cpu_cfg);
			printk(KERN_DEBUG "Failed to setup IBS LVT offset, "
			       "IBSCTL = 0x%08x\n", value);
			return 1;
		}
	} while (1);

	if (!nodes) {
		printk(KERN_DEBUG "No CPU node configured for IBS\n");
		return 1;
	}

#ifdef CONFIG_NUMA
	/* Sanity check */
	/* Works only for 64bit with proper numa implementation. */
	if (nodes != num_possible_nodes()) {
		printk(KERN_DEBUG "Failed to setup CPU node(s) for IBS, "
		       "found: %d, expected %d\n",
		       nodes, num_possible_nodes());
		return 1;
	}
#endif
	return 0;
}

/* uninitialize the APIC for the IBS interrupts if needed */
static void clear_ibs_nmi(void)
{
	if (has_ibs)
		on_each_cpu(apic_clear_ibs_nmi_per_cpu, NULL, 1);
}

/* initialize the APIC for the IBS interrupts if available */
static void ibs_init(void)
{
	has_ibs = boot_cpu_has(X86_FEATURE_IBS);

	if (!has_ibs)
		return;

	if (init_ibs_nmi()) {
		has_ibs = 0;
		return;
	}
	printk(KERN_INFO "oprofile: AMD IBS detected\n");
}

static void ibs_exit(void)
{
	if (!has_ibs)
		return;

	clear_ibs_nmi();
}
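
/*
 * op_amd_init() below installs setup_ibs_files() as the create_files
 * callback; it first runs the architecture's original callback and
 * then adds the IBS control files under ibs_fetch/ and ibs_op/.
 */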
static int (*create_arch_files)(struct super_block *sb, struct dentry *root);

static int setup_ibs_files(struct super_block *sb, struct dentry *root)
{
	struct dentry *dir;
	int ret = 0;

	/* architecture specific files */
	if (create_arch_files)
		ret = create_arch_files(sb, root);

	if (ret)
		return ret;

	if (!has_ibs)
		return ret;

	/* model specific files */

	/* setup some reasonable defaults */
	ibs_config.max_cnt_fetch = 250000;
	ibs_config.fetch_enabled = 0;
	ibs_config.max_cnt_op = 250000;
	ibs_config.op_enabled = 0;
	ibs_config.dispatched_ops = 1;

	dir = oprofilefs_mkdir(sb, root, "ibs_fetch");
	oprofilefs_create_ulong(sb, dir, "enable",
				&ibs_config.fetch_enabled);
	oprofilefs_create_ulong(sb, dir, "max_count",
				&ibs_config.max_cnt_fetch);
	oprofilefs_create_ulong(sb, dir, "rand_enable",
				&ibs_config.rand_en);

	dir = oprofilefs_mkdir(sb, root, "ibs_op");
	oprofilefs_create_ulong(sb, dir, "enable",
				&ibs_config.op_enabled);
	oprofilefs_create_ulong(sb, dir, "max_count",
				&ibs_config.max_cnt_op);
	oprofilefs_create_ulong(sb, dir, "dispatched_ops",
				&ibs_config.dispatched_ops);

	return 0;
}

static int op_amd_init(struct oprofile_operations *ops)
{
	ibs_init();
	create_arch_files = ops->create_files;
	ops->create_files = setup_ibs_files;
	return 0;
}

static void op_amd_exit(void)
{
	ibs_exit();
}

#else

/* no IBS support */

static int op_amd_init(struct oprofile_operations *ops)
{
	return 0;
}

static void op_amd_exit(void) {}

#endif /* CONFIG_OPROFILE_IBS */

struct op_x86_model_spec const op_amd_spec = {
	.num_counters		= NUM_COUNTERS,
	.num_controls		= NUM_CONTROLS,
	.reserved		= MSR_AMD_EVENTSEL_RESERVED,
	.event_mask		= OP_EVENT_MASK,
	.init			= op_amd_init,
	.exit			= op_amd_exit,
	.fill_in_addresses	= &op_amd_fill_in_addresses,
	.setup_ctrs		= &op_amd_setup_ctrs,
	.check_ctrs		= &op_amd_check_ctrs,
	.start			= &op_amd_start,
	.stop			= &op_amd_stop,
	.shutdown		= &op_amd_shutdown,
};