/*
 *  (c) 2005 Advanced Micro Devices, Inc.
 *  Your use of this code is subject to the terms and conditions of the
 *  GNU general public license version 2. See "COPYING" or
 *  http://www.gnu.org/licenses/gpl.html
 *
 *  Written by Jacob Shin - AMD, Inc.
 *
 *  Support : jacob.shin@amd.com
 *
 *  MC4_MISC0 DRAM ECC Error Threshold available under AMD K8 Rev F.
 *  MC4_MISC0 exists per physical processor.
 *
 */
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kobject.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/sysdev.h>
#include <linux/sysfs.h>
#include <asm/apic.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/percpu.h>
#include <asm/idle.h>

#define PFX               "mce_threshold: "
#define VERSION           "version 1.0.10"
#define NR_BANKS          5
#define THRESHOLD_MAX     0xFFF
#define INT_TYPE_APIC     0x00020000
#define MASK_VALID_HI     0x80000000
#define MASK_LVTOFF_HI    0x00F00000
#define MASK_COUNT_EN_HI  0x00080000
#define MASK_INT_TYPE_HI  0x00060000
#define MASK_OVERFLOW_HI  0x00010000
#define MASK_ERR_COUNT_HI 0x00000FFF
#define MASK_OVERFLOW     0x0001000000000000L
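
/*
 * Informal sketch of the MCi_MISC thresholding register layout as implied by
 * the masks above (high 32 bits of the MSR; exact AMD field names may differ,
 * see the BKDG):
 *
 *   bit 31       valid
 *   bit 30       counter present        (MASK_VALID_HI >> 1)
 *   bit 29       locked by BIOS         (MASK_VALID_HI >> 2)
 *   bits 23-20   LVT offset             (MASK_LVTOFF_HI)
 *   bit 19       counter enable         (MASK_COUNT_EN_HI)
 *   bits 18-17   interrupt type         (MASK_INT_TYPE_HI, APIC = 01b)
 *   bit 16       overflow               (MASK_OVERFLOW_HI)
 *   bits 11-0    error count            (MASK_ERR_COUNT_HI)
 *
 * MASK_OVERFLOW is the same overflow bit expressed against the full 64-bit
 * MSR value (bit 48), as read by the interrupt handler below.
 */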
struct threshold_bank {
        unsigned int cpu;
        u8 bank;
        u8 interrupt_enable;
        u16 threshold_limit;
        struct kobject kobj;
};

static struct threshold_bank threshold_defaults = {
        .interrupt_enable = 0,
        .threshold_limit = THRESHOLD_MAX,
};

#ifdef CONFIG_SMP
static unsigned char shared_bank[NR_BANKS] = {
        0, 0, 0, 0, 1
};
#endif
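
/*
 * Note: bank 4 (MC4, the northbridge bank) is marked shared above because
 * MC4_MISC0 exists once per physical processor (see the header comment), so
 * only the first core of a package owns its sysfs directory; sibling cores
 * get symlinks (see threshold_create_bank below).
 */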
static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */

/*
 * CPU Initialization
 */

/* must be called with correct cpu affinity */
static void threshold_restart_bank(struct threshold_bank *b,
                                   int reset, u16 old_limit)
{
        u32 mci_misc_hi, mci_misc_lo;

        rdmsr(MSR_IA32_MC0_MISC + b->bank * 4, mci_misc_lo, mci_misc_hi);

        if (b->threshold_limit < (mci_misc_hi & THRESHOLD_MAX))
                reset = 1;      /* limit cannot be lower than err count */

        if (reset) {            /* reset err count and overflow bit */
                mci_misc_hi =
                    (mci_misc_hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
                    (THRESHOLD_MAX - b->threshold_limit);
        } else if (old_limit) { /* change limit w/o reset */
                int new_count = (mci_misc_hi & THRESHOLD_MAX) +
                    (old_limit - b->threshold_limit);
                mci_misc_hi = (mci_misc_hi & ~MASK_ERR_COUNT_HI) |
                    (new_count & THRESHOLD_MAX);
        }

        b->interrupt_enable ?
            (mci_misc_hi = (mci_misc_hi & ~MASK_INT_TYPE_HI) | INT_TYPE_APIC) :
            (mci_misc_hi &= ~MASK_INT_TYPE_HI);
        mci_misc_hi |= MASK_COUNT_EN_HI;

        wrmsr(MSR_IA32_MC0_MISC + b->bank * 4, mci_misc_lo, mci_misc_hi);
}
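
/*
 * Worked example of the preset arithmetic above (a sketch, not from the
 * original source): with threshold_limit = 10, the error count field is
 * preset to THRESHOLD_MAX - 10 = 0xFF5.  Each error increments the count,
 * so after 10 errors the counter overflows past 0xFFF, the overflow bit is
 * set, and, if interrupt_enable is on, the APIC threshold interrupt fires.
 * show_error_count() below undoes the preset by subtracting
 * (THRESHOLD_MAX - threshold_limit) from the raw count.
 */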
void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
{
        int bank;
        u32 mci_misc_lo, mci_misc_hi;
        unsigned int cpu = smp_processor_id();

        for (bank = 0; bank < NR_BANKS; ++bank) {
                rdmsr(MSR_IA32_MC0_MISC + bank * 4, mci_misc_lo, mci_misc_hi);

                /* !valid, !counter present, bios locked */
                if (!(mci_misc_hi & MASK_VALID_HI) ||
                    !(mci_misc_hi & MASK_VALID_HI >> 1) ||
                    (mci_misc_hi & MASK_VALID_HI >> 2))
                        continue;

                per_cpu(bank_map, cpu) |= (1 << bank);

#ifdef CONFIG_SMP
                if (shared_bank[bank] && c->cpu_core_id)
                        continue;
#endif

                setup_threshold_lvt((mci_misc_hi & MASK_LVTOFF_HI) >> 20);
                threshold_defaults.cpu = cpu;
                threshold_defaults.bank = bank;
                threshold_restart_bank(&threshold_defaults, 0, 0);
        }
}
/*
 * APIC Interrupt Handler
 */

/*
 * The threshold interrupt handler services THRESHOLD_APIC_VECTOR.
 * The interrupt goes off when error_count reaches threshold_limit.
 * The handler simply logs an mcelog entry with a software-defined bank number.
 */
asmlinkage void mce_threshold_interrupt(void)
{
        int bank;
        struct mce m;

        ack_APIC_irq();
        exit_idle();
        irq_enter();

        memset(&m, 0, sizeof(m));
        rdtscll(m.tsc);
        m.cpu = smp_processor_id();

        /* assume first bank caused it */
        for (bank = 0; bank < NR_BANKS; ++bank) {
                m.bank = MCE_THRESHOLD_BASE + bank;
                rdmsrl(MSR_IA32_MC0_MISC + bank * 4, m.misc);

                if (m.misc & MASK_OVERFLOW) {
                        mce_log(&m);
                        goto out;
                }
        }
out:
        irq_exit();
}
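
/*
 * Only the first bank found with its overflow bit set is logged per
 * interrupt (hence the "assume first bank caused it" comment above).  The
 * bank number handed to mce_log() is MCE_THRESHOLD_BASE + bank, a
 * software-defined value, so userspace can distinguish threshold events
 * from ordinary machine-check bank numbers.
 */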
/*
 * Sysfs Interface
 */

struct threshold_attr {
        struct attribute attr;
        ssize_t(*show) (struct threshold_bank *, char *);
        ssize_t(*store) (struct threshold_bank *, const char *, size_t count);
};

static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]);

static cpumask_t affinity_set(unsigned int cpu)
{
        cpumask_t oldmask = current->cpus_allowed;
        cpumask_t newmask = CPU_MASK_NONE;

        cpu_set(cpu, newmask);
        set_cpus_allowed(current, newmask);
        return oldmask;
}

static void affinity_restore(cpumask_t oldmask)
{
        set_cpus_allowed(current, oldmask);
}
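
/*
 * rdmsr/wrmsr only act on the CPU executing them, but the sysfs handlers
 * below may run on any CPU.  affinity_set() therefore pins the calling task
 * to the CPU that owns the bank before its MSRs are touched, and
 * affinity_restore() puts the original cpumask back afterwards.
 */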
#define SHOW_FIELDS(name)                                                   \
static ssize_t show_ ## name(struct threshold_bank * b, char *buf)         \
{                                                                           \
        return sprintf(buf, "%lx\n", (unsigned long) b->name);             \
}
SHOW_FIELDS(interrupt_enable)
SHOW_FIELDS(threshold_limit)

static ssize_t store_interrupt_enable(struct threshold_bank *b,
                                      const char *buf, size_t count)
{
        char *end;
        cpumask_t oldmask;
        unsigned long new = simple_strtoul(buf, &end, 0);

        if (end == buf)
                return -EINVAL;

        b->interrupt_enable = !!new;

        oldmask = affinity_set(b->cpu);
        threshold_restart_bank(b, 0, 0);
        affinity_restore(oldmask);

        return end - buf;
}

static ssize_t store_threshold_limit(struct threshold_bank *b,
                                     const char *buf, size_t count)
{
        char *end;
        cpumask_t oldmask;
        u16 old;
        unsigned long new = simple_strtoul(buf, &end, 0);

        if (end == buf)
                return -EINVAL;
        if (new > THRESHOLD_MAX)
                new = THRESHOLD_MAX;
        if (new < 1)
                new = 1;

        old = b->threshold_limit;
        b->threshold_limit = new;

        oldmask = affinity_set(b->cpu);
        threshold_restart_bank(b, 0, old);
        affinity_restore(oldmask);

        return end - buf;
}

static ssize_t show_error_count(struct threshold_bank *b, char *buf)
{
        u32 high, low;
        cpumask_t oldmask;

        oldmask = affinity_set(b->cpu);
        rdmsr(MSR_IA32_MC0_MISC + b->bank * 4, low, high); /* ignore low 32 */
        affinity_restore(oldmask);

        return sprintf(buf, "%x\n",
                       (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit));
}

static ssize_t store_error_count(struct threshold_bank *b,
                                 const char *buf, size_t count)
{
        cpumask_t oldmask;

        oldmask = affinity_set(b->cpu);
        threshold_restart_bank(b, 1, 0);
        affinity_restore(oldmask);

        return 1;
}
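
/*
 * Example interaction with the files defined above (a sketch; the exact
 * parent path depends on where the per-CPU machinecheck sysdev lives,
 * typically /sys/devices/system/machinecheck/machinecheckN/threshold_bankM):
 *
 *   echo 1  > interrupt_enable    # switch the bank to APIC interrupt mode
 *   echo 25 > threshold_limit     # new limit, clamped to 1..THRESHOLD_MAX
 *   cat error_count               # errors seen since the last reset
 *   echo 1  > error_count         # any write resets count and overflow bit
 */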
#define THRESHOLD_ATTR(_name,_mode,_show,_store) {                         \
        .attr = {.name = __stringify(_name), .mode = _mode },              \
        .show = _show,                                                      \
        .store = _store,                                                    \
};

#define ATTR_FIELDS(name)                                                   \
        static struct threshold_attr name =                                 \
        THRESHOLD_ATTR(name, 0644, show_## name, store_## name)

ATTR_FIELDS(interrupt_enable);
ATTR_FIELDS(threshold_limit);
ATTR_FIELDS(error_count);

static struct attribute *default_attrs[] = {
        &interrupt_enable.attr,
        &threshold_limit.attr,
        &error_count.attr,
        NULL
};

#define to_bank(k) container_of(k, struct threshold_bank, kobj)
#define to_attr(a) container_of(a, struct threshold_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
        struct threshold_bank *b = to_bank(kobj);
        struct threshold_attr *a = to_attr(attr);
        ssize_t ret;

        ret = a->show ? a->show(b, buf) : -EIO;
        return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
                     const char *buf, size_t count)
{
        struct threshold_bank *b = to_bank(kobj);
        struct threshold_attr *a = to_attr(attr);
        ssize_t ret;

        ret = a->store ? a->store(b, buf, count) : -EIO;
        return ret;
}

static struct sysfs_ops threshold_ops = {
        .show = show,
        .store = store,
};

static struct kobj_type threshold_ktype = {
        .sysfs_ops = &threshold_ops,
        .default_attrs = default_attrs,
};
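
/*
 * Dispatch sketch: every threshold_bank embeds a kobject, and every
 * threshold_attr embeds an attribute.  When sysfs calls the generic
 * show()/store() above, to_bank()/to_attr() use container_of() to recover
 * the enclosing structures and forward to the per-attribute handler
 * (show_error_count, store_threshold_limit, ...).  default_attrs makes the
 * three files appear automatically for every kobject registered with
 * threshold_ktype.
 */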
/* symlinks sibling shared banks to first core. first core owns dir/files. */
static __cpuinit int threshold_create_bank(unsigned int cpu, int bank)
{
        int err = 0;
        struct threshold_bank *b = NULL;

#ifdef CONFIG_SMP
        if (cpu_data[cpu].cpu_core_id && shared_bank[bank]) {   /* symlink */
                char name[16];
                unsigned lcpu = first_cpu(cpu_core_map[cpu]);

                if (cpu_data[lcpu].cpu_core_id)
                        goto out;       /* first core not up yet */

                b = per_cpu(threshold_banks, lcpu)[bank];
                if (!b)
                        goto out;

                sprintf(name, "threshold_bank%i", bank);
                err = sysfs_create_link(&per_cpu(device_mce, cpu).kobj,
                                        &b->kobj, name);
                if (err)
                        goto out;
                per_cpu(threshold_banks, cpu)[bank] = b;
                goto out;
        }
#endif

        b = kmalloc(sizeof(struct threshold_bank), GFP_KERNEL);
        if (!b) {
                err = -ENOMEM;
                goto out;
        }
        memset(b, 0, sizeof(struct threshold_bank));

        b->cpu = cpu;
        b->bank = bank;
        b->interrupt_enable = 0;
        b->threshold_limit = THRESHOLD_MAX;
        kobject_set_name(&b->kobj, "threshold_bank%i", bank);
        b->kobj.parent = &per_cpu(device_mce, cpu).kobj;
        b->kobj.ktype = &threshold_ktype;

        err = kobject_register(&b->kobj);
        if (err) {
                kfree(b);
                goto out;
        }
        per_cpu(threshold_banks, cpu)[bank] = b;
out:
        return err;
}
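
/*
 * Resulting layout for a shared bank on a two-core package (illustrative
 * names, assuming the per-CPU machinecheck sysdev directories):
 *
 *   machinecheck0/threshold_bank4/    <- real directory, owned by core 0
 *   machinecheck1/threshold_bank4     <- symlink to core 0's directory
 *
 * Both CPUs' per_cpu(threshold_banks)[4] pointers reference the same
 * struct threshold_bank, so accesses from either core reach the one shared
 * MC4 counter.
 */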
/* create dir/files for all valid threshold banks */
static __cpuinit int threshold_create_device(unsigned int cpu)
{
        int bank;
        int err = 0;

        for (bank = 0; bank < NR_BANKS; ++bank) {
                if (!(per_cpu(bank_map, cpu) & 1 << bank))
                        continue;
                err = threshold_create_bank(cpu, bank);
                if (err)
                        goto out;
        }
out:
        return err;
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * let's be hotplug friendly.
 * on multi-core processors, the first core always takes ownership of the
 * shared sysfs dir/files, and the rest of the cores are symlinked to it.
 */

/* cpu hotplug call removes all symlinks before first core dies */
static __cpuinit void threshold_remove_bank(unsigned int cpu, int bank)
{
        struct threshold_bank *b;
        char name[16];

        b = per_cpu(threshold_banks, cpu)[bank];
        if (!b)
                return;

        if (shared_bank[bank] && atomic_read(&b->kobj.kref.refcount) > 2) {
                sprintf(name, "threshold_bank%i", bank);
                sysfs_remove_link(&per_cpu(device_mce, cpu).kobj, name);
                per_cpu(threshold_banks, cpu)[bank] = NULL;
        } else {
                kobject_unregister(&b->kobj);
                kfree(per_cpu(threshold_banks, cpu)[bank]);
        }
}
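
/*
 * Removal note: for a shared bank, a kobject refcount above 2 is taken to
 * mean other cores still reference this bank (presumably via their sysfs
 * symlinks), so only this CPU's link and per-cpu pointer are dropped.
 * Otherwise this is the last user, and the kobject is unregistered and the
 * bank freed.
 */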
static __cpuinit void threshold_remove_device(unsigned int cpu)
{
        int bank;

        for (bank = 0; bank < NR_BANKS; ++bank) {
                if (!(per_cpu(bank_map, cpu) & 1 << bank))
                        continue;
                threshold_remove_bank(cpu, bank);
        }
}
/* link all existing siblings when first core comes up */
static __cpuinit int threshold_create_symlinks(unsigned int cpu)
{
        int bank, err = 0;
        unsigned int lcpu = 0;

        if (cpu_data[cpu].cpu_core_id)
                return 0;

        for_each_cpu_mask(lcpu, cpu_core_map[cpu]) {
                if (lcpu == cpu)
                        continue;
                for (bank = 0; bank < NR_BANKS; ++bank) {
                        if (!(per_cpu(bank_map, cpu) & 1 << bank))
                                continue;
                        if (!shared_bank[bank])
                                continue;
                        err = threshold_create_bank(lcpu, bank);
                }
        }
        return err;
}

/* remove all symlinks before first core dies. */
static __cpuinit void threshold_remove_symlinks(unsigned int cpu)
{
        int bank;
        unsigned int lcpu = 0;

        if (cpu_data[cpu].cpu_core_id)
                return;

        for_each_cpu_mask(lcpu, cpu_core_map[cpu]) {
                if (lcpu == cpu)
                        continue;
                for (bank = 0; bank < NR_BANKS; ++bank) {
                        if (!(per_cpu(bank_map, cpu) & 1 << bank))
                                continue;
                        if (!shared_bank[bank])
                                continue;
                        threshold_remove_bank(lcpu, bank);
                }
        }
}
#else /* !CONFIG_HOTPLUG_CPU */
static __cpuinit void threshold_create_symlinks(unsigned int cpu)
{
}
static __cpuinit void threshold_remove_symlinks(unsigned int cpu)
{
}
static void threshold_remove_device(unsigned int cpu)
{
}
#endif
/* get notified when a cpu comes on/off */
static int threshold_cpu_callback(struct notifier_block *nfb,
                                  unsigned long action, void *hcpu)
{
        /* cpu was unsigned int to begin with */
        unsigned int cpu = (unsigned long)hcpu;

        if (cpu >= NR_CPUS)
                goto out;

        switch (action) {
        case CPU_ONLINE:
                threshold_create_device(cpu);
                threshold_create_symlinks(cpu);
                break;
        case CPU_DOWN_PREPARE:
                threshold_remove_symlinks(cpu);
                break;
        case CPU_DOWN_FAILED:
                threshold_create_symlinks(cpu);
                break;
        case CPU_DEAD:
                threshold_remove_device(cpu);
                break;
        default:
                break;
        }
out:
        return NOTIFY_OK;
}

static struct notifier_block threshold_cpu_notifier = {
        .notifier_call = threshold_cpu_callback,
};
static __init int threshold_init_device(void)
{
        int lcpu = 0;

        /* to hit CPUs online before the notifier is up */
        for_each_online_cpu(lcpu) {
                int err = threshold_create_device(lcpu);
                if (err)
                        return err;
        }
        register_cpu_notifier(&threshold_cpu_notifier);
        return 0;
}

device_initcall(threshold_init_device);