/*
 * (c) 2005, 2006 Advanced Micro Devices, Inc.
 * Your use of this code is subject to the terms and conditions of the
 * GNU general public license version 2. See "COPYING" or
 * http://www.gnu.org/licenses/gpl.html
 *
 * Written by Jacob Shin - AMD, Inc.
 *
 * Support : jacob.shin@amd.com
 *
 * April 2006
 * - added support for AMD Family 0x10 processors
 *
 * All MC4_MISCi registers are shared between multi-cores
 */
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/kobject.h>
#include <linux/percpu.h>
#include <linux/sysdev.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <asm/apic.h>
#include <asm/idle.h>
#include <asm/mce.h>
#include <asm/msr.h>
#define NR_BANKS          6
#define NR_BLOCKS         9
#define THRESHOLD_MAX     0xFFF
#define INT_TYPE_APIC     0x00020000
#define MASK_VALID_HI     0x80000000
#define MASK_CNTP_HI      0x40000000
#define MASK_LOCKED_HI    0x20000000
#define MASK_LVTOFF_HI    0x00F00000
#define MASK_COUNT_EN_HI  0x00080000
#define MASK_INT_TYPE_HI  0x00060000
#define MASK_OVERFLOW_HI  0x00010000
#define MASK_ERR_COUNT_HI 0x00000FFF
#define MASK_BLKPTR_LO    0xFF000000
#define MCG_XBLK_ADDR     0xC0000400
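
/*
 * Layout of the high half of an MCi_MISC thresholding register, as
 * implied by the masks above: bit 31 valid, bit 30 counter present,
 * bit 29 locked, bits 23-20 LVT offset, bit 19 counter enable,
 * bits 18-17 interrupt type, bit 16 overflow, bits 11-0 error count.
 */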
struct threshold_block {
        unsigned int            block;            /* number within the bank */
        unsigned int            bank;             /* MCA bank it belongs to */
        unsigned int            cpu;              /* CPU that owns this block */
        u32                     address;          /* MSR address of the block */
        u16                     interrupt_enable; /* enable/disable APIC interrupt */
        u16                     threshold_limit;  /* errors until interrupt */
        struct kobject          kobj;             /* sysfs object */
        struct list_head        miscj;            /* list of blocks in a bank */
};

struct threshold_bank {
        struct kobject          *kobj;
        struct threshold_block  *blocks;
        cpumask_var_t           cpus;
};
static DEFINE_PER_CPU(struct threshold_bank * [NR_BANKS], threshold_banks);

#ifdef CONFIG_SMP
static unsigned char shared_bank[NR_BANKS] = {
        0, 0, 0, 0, 1
};
#endif
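
/*
 * Per the note at the top of the file, bank 4 (MC4, the northbridge
 * bank) is the one marked shared above: its MISC registers are visible
 * to all cores on a node, so only the first core sets it up and owns
 * its sysfs files.
 */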
static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */

static void amd_threshold_interrupt(void);

/*
 * CPU Initialization
 */
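
/*
 * Argument block handed to threshold_restart_bank() via
 * smp_call_function_single(), so that the MSR accesses happen on the
 * CPU that owns the threshold block.
 */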
struct thresh_restart {
        struct threshold_block  *b;
        int                     reset;
        int                     set_lvt_off;
        int                     lvt_off;
        u16                     old_limit;
};

static int lvt_off_valid(struct threshold_block *b, int apic, u32 lo, u32 hi)
{
        int msr = (hi & MASK_LVTOFF_HI) >> 20;

        if (apic < 0) {
                pr_err(FW_BUG "cpu %d, failed to setup threshold interrupt "
                       "for bank %d, block %d (MSR%08X=0x%x%08x)\n", b->cpu,
                       b->bank, b->block, b->address, hi, lo);
                return 0;
        }

        if (apic != msr) {
                pr_err(FW_BUG "cpu %d, invalid threshold interrupt offset %d "
                       "for bank %d, block %d (MSR%08X=0x%x%08x)\n",
                       b->cpu, apic, b->bank, b->block, b->address, hi, lo);
                return 0;
        }

        return 1;
}
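
/*
 * The hardware counter counts up from (THRESHOLD_MAX - threshold_limit)
 * and raises the threshold interrupt when it overflows past
 * THRESHOLD_MAX, i.e. after threshold_limit further errors. The
 * arithmetic below and in local_error_count_handler() follows from
 * that encoding.
 */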
/* must be called with correct cpu affinity */
/* Called via smp_call_function_single() */
static void threshold_restart_bank(void *_tr)
{
        struct thresh_restart *tr = _tr;
        u32 hi, lo;

        rdmsr(tr->b->address, lo, hi);

        if (tr->b->threshold_limit < (hi & THRESHOLD_MAX))
                tr->reset = 1;  /* limit cannot be lower than err count */

        if (tr->reset) {                /* reset err count and overflow bit */
                hi =
                    (hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
                    (THRESHOLD_MAX - tr->b->threshold_limit);
        } else if (tr->old_limit) {     /* change limit w/o reset */
                int new_count = (hi & THRESHOLD_MAX) +
                    (tr->old_limit - tr->b->threshold_limit);

                hi = (hi & ~MASK_ERR_COUNT_HI) |
                    (new_count & THRESHOLD_MAX);
        }

        if (tr->set_lvt_off) {
                if (lvt_off_valid(tr->b, tr->lvt_off, lo, hi)) {
                        /* set new lvt offset */
                        hi &= ~MASK_LVTOFF_HI;
                        hi |= tr->lvt_off << 20;
                }
        }

        if (tr->b->interrupt_enable)
                hi = (hi & ~MASK_INT_TYPE_HI) | INT_TYPE_APIC;
        else
                hi &= ~MASK_INT_TYPE_HI;

        hi |= MASK_COUNT_EN_HI;
        wrmsr(tr->b->address, lo, hi);
}

static void mce_threshold_block_init(struct threshold_block *b, int offset)
{
        struct thresh_restart tr = {
                .b              = b,
                .set_lvt_off    = 1,
                .lvt_off        = offset,
        };

        b->threshold_limit = THRESHOLD_MAX;
        threshold_restart_bank(&tr);
}
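
/*
 * Reserve the extended LVT entry for the threshold interrupt once: if
 * no offset has been claimed yet (reserved < 0) and programming the
 * entry succeeds, remember the new offset; otherwise keep the one
 * already reserved.
 */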
static int setup_APIC_mce(int reserved, int new)
{
        if (reserved < 0 && !setup_APIC_eilvt(new, THRESHOLD_APIC_VECTOR,
                                              APIC_EILVT_MSG_FIX, 0))
                return new;

        return reserved;
}
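
/*
 * Threshold blocks are discovered by walking MSR space: block 0 of a
 * bank lives at MC0_MISC + bank * 4; if its low half carries a block
 * pointer (MASK_BLKPTR_LO), block 1 sits at MCG_XBLK_ADDR plus that
 * pointer, and any further blocks follow at consecutive addresses.
 * The same walk is repeated in amd_threshold_interrupt() and
 * allocate_threshold_blocks() below.
 */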
/* cpu init entry point, called from mce.c with preempt off */
void mce_amd_feature_init(struct cpuinfo_x86 *c)
{
        struct threshold_block b;
        unsigned int cpu = smp_processor_id();
        u32 low = 0, high = 0, address = 0;
        unsigned int bank, block;
        int offset = -1;

        for (bank = 0; bank < NR_BANKS; ++bank) {
                for (block = 0; block < NR_BLOCKS; ++block) {
                        if (block == 0)
                                address = MSR_IA32_MC0_MISC + bank * 4;
                        else if (block == 1) {
                                address = (low & MASK_BLKPTR_LO) >> 21;
                                if (!address)
                                        break;

                                address += MCG_XBLK_ADDR;
                        } else
                                ++address;

                        if (rdmsr_safe(address, &low, &high))
                                break;

                        if (!(high & MASK_VALID_HI))
                                continue;

                        if (!(high & MASK_CNTP_HI)  ||
                             (high & MASK_LOCKED_HI))
                                continue;

                        if (!block)
                                per_cpu(bank_map, cpu) |= (1 << bank);
#ifdef CONFIG_SMP
                        if (shared_bank[bank] && c->cpu_core_id)
                                break;
#endif
                        offset = setup_APIC_mce(offset,
                                                (high & MASK_LVTOFF_HI) >> 20);

                        memset(&b, 0, sizeof(b));
                        b.cpu           = cpu;
                        b.bank          = bank;
                        b.block         = block;
                        b.address       = address;

                        mce_threshold_block_init(&b, offset);
                        mce_threshold_vector = amd_threshold_interrupt;
                }
        }
}

/*
 * APIC Interrupt Handler
 */

/*
 * The threshold interrupt handler services THRESHOLD_APIC_VECTOR.
 * The interrupt goes off when error_count reaches threshold_limit,
 * and the handler simply logs an mcelog entry with a software-defined
 * bank number.
 */
static void amd_threshold_interrupt(void)
{
        u32 low = 0, high = 0, address = 0;
        unsigned int bank, block;
        struct mce m;

        mce_setup(&m);

        /* assume first bank caused it */
        for (bank = 0; bank < NR_BANKS; ++bank) {
                if (!(per_cpu(bank_map, m.cpu) & (1 << bank)))
                        continue;
                for (block = 0; block < NR_BLOCKS; ++block) {
                        if (block == 0) {
                                address = MSR_IA32_MC0_MISC + bank * 4;
                        } else if (block == 1) {
                                address = (low & MASK_BLKPTR_LO) >> 21;
                                if (!address)
                                        break;
                                address += MCG_XBLK_ADDR;
                        } else {
                                ++address;
                        }

                        if (rdmsr_safe(address, &low, &high))
                                break;

                        if (!(high & MASK_VALID_HI)) {
                                if (block)
                                        continue;
                                else
                                        break;
                        }

                        if (!(high & MASK_CNTP_HI)  ||
                             (high & MASK_LOCKED_HI))
                                continue;

                        /*
                         * Log the machine check that caused the threshold
                         * event.
                         */
                        machine_check_poll(MCP_TIMESTAMP,
                                           &__get_cpu_var(mce_poll_banks));

                        if (high & MASK_OVERFLOW_HI) {
                                rdmsrl(address, m.misc);
                                rdmsrl(MSR_IA32_MC0_STATUS + bank * 4,
                                       m.status);
                                m.bank = K8_MCE_THRESHOLD_BASE
                                       + bank * NR_BLOCKS
                                       + block;
                                mce_log(&m);
                                return;
                        }
                }
        }
}

/*
 * Sysfs Interface
 */
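
/*
 * Each enabled block is exposed under the per-CPU machinecheck sysfs
 * node (typically /sys/devices/system/machinecheck/machinecheck<N> on
 * kernels of this vintage) as threshold_bank<M>/misc<K>/ with the
 * interrupt_enable, threshold_limit and error_count attributes defined
 * below.
 */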
struct threshold_attr {
        struct attribute attr;
        ssize_t (*show) (struct threshold_block *, char *);
        ssize_t (*store) (struct threshold_block *, const char *, size_t count);
};

#define SHOW_FIELDS(name)                                               \
static ssize_t show_ ## name(struct threshold_block *b, char *buf)     \
{                                                                       \
        return sprintf(buf, "%lx\n", (unsigned long) b->name);         \
}
SHOW_FIELDS(interrupt_enable)
SHOW_FIELDS(threshold_limit)

static ssize_t
store_interrupt_enable(struct threshold_block *b, const char *buf, size_t size)
{
        struct thresh_restart tr;
        unsigned long new;

        if (strict_strtoul(buf, 0, &new) < 0)
                return -EINVAL;

        b->interrupt_enable = !!new;

        memset(&tr, 0, sizeof(tr));
        tr.b = b;

        smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

        return size;
}

static ssize_t
store_threshold_limit(struct threshold_block *b, const char *buf, size_t size)
{
        struct thresh_restart tr;
        unsigned long new;

        if (strict_strtoul(buf, 0, &new) < 0)
                return -EINVAL;

        if (new > THRESHOLD_MAX)
                new = THRESHOLD_MAX;
        if (new < 1)
                new = 1;

        memset(&tr, 0, sizeof(tr));
        tr.old_limit = b->threshold_limit;
        b->threshold_limit = new;
        tr.b = b;

        smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

        return size;
}

struct threshold_block_cross_cpu {
        struct threshold_block  *tb;
        long                    retval;
};

static void local_error_count_handler(void *_tbcc)
{
        struct threshold_block_cross_cpu *tbcc = _tbcc;
        struct threshold_block *b = tbcc->tb;
        u32 low, high;

        rdmsr(b->address, low, high);
        tbcc->retval = (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit);
}

static ssize_t show_error_count(struct threshold_block *b, char *buf)
{
        struct threshold_block_cross_cpu tbcc = { .tb = b, };

        smp_call_function_single(b->cpu, local_error_count_handler, &tbcc, 1);
        return sprintf(buf, "%lx\n", tbcc.retval);
}
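
/*
 * Writing anything to error_count resets the block's counter; the
 * value written is ignored.
 */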
static ssize_t store_error_count(struct threshold_block *b,
                                 const char *buf, size_t count)
{
        struct thresh_restart tr = { .b = b, .reset = 1, .old_limit = 0 };

        smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);
        return 1;
}

#define RW_ATTR(val)                                                    \
static struct threshold_attr val = {                                    \
        .attr   = {.name = __stringify(val), .mode = 0644 },            \
        .show   = show_## val,                                           \
        .store  = store_## val,                                          \
};

RW_ATTR(interrupt_enable);
RW_ATTR(threshold_limit);
RW_ATTR(error_count);

static struct attribute *default_attrs[] = {
        &interrupt_enable.attr,
        &threshold_limit.attr,
        &error_count.attr,
        NULL
};

#define to_block(k)     container_of(k, struct threshold_block, kobj)
#define to_attr(a)      container_of(a, struct threshold_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
        struct threshold_block *b = to_block(kobj);
        struct threshold_attr *a = to_attr(attr);
        ssize_t ret;

        ret = a->show ? a->show(b, buf) : -EIO;

        return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
                     const char *buf, size_t count)
{
        struct threshold_block *b = to_block(kobj);
        struct threshold_attr *a = to_attr(attr);
        ssize_t ret;

        ret = a->store ? a->store(b, buf, count) : -EIO;

        return ret;
}

static const struct sysfs_ops threshold_ops = {
        .show   = show,
        .store  = store,
};

static struct kobj_type threshold_ktype = {
        .sysfs_ops      = &threshold_ops,
        .default_attrs  = default_attrs,
};
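
/*
 * Build a threshold_block (and its sysfs "misc<K>" kobject) for every
 * valid block of a bank, recursing along the same MSR walk used at CPU
 * init time. The first block becomes the list head; later blocks are
 * chained onto its miscj list.
 */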
static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
                                               unsigned int bank,
                                               unsigned int block,
                                               u32 address)
{
        struct threshold_block *b = NULL;
        u32 low, high;
        int err;

        if ((bank >= NR_BANKS) || (block >= NR_BLOCKS))
                return 0;

        if (rdmsr_safe_on_cpu(cpu, address, &low, &high))
                return 0;

        if (!(high & MASK_VALID_HI)) {
                if (block)
                        goto recurse;
                else
                        return 0;
        }

        if (!(high & MASK_CNTP_HI)  ||
             (high & MASK_LOCKED_HI))
                goto recurse;

        b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL);
        if (!b)
                return -ENOMEM;

        b->block                = block;
        b->bank                 = bank;
        b->cpu                  = cpu;
        b->address              = address;
        b->interrupt_enable     = 0;
        b->threshold_limit      = THRESHOLD_MAX;

        INIT_LIST_HEAD(&b->miscj);

        if (per_cpu(threshold_banks, cpu)[bank]->blocks) {
                list_add(&b->miscj,
                         &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
        } else {
                per_cpu(threshold_banks, cpu)[bank]->blocks = b;
        }

        err = kobject_init_and_add(&b->kobj, &threshold_ktype,
                                   per_cpu(threshold_banks, cpu)[bank]->kobj,
                                   "misc%i", block);
        if (err)
                goto out_free;
recurse:
        if (!block) {
                address = (low & MASK_BLKPTR_LO) >> 21;
                if (!address)
                        return 0;
                address += MCG_XBLK_ADDR;
        } else {
                ++address;
        }

        err = allocate_threshold_blocks(cpu, bank, ++block, address);
        if (err)
                goto out_free;

        if (b)
                kobject_uevent(&b->kobj, KOBJ_ADD);

        return err;

out_free:
        if (b) {
                kobject_put(&b->kobj);
                list_del(&b->miscj);
                kfree(b);
        }
        return err;
}

static __cpuinit long
local_allocate_threshold_blocks(int cpu, unsigned int bank)
{
        return allocate_threshold_blocks(cpu, bank, 0,
                                         MSR_IA32_MC0_MISC + bank * 4);
}

/* symlinks sibling shared banks to first core.  first core owns dir/files. */
static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
{
        int i, err = 0;
        struct threshold_bank *b = NULL;
        char name[32];

        sprintf(name, "threshold_bank%i", bank);

#ifdef CONFIG_SMP
        if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) {   /* symlink */
                i = cpumask_first(cpu_llc_shared_mask(cpu));

                /* first core not up yet */
                if (cpu_data(i).cpu_core_id)
                        goto out;

                /* already linked */
                if (per_cpu(threshold_banks, cpu)[bank])
                        goto out;

                b = per_cpu(threshold_banks, i)[bank];

                if (!b)
                        goto out;

                err = sysfs_create_link(&per_cpu(mce_sysdev, cpu).kobj,
                                        b->kobj, name);
                if (err)
                        goto out;

                cpumask_copy(b->cpus, cpu_llc_shared_mask(cpu));
                per_cpu(threshold_banks, cpu)[bank] = b;

                goto out;
        }
#endif

        b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
        if (!b) {
                err = -ENOMEM;
                goto out;
        }
        if (!zalloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
                kfree(b);
                err = -ENOMEM;
                goto out;
        }

        b->kobj = kobject_create_and_add(name, &per_cpu(mce_sysdev, cpu).kobj);
        if (!b->kobj)
                goto out_free;

#ifndef CONFIG_SMP
        cpumask_setall(b->cpus);
#else
        cpumask_set_cpu(cpu, b->cpus);
#endif

        per_cpu(threshold_banks, cpu)[bank] = b;

        err = local_allocate_threshold_blocks(cpu, bank);
        if (err)
                goto out_free;

        for_each_cpu(i, b->cpus) {
                if (i == cpu)
                        continue;

                err = sysfs_create_link(&per_cpu(mce_sysdev, i).kobj,
                                        b->kobj, name);
                if (err)
                        goto out;

                per_cpu(threshold_banks, i)[bank] = b;
        }

        goto out;

out_free:
        per_cpu(threshold_banks, cpu)[bank] = NULL;
        free_cpumask_var(b->cpus);
        kfree(b);
out:
        return err;
}

/* create dir/files for all valid threshold banks */
static __cpuinit int threshold_create_device(unsigned int cpu)
{
        unsigned int bank;
        int err = 0;

        for (bank = 0; bank < NR_BANKS; ++bank) {
                if (!(per_cpu(bank_map, cpu) & (1 << bank)))
                        continue;
                err = threshold_create_bank(cpu, bank);
                if (err)
                        return err;
        }

        return err;
}

/*
 * Let's be hotplug friendly.
 * In the case of multi-core processors, the first core always takes
 * ownership of the shared sysfs dir/files, and the rest of the cores
 * are symlinked to it.
 */
static void deallocate_threshold_block(unsigned int cpu,
                                       unsigned int bank)
{
        struct threshold_block *pos = NULL;
        struct threshold_block *tmp = NULL;
        struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];

        if (!head)
                return;

        list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
                kobject_put(&pos->kobj);
                list_del(&pos->miscj);
                kfree(pos);
        }

        kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
        per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
}

static void threshold_remove_bank(unsigned int cpu, int bank)
{
        struct threshold_bank *b;
        char name[32];
        int i = 0;

        b = per_cpu(threshold_banks, cpu)[bank];
        if (!b)
                return;
        if (!b->blocks)
                goto free_out;

        sprintf(name, "threshold_bank%i", bank);

#ifdef CONFIG_SMP
        /* sibling symlink */
        if (shared_bank[bank] && b->blocks->cpu != cpu) {
                sysfs_remove_link(&per_cpu(mce_sysdev, cpu).kobj, name);
                per_cpu(threshold_banks, cpu)[bank] = NULL;

                return;
        }
#endif

        /* remove all sibling symlinks before unregistering */
        for_each_cpu(i, b->cpus) {
                if (i == cpu)
                        continue;

                sysfs_remove_link(&per_cpu(mce_sysdev, i).kobj, name);
                per_cpu(threshold_banks, i)[bank] = NULL;
        }

        deallocate_threshold_block(cpu, bank);

free_out:
        kobject_del(b->kobj);
        kobject_put(b->kobj);
        free_cpumask_var(b->cpus);
        kfree(b);
        per_cpu(threshold_banks, cpu)[bank] = NULL;
}

static void threshold_remove_device(unsigned int cpu)
{
        unsigned int bank;

        for (bank = 0; bank < NR_BANKS; ++bank) {
                if (!(per_cpu(bank_map, cpu) & (1 << bank)))
                        continue;
                threshold_remove_bank(cpu, bank);
        }
}

/* get notified when a cpu comes on/off */
static void __cpuinit
amd_64_threshold_cpu_callback(unsigned long action, unsigned int cpu)
{
        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                threshold_create_device(cpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                threshold_remove_device(cpu);
                break;
        default:
                break;
        }
}

static __init int threshold_init_device(void)
{
        unsigned lcpu = 0;

        /* to hit CPUs online before the notifier is up */
        for_each_online_cpu(lcpu) {
                int err = threshold_create_device(lcpu);

                if (err)
                        return err;
        }
        threshold_cpu_callback = amd_64_threshold_cpu_callback;

        return 0;
}
device_initcall(threshold_init_device);