nmi_int.c

/**
 * @file nmi_int.c
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Robert Richter <robert.richter@amd.com>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Jason Yeh <jason.yeh@amd.com>
 * @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 */

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/oprofile.h>
#include <linux/sysdev.h>
#include <linux/slab.h>
#include <linux/moduleparam.h>
#include <linux/kdebug.h>
#include <linux/cpu.h>
#include <asm/nmi.h>
#include <asm/msr.h>
#include <asm/apic.h>

#include "op_counter.h"
#include "op_x86_model.h"

static struct op_x86_model_spec *model;
static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
static DEFINE_PER_CPU(unsigned long, saved_lvtpc);

/* 0 == registered but off, 1 == registered and on */
static int nmi_enabled = 0;

struct op_counter_config counter_config[OP_MAX_COUNTER];

/* common functions */
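
/*
 * Build the event-select (control) MSR value for one counter from its
 * oprofilefs configuration: event code in bits 0-7, unit mask in bits
 * 8-15, the USR/OS enable flags and the interrupt-enable bit, plus the
 * extended event bits (event[11:8]) shifted into the upper half for
 * models whose event_mask allows events wider than 8 bits.
 */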
u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
                    struct op_counter_config *counter_config)
{
        u64 val = 0;
        u16 event = (u16)counter_config->event;

        val |= ARCH_PERFMON_EVENTSEL_INT;
        val |= counter_config->user ? ARCH_PERFMON_EVENTSEL_USR : 0;
        val |= counter_config->kernel ? ARCH_PERFMON_EVENTSEL_OS : 0;
        val |= (counter_config->unit_mask & 0xFF) << 8;
        event &= model->event_mask ? model->event_mask : 0xFF;
        val |= event & 0xFF;
        val |= (event & 0x0F00) << 24;

        return val;
}
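
/*
 * NMI die-notifier callback: on DIE_NMI/DIE_NMI_IPI, hand the interrupt
 * to the model's check_ctrs() handler for this CPU's counters and claim
 * the NMI with NOTIFY_STOP; everything else is passed on untouched.
 */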
static int profile_exceptions_notify(struct notifier_block *self,
                                     unsigned long val, void *data)
{
        struct die_args *args = (struct die_args *)data;
        int ret = NOTIFY_DONE;
        int cpu = smp_processor_id();

        switch (val) {
        case DIE_NMI:
        case DIE_NMI_IPI:
                model->check_ctrs(args->regs, &per_cpu(cpu_msrs, cpu));
                ret = NOTIFY_STOP;
                break;
        default:
                break;
        }
        return ret;
}

static void nmi_cpu_save_registers(struct op_msrs *msrs)
{
        struct op_msr *counters = msrs->counters;
        struct op_msr *controls = msrs->controls;
        unsigned int i;

        for (i = 0; i < model->num_counters; ++i) {
                if (counters[i].addr)
                        rdmsrl(counters[i].addr, counters[i].saved);
        }

        for (i = 0; i < model->num_controls; ++i) {
                if (controls[i].addr)
                        rdmsrl(controls[i].addr, controls[i].saved);
        }
}

static void nmi_cpu_start(void *dummy)
{
        struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
        model->start(msrs);
}

static int nmi_start(void)
{
        on_each_cpu(nmi_cpu_start, NULL, 1);
        return 0;
}

static void nmi_cpu_stop(void *dummy)
{
        struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
        model->stop(msrs);
}

static void nmi_stop(void)
{
        on_each_cpu(nmi_cpu_stop, NULL, 1);
}
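
/*
 * Event multiplexing: with CONFIG_OPROFILE_EVENT_MULTIPLEX, more events
 * (virtual counters) can be configured than the hardware has physical
 * counters.  switch_index tracks, per CPU, which slice of the virtual
 * counter array is currently loaded into the physical counters;
 * nmi_cpu_switch() rotates through the slices.
 */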
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX

static DEFINE_PER_CPU(int, switch_index);

static inline int has_mux(void)
{
        return !!model->switch_ctrl;
}

inline int op_x86_phys_to_virt(int phys)
{
        return __get_cpu_var(switch_index) + phys;
}

static void nmi_shutdown_mux(void)
{
        int i;

        if (!has_mux())
                return;

        for_each_possible_cpu(i) {
                kfree(per_cpu(cpu_msrs, i).multiplex);
                per_cpu(cpu_msrs, i).multiplex = NULL;
                per_cpu(switch_index, i) = 0;
        }
}

static int nmi_setup_mux(void)
{
        size_t multiplex_size =
                sizeof(struct op_msr) * model->num_virt_counters;
        int i;

        if (!has_mux())
                return 1;

        for_each_possible_cpu(i) {
                per_cpu(cpu_msrs, i).multiplex =
                        kmalloc(multiplex_size, GFP_KERNEL);
                if (!per_cpu(cpu_msrs, i).multiplex)
                        return 0;
        }

        return 1;
}

static void nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs)
{
        int i;
        struct op_msr *multiplex = msrs->multiplex;

        if (!has_mux())
                return;

        for (i = 0; i < model->num_virt_counters; ++i) {
                if (counter_config[i].enabled) {
                        multiplex[i].saved = -(u64)counter_config[i].count;
                } else {
                        multiplex[i].addr  = 0;
                        multiplex[i].saved = 0;
                }
        }

        per_cpu(switch_index, cpu) = 0;
}

static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs)
{
        struct op_msr *multiplex = msrs->multiplex;
        int i;

        for (i = 0; i < model->num_counters; ++i) {
                int virt = op_x86_phys_to_virt(i);
                if (multiplex[virt].addr)
                        rdmsrl(multiplex[virt].addr, multiplex[virt].saved);
        }
}

static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs)
{
        struct op_msr *multiplex = msrs->multiplex;
        int i;

        for (i = 0; i < model->num_counters; ++i) {
                int virt = op_x86_phys_to_virt(i);
                if (multiplex[virt].addr)
                        wrmsrl(multiplex[virt].addr, multiplex[virt].saved);
        }
}
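
/*
 * Rotate this CPU to the next set of virtual counters: stop the physical
 * counters, save the counter values of the current virtual set, advance
 * switch_index (wrapping back to 0 past the last configured event),
 * reprogram the controls via the model's switch_ctrl(), restore the new
 * set's counter values and restart counting.
 */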
static void nmi_cpu_switch(void *dummy)
{
        int cpu = smp_processor_id();
        int si = per_cpu(switch_index, cpu);
        struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

        nmi_cpu_stop(NULL);
        nmi_cpu_save_mpx_registers(msrs);

        /* move to next set */
        si += model->num_counters;
        if ((si > model->num_virt_counters) || (counter_config[si].count == 0))
                per_cpu(switch_index, cpu) = 0;
        else
                per_cpu(switch_index, cpu) = si;

        model->switch_ctrl(model, msrs);
        nmi_cpu_restore_mpx_registers(msrs);

        nmi_cpu_start(NULL);
}

/*
 * Quick check to see if multiplexing is necessary.
 * The check should be sufficient since counters are used
 * in order.
 */
static int nmi_multiplex_on(void)
{
        return counter_config[model->num_counters].count ? 0 : -EINVAL;
}

static int nmi_switch_event(void)
{
        if (!has_mux())
                return -ENOSYS;         /* not implemented */
        if (nmi_multiplex_on() < 0)
                return -EINVAL;         /* not necessary */

        on_each_cpu(nmi_cpu_switch, NULL, 1);

        return 0;
}

static inline void mux_init(struct oprofile_operations *ops)
{
        if (has_mux())
                ops->switch_events = nmi_switch_event;
}

static void mux_clone(int cpu)
{
        if (!has_mux())
                return;

        memcpy(per_cpu(cpu_msrs, cpu).multiplex,
               per_cpu(cpu_msrs, 0).multiplex,
               sizeof(struct op_msr) * model->num_virt_counters);
}

#else

inline int op_x86_phys_to_virt(int phys) { return phys; }
static inline void nmi_shutdown_mux(void) { }
static inline int nmi_setup_mux(void) { return 1; }
static inline void
nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs) { }
static inline void mux_init(struct oprofile_operations *ops) { }
static void mux_clone(int cpu) { }

#endif

static void free_msrs(void)
{
        int i;
        for_each_possible_cpu(i) {
                kfree(per_cpu(cpu_msrs, i).counters);
                per_cpu(cpu_msrs, i).counters = NULL;
                kfree(per_cpu(cpu_msrs, i).controls);
                per_cpu(cpu_msrs, i).controls = NULL;
        }
}

static int allocate_msrs(void)
{
        size_t controls_size = sizeof(struct op_msr) * model->num_controls;
        size_t counters_size = sizeof(struct op_msr) * model->num_counters;
        int i;

        for_each_possible_cpu(i) {
                per_cpu(cpu_msrs, i).counters = kmalloc(counters_size,
                                                        GFP_KERNEL);
                if (!per_cpu(cpu_msrs, i).counters)
                        return 0;
                per_cpu(cpu_msrs, i).controls = kmalloc(controls_size,
                                                        GFP_KERNEL);
                if (!per_cpu(cpu_msrs, i).controls)
                        return 0;
        }

        return 1;
}
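
/*
 * Per-CPU setup, run via on_each_cpu(): save the current counter and
 * control MSRs, program the counters under oprofilefs_lock, and redirect
 * the local APIC performance-counter LVT entry to NMI delivery, saving
 * the previous LVTPC value so it can be restored on shutdown.
 */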
static void nmi_cpu_setup(void *dummy)
{
        int cpu = smp_processor_id();
        struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
        nmi_cpu_save_registers(msrs);
        spin_lock(&oprofilefs_lock);
        model->setup_ctrs(model, msrs);
        nmi_cpu_setup_mux(cpu, msrs);
        spin_unlock(&oprofilefs_lock);
        per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
        apic_write(APIC_LVTPC, APIC_DM_NMI);
}

static struct notifier_block profile_exceptions_nb = {
        .notifier_call = profile_exceptions_notify,
        .next = NULL,
        .priority = 2
};
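
/*
 * Global setup: allocate the per-CPU MSR arrays, set up the multiplexing
 * buffers, register the NMI die notifier, resolve the MSR addresses once
 * on CPU 0 and copy them to all other CPUs, then program every CPU via
 * nmi_cpu_setup().
 */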
static int nmi_setup(void)
{
        int err = 0;
        int cpu;

        if (!allocate_msrs())
                err = -ENOMEM;
        else if (!nmi_setup_mux())
                err = -ENOMEM;
        else
                err = register_die_notifier(&profile_exceptions_nb);

        if (err) {
                free_msrs();
                nmi_shutdown_mux();
                return err;
        }

        /* We need to serialize save and setup for HT because the subsets
         * of MSRs are distinct for the save and setup operations.
         */

        /* Assume saved/restored counters are the same on all CPUs */

        model->fill_in_addresses(&per_cpu(cpu_msrs, 0));

        for_each_possible_cpu(cpu) {
                if (!cpu)
                        continue;

                memcpy(per_cpu(cpu_msrs, cpu).counters,
                       per_cpu(cpu_msrs, 0).counters,
                       sizeof(struct op_msr) * model->num_counters);

                memcpy(per_cpu(cpu_msrs, cpu).controls,
                       per_cpu(cpu_msrs, 0).controls,
                       sizeof(struct op_msr) * model->num_controls);

                mux_clone(cpu);
        }

        on_each_cpu(nmi_cpu_setup, NULL, 1);
        nmi_enabled = 1;

        return 0;
}

static void nmi_cpu_restore_registers(struct op_msrs *msrs)
{
        struct op_msr *counters = msrs->counters;
        struct op_msr *controls = msrs->controls;
        unsigned int i;

        for (i = 0; i < model->num_controls; ++i) {
                if (controls[i].addr)
                        wrmsrl(controls[i].addr, controls[i].saved);
        }

        for (i = 0; i < model->num_counters; ++i) {
                if (counters[i].addr)
                        wrmsrl(counters[i].addr, counters[i].saved);
        }
}

static void nmi_cpu_shutdown(void *dummy)
{
        unsigned int v;
        int cpu = smp_processor_id();
        struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

        /* restoring APIC_LVTPC can trigger an apic error because the delivery
         * mode and vector nr combination can be illegal. That's by design: on
         * power on apic lvt contains a zero vector nr which is legal only for
         * NMI delivery mode. So inhibit apic err before restoring lvtpc
         */
        v = apic_read(APIC_LVTERR);
        apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
        apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
        apic_write(APIC_LVTERR, v);
        nmi_cpu_restore_registers(msrs);
}

static void nmi_shutdown(void)
{
        struct op_msrs *msrs;

        nmi_enabled = 0;
        on_each_cpu(nmi_cpu_shutdown, NULL, 1);
        unregister_die_notifier(&profile_exceptions_nb);
        nmi_shutdown_mux();
        msrs = &get_cpu_var(cpu_msrs);
        model->shutdown(msrs);
        free_msrs();
        put_cpu_var(cpu_msrs);
}
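
/*
 * Create one oprofilefs directory per virtual counter ("0", "1", ...),
 * each holding the enabled/event/count/unit_mask/kernel/user control
 * files that userspace writes before starting a profile run.
 */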
static int nmi_create_files(struct super_block *sb, struct dentry *root)
{
        unsigned int i;

        for (i = 0; i < model->num_virt_counters; ++i) {
                struct dentry *dir;
                char buf[4];

#ifndef CONFIG_OPROFILE_EVENT_MULTIPLEX
                /* quick little hack to _not_ expose a counter if it is not
                 * available for use.  This should protect the userspace app.
                 * NOTE:  assumes 1:1 mapping here (that counters are organized
                 *        sequentially in their struct assignment).
                 */
                if (unlikely(!avail_to_resrv_perfctr_nmi_bit(i)))
                        continue;
#endif /* CONFIG_OPROFILE_EVENT_MULTIPLEX */

                snprintf(buf,  sizeof(buf), "%d", i);
                dir = oprofilefs_mkdir(sb, root, buf);
                oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
                oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
                oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
                oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
                oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
                oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
        }

        return 0;
}
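
/*
 * CPU hotplug: start the counters on a CPU when it comes online (or when
 * a planned offline is aborted) and stop them before it goes down.
 */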
#ifdef CONFIG_SMP
static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
                                 void *data)
{
        int cpu = (unsigned long)data;
        switch (action) {
        case CPU_DOWN_FAILED:
        case CPU_ONLINE:
                smp_call_function_single(cpu, nmi_cpu_start, NULL, 0);
                break;
        case CPU_DOWN_PREPARE:
                smp_call_function_single(cpu, nmi_cpu_stop, NULL, 1);
                break;
        }
        return NOTIFY_DONE;
}

static struct notifier_block oprofile_cpu_nb = {
        .notifier_call = oprofile_cpu_notifier
};
#endif

#ifdef CONFIG_PM

static int nmi_suspend(struct sys_device *dev, pm_message_t state)
{
        /* Only one CPU left, just stop that one */
        if (nmi_enabled == 1)
                nmi_cpu_stop(NULL);
        return 0;
}

static int nmi_resume(struct sys_device *dev)
{
        if (nmi_enabled == 1)
                nmi_cpu_start(NULL);
        return 0;
}

static struct sysdev_class oprofile_sysclass = {
        .name           = "oprofile",
        .resume         = nmi_resume,
        .suspend        = nmi_suspend,
};

static struct sys_device device_oprofile = {
        .id     = 0,
        .cls    = &oprofile_sysclass,
};

static int __init init_sysfs(void)
{
        int error;

        error = sysdev_class_register(&oprofile_sysclass);
        if (!error)
                error = sysdev_register(&device_oprofile);

        return error;
}

static void exit_sysfs(void)
{
        sysdev_unregister(&device_oprofile);
        sysdev_class_unregister(&oprofile_sysclass);
}

#else

#define init_sysfs() do { } while (0)
#define exit_sysfs() do { } while (0)

#endif /* CONFIG_PM */
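
/*
 * Pentium 4 setup: pick the P4 model spec (and the hyper-threaded variant
 * when two siblings share a core).  More than two siblings is unsupported
 * and makes oprofile fall back to timer mode.
 */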
static int __init p4_init(char **cpu_type)
{
        __u8 cpu_model = boot_cpu_data.x86_model;

        if (cpu_model > 6 || cpu_model == 5)
                return 0;

#ifndef CONFIG_SMP
        *cpu_type = "i386/p4";
        model = &op_p4_spec;
        return 1;
#else
        switch (smp_num_siblings) {
        case 1:
                *cpu_type = "i386/p4";
                model = &op_p4_spec;
                return 1;

        case 2:
                *cpu_type = "i386/p4-ht";
                model = &op_p4_ht2_spec;
                return 1;
        }
#endif

        printk(KERN_INFO "oprofile: P4 HyperThreading detected with > 2 threads\n");
        printk(KERN_INFO "oprofile: Reverting to timer mode.\n");
        return 0;
}
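
/*
 * "cpu_type=arch_perfmon" module parameter: force the generic
 * architectural perfmon driver instead of the family-specific P6 model.
 */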
static int force_arch_perfmon;
static int force_cpu_type(const char *str, struct kernel_param *kp)
{
        if (!strcmp(str, "arch_perfmon")) {
                force_arch_perfmon = 1;
                printk(KERN_INFO "oprofile: forcing architectural perfmon\n");
        }

        return 0;
}
module_param_call(cpu_type, force_cpu_type, NULL, NULL, 0);

static int __init ppro_init(char **cpu_type)
{
        __u8 cpu_model = boot_cpu_data.x86_model;
        struct op_x86_model_spec *spec = &op_ppro_spec; /* default */

        if (force_arch_perfmon && cpu_has_arch_perfmon)
                return 0;

        switch (cpu_model) {
        case 0 ... 2:
                *cpu_type = "i386/ppro";
                break;
        case 3 ... 5:
                *cpu_type = "i386/pii";
                break;
        case 6 ... 8:
        case 10 ... 11:
                *cpu_type = "i386/piii";
                break;
        case 9:
        case 13:
                *cpu_type = "i386/p6_mobile";
                break;
        case 14:
                *cpu_type = "i386/core";
                break;
        case 15: case 23:
                *cpu_type = "i386/core_2";
                break;
        case 26:
                spec = &op_arch_perfmon_spec;
                *cpu_type = "i386/core_i7";
                break;
        case 28:
                *cpu_type = "i386/atom";
                break;
        default:
                /* Unknown */
                return 0;
        }

        model = spec;
        return 1;
}

/* in order to get sysfs right */
static int using_nmi;
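
/*
 * Entry point called by the oprofile core: identify the CPU vendor and
 * family, select the matching op_x86_model_spec, register the hotplug
 * notifier and sysfs hooks, and fill in the NMI-based oprofile_operations
 * (which the model's init() may still override).
 */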
int __init op_nmi_init(struct oprofile_operations *ops)
{
        __u8 vendor = boot_cpu_data.x86_vendor;
        __u8 family = boot_cpu_data.x86;
        char *cpu_type = NULL;
        int ret = 0;

        if (!cpu_has_apic)
                return -ENODEV;

        switch (vendor) {
        case X86_VENDOR_AMD:
                /* Needs to be at least an Athlon (or hammer in 32bit mode) */

                switch (family) {
                case 6:
                        cpu_type = "i386/athlon";
                        break;
                case 0xf:
                        /*
                         * Actually it could be i386/hammer too, but
                         * give user space a consistent name.
                         */
                        cpu_type = "x86-64/hammer";
                        break;
                case 0x10:
                        cpu_type = "x86-64/family10";
                        break;
                case 0x11:
                        cpu_type = "x86-64/family11h";
                        break;
                default:
                        return -ENODEV;
                }
                model = &op_amd_spec;
                break;

        case X86_VENDOR_INTEL:
                switch (family) {
                        /* Pentium IV */
                case 0xf:
                        p4_init(&cpu_type);
                        break;

                        /* A P6-class processor */
                case 6:
                        ppro_init(&cpu_type);
                        break;

                default:
                        break;
                }

                if (cpu_type)
                        break;

                if (!cpu_has_arch_perfmon)
                        return -ENODEV;

                /* use arch perfmon as fallback */
                cpu_type = "i386/arch_perfmon";
                model = &op_arch_perfmon_spec;
                break;

        default:
                return -ENODEV;
        }

#ifdef CONFIG_SMP
        register_cpu_notifier(&oprofile_cpu_nb);
#endif
        /* default values, can be overwritten by model */
        ops->create_files       = nmi_create_files;
        ops->setup              = nmi_setup;
        ops->shutdown           = nmi_shutdown;
        ops->start              = nmi_start;
        ops->stop               = nmi_stop;
        ops->cpu_type           = cpu_type;

        if (model->init)
                ret = model->init(ops);
        if (ret)
                return ret;

        if (!model->num_virt_counters)
                model->num_virt_counters = model->num_counters;

        mux_init(ops);

        init_sysfs();
        using_nmi = 1;
        printk(KERN_INFO "oprofile: using NMI interrupt.\n");
        return 0;
}

void op_nmi_exit(void)
{
        if (using_nmi) {
                exit_sysfs();
#ifdef CONFIG_SMP
                unregister_cpu_notifier(&oprofile_cpu_nb);
#endif
        }
        if (model->exit)
                model->exit();
}