/* pcr.c: Generic sparc64 performance counter infrastructure.
 *
 * Copyright (C) 2009 David S. Miller (davem@davemloft.net)
 */
  5. #include <linux/kernel.h>
  6. #include <linux/module.h>
  7. #include <linux/init.h>
  8. #include <linux/irq.h>
  9. #include <asm/pil.h>
  10. #include <asm/pcr.h>
/* This code is shared between various users of the performance
 * counters.  Users will be oprofile, pseudo-NMI watchdog, and the
 * perf_counter support layer.
 */

/* Performance counter interrupts run unmasked at PIL level 15.
 * Therefore we can't do things like wakeups and other work
 * that expects IRQ disabling to be adhered to in locking etc.
 *
 * Therefore in such situations we defer the work by signalling
 * a lower level cpu IRQ.
 */
  22. void deferred_pcr_work_irq(int irq, struct pt_regs *regs)
  23. {
  24. clear_softint(1 << PIL_DEFERRED_PCR_WORK);
  25. }
  26. void schedule_deferred_pcr_work(void)
  27. {
  28. set_softint(1 << PIL_DEFERRED_PCR_WORK);
  29. }
/* Chip-specific %pcr accessors, selected once at boot by pcr_arch_init()
 * and exported for use by perf/oprofile/NMI-watchdog code.
 */
const struct pcr_ops *pcr_ops;
EXPORT_SYMBOL_GPL(pcr_ops);
  32. static u64 direct_pcr_read(void)
  33. {
  34. u64 val;
  35. read_pcr(val);
  36. return val;
  37. }
  38. static void direct_pcr_write(u64 val)
  39. {
  40. write_pcr(val);
  41. }
  42. static const struct pcr_ops direct_pcr_ops = {
  43. .read = direct_pcr_read,
  44. .write = direct_pcr_write,
  45. };
  46. static void n2_pcr_write(u64 val)
  47. {
  48. unsigned long ret;
  49. ret = sun4v_niagara2_setperf(HV_N2_PERF_SPARC_CTL, val);
  50. if (val != HV_EOK)
  51. write_pcr(val);
  52. }
  53. static const struct pcr_ops n2_pcr_ops = {
  54. .read = direct_pcr_read,
  55. .write = n2_pcr_write,
  56. };
/* Hypervisor API group/version negotiated for performance counter
 * access on sun4v; recorded so it can be unregistered on failure.
 */
static unsigned long perf_hsvc_group;
static unsigned long perf_hsvc_major;
static unsigned long perf_hsvc_minor;
  60. static int __init register_perf_hsvc(void)
  61. {
  62. if (tlb_type == hypervisor) {
  63. switch (sun4v_chip_type) {
  64. case SUN4V_CHIP_NIAGARA1:
  65. perf_hsvc_group = HV_GRP_NIAG_PERF;
  66. break;
  67. case SUN4V_CHIP_NIAGARA2:
  68. perf_hsvc_group = HV_GRP_N2_CPU;
  69. break;
  70. default:
  71. return -ENODEV;
  72. }
  73. perf_hsvc_major = 1;
  74. perf_hsvc_minor = 0;
  75. if (sun4v_hvapi_register(perf_hsvc_group,
  76. perf_hsvc_major,
  77. &perf_hsvc_minor)) {
  78. printk("perfmon: Could not register hvapi.\n");
  79. return -ENODEV;
  80. }
  81. }
  82. return 0;
  83. }
  84. static void __init unregister_perf_hsvc(void)
  85. {
  86. if (tlb_type != hypervisor)
  87. return;
  88. sun4v_hvapi_unregister(perf_hsvc_group);
  89. }
  90. int __init pcr_arch_init(void)
  91. {
  92. int err = register_perf_hsvc();
  93. if (err)
  94. return err;
  95. switch (tlb_type) {
  96. case hypervisor:
  97. pcr_ops = &n2_pcr_ops;
  98. break;
  99. case spitfire:
  100. case cheetah:
  101. case cheetah_plus:
  102. pcr_ops = &direct_pcr_ops;
  103. break;
  104. default:
  105. err = -ENODEV;
  106. goto out_unregister;
  107. }
  108. return 0;
  109. out_unregister:
  110. unregister_perf_hsvc();
  111. return err;
  112. }
  113. arch_initcall(pcr_arch_init);