pcr.c

/* pcr.c: Generic sparc64 performance counter infrastructure.
 *
 * Copyright (C) 2009 David S. Miller (davem@davemloft.net)
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/perf_event.h>
#include <linux/ftrace.h>

#include <asm/pil.h>
#include <asm/pcr.h>
#include <asm/nmi.h>

/* This code is shared between various users of the performance
 * counters.  Users will be oprofile, pseudo-NMI watchdog, and the
 * perf_event support layer.
 */

#define PCR_SUN4U_ENABLE	(PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE)
#define PCR_N2_ENABLE		(PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE | \
				 PCR_N2_TOE_OV1 | \
				 (2 << PCR_N2_SL1_SHIFT) | \
				 (0xff << PCR_N2_MASK1_SHIFT))
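
/* pcr_enable holds the PCR value used to turn the counters on for the
 * cpu type detected at boot; picl_shift is a cpu specific shift used
 * when scaling PIC count values (consumed by the NMI watchdog code).
 */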
u64 pcr_enable;
unsigned int picl_shift;

/* Performance counter interrupts run unmasked at PIL level 15.
 * Therefore we can't do things like wakeups and other work
 * that expects IRQ disabling to be adhered to in locking etc.
 *
 * Therefore in such situations we defer the work by signalling
 * a lower level cpu IRQ.
 */
void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	clear_softint(1 << PIL_DEFERRED_PCR_WORK);

	old_regs = set_irq_regs(regs);
	irq_enter();
#ifdef CONFIG_PERF_EVENTS
	perf_event_do_pending();
#endif
	irq_exit();
	set_irq_regs(old_regs);
}
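
/* Raise the soft interrupt that invokes deferred_pcr_work_irq() above.
 * This is the hook the perf_event layer calls from PIL 15 interrupt
 * context to schedule the deferred work.
 */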
void set_perf_event_pending(void)
{
	set_softint(1 << PIL_DEFERRED_PCR_WORK);
}

const struct pcr_ops *pcr_ops;
EXPORT_SYMBOL_GPL(pcr_ops);

static u64 direct_pcr_read(void)
{
	u64 val;

	read_pcr(val);
	return val;
}

static void direct_pcr_write(u64 val)
{
	write_pcr(val);
}

static const struct pcr_ops direct_pcr_ops = {
	.read	= direct_pcr_read,
	.write	= direct_pcr_write,
};
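
/* On Niagara-2 the %pcr is normally written through the sun4v
 * hypervisor performance counter service; if that call does not
 * succeed (HV_EOK), fall back to writing the register directly.
 */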
static void n2_pcr_write(u64 val)
{
	unsigned long ret;

	ret = sun4v_niagara2_setperf(HV_N2_PERF_SPARC_CTL, val);
	if (ret != HV_EOK)
		write_pcr(val);
}

static const struct pcr_ops n2_pcr_ops = {
	.read	= direct_pcr_read,
	.write	= n2_pcr_write,
};

static unsigned long perf_hsvc_group;
static unsigned long perf_hsvc_major;
static unsigned long perf_hsvc_minor;
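
/* Negotiate the performance counter hypervisor API group on sun4v
 * systems.  On non-hypervisor (sun4u) systems there is nothing to
 * register.
 */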
static int __init register_perf_hsvc(void)
{
	if (tlb_type == hypervisor) {
		switch (sun4v_chip_type) {
		case SUN4V_CHIP_NIAGARA1:
			perf_hsvc_group = HV_GRP_NIAG_PERF;
			break;

		case SUN4V_CHIP_NIAGARA2:
			perf_hsvc_group = HV_GRP_N2_CPU;
			break;

		default:
			return -ENODEV;
		}

		perf_hsvc_major = 1;
		perf_hsvc_minor = 0;
		if (sun4v_hvapi_register(perf_hsvc_group,
					 perf_hsvc_major,
					 &perf_hsvc_minor)) {
			printk("perfmon: Could not register hvapi.\n");
			return -ENODEV;
		}
	}

	return 0;
}

static void __init unregister_perf_hsvc(void)
{
	if (tlb_type != hypervisor)
		return;
	sun4v_hvapi_unregister(perf_hsvc_group);
}
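
/* Pick the PCR access methods and enable bits appropriate for the cpu
 * this kernel is running on, then initialize the NMI watchdog which is
 * built on top of the performance counters.
 */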
int __init pcr_arch_init(void)
{
	int err = register_perf_hsvc();

	if (err)
		return err;

	switch (tlb_type) {
	case hypervisor:
		pcr_ops = &n2_pcr_ops;
		pcr_enable = PCR_N2_ENABLE;
		picl_shift = 2;
		break;

	case cheetah:
	case cheetah_plus:
		pcr_ops = &direct_pcr_ops;
		pcr_enable = PCR_SUN4U_ENABLE;
		break;

	case spitfire:
		/* UltraSPARC-I/II and derivatives lack a profile
		 * counter overflow interrupt so we can't make use of
		 * their hardware currently.
		 */
		/* fallthrough */
	default:
		err = -ENODEV;
		goto out_unregister;
	}

	return nmi_init();

out_unregister:
	unregister_perf_hsvc();
	return err;
}

arch_initcall(pcr_arch_init);