  1. /* pcr.c: Generic sparc64 performance counter infrastructure.
  2. *
  3. * Copyright (C) 2009 David S. Miller (davem@davemloft.net)
  4. */
  5. #include <linux/kernel.h>
  6. #include <linux/export.h>
  7. #include <linux/init.h>
  8. #include <linux/irq.h>
  9. #include <linux/irq_work.h>
  10. #include <linux/ftrace.h>
  11. #include <asm/pil.h>
  12. #include <asm/pcr.h>
  13. #include <asm/nmi.h>
  14. #include <asm/asi.h>
  15. #include <asm/spitfire.h>
  16. /* This code is shared between various users of the performance
  17. * counters. Users will be oprofile, pseudo-NMI watchdog, and the
  18. * perf_event support layer.
  19. */
  20. /* Performance counter interrupts run unmasked at PIL level 15.
  21. * Therefore we can't do things like wakeups and other work
  22. * that expects IRQ disabling to be adhered to in locking etc.
  23. *
  24. * Therefore in such situations we defer the work by signalling
  25. * a lower level cpu IRQ.
  26. */
/* Handler for the PIL_DEFERRED_PCR_WORK softint.
 *
 * Performance counter interrupts arrive at PIL 15 (see the comment
 * above), where normal IRQ-disabling locking rules do not apply, so
 * irq_work is deferred to this lower-priority softint where it is
 * safe to run.
 */
void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	/* Clear the pending softint bit before doing the work. */
	clear_softint(1 << PIL_DEFERRED_PCR_WORK);

	old_regs = set_irq_regs(regs);
	irq_enter();
#ifdef CONFIG_IRQ_WORK
	irq_work_run();
#endif
	irq_exit();
	set_irq_regs(old_regs);
}
/* Hook called by the generic irq_work layer to request a run of the
 * queued work items; trigger the deferred-work softint, which is
 * serviced by deferred_pcr_work_irq() above.
 */
void arch_irq_work_raise(void)
{
	set_softint(1 << PIL_DEFERRED_PCR_WORK);
}
/* Chip-specific counter accessors, selected once at boot by
 * pcr_arch_init() below.
 */
const struct pcr_ops *pcr_ops;
EXPORT_SYMBOL_GPL(pcr_ops);
  45. static u64 direct_pcr_read(unsigned long reg_num)
  46. {
  47. u64 val;
  48. WARN_ON_ONCE(reg_num != 0);
  49. __asm__ __volatile__("rd %%pcr, %0" : "=r" (val));
  50. return val;
  51. }
/* Write %pcr directly; only register 0 exists on the chips that use
 * these ops.
 */
static void direct_pcr_write(unsigned long reg_num, u64 val)
{
	WARN_ON_ONCE(reg_num != 0);
	__asm__ __volatile__("wr %0, 0x0, %%pcr" : : "r" (val));
}
  57. static u64 direct_pic_read(unsigned long reg_num)
  58. {
  59. u64 val;
  60. WARN_ON_ONCE(reg_num != 0);
  61. __asm__ __volatile__("rd %%pic, %0" : "=r" (val));
  62. return val;
  63. }
/* Write %pic directly; only register 0 exists on these chips. */
static void direct_pic_write(unsigned long reg_num, u64 val)
{
	WARN_ON_ONCE(reg_num != 0);

	/* Blackbird errata workaround.  See commentary in
	 * arch/sparc64/kernel/smp.c:smp_percpu_timer_interrupt()
	 * for more information.
	 *
	 * The branch plus ".align 64" places the %pic write at the
	 * start of a cache line, and the dummy read of %pic follows
	 * it immediately, as the workaround requires.
	 */
	__asm__ __volatile__("ba,pt %%xcc, 99f\n\t"
			     " nop\n\t"
			     ".align 64\n"
			     "99:wr %0, 0x0, %%pic\n\t"
			     "rd %%pic, %%g0" : : "r" (val));
}
  77. static u64 direct_picl_value(unsigned int nmi_hz)
  78. {
  79. u32 delta = local_cpu_data().clock_tick / nmi_hz;
  80. return ((u64)((0 - delta) & 0xffffffff)) << 32;
  81. }
/* Accessors for chips whose %pcr/%pic can be read and written
 * directly (selected for cheetah/cheetah_plus in pcr_arch_init()).
 */
static const struct pcr_ops direct_pcr_ops = {
	.read_pcr		= direct_pcr_read,
	.write_pcr		= direct_pcr_write,
	.read_pic		= direct_pic_read,
	.write_pic		= direct_pic_write,
	.nmi_picl_value		= direct_picl_value,
	.pcr_nmi_enable		= (PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE),
	.pcr_nmi_disable	= PCR_PIC_PRIV,
};
  91. static void n2_pcr_write(unsigned long reg_num, u64 val)
  92. {
  93. unsigned long ret;
  94. WARN_ON_ONCE(reg_num != 0);
  95. if (val & PCR_N2_HTRACE) {
  96. ret = sun4v_niagara2_setperf(HV_N2_PERF_SPARC_CTL, val);
  97. if (ret != HV_EOK)
  98. direct_pcr_write(reg_num, val);
  99. } else
  100. direct_pcr_write(reg_num, val);
  101. }
  102. static u64 n2_picl_value(unsigned int nmi_hz)
  103. {
  104. u32 delta = local_cpu_data().clock_tick / (nmi_hz << 2);
  105. return ((u64)((0 - delta) & 0xffffffff)) << 32;
  106. }
/* Accessors for Niagara-2/3 class sun4v chips: %pcr writes may need
 * hypervisor assistance (see n2_pcr_write), everything else is
 * direct.
 */
static const struct pcr_ops n2_pcr_ops = {
	.read_pcr		= direct_pcr_read,
	.write_pcr		= n2_pcr_write,
	.read_pic		= direct_pic_read,
	.write_pic		= direct_pic_write,
	.nmi_picl_value		= n2_picl_value,
	.pcr_nmi_enable		= (PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE |
				   PCR_N2_TOE_OV1 |
				   (2 << PCR_N2_SL1_SHIFT) |
				   (0xff << PCR_N2_MASK1_SHIFT)),
	.pcr_nmi_disable	= PCR_PIC_PRIV,
};
  119. static u64 n4_pcr_read(unsigned long reg_num)
  120. {
  121. unsigned long val;
  122. (void) sun4v_vt_get_perfreg(reg_num, &val);
  123. return val;
  124. }
/* Write a SPARC-T4 (VT) performance control register via the
 * hypervisor; the return status is deliberately ignored.
 */
static void n4_pcr_write(unsigned long reg_num, u64 val)
{
	(void) sun4v_vt_set_perfreg(reg_num, val);
}
  129. static u64 n4_pic_read(unsigned long reg_num)
  130. {
  131. unsigned long val;
  132. __asm__ __volatile__("ldxa [%1] %2, %0"
  133. : "=r" (val)
  134. : "r" (reg_num * 0x8UL), "i" (ASI_PIC));
  135. return val;
  136. }
/* Store to a PIC register through the ASI_PIC address space; each
 * register occupies an 8-byte slot.
 */
static void n4_pic_write(unsigned long reg_num, u64 val)
{
	__asm__ __volatile__("stxa %0, [%1] %2"
			     : /* no outputs */
			     : "r" (val), "r" (reg_num * 0x8UL), "i" (ASI_PIC));
}
  143. static u64 n4_picl_value(unsigned int nmi_hz)
  144. {
  145. u32 delta = local_cpu_data().clock_tick / (nmi_hz << 2);
  146. return ((u64)((0 - delta) & 0xffffffff));
  147. }
/* Accessors for SPARC-T4 (VT): PCR access goes through hypervisor
 * calls, PIC access through the ASI_PIC address space.
 */
static const struct pcr_ops n4_pcr_ops = {
	.read_pcr		= n4_pcr_read,
	.write_pcr		= n4_pcr_write,
	.read_pic		= n4_pic_read,
	.write_pic		= n4_pic_write,
	.nmi_picl_value		= n4_picl_value,
	.pcr_nmi_enable		= (PCR_N4_PICNPT | PCR_N4_STRACE |
				   PCR_N4_UTRACE | PCR_N4_TOE |
				   (26 << PCR_N4_SL_SHIFT)),
	.pcr_nmi_disable	= PCR_N4_PICNPT,
};
/* Hypervisor service group (and negotiated API version) registered
 * at boot; kept so unregister_perf_hsvc() can tear it down again.
 */
static unsigned long perf_hsvc_group;
static unsigned long perf_hsvc_major;
static unsigned long perf_hsvc_minor;
  162. static int __init register_perf_hsvc(void)
  163. {
  164. if (tlb_type == hypervisor) {
  165. switch (sun4v_chip_type) {
  166. case SUN4V_CHIP_NIAGARA1:
  167. perf_hsvc_group = HV_GRP_NIAG_PERF;
  168. break;
  169. case SUN4V_CHIP_NIAGARA2:
  170. perf_hsvc_group = HV_GRP_N2_CPU;
  171. break;
  172. case SUN4V_CHIP_NIAGARA3:
  173. perf_hsvc_group = HV_GRP_KT_CPU;
  174. break;
  175. case SUN4V_CHIP_NIAGARA4:
  176. perf_hsvc_group = HV_GRP_VT_CPU;
  177. break;
  178. default:
  179. return -ENODEV;
  180. }
  181. perf_hsvc_major = 1;
  182. perf_hsvc_minor = 0;
  183. if (sun4v_hvapi_register(perf_hsvc_group,
  184. perf_hsvc_major,
  185. &perf_hsvc_minor)) {
  186. printk("perfmon: Could not register hvapi.\n");
  187. return -ENODEV;
  188. }
  189. }
  190. return 0;
  191. }
  192. static void __init unregister_perf_hsvc(void)
  193. {
  194. if (tlb_type != hypervisor)
  195. return;
  196. sun4v_hvapi_unregister(perf_hsvc_group);
  197. }
  198. static int __init setup_sun4v_pcr_ops(void)
  199. {
  200. int ret = 0;
  201. switch (sun4v_chip_type) {
  202. case SUN4V_CHIP_NIAGARA1:
  203. case SUN4V_CHIP_NIAGARA2:
  204. case SUN4V_CHIP_NIAGARA3:
  205. pcr_ops = &n2_pcr_ops;
  206. break;
  207. case SUN4V_CHIP_NIAGARA4:
  208. pcr_ops = &n4_pcr_ops;
  209. break;
  210. default:
  211. ret = -ENODEV;
  212. break;
  213. }
  214. return ret;
  215. }
  216. int __init pcr_arch_init(void)
  217. {
  218. int err = register_perf_hsvc();
  219. if (err)
  220. return err;
  221. switch (tlb_type) {
  222. case hypervisor:
  223. err = setup_sun4v_pcr_ops();
  224. if (err)
  225. goto out_unregister;
  226. break;
  227. case cheetah:
  228. case cheetah_plus:
  229. pcr_ops = &direct_pcr_ops;
  230. break;
  231. case spitfire:
  232. /* UltraSPARC-I/II and derivatives lack a profile
  233. * counter overflow interrupt so we can't make use of
  234. * their hardware currently.
  235. */
  236. /* fallthrough */
  237. default:
  238. err = -ENODEV;
  239. goto out_unregister;
  240. }
  241. return nmi_init();
  242. out_unregister:
  243. unregister_perf_hsvc();
  244. return err;
  245. }