perf_event.c 4.1 KB

/*
 * Performance event support for s390x
 *
 * Copyright IBM Corp. 2012
 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 */
#define KMSG_COMPONENT	"perf"
#define pr_fmt(fmt)	KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/kvm_host.h>
#include <linux/percpu.h>
#include <linux/export.h>
#include <asm/irq.h>
#include <asm/cpu_mf.h>
#include <asm/lowcore.h>
#include <asm/processor.h>
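/*
 * Report the PMU name: the CPU-measurement facilities (CPUMF) if either
 * the counter or the sampling facility is installed, otherwise the
 * generic "pmu" placeholder.
 */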
const char *perf_pmu_name(void)
{
	if (cpum_cf_avail() || cpum_sf_avail())
		return "CPU-measurement facilities (CPUMF)";
	return "pmu";
}
EXPORT_SYMBOL(perf_pmu_name);
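/*
 * Number of generic counters: the CPU-measurement counter facility
 * contributes up to PERF_CPUM_CF_MAX_CTR counters when available.
 */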
int perf_num_counters(void)
{
	int num = 0;

	if (cpum_cf_avail())
		num += PERF_CPUM_CF_MAX_CTR;

	return num;
}
EXPORT_SYMBOL(perf_num_counters);
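/*
 * The SIE entry code saves the address of the SIE control block in the
 * otherwise unused empty1[0] slot of its stack frame before entering the
 * guest.  Recover that pointer from the interrupted stack frame (gprs[15]).
 */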
static struct kvm_s390_sie_block *sie_block(struct pt_regs *regs)
{
	struct stack_frame *stack = (struct stack_frame *) regs->gprs[15];

	if (!stack)
		return NULL;

	return (struct kvm_s390_sie_block *) stack->empty1[0];
}
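/*
 * A sample was taken in a KVM guest if the interrupt hit kernel code at
 * the sie_exit label, i.e. while the CPU was executing the SIE
 * instruction on behalf of a guest.
 */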
static bool is_in_guest(struct pt_regs *regs)
{
	unsigned long ip = instruction_pointer(regs);

	if (user_mode(regs))
		return false;
	return ip == (unsigned long) &sie_exit;
}
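/* Nonzero if the guest PSW has the problem-state (user mode) bit set. */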
static unsigned long guest_is_user_mode(struct pt_regs *regs)
{
	return sie_block(regs)->gpsw.mask & PSW_MASK_PSTATE;
}
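/* Instruction address taken from the guest PSW in the SIE control block. */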
static unsigned long instruction_pointer_guest(struct pt_regs *regs)
{
	return sie_block(regs)->gpsw.addr & PSW_ADDR_INSN;
}
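/*
 * Report the guest instruction pointer for samples that hit SIE,
 * otherwise the host instruction pointer from the saved registers.
 */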
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	return is_in_guest(regs) ? instruction_pointer_guest(regs)
				 : instruction_pointer(regs);
}
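/* Map the guest PSW state to the corresponding perf misc flag. */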
static unsigned long perf_misc_guest_flags(struct pt_regs *regs)
{
	return guest_is_user_mode(regs) ? PERF_RECORD_MISC_GUEST_USER
					: PERF_RECORD_MISC_GUEST_KERNEL;
}
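/*
 * Classify a sample as guest user/kernel or host user/kernel for the
 * PERF_RECORD_MISC_* field of the perf sample header.
 */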
unsigned long perf_misc_flags(struct pt_regs *regs)
{
	if (is_in_guest(regs))
		return perf_misc_guest_flags(regs);
	return user_mode(regs) ? PERF_RECORD_MISC_USER
			       : PERF_RECORD_MISC_KERNEL;
}
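/*
 * Query the CPU-measurement counter facility (QCTRI) on the current CPU
 * and dump its version numbers and the authorization, enable, and
 * activation control masks.
 */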
void perf_event_print_debug(void)
{
	struct cpumf_ctr_info cf_info;
	unsigned long flags;
	int cpu;

	if (!cpum_cf_avail())
		return;

	local_irq_save(flags);
	cpu = smp_processor_id();
	memset(&cf_info, 0, sizeof(cf_info));
	if (!qctri(&cf_info)) {
		pr_info("CPU[%i] CPUM_CF: ver=%u.%u A=%04x E=%04x C=%04x\n",
			cpu, cf_info.cfvn, cf_info.csvn,
			cf_info.auth_ctl, cf_info.enable_ctl, cf_info.act_ctl);
		print_hex_dump_bytes("CPUMF Query: ", DUMP_PREFIX_OFFSET,
				     &cf_info, sizeof(cf_info));
	}
	local_irq_restore(flags);
}
/* See also arch/s390/kernel/traps.c */
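/*
 * Walk the stack frames between low and high, following the backchain,
 * and record the saved return address (gprs[8], i.e. %r14) of each frame
 * in the callchain entry.  A zero backchain either ends the trace or
 * marks an interrupt frame (struct pt_regs) through which the walk
 * continues on the interrupted stack.  Returns the stack pointer at
 * which the walk left the given range.
 */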
static unsigned long __store_trace(struct perf_callchain_entry *entry,
				   unsigned long sp,
				   unsigned long low, unsigned long high)
{
	struct stack_frame *sf;
	struct pt_regs *regs;

	while (1) {
		sp = sp & PSW_ADDR_INSN;
		if (sp < low || sp > high - sizeof(*sf))
			return sp;
		sf = (struct stack_frame *) sp;
		perf_callchain_store(entry, sf->gprs[8] & PSW_ADDR_INSN);
		/* Follow the backchain. */
		while (1) {
			low = sp;
			sp = sf->back_chain & PSW_ADDR_INSN;
			if (!sp)
				break;
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *) sp;
			perf_callchain_store(entry,
					     sf->gprs[8] & PSW_ADDR_INSN);
		}
		/* Zero backchain detected, check for interrupt frame. */
		sp = (unsigned long) (sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *) sp;
		perf_callchain_store(entry, sf->gprs[8] & PSW_ADDR_INSN);
		low = sp;
		sp = regs->gprs[15];
	}
}
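/*
 * Record the kernel callchain for a sample: start from the interrupted
 * stack frame and walk first the asynchronous (interrupt) stack, then
 * the per-task kernel stack.
 */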
void perf_callchain_kernel(struct perf_callchain_entry *entry,
			   struct pt_regs *regs)
{
	unsigned long head;
	struct stack_frame *head_sf;

	if (user_mode(regs))
		return;

	head = regs->gprs[15];
	head_sf = (struct stack_frame *) head;

	if (!head_sf || !head_sf->back_chain)
		return;

	head = head_sf->back_chain;
	head = __store_trace(entry, head, S390_lowcore.async_stack - ASYNC_SIZE,
			     S390_lowcore.async_stack);
	__store_trace(entry, head, S390_lowcore.thread_info,
		      S390_lowcore.thread_info + THREAD_SIZE);
}