/* arch/s390/kernel/perf_event.c */
/*
 * Performance event support for s390x
 *
 * Copyright IBM Corp. 2012
 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 */
  11. #define KMSG_COMPONENT "perf"
  12. #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  13. #include <linux/kernel.h>
  14. #include <linux/perf_event.h>
  15. #include <linux/percpu.h>
  16. #include <linux/export.h>
  17. #include <asm/irq.h>
  18. #include <asm/cpu_mf.h>
  19. #include <asm/lowcore.h>
  20. #include <asm/processor.h>
  21. const char *perf_pmu_name(void)
  22. {
  23. if (cpum_cf_avail() || cpum_sf_avail())
  24. return "CPU-measurement facilities (CPUMF)";
  25. return "pmu";
  26. }
  27. EXPORT_SYMBOL(perf_pmu_name);
  28. int perf_num_counters(void)
  29. {
  30. int num = 0;
  31. if (cpum_cf_avail())
  32. num += PERF_CPUM_CF_MAX_CTR;
  33. return num;
  34. }
  35. EXPORT_SYMBOL(perf_num_counters);
  36. void perf_event_print_debug(void)
  37. {
  38. struct cpumf_ctr_info cf_info;
  39. unsigned long flags;
  40. int cpu;
  41. if (!cpum_cf_avail())
  42. return;
  43. local_irq_save(flags);
  44. cpu = smp_processor_id();
  45. memset(&cf_info, 0, sizeof(cf_info));
  46. if (!qctri(&cf_info)) {
  47. pr_info("CPU[%i] CPUM_CF: ver=%u.%u A=%04x E=%04x C=%04x\n",
  48. cpu, cf_info.cfvn, cf_info.csvn,
  49. cf_info.auth_ctl, cf_info.enable_ctl, cf_info.act_ctl);
  50. print_hex_dump_bytes("CPUMF Query: ", DUMP_PREFIX_OFFSET,
  51. &cf_info, sizeof(cf_info));
  52. }
  53. local_irq_restore(flags);
  54. }
/* See also arch/s390/kernel/traps.c */
/*
 * Walk one kernel stack region [low, high) starting at @sp, recording
 * each frame's saved return address (gprs[8], masked to the
 * instruction-address bits) into @entry.
 *
 * Returns the stack pointer at which the walk left the region, so the
 * caller can continue the walk on the next stack (e.g. async stack ->
 * task stack).
 *
 * NOTE(review): assumes the s390 ABI back-chain layout of struct
 * stack_frame, and that a zero back chain marks an interrupt frame
 * (struct pt_regs) sitting directly above the last frame — confirm
 * against the matching walker in traps.c.
 */
static unsigned long __store_trace(struct perf_callchain_entry *entry,
unsigned long sp,
unsigned long low, unsigned long high)
{
struct stack_frame *sf;
struct pt_regs *regs;
while (1) {
/* Mask to valid instruction-address bits before the bounds check. */
sp = sp & PSW_ADDR_INSN;
/* Stop once sp leaves [low, high - sizeof(frame)]. */
if (sp < low || sp > high - sizeof(*sf))
return sp;
sf = (struct stack_frame *) sp;
perf_callchain_store(entry, sf->gprs[8] & PSW_ADDR_INSN);
/* Follow the backchain. */
while (1) {
/* Each hop must move strictly upward: new sp > old sp (low). */
low = sp;
sp = sf->back_chain & PSW_ADDR_INSN;
if (!sp)
break;
if (sp <= low || sp > high - sizeof(*sf))
return sp;
sf = (struct stack_frame *) sp;
perf_callchain_store(entry,
sf->gprs[8] & PSW_ADDR_INSN);
}
/* Zero backchain detected, check for interrupt frame. */
sp = (unsigned long) (sf + 1);
if (sp <= low || sp > high - sizeof(*regs))
return sp;
regs = (struct pt_regs *) sp;
perf_callchain_store(entry, sf->gprs[8] & PSW_ADDR_INSN);
/* Resume the outer walk at the interrupted context's stack pointer. */
low = sp;
sp = regs->gprs[15];
}
}
  90. void perf_callchain_kernel(struct perf_callchain_entry *entry,
  91. struct pt_regs *regs)
  92. {
  93. unsigned long head;
  94. struct stack_frame *head_sf;
  95. if (user_mode(regs))
  96. return;
  97. head = regs->gprs[15];
  98. head_sf = (struct stack_frame *) head;
  99. if (!head_sf || !head_sf->back_chain)
  100. return;
  101. head = head_sf->back_chain;
  102. head = __store_trace(entry, head, S390_lowcore.async_stack - ASYNC_SIZE,
  103. S390_lowcore.async_stack);
  104. __store_trace(entry, head, S390_lowcore.thread_info,
  105. S390_lowcore.thread_info + THREAD_SIZE);
  106. }