op_model_athlon.c

/*
 * @file op_model_athlon.c
 * athlon / K7 / K8 / Family 10h model-specific MSR operations
 *
 * @remark Copyright 2002-2008 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon
 * @author Philippe Elie
 * @author Graydon Hoare
 * @author Robert Richter <robert.richter@amd.com>
 */

#include <linux/oprofile.h>
#include <asm/ptrace.h>
#include <asm/msr.h>
#include <asm/nmi.h>

#include "op_x86_model.h"
#include "op_counter.h"

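/*
 * The CTRL_* macros below assemble the two 32-bit halves of a K7/AMD64
 * performance event-select (EVNTSEL) MSR: the enable bit (22), APIC
 * interrupt enable (20), OS/user mode bits (17/16), unit mask (15:8) and
 * the low eight event-select bits live in the low half; the upper
 * event-select bits and the Family 10h host-only/guest-only bits live in
 * the high half.
 */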
#define NUM_COUNTERS 4
#define NUM_CONTROLS 4

#define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0)
#define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0)
#define CTR_WRITE(l, msrs, c) do {wrmsr(msrs->counters[(c)].addr, -(unsigned int)(l), -1); } while (0)
#define CTR_OVERFLOWED(n) (!((n) & (1U<<31)))

#define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0)
#define CTRL_READ(l, h, msrs, c) do {rdmsr(msrs->controls[(c)].addr, (l), (h)); } while (0)
#define CTRL_WRITE(l, h, msrs, c) do {wrmsr(msrs->controls[(c)].addr, (l), (h)); } while (0)
#define CTRL_SET_ACTIVE(n) (n |= (1<<22))
#define CTRL_SET_INACTIVE(n) (n &= ~(1<<22))
#define CTRL_CLEAR_LO(x) (x &= (1<<21))
#define CTRL_CLEAR_HI(x) (x &= 0xfffffcf0)
#define CTRL_SET_ENABLE(val) (val |= 1<<20)
#define CTRL_SET_USR(val, u) (val |= ((u & 1) << 16))
#define CTRL_SET_KERN(val, k) (val |= ((k & 1) << 17))
#define CTRL_SET_UM(val, m) (val |= (m << 8))
#define CTRL_SET_EVENT_LOW(val, e) (val |= (e & 0xff))
#define CTRL_SET_EVENT_HIGH(val, e) (val |= ((e >> 8) & 0xf))
#define CTRL_SET_HOST_ONLY(val, h) (val |= ((h & 1) << 9))
#define CTRL_SET_GUEST_ONLY(val, h) (val |= ((h & 1) << 8))

static unsigned long reset_value[NUM_COUNTERS];

/* functions for op_amd_spec */
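/*
 * Claim the four K7 counter/event-select MSR pairs via
 * reserve_perfctr_nmi()/reserve_evntsel_nmi(); any MSR that cannot be
 * reserved keeps a zero address so the other helpers skip it.
 */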
static void op_amd_fill_in_addresses(struct op_msrs * const msrs)
{
	int i;

	for (i = 0; i < NUM_COUNTERS; i++) {
		if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
			msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
		else
			msrs->counters[i].addr = 0;
	}

	for (i = 0; i < NUM_CONTROLS; i++) {
		if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i))
			msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
		else
			msrs->controls[i].addr = 0;
	}
}

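/*
 * Program the reserved counters: clear the event-select registers, then,
 * for each enabled counter, write the count (negated by CTR_WRITE) so the
 * counter overflows after counter_config[i].count events.  Counting itself
 * only begins once op_amd_start() sets the enable bit.
 */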
static void op_amd_setup_ctrs(struct op_msrs const * const msrs)
{
	unsigned int low, high;
	int i;

	/* clear all counters */
	for (i = 0; i < NUM_CONTROLS; ++i) {
		if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
			continue;
		CTRL_READ(low, high, msrs, i);
		CTRL_CLEAR_LO(low);
		CTRL_CLEAR_HI(high);
		CTRL_WRITE(low, high, msrs, i);
	}

	/* avoid a false detection of ctr overflows in NMI handler */
	for (i = 0; i < NUM_COUNTERS; ++i) {
		if (unlikely(!CTR_IS_RESERVED(msrs, i)))
			continue;
		CTR_WRITE(1, msrs, i);
	}

	/* enable active counters */
	for (i = 0; i < NUM_COUNTERS; ++i) {
		if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) {
			reset_value[i] = counter_config[i].count;

			CTR_WRITE(counter_config[i].count, msrs, i);

			CTRL_READ(low, high, msrs, i);
			CTRL_CLEAR_LO(low);
			CTRL_CLEAR_HI(high);
			CTRL_SET_ENABLE(low);
			CTRL_SET_USR(low, counter_config[i].user);
			CTRL_SET_KERN(low, counter_config[i].kernel);
			CTRL_SET_UM(low, counter_config[i].unit_mask);
			CTRL_SET_EVENT_LOW(low, counter_config[i].event);
			CTRL_SET_EVENT_HIGH(high, counter_config[i].event);
			CTRL_SET_HOST_ONLY(high, 0);
			CTRL_SET_GUEST_ONLY(high, 0);
			CTRL_WRITE(low, high, msrs, i);
		} else {
			reset_value[i] = 0;
		}
	}
}

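/*
 * NMI handler callback: a counter written with -count has overflowed once
 * bit 31 of its low half is clear; record a sample for it and rearm it
 * with the saved reset value.
 */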
static int op_amd_check_ctrs(struct pt_regs * const regs,
			     struct op_msrs const * const msrs)
{
	unsigned int low, high;
	int i;

	for (i = 0; i < NUM_COUNTERS; ++i) {
		if (!reset_value[i])
			continue;
		CTR_READ(low, high, msrs, i);
		if (CTR_OVERFLOWED(low)) {
			oprofile_add_sample(regs, i);
			CTR_WRITE(reset_value[i], msrs, i);
		}
	}

	/* See op_model_ppro.c */
	return 1;
}

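/* Start counting: set the enable bit on every configured event-select register. */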
static void op_amd_start(struct op_msrs const * const msrs)
{
	unsigned int low, high;
	int i;

	for (i = 0; i < NUM_COUNTERS; ++i) {
		if (reset_value[i]) {
			CTRL_READ(low, high, msrs, i);
			CTRL_SET_ACTIVE(low);
			CTRL_WRITE(low, high, msrs, i);
		}
	}
}

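/* Stop counting: clear the enable bit on every counter that was started. */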
static void op_amd_stop(struct op_msrs const * const msrs)
{
	unsigned int low, high;
	int i;

	/*
	 * Subtle: stop on all counters to avoid a race with
	 * setting our pm callback.
	 */
	for (i = 0; i < NUM_COUNTERS; ++i) {
		if (!reset_value[i])
			continue;
		CTRL_READ(low, high, msrs, i);
		CTRL_SET_INACTIVE(low);
		CTRL_WRITE(low, high, msrs, i);
	}
}

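/* Release every MSR that op_amd_fill_in_addresses() managed to reserve. */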
static void op_amd_shutdown(struct op_msrs const * const msrs)
{
	int i;

	for (i = 0; i < NUM_COUNTERS; ++i) {
		if (CTR_IS_RESERVED(msrs, i))
			release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
	}
	for (i = 0; i < NUM_CONTROLS; ++i) {
		if (CTRL_IS_RESERVED(msrs, i))
			release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
	}
}

static int op_amd_init(struct oprofile_operations *ops)
{
	return 0;
}

static void op_amd_exit(void)
{
}

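/*
 * Model-specific operations handed to the generic x86 oprofile NMI driver,
 * which invokes them through the op_x86_model_spec interface.
 */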
struct op_x86_model_spec const op_amd_spec = {
	.init = op_amd_init,
	.exit = op_amd_exit,
	.num_counters = NUM_COUNTERS,
	.num_controls = NUM_CONTROLS,
	.fill_in_addresses = &op_amd_fill_in_addresses,
	.setup_ctrs = &op_amd_setup_ctrs,
	.check_ctrs = &op_amd_check_ctrs,
	.start = &op_amd_start,
	.stop = &op_amd_stop,
	.shutdown = &op_amd_shutdown
};