/*
 * rdpmc.c — perf test: verify that userspace RDPMC reads of a
 * mmap'ed perf event counter observe the counter advancing.
 */
#include <errno.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

#include "types.h"
#include "perf.h"
#include "debug.h"
#include "tests.h"
  9. #if defined(__x86_64__) || defined(__i386__)
  10. static u64 rdpmc(unsigned int counter)
  11. {
  12. unsigned int low, high;
  13. asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));
  14. return low | ((u64)high) << 32;
  15. }
  16. static u64 rdtsc(void)
  17. {
  18. unsigned int low, high;
  19. asm volatile("rdtsc" : "=a" (low), "=d" (high));
  20. return low | ((u64)high) << 32;
  21. }
  22. static u64 mmap_read_self(void *addr)
  23. {
  24. struct perf_event_mmap_page *pc = addr;
  25. u32 seq, idx, time_mult = 0, time_shift = 0;
  26. u64 count, cyc = 0, time_offset = 0, enabled, running, delta;
  27. do {
  28. seq = pc->lock;
  29. barrier();
  30. enabled = pc->time_enabled;
  31. running = pc->time_running;
  32. if (enabled != running) {
  33. cyc = rdtsc();
  34. time_mult = pc->time_mult;
  35. time_shift = pc->time_shift;
  36. time_offset = pc->time_offset;
  37. }
  38. idx = pc->index;
  39. count = pc->offset;
  40. if (idx)
  41. count += rdpmc(idx - 1);
  42. barrier();
  43. } while (pc->lock != seq);
  44. if (enabled != running) {
  45. u64 quot, rem;
  46. quot = (cyc >> time_shift);
  47. rem = cyc & ((1 << time_shift) - 1);
  48. delta = time_offset + quot * time_mult +
  49. ((rem * time_mult) >> time_shift);
  50. enabled += delta;
  51. if (idx)
  52. running += delta;
  53. quot = count / running;
  54. rem = count % running;
  55. count = quot * enabled + (rem * enabled) / running;
  56. }
  57. return count;
  58. }
  59. /*
  60. * If the RDPMC instruction faults then signal this back to the test parent task:
  61. */
  62. static void segfault_handler(int sig __maybe_unused,
  63. siginfo_t *info __maybe_unused,
  64. void *uc __maybe_unused)
  65. {
  66. exit(-1);
  67. }
  68. static int __test__rdpmc(void)
  69. {
  70. volatile int tmp = 0;
  71. u64 i, loops = 1000;
  72. int n;
  73. int fd;
  74. void *addr;
  75. struct perf_event_attr attr = {
  76. .type = PERF_TYPE_HARDWARE,
  77. .config = PERF_COUNT_HW_INSTRUCTIONS,
  78. .exclude_kernel = 1,
  79. };
  80. u64 delta_sum = 0;
  81. struct sigaction sa;
  82. sigfillset(&sa.sa_mask);
  83. sa.sa_sigaction = segfault_handler;
  84. sigaction(SIGSEGV, &sa, NULL);
  85. fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
  86. if (fd < 0) {
  87. pr_err("Error: sys_perf_event_open() syscall returned "
  88. "with %d (%s)\n", fd, strerror(errno));
  89. return -1;
  90. }
  91. addr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0);
  92. if (addr == (void *)(-1)) {
  93. pr_err("Error: mmap() syscall returned with (%s)\n",
  94. strerror(errno));
  95. goto out_close;
  96. }
  97. for (n = 0; n < 6; n++) {
  98. u64 stamp, now, delta;
  99. stamp = mmap_read_self(addr);
  100. for (i = 0; i < loops; i++)
  101. tmp++;
  102. now = mmap_read_self(addr);
  103. loops *= 10;
  104. delta = now - stamp;
  105. pr_debug("%14d: %14Lu\n", n, (long long)delta);
  106. delta_sum += delta;
  107. }
  108. munmap(addr, page_size);
  109. pr_debug(" ");
  110. out_close:
  111. close(fd);
  112. if (!delta_sum)
  113. return -1;
  114. return 0;
  115. }
  116. int test__rdpmc(void)
  117. {
  118. int status = 0;
  119. int wret = 0;
  120. int ret;
  121. int pid;
  122. pid = fork();
  123. if (pid < 0)
  124. return -1;
  125. if (!pid) {
  126. ret = __test__rdpmc();
  127. exit(ret);
  128. }
  129. wret = waitpid(pid, &status, 0);
  130. if (wret < 0 || status)
  131. return -1;
  132. return 0;
  133. }
  134. #endif