/**
 * @file cpu_buffer.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 *
 * Each CPU has a local buffer that stores PC value/event
 * pairs. We also log context switches when we notice them.
 * Eventually each CPU's buffer is processed into the global
 * event buffer by sync_buffer().
 *
 * We use a local buffer for two reasons: an NMI or similar
 * interrupt cannot synchronise, and high sampling rates
 * would lead to catastrophic global synchronisation if
 * a global buffer was used.
 */
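
/*
 * Each buffer entry is one PC value/event pair. A minimal sketch of
 * the layout, assuming the struct op_sample definition that lives in
 * cpu_buffer.h:
 *
 *      struct op_sample {
 *              unsigned long eip;      sampled program counter
 *              unsigned long event;    event number, or escape payload
 *      };
 */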

#include <linux/sched.h>
#include <linux/oprofile.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>

#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
#include "oprof.h"

#define OP_BUFFER_FLAGS 0

/*
 * Read and write access to the ring buffer is serialized by spin
 * locking, so a write from the NMI handler (x86) could also occur in
 * the middle of a critical section that is reading the buffer. To
 * avoid this, there are 2 buffers for independent read and write
 * access. Read access happens in process context only, write access
 * only in the NMI handler. If the read buffer runs empty, both
 * buffers are swapped atomically. There is potentially a small window
 * during the swap where the buffers are disabled and samples could be
 * lost.
 *
 * Using 2 buffers adds a little overhead, but the solution is clear
 * and does not require changes in the ring buffer implementation. It
 * can be changed to a single-buffer solution when the ring buffer
 * access is implemented as non-locking atomic code.
 */
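
/*
 * A minimal sketch of the read-side swap described above, assuming
 * the helpers in cpu_buffer.h and the kernel ring buffer's
 * ring_buffer_swap_cpu() returning 0 on success (not part of this
 * file):
 *
 *      e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
 *      if (!e && !ring_buffer_swap_cpu(op_ring_buffer_read,
 *                                      op_ring_buffer_write, cpu))
 *              e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
 */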

struct ring_buffer *op_ring_buffer_read;
struct ring_buffer *op_ring_buffer_write;
DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);

static void wq_sync_buffer(struct work_struct *work);
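
/*
 * Each CPU buffer is drained into the global event buffer every
 * DEFAULT_TIMER_EXPIRE jiffies, i.e. every 100 ms; see
 * wq_sync_buffer() below.
 */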
#define DEFAULT_TIMER_EXPIRE (HZ / 10)
static int work_enabled;

void free_cpu_buffers(void)
{
        if (op_ring_buffer_read)
                ring_buffer_free(op_ring_buffer_read);
        op_ring_buffer_read = NULL;
        if (op_ring_buffer_write)
                ring_buffer_free(op_ring_buffer_write);
        op_ring_buffer_write = NULL;
}
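
/* Report the per-CPU buffer size configured through oprofilefs. */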
unsigned long oprofile_get_cpu_buffer_size(void)
{
        return fs_cpu_buffer_size;
}
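
/* Account one lost sample on the current CPU's buffer. */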
void oprofile_cpu_buffer_inc_smpl_lost(void)
{
        struct oprofile_cpu_buffer *cpu_buf
                = &__get_cpu_var(cpu_buffer);

        cpu_buf->sample_lost_overflow++;
}
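
/*
 * Allocate the read and write ring buffers and reset the per-CPU
 * bookkeeping for every possible CPU. Returns 0 on success, -ENOMEM
 * on failure.
 */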
int alloc_cpu_buffers(void)
{
        int i;

        unsigned long buffer_size = fs_cpu_buffer_size;

        op_ring_buffer_read = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS);
        if (!op_ring_buffer_read)
                goto fail;
        op_ring_buffer_write = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS);
        if (!op_ring_buffer_write)
                goto fail;

        for_each_possible_cpu(i) {
                struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

                b->last_task = NULL;
                b->last_is_kernel = -1;
                b->tracing = 0;
                b->buffer_size = buffer_size;
                b->tail_pos = 0;
                b->head_pos = 0;
                b->sample_received = 0;
                b->sample_lost_overflow = 0;
                b->backtrace_aborted = 0;
                b->sample_invalid_eip = 0;
                b->cpu = i;
                INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
        }
        return 0;

fail:
        free_cpu_buffers();
        return -ENOMEM;
}
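
/*
 * Arm the periodic sync work on every online CPU. Each worker re-arms
 * itself (see wq_sync_buffer()) until work_enabled is cleared.
 */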
void start_cpu_work(void)
{
        int i;

        work_enabled = 1;

        for_each_online_cpu(i) {
                struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

                /*
                 * Spread the work by 1 jiffy per cpu so they don't all
                 * fire at once.
                 */
                schedule_delayed_work_on(i, &b->work, DEFAULT_TIMER_EXPIRE + i);
        }
}
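
/*
 * Stop the periodic sync: clear work_enabled so workers do not re-arm
 * themselves, cancel any pending work, and wait for in-flight workers
 * to finish.
 */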
void end_cpu_work(void)
{
        int i;

        work_enabled = 0;

        for_each_online_cpu(i) {
                struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

                cancel_delayed_work(&b->work);
        }

        flush_scheduled_work();
}
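
/*
 * Write one pc/event pair into the write buffer. Returns 0 on success,
 * or the nonzero error from the ring buffer helpers on failure.
 */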
static inline int
add_sample(struct oprofile_cpu_buffer *cpu_buf,
           unsigned long pc, unsigned long event)
{
        struct op_entry entry;
        int ret;

        ret = cpu_buffer_write_entry(&entry);
        if (ret)
                return ret;

        entry.sample->eip = pc;
        entry.sample->event = event;

        ret = cpu_buffer_write_commit(&entry);
        if (ret)
                return ret;

        return 0;
}
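
/*
 * Escape sequences use ESCAPE_CODE in place of a pc; the event slot
 * then carries the code itself, e.g. a kernel enter/exit flag, a task
 * pointer, or CPU_TRACE_BEGIN.
 */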
static inline int
add_code(struct oprofile_cpu_buffer *buffer, unsigned long value)
{
        return add_sample(buffer, ESCAPE_CODE, value);
}

/*
 * This must be safe from any context. Writing here is safe because of
 * the head/tail separation between the writer and the reader of the
 * CPU buffer.
 *
 * is_kernel is needed because on some architectures you cannot tell
 * whether you are in kernel or user space simply by looking at pc. We
 * tag this in the buffer by generating kernel enter/exit events
 * whenever is_kernel changes.
 */
static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
                      int is_kernel, unsigned long event)
{
        struct task_struct *task;

        cpu_buf->sample_received++;

        if (pc == ESCAPE_CODE) {
                cpu_buf->sample_invalid_eip++;
                return 0;
        }

        is_kernel = !!is_kernel;

        task = current;

        /* notice a switch from user->kernel or vice versa */
        if (cpu_buf->last_is_kernel != is_kernel) {
                cpu_buf->last_is_kernel = is_kernel;
                if (add_code(cpu_buf, is_kernel))
                        goto fail;
        }

        /* notice a task switch */
        if (cpu_buf->last_task != task) {
                cpu_buf->last_task = task;
                if (add_code(cpu_buf, (unsigned long)task))
                        goto fail;
        }

        if (add_sample(cpu_buf, pc, event))
                goto fail;

        return 1;

fail:
        cpu_buf->sample_lost_overflow++;
        return 0;
}
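
/*
 * For example, a sample logged by log_sample() right after a
 * user->kernel transition and a task switch lands in the buffer
 * roughly as:
 *
 *      ESCAPE_CODE, 1                          kernel enter
 *      ESCAPE_CODE, (unsigned long)task        task switch
 *      pc, event                               the sample itself
 */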

static int oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
{
        add_code(cpu_buf, CPU_TRACE_BEGIN);
        cpu_buf->tracing = 1;
        return 1;
}

static void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
{
        cpu_buf->tracing = 0;
}
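
/*
 * Log one sample and, if a backtrace depth is configured, follow it
 * with a CPU_TRACE_BEGIN escape and the frames collected through the
 * architecture's backtrace callback.
 */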
void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
                             unsigned long event, int is_kernel)
{
        struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);

        if (!backtrace_depth) {
                log_sample(cpu_buf, pc, is_kernel, event);
                return;
        }

        if (!oprofile_begin_trace(cpu_buf))
                return;

        /*
         * If log_sample() fails we can't backtrace, since we lost the
         * source of this event.
         */
        if (log_sample(cpu_buf, pc, is_kernel, event))
                oprofile_ops.backtrace(regs, backtrace_depth);

        oprofile_end_trace(cpu_buf);
}
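
/*
 * The common sampling entry point: derive pc and kernel mode from the
 * trapped registers.
 */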
void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
{
        int is_kernel = !user_mode(regs);
        unsigned long pc = profile_pc(regs);

        oprofile_add_ext_sample(pc, regs, event, is_kernel);
}

#ifdef CONFIG_OPROFILE_IBS

#define MAX_IBS_SAMPLE_SIZE 14
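
/*
 * An IBS record is logged as an escape pair followed by pc/event
 * pairs carrying the raw IBS register contents: three data pairs for
 * a fetch sample, six for an op sample (IBS_OP_BEGIN).
 * MAX_IBS_SAMPLE_SIZE (14) covers the worst case: the escape pair
 * plus six data pairs, seven pairs of two words each.
 */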
void oprofile_add_ibs_sample(struct pt_regs * const regs,
                             unsigned int * const ibs_sample, int ibs_code)
{
        int is_kernel = !user_mode(regs);
        struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
        struct task_struct *task;
        int fail = 0;

        cpu_buf->sample_received++;

        /* notice a switch from user->kernel or vice versa */
        if (cpu_buf->last_is_kernel != is_kernel) {
                if (add_code(cpu_buf, is_kernel))
                        goto fail;
                cpu_buf->last_is_kernel = is_kernel;
        }

        /* notice a task switch */
        if (!is_kernel) {
                task = current;
                if (cpu_buf->last_task != task) {
                        if (add_code(cpu_buf, (unsigned long)task))
                                goto fail;
                        cpu_buf->last_task = task;
                }
        }

        fail = fail || add_code(cpu_buf, ibs_code);
        fail = fail || add_sample(cpu_buf, ibs_sample[0], ibs_sample[1]);
        fail = fail || add_sample(cpu_buf, ibs_sample[2], ibs_sample[3]);
        fail = fail || add_sample(cpu_buf, ibs_sample[4], ibs_sample[5]);

        if (ibs_code == IBS_OP_BEGIN) {
                fail = fail || add_sample(cpu_buf, ibs_sample[6], ibs_sample[7]);
                fail = fail || add_sample(cpu_buf, ibs_sample[8], ibs_sample[9]);
                fail = fail || add_sample(cpu_buf, ibs_sample[10], ibs_sample[11]);
        }

        if (fail)
                goto fail;

        if (backtrace_depth)
                oprofile_ops.backtrace(regs, backtrace_depth);

        return;

fail:
        cpu_buf->sample_lost_overflow++;
        return;
}

#endif
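
/* Log a sample at a caller-supplied pc when no pt_regs are available. */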
void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
        struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);

        log_sample(cpu_buf, pc, is_kernel, event);
}

void oprofile_add_trace(unsigned long pc)
{
        struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);

        if (!cpu_buf->tracing)
                return;

        /*
         * A broken frame can produce an eip with the same value as an
         * escape code; abort the trace if we get one.
         */
        if (pc == ESCAPE_CODE)
                goto fail;

        if (add_sample(cpu_buf, pc, 0))
                goto fail;

        return;
fail:
        cpu_buf->tracing = 0;
        cpu_buf->backtrace_aborted++;
        return;
}

/*
 * This serves to avoid cpu buffer overflow, and makes sure
 * the task mortuary progresses.
 *
 * By using schedule_delayed_work_on and then schedule_delayed_work
 * we guarantee this will stay on the correct cpu.
 */
static void wq_sync_buffer(struct work_struct *work)
{
        struct oprofile_cpu_buffer *b =
                container_of(work, struct oprofile_cpu_buffer, work.work);

        if (b->cpu != smp_processor_id()) {
                printk(KERN_DEBUG "WQ on CPU%d, prefer CPU%d\n",
                       smp_processor_id(), b->cpu);

                if (!cpu_online(b->cpu)) {
                        cancel_delayed_work(&b->work);
                        return;
                }
        }

        sync_buffer(b->cpu);

        /* don't re-add the work if we're shutting down */
        if (work_enabled)
                schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
}