/**
 * @file cpu_buffer.h
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 */

#ifndef OPROFILE_CPU_BUFFER_H
#define OPROFILE_CPU_BUFFER_H

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/ring_buffer.h>

struct task_struct;

int alloc_cpu_buffers(void);
void free_cpu_buffers(void);

void start_cpu_work(void);
void end_cpu_work(void);

/*
 * The CPU buffer is composed of entries of this type (which are
 * also used for context switch notes).
 */
struct op_sample {
	unsigned long eip;
	unsigned long event;
};

/* A slot reserved in the ring buffer but not yet committed. */
struct op_entry {
	struct ring_buffer_event *event;
	struct op_sample *sample;
	unsigned long irq_flags;
};

struct oprofile_cpu_buffer {
	volatile unsigned long head_pos;
	volatile unsigned long tail_pos;
	unsigned long buffer_size;
	struct task_struct *last_task;
	int last_is_kernel;
	int tracing;
	/* statistics */
	unsigned long sample_received;
	unsigned long sample_lost_overflow;
	unsigned long backtrace_aborted;
	unsigned long sample_invalid_eip;
	int cpu;
	struct delayed_work work;
};

/*
 * Samples are logged into the "write" buffer and drained from the
 * "read" buffer; the two are swapped per-CPU when the read side
 * runs dry (see cpu_buffer_read_entry() below).
 */
extern struct ring_buffer *op_ring_buffer_read;
extern struct ring_buffer *op_ring_buffer_write;
DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);

/*
 * Resets the cpu buffer to a sane state.
 *
 * Reset these to invalid values; the next sample collected will
 * populate the buffer with proper values to initialize it.
 */
static inline void cpu_buffer_reset(int cpu)
{
	struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu);

	cpu_buf->last_is_kernel = -1;
	cpu_buf->last_task = NULL;
}
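
/*
 * Example (sketch, not part of the original header): before profiling
 * starts, each online CPU's buffer would typically be reset in turn.
 * The reset_all_cpu_buffers() name is hypothetical.
 */
static inline void reset_all_cpu_buffers(void)
{
	int i;

	for_each_online_cpu(i)
		cpu_buffer_reset(i);
}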

static inline int cpu_buffer_write_entry(struct op_entry *entry)
{
	entry->event = ring_buffer_lock_reserve(op_ring_buffer_write,
						sizeof(struct op_sample),
						&entry->irq_flags);
	if (entry->event)
		entry->sample = ring_buffer_event_data(entry->event);
	else
		entry->sample = NULL;

	if (!entry->sample)
		return -ENOMEM;

	return 0;
}

static inline int cpu_buffer_write_commit(struct op_entry *entry)
{
	return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event,
					 entry->irq_flags);
}
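
/*
 * Example (sketch, assuming the reserve/commit pairing above): a
 * producer fills in the reserved op_sample between the two calls.
 * The add_sample() helper shown here is illustrative, not part of
 * this header.
 */
static inline int add_sample(unsigned long pc, unsigned long event)
{
	struct op_entry entry;
	int ret;

	ret = cpu_buffer_write_entry(&entry);
	if (ret)
		return ret;

	entry.sample->eip = pc;
	entry.sample->event = event;

	return cpu_buffer_write_commit(&entry);
}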

static inline struct op_sample *cpu_buffer_read_entry(int cpu)
{
	struct ring_buffer_event *e;

	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
	if (e)
		return ring_buffer_event_data(e);
	/* read side empty: swap in the write buffer and try again */
	if (ring_buffer_swap_cpu(op_ring_buffer_read,
				 op_ring_buffer_write,
				 cpu))
		return NULL;
	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
	if (e)
		return ring_buffer_event_data(e);
	return NULL;
}
  96. /* "acquire" as many cpu buffer slots as we can */
  97. static inline unsigned long cpu_buffer_entries(int cpu)
  98. {
  99. return ring_buffer_entries_cpu(op_ring_buffer_read, cpu)
  100. + ring_buffer_entries_cpu(op_ring_buffer_write, cpu);
  101. }
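
/*
 * Example (sketch): a consumer typically snapshots the entry count,
 * then drains that many samples. process_sample() is a hypothetical
 * callback standing in for whatever handles each sample.
 */
static inline void drain_cpu_buffer(int cpu,
				    void (*process_sample)(struct op_sample *))
{
	unsigned long available = cpu_buffer_entries(cpu);
	unsigned long i;

	for (i = 0; i < available; i++) {
		struct op_sample *s = cpu_buffer_read_entry(cpu);

		if (!s)
			break;
		process_sample(s);
	}
}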

/* transient events for the CPU buffer -> event buffer */
#define CPU_IS_KERNEL 1
#define CPU_TRACE_BEGIN 2
#define IBS_FETCH_BEGIN 3
#define IBS_OP_BEGIN 4
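
/*
 * Example (sketch, an assumption about how these codes travel): a
 * transient code is emitted as an ordinary op_sample whose eip field
 * holds a sentinel and whose event field holds the code. The sentinel
 * below is a hypothetical stand-in; the driver defines its own escape
 * value elsewhere.
 */
#define OP_EXAMPLE_ESCAPE (~0UL)	/* hypothetical stand-in sentinel */

static inline int add_code(unsigned long code)
{
	return add_sample(OP_EXAMPLE_ESCAPE, code);
}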

#endif /* OPROFILE_CPU_BUFFER_H */