@@ -5,6 +5,7 @@
  * @remark Read the file COPYING
  *
  * @author John Levon <levon@movementarian.org>
+ * @author Barry Kasindorf <barry.kasindorf@amd.com>
  *
  * Each CPU has a local buffer that stores PC value/event
  * pairs. We also log context switches when we notice them.
@@ -207,7 +208,7 @@ static int log_sample(struct oprofile_cpu_buffer * cpu_buf, unsigned long pc,
 	return 1;
 }
 
-static int oprofile_begin_trace(struct oprofile_cpu_buffer * cpu_buf)
+static int oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
 {
 	if (nr_available_slots(cpu_buf) < 4) {
 		cpu_buf->sample_lost_overflow++;
@@ -252,6 +253,71 @@ void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
 	oprofile_add_ext_sample(pc, regs, event, is_kernel);
 }
 
|
|
|
|
|
|
+#define MAX_IBS_SAMPLE_SIZE 14
|
|
|
|
+static int log_ibs_sample(struct oprofile_cpu_buffer *cpu_buf,
|
|
|
|
+ unsigned long pc, int is_kernel, unsigned int *ibs, int ibs_code)
|
|
|
|
+{
|
|
|
|
+ struct task_struct *task;
|
|
|
|
+
|
|
|
|
+ cpu_buf->sample_received++;
|
|
|
|
+
|
|
|
|
+ if (nr_available_slots(cpu_buf) < MAX_IBS_SAMPLE_SIZE) {
|
|
|
|
+ cpu_buf->sample_lost_overflow++;
|
|
|
|
+ return 0;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ is_kernel = !!is_kernel;
|
|
|
|
+
|
|
|
|
+ /* notice a switch from user->kernel or vice versa */
|
|
|
|
+ if (cpu_buf->last_is_kernel != is_kernel) {
|
|
|
|
+ cpu_buf->last_is_kernel = is_kernel;
|
|
|
|
+ add_code(cpu_buf, is_kernel);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ /* notice a task switch */
|
|
|
|
+ if (!is_kernel) {
|
|
|
|
+ task = current;
|
|
|
|
+
|
|
|
|
+ if (cpu_buf->last_task != task) {
|
|
|
|
+ cpu_buf->last_task = task;
|
|
|
|
+ add_code(cpu_buf, (unsigned long)task);
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ add_code(cpu_buf, ibs_code);
|
|
|
|
+ add_sample(cpu_buf, ibs[0], ibs[1]);
|
|
|
|
+ add_sample(cpu_buf, ibs[2], ibs[3]);
|
|
|
|
+ add_sample(cpu_buf, ibs[4], ibs[5]);
|
|
|
|
+
|
|
|
|
+ if (ibs_code == IBS_OP_BEGIN) {
|
|
|
|
+ add_sample(cpu_buf, ibs[6], ibs[7]);
|
|
|
|
+ add_sample(cpu_buf, ibs[8], ibs[9]);
|
|
|
|
+ add_sample(cpu_buf, ibs[10], ibs[11]);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ return 1;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+void oprofile_add_ibs_sample(struct pt_regs *const regs,
|
|
|
|
+ unsigned int * const ibs_sample, u8 code)
|
|
|
|
+{
|
|
|
|
+ int is_kernel = !user_mode(regs);
|
|
|
|
+ unsigned long pc = profile_pc(regs);
|
|
|
|
+
|
|
|
|
+ struct oprofile_cpu_buffer *cpu_buf =
|
|
|
|
+ &per_cpu(cpu_buffer, smp_processor_id());
|
|
|
|
+
|
|
|
|
+ if (!backtrace_depth) {
|
|
|
|
+ log_ibs_sample(cpu_buf, pc, is_kernel, ibs_sample, code);
|
|
|
|
+ return;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ /* if log_sample() fails we can't backtrace since we lost the source
|
|
|
|
+ * of this event */
|
|
|
|
+ if (log_ibs_sample(cpu_buf, pc, is_kernel, ibs_sample, code))
|
|
|
|
+ oprofile_ops.backtrace(regs, backtrace_depth);
|
|
|
|
+}
|
|
|
|
+
|
|
 void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
 {
 	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);