@@ -25,18 +25,14 @@ static unsigned long reset_value[OP_MAX_COUNTER];
 
 static int oprofile_running;
 static int mmcra_has_sihv;
+/* Unfortunately these bits vary between CPUs */
+static unsigned long mmcra_sihv = MMCRA_SIHV;
+static unsigned long mmcra_sipr = MMCRA_SIPR;
 
 /* mmcr values are set in power4_reg_setup, used in power4_cpu_setup */
 static u32 mmcr0_val;
 static u64 mmcr1_val;
-static u32 mmcra_val;
-
-/*
- * Since we do not have an NMI, backtracing through spinlocks is
- * only a best guess. In light of this, allow it to be disabled at
- * runtime.
- */
-static int backtrace_spinlocks;
+static u64 mmcra_val;
 
 static void power4_reg_setup(struct op_counter_config *ctr,
 			     struct op_system_config *sys,
@@ -63,8 +59,6 @@ static void power4_reg_setup(struct op_counter_config *ctr,
 	mmcr1_val = sys->mmcr1;
 	mmcra_val = sys->mmcra;
 
-	backtrace_spinlocks = sys->backtrace_spinlocks;
-
 	for (i = 0; i < cur_cpu_spec->num_pmcs; ++i)
 		reset_value[i] = 0x80000000UL - ctr[i].count;
 
@@ -197,25 +191,6 @@ static void __attribute_used__ kernel_unknown_bucket(void)
 {
 }
 
-static unsigned long check_spinlock_pc(struct pt_regs *regs,
-				       unsigned long profile_pc)
-{
-	unsigned long pc = instruction_pointer(regs);
-
-	/*
-	 * If both the SIAR (sampled instruction) and the perfmon exception
-	 * occurred in a spinlock region then we account the sample to the
-	 * calling function. This isnt 100% correct, we really need soft
-	 * IRQ disable so we always get the perfmon exception at the
-	 * point at which the SIAR is set.
-	 */
-	if (backtrace_spinlocks && in_lock_functions(pc) &&
-	    in_lock_functions(profile_pc))
-		return regs->link;
-	else
-		return profile_pc;
-}
-
 /*
  * On GQ and newer the MMCRA stores the HV and PR bits at the time
  * the SIAR was sampled. We use that to work out if the SIAR was sampled in
@@ -228,17 +203,17 @@ static unsigned long get_pc(struct pt_regs *regs)
 
 	/* Cant do much about it */
 	if (!mmcra_has_sihv)
-		return check_spinlock_pc(regs, pc);
+		return pc;
 
 	mmcra = mfspr(SPRN_MMCRA);
 
 	/* Were we in the hypervisor? */
-	if (firmware_has_feature(FW_FEATURE_LPAR) && (mmcra & MMCRA_SIHV))
+	if (firmware_has_feature(FW_FEATURE_LPAR) && (mmcra & mmcra_sihv))
 		/* function descriptor madness */
 		return *((unsigned long *)hypervisor_bucket);
 
 	/* We were in userspace, nothing to do */
-	if (mmcra & MMCRA_SIPR)
+	if (mmcra & mmcra_sipr)
 		return pc;
 
 #ifdef CONFIG_PPC_RTAS
@@ -257,7 +232,7 @@ static unsigned long get_pc(struct pt_regs *regs)
 		/* function descriptor madness */
 		return *((unsigned long *)kernel_unknown_bucket);
 
-	return check_spinlock_pc(regs, pc);
+	return pc;
 }
 
 static int get_kernel(unsigned long pc)
@@ -268,7 +243,7 @@ static int get_kernel(unsigned long pc)
 		is_kernel = is_kernel_addr(pc);
 	} else {
 		unsigned long mmcra = mfspr(SPRN_MMCRA);
-		is_kernel = ((mmcra & MMCRA_SIPR) == 0);
+		is_kernel = ((mmcra & mmcra_sipr) == 0);
 	}
 
 	return is_kernel;
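
Note (outside the patch): because mmcra_sihv and mmcra_sipr are now ordinary variables initialised to the POWER4-era bit positions, a CPU-specific setup path can repoint them on processors where MMCRA keeps the SIHV/SIPR bits elsewhere. A minimal sketch of that idea follows; the function name and the POWER6_* constants are illustrative assumptions, not taken from this diff.

	/*
	 * Illustrative sketch only, not part of the patch above: a
	 * hypothetical CPU setup hook that overrides the default masks
	 * for a CPU that moved SIHV/SIPR within MMCRA.
	 */
	static void example_power6_mmcra_setup(void)
	{
		mmcra_sihv = POWER6_MMCRA_SIHV;	/* assumed constant */
		mmcra_sipr = POWER6_MMCRA_SIPR;	/* assumed constant */
	}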