@@ -11,7 +11,7 @@
  * Version Perfmon-2.x is a rewrite of perfmon-1.x
  * by Stephane Eranian, Hewlett Packard Co.
  *
- * Copyright (C) 1999-2003, 2005 Hewlett Packard Co
+ * Copyright (C) 1999-2005 Hewlett Packard Co
  *               Stephane Eranian <eranian@hpl.hp.com>
  *               David Mosberger-Tang <davidm@hpl.hp.com>
  *
@@ -497,6 +497,9 @@ typedef struct {
 static pfm_stats_t	pfm_stats[NR_CPUS];
 static pfm_session_t	pfm_sessions;	/* global sessions information */
 
+static spinlock_t pfm_alt_install_check = SPIN_LOCK_UNLOCKED;
+static pfm_intr_handler_desc_t *pfm_alt_intr_handler;
+
 static struct proc_dir_entry	*perfmon_dir;
 static pfm_uuid_t	pfm_null_uuid = {0,};
 
@@ -606,6 +609,7 @@ DEFINE_PER_CPU(unsigned long, pfm_syst_info);
 DEFINE_PER_CPU(struct task_struct *, pmu_owner);
 DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);
 DEFINE_PER_CPU(unsigned long, pmu_activation_number);
+EXPORT_PER_CPU_SYMBOL_GPL(pfm_syst_info);
 
 
 /* forward declaration */
@@ -1325,7 +1329,7 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
 error_conflict:
 	DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
 		pfm_sessions.pfs_sys_session[cpu]->pid,
-		smp_processor_id()));
+		cpu));
 abort:
 	UNLOCK_PFS(flags);
 
@@ -5555,26 +5559,32 @@ pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
 	int ret;
 
 	this_cpu = get_cpu();
-	min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
-	max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;
+	if (likely(!pfm_alt_intr_handler)) {
+		min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
+		max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;
 
-	start_cycles = ia64_get_itc();
+		start_cycles = ia64_get_itc();
 
-	ret = pfm_do_interrupt_handler(irq, arg, regs);
+		ret = pfm_do_interrupt_handler(irq, arg, regs);
 
-	total_cycles = ia64_get_itc();
+		total_cycles = ia64_get_itc();
 
-	/*
-	 * don't measure spurious interrupts
-	 */
-	if (likely(ret == 0)) {
-		total_cycles -= start_cycles;
+		/*
+		 * don't measure spurious interrupts
+		 */
+		if (likely(ret == 0)) {
+			total_cycles -= start_cycles;
 
-		if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
-		if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;
+			if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
+			if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;
 
-		pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
+			pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
+		}
+	}
+	else {
+		(*pfm_alt_intr_handler->handler)(irq, arg, regs);
 	}
+
 	put_cpu_no_resched();
 	return IRQ_HANDLED;
 }
@@ -6425,6 +6435,141 @@ static struct irqaction perfmon_irqaction = {
 	.name = "perfmon"
 };
 
+static void
+pfm_alt_save_pmu_state(void *data)
+{
+	struct pt_regs *regs;
+
+	regs = ia64_task_regs(current);
+
+	DPRINT(("called\n"));
+
+	/*
+	 * should not be necessary but
+	 * let's take no risk
+	 */
+	pfm_clear_psr_up();
+	pfm_clear_psr_pp();
+	ia64_psr(regs)->pp = 0;
+
+	/*
+	 * This call is required
+	 * May cause a spurious interrupt on some processors
+	 */
+	pfm_freeze_pmu();
+
+	ia64_srlz_d();
+}
+
+void
+pfm_alt_restore_pmu_state(void *data)
+{
+	struct pt_regs *regs;
+
+	regs = ia64_task_regs(current);
+
+	DPRINT(("called\n"));
+
+	/*
+	 * put PMU back in state expected
+	 * by perfmon
+	 */
+	pfm_clear_psr_up();
+	pfm_clear_psr_pp();
+	ia64_psr(regs)->pp = 0;
+
+	/*
+	 * perfmon runs with PMU unfrozen at all times
+	 */
+	pfm_unfreeze_pmu();
+
+	ia64_srlz_d();
+}
+
+int
+pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
+{
+	int ret, i;
+	int reserve_cpu;
+
+	/* some sanity checks */
+	if (hdl == NULL || hdl->handler == NULL) return -EINVAL;
+
+	/* do the easy test first */
+	if (pfm_alt_intr_handler) return -EBUSY;
+
+	/* one at a time in the install or remove, just fail the others */
+	if (!spin_trylock(&pfm_alt_install_check)) {
+		return -EBUSY;
+	}
+
+	/* reserve our session */
+	for_each_online_cpu(reserve_cpu) {
+		ret = pfm_reserve_session(NULL, 1, reserve_cpu);
+		if (ret) goto cleanup_reserve;
+	}
+
+	/* save the current system wide pmu states */
+	ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 0, 1);
+	if (ret) {
+		DPRINT(("on_each_cpu() failed: %d\n", ret));
+		goto cleanup_reserve;
+	}
+
+	/* officially change to the alternate interrupt handler */
+	pfm_alt_intr_handler = hdl;
+
+	spin_unlock(&pfm_alt_install_check);
+
+	return 0;
+
+cleanup_reserve:
+	for_each_online_cpu(i) {
+		/* don't unreserve more than we reserved */
+		if (i >= reserve_cpu) break;
+
+		pfm_unreserve_session(NULL, 1, i);
+	}
+
+	spin_unlock(&pfm_alt_install_check);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(pfm_install_alt_pmu_interrupt);
+
+int
+pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
+{
+	int i;
+	int ret;
+
+	if (hdl == NULL) return -EINVAL;
+
+	/* cannot remove someone else's handler! */
+	if (pfm_alt_intr_handler != hdl) return -EINVAL;
+
+	/* one at a time in the install or remove, just fail the others */
+	if (!spin_trylock(&pfm_alt_install_check)) {
+		return -EBUSY;
+	}
+
+	pfm_alt_intr_handler = NULL;
+
+	ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 0, 1);
+	if (ret) {
+		DPRINT(("on_each_cpu() failed: %d\n", ret));
+	}
+
+	for_each_online_cpu(i) {
+		pfm_unreserve_session(NULL, 1, i);
+	}
+
+	spin_unlock(&pfm_alt_install_check);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(pfm_remove_alt_pmu_interrupt);
+
 /*
  * perfmon initialization routine, called from the initcall() table
  */
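
---

A minimal usage sketch of the exported install/remove pair, for context. The module name, handler body, and header location are illustrative assumptions, not part of this patch; the only things taken from the patch itself are the two exported entry points and the fact that the dispatch in pfm_interrupt_handler() calls hdl->handler(irq, arg, regs) and ignores its return value.

/*
 * pfm_alt_example.c -- hypothetical client of the alternate PMU
 * interrupt handler API (illustration only). Assumes
 * pfm_intr_handler_desc_t is visible via <asm/perfmon.h> and that
 * its .handler member takes the same (irq, arg, regs) triple the
 * dispatch above passes.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <asm/ptrace.h>
#include <asm/perfmon.h>

static int
my_pmu_handler(int irq, void *arg, struct pt_regs *regs)
{
	/*
	 * Service the overflow here. While this handler is installed,
	 * perfmon's own overflow path (and its statistics) is bypassed;
	 * the dispatcher discards this return value.
	 */
	return 0;
}

static pfm_intr_handler_desc_t my_desc = {
	.handler = my_pmu_handler,
};

static int __init my_init(void)
{
	/*
	 * Fails with -EBUSY if another alternate handler is installed,
	 * or with the pfm_reserve_session() error if a conflicting
	 * perfmon session already exists on some CPU.
	 */
	return pfm_install_alt_pmu_interrupt(&my_desc);
}

static void __exit my_exit(void)
{
	/* Only the descriptor that was installed may remove itself. */
	pfm_remove_alt_pmu_interrupt(&my_desc);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");

Note that install and remove are serialized by spin_trylock() on pfm_alt_install_check and fail with -EBUSY rather than block, so a client racing another install must be prepared to retry or give up.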