@@ -29,7 +29,6 @@
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
-#include <linux/suspend.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
@@ -6853,7 +6852,7 @@ static void __cpuinit perf_event_init_cpu(int cpu)
struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

mutex_lock(&swhash->hlist_mutex);
- if (swhash->hlist_refcount > 0 && !swhash->swevent_hlist) {
+ if (swhash->hlist_refcount > 0) {
struct swevent_hlist *hlist;

hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
@@ -6942,14 +6941,7 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
unsigned int cpu = (long)hcpu;

- /*
- * Ignore suspend/resume action, the perf_pm_notifier will
- * take care of that.
- */
- if (action & CPU_TASKS_FROZEN)
- return NOTIFY_OK;
-
- switch (action) {
+ switch (action & ~CPU_TASKS_FROZEN) {

case CPU_UP_PREPARE:
case CPU_DOWN_FAILED:
@@ -6968,90 +6960,6 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
return NOTIFY_OK;
}

-static void perf_pm_resume_cpu(void *unused)
-{
- struct perf_cpu_context *cpuctx;
- struct perf_event_context *ctx;
- struct pmu *pmu;
- int idx;
-
- idx = srcu_read_lock(&pmus_srcu);
- list_for_each_entry_rcu(pmu, &pmus, entry) {
- cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
- ctx = cpuctx->task_ctx;
-
- perf_ctx_lock(cpuctx, ctx);
- perf_pmu_disable(cpuctx->ctx.pmu);
-
- cpu_ctx_sched_out(cpuctx, EVENT_ALL);
- if (ctx)
- ctx_sched_out(ctx, cpuctx, EVENT_ALL);
-
- perf_pmu_enable(cpuctx->ctx.pmu);
- perf_ctx_unlock(cpuctx, ctx);
- }
- srcu_read_unlock(&pmus_srcu, idx);
-}
-
-static void perf_pm_suspend_cpu(void *unused)
-{
- struct perf_cpu_context *cpuctx;
- struct perf_event_context *ctx;
- struct pmu *pmu;
- int idx;
-
- idx = srcu_read_lock(&pmus_srcu);
- list_for_each_entry_rcu(pmu, &pmus, entry) {
- cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
- ctx = cpuctx->task_ctx;
-
- perf_ctx_lock(cpuctx, ctx);
- perf_pmu_disable(cpuctx->ctx.pmu);
-
- perf_event_sched_in(cpuctx, ctx, current);
-
- perf_pmu_enable(cpuctx->ctx.pmu);
- perf_ctx_unlock(cpuctx, ctx);
- }
- srcu_read_unlock(&pmus_srcu, idx);
-}
-
-static int perf_resume(void)
-{
- get_online_cpus();
- smp_call_function(perf_pm_resume_cpu, NULL, 1);
- put_online_cpus();
-
- return NOTIFY_OK;
-}
-
-static int perf_suspend(void)
-{
- get_online_cpus();
- smp_call_function(perf_pm_suspend_cpu, NULL, 1);
- put_online_cpus();
-
- return NOTIFY_OK;
-}
-
-static int perf_pm(struct notifier_block *self, unsigned long action, void *ptr)
-{
- switch (action) {
- case PM_POST_HIBERNATION:
- case PM_POST_SUSPEND:
- return perf_resume();
- case PM_HIBERNATION_PREPARE:
- case PM_SUSPEND_PREPARE:
- return perf_suspend();
- default:
- return NOTIFY_DONE;
- }
-}
-
-static struct notifier_block perf_pm_notifier = {
- .notifier_call = perf_pm,
-};
-
void __init perf_event_init(void)
{
int ret;
@@ -7066,7 +6974,6 @@ void __init perf_event_init(void)
perf_tp_register();
perf_cpu_notifier(perf_cpu_notify);
register_reboot_notifier(&perf_reboot_notifier);
- register_pm_notifier(&perf_pm_notifier);

ret = init_hw_breakpoint();
WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
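For reference, here is a minimal sketch, not part of the patch, of the hotplug-notifier pattern the change above switches to: masking CPU_TASKS_FROZEN out of the action so the suspend/resume ("frozen") variant of each hotplug event (e.g. CPU_UP_PREPARE_FROZEN) takes the same path as the ordinary one, which is what makes the separate PM notifier unnecessary. The function names and case bodies below are illustrative only.

/*
 * Minimal sketch, not from this patch: a CPU hotplug notifier that
 * treats suspend/resume (frozen) transitions like ordinary hotplug,
 * the same pattern perf_cpu_notify adopts above.
 */
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

static int example_cpu_notify(struct notifier_block *self,
			      unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	/*
	 * Masking out CPU_TASKS_FROZEN lets e.g. CPU_UP_PREPARE_FROZEN
	 * (raised during suspend/resume) fall into the CPU_UP_PREPARE
	 * case, so no separate PM notifier is needed.
	 */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		pr_debug("example: preparing cpu %u\n", cpu);
		/* allocate/initialise per-cpu state here */
		break;
	case CPU_DOWN_PREPARE:
		pr_debug("example: taking down cpu %u\n", cpu);
		/* tear down per-cpu state here */
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block example_cpu_nb = {
	.notifier_call = example_cpu_notify,
};

static int __init example_cpu_notifier_init(void)
{
	register_cpu_notifier(&example_cpu_nb);
	return 0;
}
early_initcall(example_cpu_notifier_init);

In perf itself the equivalent registration happens via the perf_cpu_notifier(perf_cpu_notify) call in perf_event_init() shown in the last hunk.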