@@ -129,6 +129,8 @@ struct cpu_hw_events {
 	 * AMD specific bits
 	 */
 	struct amd_nb		*amd_nb;
+
+	void			*kfree_on_online;
 };
 
 #define __EVENT_CONSTRAINT(c, n, m, w) {\
@@ -1466,10 +1468,12 @@ static int __cpuinit
 x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (long)hcpu;
+	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
 	int ret = NOTIFY_OK;
 
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_UP_PREPARE:
+		cpuc->kfree_on_online = NULL;
 		if (x86_pmu.cpu_prepare)
 			ret = x86_pmu.cpu_prepare(cpu);
 		break;
@@ -1479,6 +1483,10 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 		x86_pmu.cpu_starting(cpu);
 		break;
 
+	case CPU_ONLINE:
+		kfree(cpuc->kfree_on_online);
+		break;
+
 	case CPU_DYING:
 		if (x86_pmu.cpu_dying)
 			x86_pmu.cpu_dying(cpu);
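
For reference, a minimal sketch of how a PMU cpu_starting callback might use the new kfree_on_online slot instead of calling kfree() from CPU_STARTING context; the names example_pmu_cpu_starting and find_shared_nb are illustrative only and not part of this patch:

static void example_pmu_cpu_starting(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	struct amd_nb *shared = find_shared_nb(cpu);	/* hypothetical helper */

	if (shared) {
		/*
		 * CPU_STARTING is too early to free memory directly, so park
		 * the now-unused per-CPU allocation here; the CPU_ONLINE case
		 * added to x86_pmu_notifier() above frees it once the CPU is
		 * fully up.
		 */
		cpuc->kfree_on_online = cpuc->amd_nb;
		cpuc->amd_nb = shared;
	}
}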