@@ -165,17 +165,27 @@ static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
return -1;
}

+/* should be called late in the CPU removal sequence so that the stats
+ * memory is still available in case someone tries to use it.
+ */
static void cpufreq_stats_free_table(unsigned int cpu)
{
struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu);
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- if (policy && policy->cpu == cpu)
- sysfs_remove_group(&policy->kobj, &stats_attr_group);
if (stat) {
kfree(stat->time_in_state);
kfree(stat);
}
per_cpu(cpufreq_stats_table, cpu) = NULL;
+}
+
+/* must be called early in the CPU removal sequence (before
+ * cpufreq_remove_dev) so that policy is still valid.
+ */
+static void cpufreq_stats_free_sysfs(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ if (policy && policy->cpu == cpu)
+ sysfs_remove_group(&policy->kobj, &stats_attr_group);
if (policy)
cpufreq_cpu_put(policy);
}
@@ -316,6 +326,9 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
case CPU_ONLINE_FROZEN:
cpufreq_update_policy(cpu);
break;
+ case CPU_DOWN_PREPARE:
+ cpufreq_stats_free_sysfs(cpu);
+ break;
case CPU_DEAD:
case CPU_DEAD_FROZEN:
cpufreq_stats_free_table(cpu);
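Taken together with the previous hunk, the notifier callback now reads roughly as sketched below. This is a reconstruction from the hunks and their context lines, not part of the patch itself: the signature is taken from the hunk headers, while the CPU_ONLINE label and the `unsigned int cpu = (unsigned long)hcpu;` line are the usual idiom for these callbacks and are assumed here.

static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
					       unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;	/* assumed idiom */

	switch (action) {
	case CPU_ONLINE:			/* assumed label */
	case CPU_ONLINE_FROZEN:
		cpufreq_update_policy(cpu);
		break;
	case CPU_DOWN_PREPARE:
		/* policy->kobj is still alive: drop the sysfs group now */
		cpufreq_stats_free_sysfs(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/* the CPU is gone: free the stats memory last */
		cpufreq_stats_free_table(cpu);
		break;
	}
	return NOTIFY_OK;
}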
@@ -324,9 +337,11 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}

+/* priority=1 so this will get called before cpufreq_remove_dev */
static struct notifier_block cpufreq_stat_cpu_notifier __refdata =
{
.notifier_call = cpufreq_stat_cpu_callback,
+ .priority = 1,
};

static struct notifier_block notifier_policy_block = {
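The .priority = 1 field is what enforces the ordering the comments describe: notifier chains invoke their callbacks in descending priority order, and cpufreq core's own hotplug notifier, where cpufreq_remove_dev() ends up being called, registers at the default priority of 0. Below is a minimal, self-contained userspace sketch of that ordering rule; the names nb, nb_register, and nb_call_chain are hypothetical stand-ins for the kernel's notifier machinery, not real kernel APIs.

#include <stdio.h>

struct nb {
	int (*call)(unsigned long action);
	int priority;
	struct nb *next;
};

/* Keep the chain sorted by descending priority, mirroring how the
 * kernel inserts notifier blocks into a chain. */
static void nb_register(struct nb **head, struct nb *n)
{
	while (*head && (*head)->priority >= n->priority)
		head = &(*head)->next;
	n->next = *head;
	*head = n;
}

static void nb_call_chain(struct nb *head, unsigned long action)
{
	for (; head; head = head->next)
		head->call(action);
}

static int stats_cb(unsigned long action)
{
	printf("priority 1: cpufreq_stats removes its sysfs group (kobj alive)\n");
	return 0;
}

static int core_cb(unsigned long action)
{
	printf("priority 0: cpufreq core runs cpufreq_remove_dev()\n");
	return 0;
}

int main(void)
{
	struct nb core  = { core_cb, 0, NULL };		/* default priority */
	struct nb stats = { stats_cb, 1, NULL };	/* .priority = 1 */
	struct nb *chain = NULL;

	nb_register(&chain, &core);
	nb_register(&chain, &stats);
	nb_call_chain(chain, 0);	/* stats_cb runs before core_cb */
	return 0;
}

Even though the priority-1 block is registered second, it lands at the head of the chain, so running the sketch prints the stats line first. That mirrors why the stats sysfs group is already gone by the time cpufreq_remove_dev() tears down policy->kobj.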