@@ -40,16 +40,6 @@
 /* OPP tolerance in percentage */
 #define OPP_TOLERANCE 4
 
-#ifdef CONFIG_SMP
-struct lpj_info {
-	unsigned long ref;
-	unsigned int freq;
-};
-
-static DEFINE_PER_CPU(struct lpj_info, lpj_ref);
-static struct lpj_info global_lpj_ref;
-#endif
-
 static struct cpufreq_frequency_table *freq_table;
 static atomic_t freq_table_users = ATOMIC_INIT(0);
 static struct clk *mpu_clk;
@@ -161,31 +151,6 @@ static int omap_target(struct cpufreq_policy *policy,
 	}
 
 	freqs.new = omap_getspeed(policy->cpu);
-#ifdef CONFIG_SMP
-	/*
-	 * Note that loops_per_jiffy is not updated on SMP systems in
-	 * cpufreq driver. So, update the per-CPU loops_per_jiffy value
-	 * on frequency transition. We need to update all dependent CPUs.
-	 */
-	for_each_cpu(i, policy->cpus) {
-		struct lpj_info *lpj = &per_cpu(lpj_ref, i);
-		if (!lpj->freq) {
-			lpj->ref = per_cpu(cpu_data, i).loops_per_jiffy;
-			lpj->freq = freqs.old;
-		}
-
-		per_cpu(cpu_data, i).loops_per_jiffy =
-			cpufreq_scale(lpj->ref, lpj->freq, freqs.new);
-	}
-
-	/* And don't forget to adjust the global one */
-	if (!global_lpj_ref.freq) {
-		global_lpj_ref.ref = loops_per_jiffy;
-		global_lpj_ref.freq = freqs.old;
-	}
-	loops_per_jiffy = cpufreq_scale(global_lpj_ref.ref, global_lpj_ref.freq,
-					freqs.new);
-#endif
 
 done:
	/* notifiers */