@@ -52,47 +52,6 @@ static inline bool has_target(void)
 	return cpufreq_driver->target_index || cpufreq_driver->target;
 }
 
-/*
- * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
- * all cpufreq/hotplug/workqueue/etc related lock issues.
- *
- * The rules for this semaphore:
- * - Any routine that wants to read from the policy structure will
- *   do a down_read on this semaphore.
- * - Any routine that will write to the policy structure and/or may take away
- *   the policy altogether (eg. CPU hotplug), will hold this lock in write
- *   mode before doing so.
- *
- * Additional rules:
- * - Governor routines that can be called in cpufreq hotplug path should not
- *   take this sem as top level hotplug notifier handler takes this.
- * - Lock should not be held across
- *     __cpufreq_governor(data, CPUFREQ_GOV_STOP);
- */
-static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
-
-#define lock_policy_rwsem(mode, cpu)					\
-static void lock_policy_rwsem_##mode(int cpu)				\
-{									\
-	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);	\
-	BUG_ON(!policy);						\
-	down_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu));		\
-}
-
-lock_policy_rwsem(read, cpu);
-lock_policy_rwsem(write, cpu);
-
-#define unlock_policy_rwsem(mode, cpu)					\
-static void unlock_policy_rwsem_##mode(int cpu)				\
-{									\
-	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);	\
-	BUG_ON(!policy);						\
-	up_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu));		\
-}
-
-unlock_policy_rwsem(read, cpu);
-unlock_policy_rwsem(write, cpu);
-
 /*
  * rwsem to guarantee that cpufreq driver module doesn't unload during critical
  * sections
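The lock_policy_rwsem(read, cpu); and lock_policy_rwsem(write, cpu); lines
above are macro instantiations, not calls: each one token-pastes a helper
function. Written out by hand, the read instance expands to roughly the
following (a sketch of the preprocessor output, not code from this patch):

    static void lock_policy_rwsem_read(int cpu)
    {
    	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
    	BUG_ON(!policy);
    	down_read(&per_cpu(cpu_policy_rwsem, policy->cpu));
    }

Note the double indirection the rest of this patch removes: a CPU number is
mapped to a policy only so the policy can be mapped back to a CPU that indexes
the per-CPU semaphore array.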
@@ -688,14 +647,14 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
 	if (!down_read_trylock(&cpufreq_rwsem))
 		return -EINVAL;
 
-	lock_policy_rwsem_read(policy->cpu);
+	down_read(&policy->rwsem);
 
 	if (fattr->show)
 		ret = fattr->show(policy, buf);
 	else
 		ret = -EIO;
 
-	unlock_policy_rwsem_read(policy->cpu);
+	up_read(&policy->rwsem);
 	up_read(&cpufreq_rwsem);
 
 	return ret;
@@ -716,14 +675,14 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
 	if (!down_read_trylock(&cpufreq_rwsem))
 		goto unlock;
 
-	lock_policy_rwsem_write(policy->cpu);
+	down_write(&policy->rwsem);
 
 	if (fattr->store)
 		ret = fattr->store(policy, buf, count);
 	else
 		ret = -EIO;
 
-	unlock_policy_rwsem_write(policy->cpu);
+	up_write(&policy->rwsem);
 
 	up_read(&cpufreq_rwsem);
 unlock:
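The show() and store() conversions establish the pattern used for the rest of
the file: sysfs readers take the per-policy semaphore shared, writers take it
exclusive, and both nest inside cpufreq_rwsem (which only pins the driver
module). A minimal sketch of the reader side, assuming nothing beyond the
rwsem member this patch introduces (the helper name is illustrative):

    /* Read one policy field under the per-policy lock. */
    static unsigned int get_cur_freq(struct cpufreq_policy *policy)
    {
    	unsigned int freq;

    	down_read(&policy->rwsem);	/* shared; concurrent readers allowed */
    	freq = policy->cur;
    	up_read(&policy->rwsem);

    	return freq;
    }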
@@ -900,7 +859,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
 		}
 	}
 
-	lock_policy_rwsem_write(policy->cpu);
+	down_write(&policy->rwsem);
 
 	write_lock_irqsave(&cpufreq_driver_lock, flags);
 
@@ -908,7 +867,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
 	per_cpu(cpufreq_cpu_data, cpu) = policy;
 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-	unlock_policy_rwsem_write(policy->cpu);
+	up_write(&policy->rwsem);
 
 	if (has_target()) {
 		if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
@@ -955,6 +914,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(void)
 		goto err_free_cpumask;
 
 	INIT_LIST_HEAD(&policy->policy_list);
+	init_rwsem(&policy->rwsem);
+
 	return policy;
 
 err_free_cpumask:
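init_rwsem() must run before the first down_read()/down_write() on the
semaphore, and doing it in cpufreq_policy_alloc() puts the initialization at
the single point where every policy object is created. The general shape,
reduced to a sketch with hypothetical names:

    struct my_obj {
    	struct rw_semaphore rwsem;
    	/* ... payload fields ... */
    };

    static struct my_obj *my_obj_alloc(void)
    {
    	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

    	if (!obj)
    		return NULL;

    	/* Initialize before the object becomes reachable by others. */
    	init_rwsem(&obj->rwsem);
    	return obj;
    }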
@@ -977,19 +938,12 @@ static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
 	if (WARN_ON(cpu == policy->cpu))
 		return;
 
-	/*
-	 * Take direct locks as lock_policy_rwsem_write wouldn't work here.
-	 * Also lock for last cpu is enough here as contention will happen only
-	 * after policy->cpu is changed and after it is changed, other threads
-	 * will try to acquire lock for new cpu. And policy is already updated
-	 * by then.
-	 */
-	down_write(&per_cpu(cpu_policy_rwsem, policy->cpu));
+	down_write(&policy->rwsem);
 
 	policy->last_cpu = policy->cpu;
 	policy->cpu = cpu;
 
-	up_write(&per_cpu(cpu_policy_rwsem, policy->last_cpu));
+	up_write(&policy->rwsem);
 
 	cpufreq_frequency_table_update_policy_cpu(policy);
 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
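update_policy_cpu() is where the old scheme was most fragile: the semaphore
was looked up through policy->cpu, which this very function changes, so the
code had to lock via the old key and unlock via policy->last_cpu to reach the
same semaphore again. A sketch of that hazard, with the reasoning spelled out:

    /* Old scheme (sketch): the unlock key had to be the *saved* cpu. */
    down_write(&per_cpu(cpu_policy_rwsem, policy->cpu));	/* key: old cpu */
    policy->last_cpu = policy->cpu;
    policy->cpu = cpu;			/* lookup key changes under the lock */
    up_write(&per_cpu(cpu_policy_rwsem, policy->last_cpu));	/* same sem */

With the semaphore embedded in the policy, lock and unlock name the same
object no matter what happens to policy->cpu in between.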
@@ -1181,9 +1135,9 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
 	if (ret) {
 		pr_err("%s: Failed to move kobj: %d", __func__, ret);
 
-		lock_policy_rwsem_write(old_cpu);
+		down_write(&policy->rwsem);
 		cpumask_set_cpu(old_cpu, policy->cpus);
-		unlock_policy_rwsem_write(old_cpu);
+		up_write(&policy->rwsem);
 
 		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
 					"cpufreq");
@@ -1234,9 +1188,9 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
 			policy->governor->name, CPUFREQ_NAME_LEN);
 #endif
 
-	lock_policy_rwsem_read(cpu);
+	down_read(&policy->rwsem);
 	cpus = cpumask_weight(policy->cpus);
-	unlock_policy_rwsem_read(cpu);
+	up_read(&policy->rwsem);
 
 	if (cpu != policy->cpu) {
 		if (!frozen)
@@ -1276,12 +1230,12 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
 		return -EINVAL;
 	}
 
-	lock_policy_rwsem_write(cpu);
+	down_write(&policy->rwsem);
 	cpus = cpumask_weight(policy->cpus);
 
 	if (cpus > 1)
 		cpumask_clear_cpu(cpu, policy->cpus);
-	unlock_policy_rwsem_write(cpu);
+	up_write(&policy->rwsem);
 
 	/* If cpu is last user of policy, free policy */
 	if (cpus == 1) {
@@ -1296,10 +1250,10 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
 	}
 
 	if (!frozen) {
-		lock_policy_rwsem_read(cpu);
+		down_read(&policy->rwsem);
 		kobj = &policy->kobj;
 		cmp = &policy->kobj_unregister;
-		unlock_policy_rwsem_read(cpu);
+		up_read(&policy->rwsem);
 		kobject_put(kobj);
 
 		/*
@@ -1479,19 +1433,22 @@ static unsigned int __cpufreq_get(unsigned int cpu)
  */
 unsigned int cpufreq_get(unsigned int cpu)
 {
+	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
 	unsigned int ret_freq = 0;
 
 	if (cpufreq_disabled() || !cpufreq_driver)
 		return -ENOENT;
 
+	BUG_ON(!policy);
+
 	if (!down_read_trylock(&cpufreq_rwsem))
 		return 0;
 
-	lock_policy_rwsem_read(cpu);
+	down_read(&policy->rwsem);
 
 	ret_freq = __cpufreq_get(cpu);
 
-	unlock_policy_rwsem_read(cpu);
+	up_read(&policy->rwsem);
 	up_read(&cpufreq_rwsem);
 
 	return ret_freq;
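cpufreq_get() shows the full locking ladder for an external entry point. A
condensed sketch of the ordering every path in this file observes (take the
outer lock before the inner one, release in reverse; the helper name is
illustrative):

    /* Sketch: lock ordering for reading a policy from outside cpufreq. */
    static unsigned int read_locked(struct cpufreq_policy *policy,
    				unsigned int cpu)
    {
    	unsigned int freq = 0;

    	if (!down_read_trylock(&cpufreq_rwsem))	/* outer: driver may unload */
    		return 0;

    	down_read(&policy->rwsem);		/* inner: policy contents */
    	freq = __cpufreq_get(cpu);
    	up_read(&policy->rwsem);

    	up_read(&cpufreq_rwsem);
    	return freq;
    }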
@@ -1744,11 +1701,11 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
 {
 	int ret = -EINVAL;
 
-	lock_policy_rwsem_write(policy->cpu);
+	down_write(&policy->rwsem);
 
 	ret = __cpufreq_driver_target(policy, target_freq, relation);
 
-	unlock_policy_rwsem_write(policy->cpu);
+	up_write(&policy->rwsem);
 
 	return ret;
 }
@@ -1979,10 +1936,10 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 		/* end old governor */
 		if (policy->governor) {
 			__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
-			unlock_policy_rwsem_write(new_policy->cpu);
+			up_write(&policy->rwsem);
 			__cpufreq_governor(policy,
 					CPUFREQ_GOV_POLICY_EXIT);
-			lock_policy_rwsem_write(new_policy->cpu);
+			down_write(&policy->rwsem);
 		}
 
 		/* start new governor */
@@ -1991,10 +1948,10 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 			if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) {
 				failed = 0;
 			} else {
-				unlock_policy_rwsem_write(new_policy->cpu);
+				up_write(&policy->rwsem);
 				__cpufreq_governor(policy,
 						CPUFREQ_GOV_POLICY_EXIT);
-				lock_policy_rwsem_write(new_policy->cpu);
+				down_write(&policy->rwsem);
 			}
 		}
 
@@ -2040,7 +1997,7 @@ int cpufreq_update_policy(unsigned int cpu)
 		goto no_policy;
 	}
 
-	lock_policy_rwsem_write(cpu);
+	down_write(&policy->rwsem);
 
 	pr_debug("updating policy for CPU %u\n", cpu);
 	memcpy(&new_policy, policy, sizeof(*policy));
@@ -2067,7 +2024,7 @@ int cpufreq_update_policy(unsigned int cpu)
 
 	ret = cpufreq_set_policy(policy, &new_policy);
 
-	unlock_policy_rwsem_write(cpu);
+	up_write(&policy->rwsem);
 
 	cpufreq_cpu_put(policy);
 no_policy:
@@ -2225,14 +2182,9 @@ EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
 
 static int __init cpufreq_core_init(void)
 {
-	int cpu;
-
 	if (cpufreq_disabled())
 		return -ENODEV;
 
-	for_each_possible_cpu(cpu)
-		init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
-
 	cpufreq_global_kobject = kobject_create();
 	BUG_ON(!cpufreq_global_kobject);
 	register_syscore_ops(&cpufreq_syscore_ops);
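The file-scope counterpart of all of the above is the structure change
itself, which lives in include/linux/cpufreq.h and is not part of this
excerpt; in rough outline:

    struct cpufreq_policy {
    	/* ... existing fields ... */

    	/*
    	 * Serializes readers and writers of this policy, replacing the
    	 * per-CPU cpu_policy_rwsem array this patch removes.
    	 */
    	struct rw_semaphore	rwsem;
    };

One semaphore per policy also means CPUs sharing a policy share a lock, which
is exactly the granularity the old code approximated by always indexing the
per-CPU array through policy->cpu.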