@@ -59,8 +59,6 @@ static DEFINE_SPINLOCK(cpufreq_driver_lock);
* mode before doing so.
*
* Additional rules:
- * - All holders of the lock should check to make sure that the CPU they
- * are concerned with are online after they get the lock.
* - Governor routines that can be called in cpufreq hotplug path should not
* take this sem as top level hotplug notifier handler takes this.
* - Lock should not be held across
@@ -70,38 +68,28 @@ static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
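+/*
+ * Generate lock_policy_rwsem_{read,write}(): take the rwsem of the
+ * policy that owns @cpu in the requested mode.
+ */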
#define lock_policy_rwsem(mode, cpu) \
-static int lock_policy_rwsem_##mode \
-(int cpu) \
+static int lock_policy_rwsem_##mode(int cpu) \
{ \
int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
BUG_ON(policy_cpu == -1); \
down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
- if (unlikely(!cpu_online(cpu))) { \
- up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
- return -1; \
- } \
\
return 0; \
}
lock_policy_rwsem(read, cpu);
-
lock_policy_rwsem(write, cpu);
-static void unlock_policy_rwsem_read(int cpu)
-{
- int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
- BUG_ON(policy_cpu == -1);
- up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
-}
-
-static void unlock_policy_rwsem_write(int cpu)
-{
- int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
- BUG_ON(policy_cpu == -1);
- up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
+#define unlock_policy_rwsem(mode, cpu) \
+static void unlock_policy_rwsem_##mode(int cpu) \
+{ \
+ int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
+ BUG_ON(policy_cpu == -1); \
+ up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
}
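+/* Instantiate the read and write unlock helpers, mirroring the lock side */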
+unlock_policy_rwsem(read, cpu);
+unlock_policy_rwsem(write, cpu);
/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
@@ -180,6 +168,9 @@ err_out:
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
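+ /* No-op when cpufreq has been globally disabled */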
+ if (cpufreq_disabled())
+ return NULL;
+
return __cpufreq_cpu_get(cpu, false);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
@@ -198,6 +189,9 @@ static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
void cpufreq_cpu_put(struct cpufreq_policy *data)
{
+ if (cpufreq_disabled())
+ return;
+
__cpufreq_cpu_put(data, false);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
@@ -261,14 +255,21 @@ static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
{
struct cpufreq_policy *policy;
+ unsigned long flags;
BUG_ON(irqs_disabled());
+ if (cpufreq_disabled())
+ return;
+
freqs->flags = cpufreq_driver->flags;
pr_debug("notification %u of frequency transition to %u kHz\n",
state, freqs->new);
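+ /* cpufreq_cpu_data is protected by cpufreq_driver_lock */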
+ spin_lock_irqsave(&cpufreq_driver_lock, flags);
policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
switch (state) {
case CPUFREQ_PRECHANGE:
@@ -542,8 +543,6 @@ static ssize_t show_cpus(const struct cpumask *mask, char *buf)
*/
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
- if (cpumask_empty(policy->related_cpus))
- return show_cpus(policy->cpus, buf);
return show_cpus(policy->related_cpus, buf);
}
@@ -699,87 +698,6 @@ static struct kobj_type ktype_cpufreq = {
.release = cpufreq_sysfs_release,
};
-/*
- * Returns:
- * Negative: Failure
- * 0: Success
- * Positive: When we have a managed CPU and the sysfs got symlinked
- */
-static int cpufreq_add_dev_policy(unsigned int cpu,
- struct cpufreq_policy *policy,
- struct device *dev)
-{
- int ret = 0;
-#ifdef CONFIG_SMP
- unsigned long flags;
- unsigned int j;
-#ifdef CONFIG_HOTPLUG_CPU
- struct cpufreq_governor *gov;
-
- gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
- if (gov) {
- policy->governor = gov;
- pr_debug("Restoring governor %s for cpu %d\n",
- policy->governor->name, cpu);
- }
-#endif
-
- for_each_cpu(j, policy->cpus) {
- struct cpufreq_policy *managed_policy;
-
- if (cpu == j)
- continue;
-
- /* Check for existing affected CPUs.
- * They may not be aware of it due to CPU Hotplug.
- * cpufreq_cpu_put is called when the device is removed
- * in __cpufreq_remove_dev()
- */
- managed_policy = cpufreq_cpu_get(j);
- if (unlikely(managed_policy)) {
-
- /* Set proper policy_cpu */
- unlock_policy_rwsem_write(cpu);
- per_cpu(cpufreq_policy_cpu, cpu) = managed_policy->cpu;
-
- if (lock_policy_rwsem_write(cpu) < 0) {
- /* Should not go through policy unlock path */
- if (cpufreq_driver->exit)
- cpufreq_driver->exit(policy);
- cpufreq_cpu_put(managed_policy);
- return -EBUSY;
- }
-
- spin_lock_irqsave(&cpufreq_driver_lock, flags);
- cpumask_copy(managed_policy->cpus, policy->cpus);
- per_cpu(cpufreq_cpu_data, cpu) = managed_policy;
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
- pr_debug("CPU already managed, adding link\n");
- ret = sysfs_create_link(&dev->kobj,
- &managed_policy->kobj,
- "cpufreq");
- if (ret)
- cpufreq_cpu_put(managed_policy);
- /*
- * Success. We only needed to be added to the mask.
- * Call driver->exit() because only the cpu parent of
- * the kobj needed to call init().
- */
- if (cpufreq_driver->exit)
- cpufreq_driver->exit(policy);
-
- if (!ret)
- return 1;
- else
- return ret;
- }
- }
-#endif
- return ret;
-}
-
-
/* symlink affected CPUs */
static int cpufreq_add_dev_symlink(unsigned int cpu,
struct cpufreq_policy *policy)
@@ -793,8 +711,6 @@ static int cpufreq_add_dev_symlink(unsigned int cpu,
if (j == cpu)
continue;
- if (!cpu_online(j))
- continue;
pr_debug("CPU %u already managed, adding link\n", j);
managed_policy = cpufreq_cpu_get(cpu);
@@ -851,8 +767,6 @@ static int cpufreq_add_dev_interface(unsigned int cpu,
spin_lock_irqsave(&cpufreq_driver_lock, flags);
for_each_cpu(j, policy->cpus) {
- if (!cpu_online(j))
- continue;
per_cpu(cpufreq_cpu_data, j) = policy;
per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
}
@@ -884,6 +798,42 @@ err_out_kobj_put:
return ret;
}
+#ifdef CONFIG_HOTPLUG_CPU
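+/*
+ * Attach a hot-added CPU to the policy already serving one of its
+ * siblings: stop the governor, update the policy's masks and per-cpu
+ * pointers under cpufreq_driver_lock, restart the governor and add a
+ * sysfs symlink from the new CPU to the policy kobject.
+ */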
+static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
+ struct device *dev)
+{
+ struct cpufreq_policy *policy;
+ int ret = 0;
+ unsigned long flags;
+
+ policy = cpufreq_cpu_get(sibling);
+ WARN_ON(!policy);
+
+ __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+
+ lock_policy_rwsem_write(sibling);
+
+ spin_lock_irqsave(&cpufreq_driver_lock, flags);
+
+ cpumask_set_cpu(cpu, policy->cpus);
+ per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
+ per_cpu(cpufreq_cpu_data, cpu) = policy;
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ unlock_policy_rwsem_write(sibling);
+
+ __cpufreq_governor(policy, CPUFREQ_GOV_START);
+ __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+
+ ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
+ if (ret) {
+ cpufreq_cpu_put(policy);
+ return ret;
+ }
+
+ return 0;
+}
+#endif
/**
* cpufreq_add_dev - add a CPU device
@@ -896,12 +846,12 @@ err_out_kobj_put:
*/
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
- unsigned int cpu = dev->id;
- int ret = 0, found = 0;
+ unsigned int j, cpu = dev->id;
+ int ret = -ENOMEM;
struct cpufreq_policy *policy;
unsigned long flags;
- unsigned int j;
#ifdef CONFIG_HOTPLUG_CPU
+ struct cpufreq_governor *gov;
int sibling;
#endif
@@ -918,6 +868,19 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
cpufreq_cpu_put(policy);
return 0;
}
+
+#ifdef CONFIG_HOTPLUG_CPU
+ /* Check if this cpu was hot-unplugged earlier and has siblings */
+ spin_lock_irqsave(&cpufreq_driver_lock, flags);
+ for_each_online_cpu(sibling) {
+ struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
+ if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ return cpufreq_add_policy_cpu(cpu, sibling, dev);
+ }
+ }
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+#endif
#endif
if (!try_module_get(cpufreq_driver->owner)) {
@@ -925,7 +888,6 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
goto module_out;
}
- ret = -ENOMEM;
policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
if (!policy)
goto nomem_out;
@@ -937,66 +899,58 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
goto err_free_cpumask;
policy->cpu = cpu;
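+ /* Default governor; a previously saved governor may override this below */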
+ policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
cpumask_copy(policy->cpus, cpumask_of(cpu));
/* Initially set CPU itself as the policy_cpu */
per_cpu(cpufreq_policy_cpu, cpu) = cpu;
- ret = (lock_policy_rwsem_write(cpu) < 0);
- WARN_ON(ret);
init_completion(&policy->kobj_unregister);
INIT_WORK(&policy->update, handle_update);
- /* Set governor before ->init, so that driver could check it */
-#ifdef CONFIG_HOTPLUG_CPU
- for_each_online_cpu(sibling) {
- struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
- if (cp && cp->governor &&
- (cpumask_test_cpu(cpu, cp->related_cpus))) {
- policy->governor = cp->governor;
- found = 1;
- break;
- }
- }
-#endif
- if (!found)
- policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
/* call driver. From then on the cpufreq must be able
* to accept all calls to ->verify and ->setpolicy for this CPU
*/
ret = cpufreq_driver->init(policy);
if (ret) {
pr_debug("initialization failed\n");
- goto err_unlock_policy;
+ goto err_set_policy_cpu;
}
+
+ /* related cpus should at least have policy->cpus */
+ cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
+
+ /*
+ * affected cpus must always be the ones that are online. We aren't
+ * managing offline cpus here.
+ */
+ cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
+
policy->user_policy.min = policy->min;
policy->user_policy.max = policy->max;
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_START, policy);
- ret = cpufreq_add_dev_policy(cpu, policy, dev);
- if (ret) {
- if (ret > 0)
- /* This is a managed cpu, symlink created,
- exit with 0 */
- ret = 0;
- goto err_unlock_policy;
+#ifdef CONFIG_HOTPLUG_CPU
+ gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
+ if (gov) {
+ policy->governor = gov;
+ pr_debug("Restoring governor %s for cpu %d\n",
+ policy->governor->name, cpu);
}
+#endif
ret = cpufreq_add_dev_interface(cpu, policy, dev);
if (ret)
goto err_out_unregister;
- unlock_policy_rwsem_write(cpu);
-
kobject_uevent(&policy->kobj, KOBJ_ADD);
module_put(cpufreq_driver->owner);
pr_debug("initialization complete\n");
return 0;
-
err_out_unregister:
spin_lock_irqsave(&cpufreq_driver_lock, flags);
for_each_cpu(j, policy->cpus)
@@ -1006,8 +960,8 @@ err_out_unregister:
kobject_put(&policy->kobj);
wait_for_completion(&policy->kobj_unregister);
-err_unlock_policy:
- unlock_policy_rwsem_write(cpu);
+err_set_policy_cpu:
+ per_cpu(cpufreq_policy_cpu, cpu) = -1;
free_cpumask_var(policy->related_cpus);
err_free_cpumask:
free_cpumask_var(policy->cpus);
@@ -1019,6 +973,22 @@ module_out:
return ret;
}
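+/*
+ * Hand policy ownership over to @cpu: remember the previous owner in
+ * policy->last_cpu, repoint every sibling's policy_cpu entry and tell
+ * listeners via the CPUFREQ_UPDATE_POLICY_CPU notification.
+ */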
+static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
+{
+ int j;
+
+ policy->last_cpu = policy->cpu;
+ policy->cpu = cpu;
+
+ for_each_cpu(j, policy->cpus)
+ per_cpu(cpufreq_policy_cpu, j) = cpu;
+
+#ifdef CONFIG_CPU_FREQ_TABLE
+ cpufreq_frequency_table_update_policy_cpu(policy);
+#endif
+ blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+ CPUFREQ_UPDATE_POLICY_CPU, policy);
+}
/**
* __cpufreq_remove_dev - remove a CPU device
@@ -1029,129 +999,103 @@ module_out:
*/
static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
- unsigned int cpu = dev->id;
+ unsigned int cpu = dev->id, ret, cpus;
unsigned long flags;
struct cpufreq_policy *data;
struct kobject *kobj;
struct completion *cmp;
-#ifdef CONFIG_SMP
struct device *cpu_dev;
- unsigned int j;
-#endif
- pr_debug("unregistering CPU %u\n", cpu);
+ pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
spin_lock_irqsave(&cpufreq_driver_lock, flags);
+
data = per_cpu(cpufreq_cpu_data, cpu);
+ per_cpu(cpufreq_cpu_data, cpu) = NULL;
+
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
if (!data) {
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- unlock_policy_rwsem_write(cpu);
+ pr_debug("%s: No cpu_data found\n", __func__);
return -EINVAL;
}
- per_cpu(cpufreq_cpu_data, cpu) = NULL;
+ if (cpufreq_driver->target)
+ __cpufreq_governor(data, CPUFREQ_GOV_STOP);
-#ifdef CONFIG_SMP
- /* if this isn't the CPU which is the parent of the kobj, we
- * only need to unlink, put and exit
- */
- if (unlikely(cpu != data->cpu)) {
- pr_debug("removing link\n");
- cpumask_clear_cpu(cpu, data->cpus);
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- kobj = &dev->kobj;
- cpufreq_cpu_put(data);
- unlock_policy_rwsem_write(cpu);
- sysfs_remove_link(kobj, "cpufreq");
- return 0;
- }
+#ifdef CONFIG_HOTPLUG_CPU
+ if (!cpufreq_driver->setpolicy)
+ strncpy(per_cpu(cpufreq_cpu_governor, cpu),
+ data->governor->name, CPUFREQ_NAME_LEN);
#endif
-#ifdef CONFIG_SMP
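+ /* Drop this CPU from the policy, remembering how many CPUs it served */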
+ WARN_ON(lock_policy_rwsem_write(cpu));
+ cpus = cpumask_weight(data->cpus);
+ cpumask_clear_cpu(cpu, data->cpus);
+ unlock_policy_rwsem_write(cpu);
-#ifdef CONFIG_HOTPLUG_CPU
- strncpy(per_cpu(cpufreq_cpu_governor, cpu), data->governor->name,
- CPUFREQ_NAME_LEN);
-#endif
+ if (cpu != data->cpu) {
+ sysfs_remove_link(&dev->kobj, "cpufreq");
+ } else if (cpus > 1) {
+ /* first sibling now owns the new sysfs dir */
+ cpu_dev = get_cpu_device(cpumask_first(data->cpus));
+ sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
+ ret = kobject_move(&data->kobj, &cpu_dev->kobj);
+ if (ret) {
+ pr_err("%s: Failed to move kobj: %d", __func__, ret);
- /* if we have other CPUs still registered, we need to unlink them,
- * or else wait_for_completion below will lock up. Clean the
- * per_cpu(cpufreq_cpu_data) while holding the lock, and remove
- * the sysfs links afterwards.
- */
- if (unlikely(cpumask_weight(data->cpus) > 1)) {
- for_each_cpu(j, data->cpus) {
- if (j == cpu)
- continue;
- per_cpu(cpufreq_cpu_data, j) = NULL;
- }
- }
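+ /* Move failed: put the CPU back into the policy and restore its symlink */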
+ WARN_ON(lock_policy_rwsem_write(cpu));
+ cpumask_set_cpu(cpu, data->cpus);
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ spin_lock_irqsave(&cpufreq_driver_lock, flags);
+ per_cpu(cpufreq_cpu_data, cpu) = data;
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- if (unlikely(cpumask_weight(data->cpus) > 1)) {
- for_each_cpu(j, data->cpus) {
- if (j == cpu)
- continue;
- pr_debug("removing link for cpu %u\n", j);
-#ifdef CONFIG_HOTPLUG_CPU
- strncpy(per_cpu(cpufreq_cpu_governor, j),
- data->governor->name, CPUFREQ_NAME_LEN);
-#endif
- cpu_dev = get_cpu_device(j);
- kobj = &cpu_dev->kobj;
unlock_policy_rwsem_write(cpu);
- sysfs_remove_link(kobj, "cpufreq");
- lock_policy_rwsem_write(cpu);
- cpufreq_cpu_put(data);
- }
- }
-#else
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-#endif
- if (cpufreq_driver->target)
- __cpufreq_governor(data, CPUFREQ_GOV_STOP);
+ ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
+ "cpufreq");
+ return -EINVAL;
+ }
- kobj = &data->kobj;
- cmp = &data->kobj_unregister;
- unlock_policy_rwsem_write(cpu);
- kobject_put(kobj);
+ WARN_ON(lock_policy_rwsem_write(cpu));
+ update_policy_cpu(data, cpu_dev->id);
+ unlock_policy_rwsem_write(cpu);
+ pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
+ __func__, cpu_dev->id, cpu);
+ }
- /* we need to make sure that the underlying kobj is actually
- * not referenced anymore by anybody before we proceed with
- * unloading.
- */
- pr_debug("waiting for dropping of refcount\n");
- wait_for_completion(cmp);
- pr_debug("wait complete\n");
+ pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
+ cpufreq_cpu_put(data);
- lock_policy_rwsem_write(cpu);
- if (cpufreq_driver->exit)
- cpufreq_driver->exit(data);
- unlock_policy_rwsem_write(cpu);
+ /* If cpu is last user of policy, free policy */
+ if (cpus == 1) {
+ lock_policy_rwsem_read(cpu);
+ kobj = &data->kobj;
+ cmp = &data->kobj_unregister;
+ unlock_policy_rwsem_read(cpu);
+ kobject_put(kobj);
+
+ /* we need to make sure that the underlying kobj is actually
+ * not referenced anymore by anybody before we proceed with
+ * unloading.
+ */
+ pr_debug("waiting for dropping of refcount\n");
+ wait_for_completion(cmp);
+ pr_debug("wait complete\n");
-#ifdef CONFIG_HOTPLUG_CPU
- /* when the CPU which is the parent of the kobj is hotplugged
- * offline, check for siblings, and create cpufreq sysfs interface
- * and symlinks
- */
- if (unlikely(cpumask_weight(data->cpus) > 1)) {
- /* first sibling now owns the new sysfs dir */
- cpumask_clear_cpu(cpu, data->cpus);
- cpufreq_add_dev(get_cpu_device(cpumask_first(data->cpus)), NULL);
+ if (cpufreq_driver->exit)
+ cpufreq_driver->exit(data);
- /* finally remove our own symlink */
- lock_policy_rwsem_write(cpu);
- __cpufreq_remove_dev(dev, sif);
+ free_cpumask_var(data->related_cpus);
+ free_cpumask_var(data->cpus);
+ kfree(data);
+ } else if (cpufreq_driver->target) {
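+ /* Other CPUs still use this policy; restart its governor */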
+ __cpufreq_governor(data, CPUFREQ_GOV_START);
+ __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
}
-#endif
-
- free_cpumask_var(data->related_cpus);
- free_cpumask_var(data->cpus);
- kfree(data);
+ per_cpu(cpufreq_policy_cpu, cpu) = -1;
return 0;
}
@@ -1164,9 +1108,6 @@ static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
if (cpu_is_offline(cpu))
return 0;
- if (unlikely(lock_policy_rwsem_write(cpu)))
- BUG();
-
retval = __cpufreq_remove_dev(dev, sif);
return retval;
}
@@ -1215,9 +1156,13 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
*/
unsigned int cpufreq_quick_get(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy;
unsigned int ret_freq = 0;
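+ /* setpolicy drivers don't maintain policy->cur, so query the driver */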
+ if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
+ return cpufreq_driver->get(cpu);
+
+ policy = cpufreq_cpu_get(cpu);
if (policy) {
ret_freq = policy->cur;
cpufreq_cpu_put(policy);
@@ -1385,6 +1330,20 @@ static struct syscore_ops cpufreq_syscore_ops = {
.resume = cpufreq_bp_resume,
};
+/**
+ * cpufreq_get_current_driver - return current driver's name
+ *
+ * Return the name string of the currently loaded cpufreq driver
+ * or NULL, if none.
+ */
+const char *cpufreq_get_current_driver(void)
+{
+ if (cpufreq_driver)
+ return cpufreq_driver->name;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
/*********************************************************************
* NOTIFIER LISTS INTERFACE *
@@ -1407,6 +1366,9 @@ int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
int ret;
+ if (cpufreq_disabled())
+ return -EINVAL;
+
WARN_ON(!init_cpufreq_transition_notifier_list_called);
switch (list) {
@@ -1441,6 +1403,9 @@ int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
int ret;
+ if (cpufreq_disabled())
+ return -EINVAL;
+
switch (list) {
case CPUFREQ_TRANSITION_NOTIFIER:
ret = srcu_notifier_chain_unregister(
@@ -1486,7 +1451,7 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
if (target_freq == policy->cur)
return 0;
- if (cpu_online(policy->cpu) && cpufreq_driver->target)
+ if (cpufreq_driver->target)
retval = cpufreq_driver->target(policy, target_freq, relation);
return retval;
@@ -1521,7 +1486,10 @@ int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
{
int ret = 0;
- if (!(cpu_online(cpu) && cpufreq_driver->getavg))
+ if (cpufreq_disabled())
+ return ret;
+
+ if (!cpufreq_driver->getavg)
return 0;
policy = cpufreq_cpu_get(policy->cpu);
@@ -1576,6 +1544,11 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
policy->cpu, event);
ret = policy->governor->governor(policy, event);
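+ /* Track the number of policies currently using this governor */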
+ if (event == CPUFREQ_GOV_START)
+ policy->governor->initialized++;
+ else if (event == CPUFREQ_GOV_STOP)
+ policy->governor->initialized--;
+
/* we keep one module reference alive for
each CPU governed by this CPU */
if ((event != CPUFREQ_GOV_START) || ret)
@@ -1599,6 +1572,7 @@ int cpufreq_register_governor(struct cpufreq_governor *governor)
mutex_lock(&cpufreq_governor_mutex);
+ governor->initialized = 0;
err = -EBUSY;
if (__find_governor(governor->name) == NULL) {
err = 0;
@@ -1796,7 +1770,7 @@ int cpufreq_update_policy(unsigned int cpu)
pr_debug("Driver did not initialize current freq");
data->cur = policy.cur;
} else {
- if (data->cur != policy.cur)
+ if (data->cur != policy.cur && cpufreq_driver->target)
cpufreq_out_of_sync(cpu, data->cur,
policy.cur);
}
@@ -1828,9 +1802,6 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
break;
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
- if (unlikely(lock_policy_rwsem_write(cpu)))
- BUG();
-
__cpufreq_remove_dev(dev, NULL);
break;
case CPU_DOWN_FAILED: