@@ -58,8 +58,9 @@ struct cpufreq_cooling_device {
 };
 static LIST_HEAD(cooling_cpufreq_list);
 static DEFINE_IDR(cpufreq_idr);
+static DEFINE_MUTEX(cooling_cpufreq_lock);
 
-static struct mutex cooling_cpufreq_lock;
+static unsigned int cpufreq_dev_count;
 
 /* notify_table passes value to the CPUFREQ_ADJUST callback function. */
 #define NOTIFY_INVALID NULL
@@ -240,28 +241,18 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
 static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
				 unsigned long *state)
 {
-	int ret = -EINVAL, i = 0;
-	struct cpufreq_cooling_device *cpufreq_device;
-	struct cpumask *maskPtr;
+	struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
+	struct cpumask *maskPtr = &cpufreq_device->allowed_cpus;
 	unsigned int cpu;
 	struct cpufreq_frequency_table *table;
 	unsigned long count = 0;
+	int i = 0;
 
-	mutex_lock(&cooling_cpufreq_lock);
-	list_for_each_entry(cpufreq_device, &cooling_cpufreq_list, node) {
-		if (cpufreq_device && cpufreq_device->cool_dev == cdev)
-			break;
-	}
-	if (cpufreq_device == NULL)
-		goto return_get_max_state;
-
-	maskPtr = &cpufreq_device->allowed_cpus;
 	cpu = cpumask_any(maskPtr);
 	table = cpufreq_frequency_get_table(cpu);
 	if (!table) {
 		*state = 0;
-		ret = 0;
-		goto return_get_max_state;
+		return 0;
 	}
 
 	for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
@@ -272,12 +263,10 @@ static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
 
 	if (count > 0) {
 		*state = --count;
-		ret = 0;
+		return 0;
 	}
 
-return_get_max_state:
-	mutex_unlock(&cooling_cpufreq_lock);
-	return ret;
+	return -EINVAL;
 }
 
 /**
@@ -288,20 +277,10 @@ return_get_max_state:
 static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev,
				 unsigned long *state)
 {
-	int ret = -EINVAL;
-	struct cpufreq_cooling_device *cpufreq_device;
+	struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
 
-	mutex_lock(&cooling_cpufreq_lock);
-	list_for_each_entry(cpufreq_device, &cooling_cpufreq_list, node) {
-		if (cpufreq_device && cpufreq_device->cool_dev == cdev) {
-			*state = cpufreq_device->cpufreq_state;
-			ret = 0;
-			break;
-		}
-	}
-	mutex_unlock(&cooling_cpufreq_lock);
-
-	return ret;
+	*state = cpufreq_device->cpufreq_state;
+	return 0;
 }
 
 /**
@@ -312,22 +291,9 @@ static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev,
 static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
				 unsigned long state)
 {
-	int ret = -EINVAL;
-	struct cpufreq_cooling_device *cpufreq_device;
+	struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
 
-	mutex_lock(&cooling_cpufreq_lock);
-	list_for_each_entry(cpufreq_device, &cooling_cpufreq_list, node) {
-		if (cpufreq_device && cpufreq_device->cool_dev == cdev) {
-			ret = 0;
-			break;
-		}
-	}
-	if (!ret)
-		ret = cpufreq_apply_cooling(cpufreq_device, state);
-
-	mutex_unlock(&cooling_cpufreq_lock);
-
-	return ret;
+	return cpufreq_apply_cooling(cpufreq_device, state);
 }
 
 /* Bind cpufreq callbacks to thermal cooling device ops */
@@ -351,14 +317,11 @@ struct thermal_cooling_device *cpufreq_cooling_register(
 {
 	struct thermal_cooling_device *cool_dev;
 	struct cpufreq_cooling_device *cpufreq_dev = NULL;
-	unsigned int cpufreq_dev_count = 0, min = 0, max = 0;
+	unsigned int min = 0, max = 0;
 	char dev_name[THERMAL_NAME_LENGTH];
 	int ret = 0, i;
 	struct cpufreq_policy policy;
 
-	list_for_each_entry(cpufreq_dev, &cooling_cpufreq_list, node)
-		cpufreq_dev_count++;
-
 	/*Verify that all the clip cpus have same freq_min, freq_max limit*/
 	for_each_cpu(i, clip_cpus) {
 		/*continue if cpufreq policy not found and not return error*/
@@ -380,9 +343,6 @@ struct thermal_cooling_device *cpufreq_cooling_register(
 
 	cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus);
 
-	if (cpufreq_dev_count == 0)
-		mutex_init(&cooling_cpufreq_lock);
-
 	ret = get_idr(&cpufreq_idr, &cpufreq_dev->id);
 	if (ret) {
 		kfree(cpufreq_dev);
@@ -401,12 +361,12 @@ struct thermal_cooling_device *cpufreq_cooling_register(
 	cpufreq_dev->cool_dev = cool_dev;
 	cpufreq_dev->cpufreq_state = 0;
 	mutex_lock(&cooling_cpufreq_lock);
-	list_add_tail(&cpufreq_dev->node, &cooling_cpufreq_list);
 
 	/* Register the notifier for first cpufreq cooling device */
 	if (cpufreq_dev_count == 0)
 		cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
					  CPUFREQ_POLICY_NOTIFIER);
+	cpufreq_dev_count++;
 
 	mutex_unlock(&cooling_cpufreq_lock);
 	return cool_dev;
@@ -419,33 +379,20 @@ EXPORT_SYMBOL(cpufreq_cooling_register);
  */
 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
 {
-	struct cpufreq_cooling_device *cpufreq_dev = NULL;
-	unsigned int cpufreq_dev_count = 0;
+	struct cpufreq_cooling_device *cpufreq_dev = cdev->devdata;
 
 	mutex_lock(&cooling_cpufreq_lock);
-	list_for_each_entry(cpufreq_dev, &cooling_cpufreq_list, node) {
-		if (cpufreq_dev && cpufreq_dev->cool_dev == cdev)
-			break;
-		cpufreq_dev_count++;
-	}
-
-	if (!cpufreq_dev || cpufreq_dev->cool_dev != cdev) {
-		mutex_unlock(&cooling_cpufreq_lock);
-		return;
-	}
-
-	list_del(&cpufreq_dev->node);
+	cpufreq_dev_count--;
 
 	/* Unregister the notifier for the last cpufreq cooling device */
-	if (cpufreq_dev_count == 1) {
+	if (cpufreq_dev_count == 0) {
 		cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
					    CPUFREQ_POLICY_NOTIFIER);
 	}
 	mutex_unlock(&cooling_cpufreq_lock);
+
 	thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
 	release_idr(&cpufreq_idr, cpufreq_dev->id);
-	if (cpufreq_dev_count == 1)
-		mutex_destroy(&cooling_cpufreq_lock);
 	kfree(cpufreq_dev);
 }
 EXPORT_SYMBOL(cpufreq_cooling_unregister);
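
Note (not part of the patch itself): the simplification above relies on the cooling
device's private data pointer being filled in at registration time, so the callbacks
can read cdev->devdata instead of walking a global list under a lock. Below is a
minimal, self-contained sketch of that pattern using the standard
thermal_cooling_device_register() API; the my_cooling_state structure, the
"my-cooling" name, and the my_* functions are hypothetical and stand in for the
cpufreq cooling device's own state and ops, they are not taken from this driver.

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/thermal.h>

/* Hypothetical per-device state; stands in for struct cpufreq_cooling_device. */
struct my_cooling_state {
	unsigned long cur_state;
};

static int my_get_max_state(struct thermal_cooling_device *cdev,
			    unsigned long *state)
{
	*state = 4;	/* arbitrary fixed number of throttle states for the sketch */
	return 0;
}

static int my_get_cur_state(struct thermal_cooling_device *cdev,
			    unsigned long *state)
{
	/* devdata was supplied at registration, so no global list walk is needed. */
	struct my_cooling_state *st = cdev->devdata;

	*state = st->cur_state;
	return 0;
}

static int my_set_cur_state(struct thermal_cooling_device *cdev,
			    unsigned long state)
{
	struct my_cooling_state *st = cdev->devdata;

	st->cur_state = state;
	return 0;
}

static const struct thermal_cooling_device_ops my_cooling_ops = {
	.get_max_state = my_get_max_state,
	.get_cur_state = my_get_cur_state,
	.set_cur_state = my_set_cur_state,
};

static struct thermal_cooling_device *my_cooling_register(void)
{
	struct my_cooling_state *st = kzalloc(sizeof(*st), GFP_KERNEL);

	if (!st)
		return ERR_PTR(-ENOMEM);

	/* The second argument becomes cdev->devdata for the callbacks above. */
	return thermal_cooling_device_register("my-cooling", st, &my_cooling_ops);
}

With the private data carried by the cooling device, the remaining global state in
the patch is just the statically initialized mutex (DEFINE_MUTEX) and the
cpufreq_dev_count counter that gates registering and unregistering the cpufreq
policy notifier for the first and last device.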