@@ -75,19 +75,6 @@ static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
 					start_latency_ns, "start");
 }
 
-static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
-{
-	return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
-					save_state_latency_ns, "state save");
-}
-
-static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
-{
-	return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
-					restore_state_latency_ns,
-					"state restore");
-}
-
 static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
 {
 	bool ret = false;
@@ -139,6 +126,19 @@ static void genpd_set_active(struct generic_pm_domain *genpd)
 	genpd->status = GPD_STATE_ACTIVE;
 }
 
+static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
+{
+	s64 usecs64;
+
+	if (!genpd->cpu_data)
+		return;
+
+	usecs64 = genpd->power_on_latency_ns;
+	do_div(usecs64, NSEC_PER_USEC);
+	usecs64 += genpd->cpu_data->saved_exit_latency;
+	genpd->cpu_data->idle_state->exit_latency = usecs64;
+}
+
 /**
  * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
  * @genpd: PM domain to power up.
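The helper added above feeds the domain's measured power-on latency into the
exit latency cpuidle sees for the coupled idle state, converting from
nanoseconds to the microseconds cpuidle uses. A worked example with made-up
numbers, not taken from the patch: a 2,500,000 ns power-on latency plus a
saved exit latency of 100 us gives the state a 2600 us exit latency.

	/* Illustrative arithmetic only; mirrors genpd_recalc_cpu_exit_latency(). */
	s64 usecs64 = 2500000;			/* genpd->power_on_latency_ns */
	do_div(usecs64, NSEC_PER_USEC);		/* usecs64 is now 2500 */
	usecs64 += 100;				/* cpu_data->saved_exit_latency, in us */
	/* idle_state->exit_latency becomes 2600 (us) */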
@@ -146,7 +146,7 @@ static void genpd_set_active(struct generic_pm_domain *genpd)
  * Restore power to @genpd and all of its masters so that it is possible to
  * resume a device belonging to it.
  */
-int __pm_genpd_poweron(struct generic_pm_domain *genpd)
+static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
 	__releases(&genpd->lock) __acquires(&genpd->lock)
 {
 	struct gpd_link *link;
@@ -176,6 +176,13 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd)
 		return 0;
 	}
 
+	if (genpd->cpu_data) {
+		cpuidle_pause_and_lock();
+		genpd->cpu_data->idle_state->disabled = true;
+		cpuidle_resume_and_unlock();
+		goto out;
+	}
+
 	/*
 	 * The list is guaranteed not to change while the loop below is being
 	 * executed, unless one of the masters' .power_on() callbacks fiddles
@@ -215,6 +222,7 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd)
 		if (elapsed_ns > genpd->power_on_latency_ns) {
 			genpd->power_on_latency_ns = elapsed_ns;
 			genpd->max_off_time_changed = true;
+			genpd_recalc_cpu_exit_latency(genpd);
 			if (genpd->name)
 				pr_warning("%s: Power-on latency exceeded, "
 					"new value %lld ns\n", genpd->name,
@@ -222,6 +230,7 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd)
 		}
 	}
 
+ out:
 	genpd_set_active(genpd);
 
 	return 0;
@@ -251,6 +260,19 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd)
 
 #ifdef CONFIG_PM_RUNTIME
 
+static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+	return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
+					save_state_latency_ns, "state save");
+}
+
+static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+	return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
+					restore_state_latency_ns,
+					"state restore");
+}
+
 static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
 				     unsigned long val, void *ptr)
 {
@@ -275,7 +297,7 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
 
 	pdd = dev->power.subsys_data ?
 			dev->power.subsys_data->domain_data : NULL;
-	if (pdd) {
+	if (pdd && pdd->dev) {
 		to_gpd_data(pdd)->td.constraint_changed = true;
 		genpd = dev_to_genpd(dev);
 	} else {
@@ -339,19 +361,16 @@ static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
 {
 	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
 	struct device *dev = pdd->dev;
+	bool need_restore = gpd_data->need_restore;
 
-	if (!gpd_data->need_restore)
-		return;
-
+	gpd_data->need_restore = false;
 	mutex_unlock(&genpd->lock);
 
 	genpd_start_dev(genpd, dev);
-	genpd_restore_dev(genpd, dev);
-	genpd_stop_dev(genpd, dev);
+	if (need_restore)
+		genpd_restore_dev(genpd, dev);
 
 	mutex_lock(&genpd->lock);
-
-	gpd_data->need_restore = false;
 }
 
 /**
@@ -458,6 +477,21 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
 		}
 	}
 
+	if (genpd->cpu_data) {
+		/*
+		 * If cpu_data is set, cpuidle should turn the domain off when
+		 * the CPU in it is idle. In that case we don't decrement the
+		 * subdomain counts of the master domains, so that power is not
+		 * removed from the current domain prematurely as a result of
+		 * cutting off the masters' power.
+		 */
+		genpd->status = GPD_STATE_POWER_OFF;
+		cpuidle_pause_and_lock();
+		genpd->cpu_data->idle_state->disabled = false;
+		cpuidle_resume_and_unlock();
+		goto out;
+	}
+
 	if (genpd->power_off) {
 		ktime_t time_start;
 		s64 elapsed_ns;
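Together with the poweron hunk earlier, this is the full handshake between
genpd and cpuidle for a CPU domain: __pm_genpd_poweron() disables the coupled
idle state so cpuidle cannot power the domain down while it is in use, and
pm_genpd_poweroff() only marks the domain off and re-enables the state,
leaving the actual power removal to the idle path. A condensed sketch of the
pairing (illustrative, not code from the patch):

	/* poweron: keep cpuidle away from the domain while devices run */
	cpuidle_pause_and_lock();
	genpd->cpu_data->idle_state->disabled = true;
	cpuidle_resume_and_unlock();

	/* poweroff: let cpuidle cut power when the CPU next idles */
	genpd->status = GPD_STATE_POWER_OFF;
	cpuidle_pause_and_lock();
	genpd->cpu_data->idle_state->disabled = false;
	cpuidle_resume_and_unlock();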
@@ -595,7 +629,7 @@ static int pm_genpd_runtime_resume(struct device *dev)
 
 	/* If power.irq_safe, the PM domain is never powered off. */
 	if (dev->power.irq_safe)
-		goto out;
+		return genpd_start_dev(genpd, dev);
 
 	mutex_lock(&genpd->lock);
 	ret = __pm_genpd_poweron(genpd);
@@ -628,9 +662,6 @@ static int pm_genpd_runtime_resume(struct device *dev)
 	wake_up_all(&genpd->status_wait_queue);
 	mutex_unlock(&genpd->lock);
 
- out:
-	genpd_start_dev(genpd, dev);
-
 	return 0;
 }
 
@@ -1235,6 +1266,27 @@ static void pm_genpd_complete(struct device *dev)
 
 #endif /* CONFIG_PM_SLEEP */
 
+static struct generic_pm_domain_data *__pm_genpd_alloc_dev_data(struct device *dev)
+{
+	struct generic_pm_domain_data *gpd_data;
+
+	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
+	if (!gpd_data)
+		return NULL;
+
+	mutex_init(&gpd_data->lock);
+	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
+	dev_pm_qos_add_notifier(dev, &gpd_data->nb);
+	return gpd_data;
+}
+
+static void __pm_genpd_free_dev_data(struct device *dev,
+				     struct generic_pm_domain_data *gpd_data)
+{
+	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
+	kfree(gpd_data);
+}
+
 /**
  * __pm_genpd_add_device - Add a device to an I/O PM domain.
  * @genpd: PM domain to add the device to.
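These two helpers factor out the allocation/registration pattern that
__pm_genpd_add_device() used to open-code, keeping the PM QoS notifier's
lifetime tied to the data's. A minimal usage sketch, assuming the caller
holds a valid dev:

	struct generic_pm_domain_data *gpd_data;

	gpd_data = __pm_genpd_alloc_dev_data(dev);	/* also adds the QoS notifier */
	if (!gpd_data)
		return -ENOMEM;
	/* hand gpd_data to the domain, or on failure: */
	__pm_genpd_free_dev_data(dev, gpd_data);	/* removes the notifier, frees */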
@@ -1244,7 +1296,7 @@ static void pm_genpd_complete(struct device *dev)
 int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
 			  struct gpd_timing_data *td)
 {
-	struct generic_pm_domain_data *gpd_data;
+	struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL;
 	struct pm_domain_data *pdd;
 	int ret = 0;
 
@@ -1253,14 +1305,10 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
 		return -EINVAL;
 
-	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
-	if (!gpd_data)
+	gpd_data_new = __pm_genpd_alloc_dev_data(dev);
+	if (!gpd_data_new)
 		return -ENOMEM;
 
-	mutex_init(&gpd_data->lock);
-	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
-	dev_pm_qos_add_notifier(dev, &gpd_data->nb);
-
 	genpd_acquire_lock(genpd);
 
 	if (genpd->prepared_count > 0) {
@@ -1274,35 +1322,42 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
 		goto out;
 	}
 
+	ret = dev_pm_get_subsys_data(dev);
+	if (ret)
+		goto out;
+
 	genpd->device_count++;
 	genpd->max_off_time_changed = true;
 
-	dev_pm_get_subsys_data(dev);
-
-	mutex_lock(&gpd_data->lock);
 	spin_lock_irq(&dev->power.lock);
+
 	dev->pm_domain = &genpd->domain;
-	dev->power.subsys_data->domain_data = &gpd_data->base;
-	gpd_data->base.dev = dev;
-	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
-	gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF;
+	if (dev->power.subsys_data->domain_data) {
+		gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
+	} else {
+		gpd_data = gpd_data_new;
+		dev->power.subsys_data->domain_data = &gpd_data->base;
+	}
+	gpd_data->refcount++;
 	if (td)
 		gpd_data->td = *td;
 
+	spin_unlock_irq(&dev->power.lock);
+
+	mutex_lock(&gpd_data->lock);
+	gpd_data->base.dev = dev;
+	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
+	gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF;
 	gpd_data->td.constraint_changed = true;
 	gpd_data->td.effective_constraint_ns = -1;
-	spin_unlock_irq(&dev->power.lock);
 	mutex_unlock(&gpd_data->lock);
 
-	genpd_release_lock(genpd);
-
-	return 0;
-
  out:
 	genpd_release_lock(genpd);
 
-	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
-	kfree(gpd_data);
+	if (gpd_data != gpd_data_new)
+		__pm_genpd_free_dev_data(dev, gpd_data_new);
+
 	return ret;
 }
 
@@ -1348,6 +1403,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
 {
 	struct generic_pm_domain_data *gpd_data;
 	struct pm_domain_data *pdd;
+	bool remove = false;
 	int ret = 0;
 
 	dev_dbg(dev, "%s()\n", __func__);
@@ -1368,22 +1424,28 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
 	genpd->max_off_time_changed = true;
 
 	spin_lock_irq(&dev->power.lock);
+
 	dev->pm_domain = NULL;
 	pdd = dev->power.subsys_data->domain_data;
 	list_del_init(&pdd->list_node);
-	dev->power.subsys_data->domain_data = NULL;
+	gpd_data = to_gpd_data(pdd);
+	if (--gpd_data->refcount == 0) {
+		dev->power.subsys_data->domain_data = NULL;
+		remove = true;
+	}
+
 	spin_unlock_irq(&dev->power.lock);
 
-	gpd_data = to_gpd_data(pdd);
 	mutex_lock(&gpd_data->lock);
 	pdd->dev = NULL;
 	mutex_unlock(&gpd_data->lock);
 
 	genpd_release_lock(genpd);
 
-	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
-	kfree(gpd_data);
 	dev_pm_put_subsys_data(dev);
+	if (remove)
+		__pm_genpd_free_dev_data(dev, gpd_data);
+
 	return 0;
 
  out:
@@ -1541,33 +1603,52 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
  * @dev: Device to add the callbacks to.
  * @ops: Set of callbacks to add.
  * @td: Timing data to add to the device along with the callbacks (optional).
+ *
+ * Every call to this routine should be balanced with a call to
+ * __pm_genpd_remove_callbacks() and they must not be nested.
  */
 int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops,
 			   struct gpd_timing_data *td)
 {
-	struct pm_domain_data *pdd;
+	struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL;
 	int ret = 0;
 
-	if (!(dev && dev->power.subsys_data && ops))
+	if (!(dev && ops))
 		return -EINVAL;
 
+	gpd_data_new = __pm_genpd_alloc_dev_data(dev);
+	if (!gpd_data_new)
+		return -ENOMEM;
+
 	pm_runtime_disable(dev);
 	device_pm_lock();
 
-	pdd = dev->power.subsys_data->domain_data;
-	if (pdd) {
-		struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+	ret = dev_pm_get_subsys_data(dev);
+	if (ret)
+		goto out;
 
-		gpd_data->ops = *ops;
-		if (td)
-			gpd_data->td = *td;
+	spin_lock_irq(&dev->power.lock);
+
+	if (dev->power.subsys_data->domain_data) {
+		gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
 	} else {
-		ret = -EINVAL;
+		gpd_data = gpd_data_new;
+		dev->power.subsys_data->domain_data = &gpd_data->base;
 	}
+	gpd_data->refcount++;
+	gpd_data->ops = *ops;
+	if (td)
+		gpd_data->td = *td;
+
+	spin_unlock_irq(&dev->power.lock);
 
+ out:
 	device_pm_unlock();
 	pm_runtime_enable(dev);
 
+	if (gpd_data != gpd_data_new)
+		__pm_genpd_free_dev_data(dev, gpd_data_new);
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
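With the reference counter in place, one generic_pm_domain_data can be shared
by __pm_genpd_add_device() and pm_genpd_add_callbacks(); the data and its QoS
notifier are only torn down when the last user drops it. An assumed call
sequence showing the counting (my_ops is a hypothetical gpd_dev_ops):

	__pm_genpd_add_device(genpd, dev, NULL);	/* refcount 0 -> 1 */
	pm_genpd_add_callbacks(dev, &my_ops, NULL);	/* refcount 1 -> 2 */

	__pm_genpd_remove_callbacks(dev, true);		/* refcount 2 -> 1 */
	pm_genpd_remove_device(genpd, dev);		/* refcount 1 -> 0, data freed */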
@@ -1576,10 +1657,13 @@ EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
  * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device.
  * @dev: Device to remove the callbacks from.
  * @clear_td: If set, clear the device's timing data too.
+ *
+ * This routine can only be called after pm_genpd_add_callbacks().
  */
 int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
 {
-	struct pm_domain_data *pdd;
+	struct generic_pm_domain_data *gpd_data = NULL;
+	bool remove = false;
 	int ret = 0;
 
 	if (!(dev && dev->power.subsys_data))
@@ -1588,24 +1672,118 @@ int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
 	pm_runtime_disable(dev);
 	device_pm_lock();
 
-	pdd = dev->power.subsys_data->domain_data;
-	if (pdd) {
-		struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+	spin_lock_irq(&dev->power.lock);
 
-		gpd_data->ops = (struct gpd_dev_ops){ 0 };
+	if (dev->power.subsys_data->domain_data) {
+		gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
+		gpd_data->ops = (struct gpd_dev_ops){ NULL };
 		if (clear_td)
 			gpd_data->td = (struct gpd_timing_data){ 0 };
+
+		if (--gpd_data->refcount == 0) {
+			dev->power.subsys_data->domain_data = NULL;
+			remove = true;
+		}
 	} else {
 		ret = -EINVAL;
 	}
 
+	spin_unlock_irq(&dev->power.lock);
+
 	device_pm_unlock();
 	pm_runtime_enable(dev);
 
-	return ret;
+	if (ret)
+		return ret;
+
+	dev_pm_put_subsys_data(dev);
+	if (remove)
+		__pm_genpd_free_dev_data(dev, gpd_data);
+
+	return 0;
 }
 EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
 
+int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
+{
+	struct cpuidle_driver *cpuidle_drv;
+	struct gpd_cpu_data *cpu_data;
+	struct cpuidle_state *idle_state;
+	int ret = 0;
+
+	if (IS_ERR_OR_NULL(genpd) || state < 0)
+		return -EINVAL;
+
+	genpd_acquire_lock(genpd);
+
+	if (genpd->cpu_data) {
+		ret = -EEXIST;
+		goto out;
+	}
+	cpu_data = kzalloc(sizeof(*cpu_data), GFP_KERNEL);
+	if (!cpu_data) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	cpuidle_drv = cpuidle_driver_ref();
+	if (!cpuidle_drv) {
+		ret = -ENODEV;
+		goto out;
+	}
+	if (cpuidle_drv->state_count <= state) {
+		ret = -EINVAL;
+		goto err;
+	}
+	idle_state = &cpuidle_drv->states[state];
+	if (!idle_state->disabled) {
+		ret = -EAGAIN;
+		goto err;
+	}
+	cpu_data->idle_state = idle_state;
+	cpu_data->saved_exit_latency = idle_state->exit_latency;
+	genpd->cpu_data = cpu_data;
+	genpd_recalc_cpu_exit_latency(genpd);
+
+ out:
+	genpd_release_lock(genpd);
+	return ret;
+
+ err:
+	cpuidle_driver_unref();
+	goto out;
+}
+
+int genpd_detach_cpuidle(struct generic_pm_domain *genpd)
+{
+	struct gpd_cpu_data *cpu_data;
+	struct cpuidle_state *idle_state;
+	int ret = 0;
+
+	if (IS_ERR_OR_NULL(genpd))
+		return -EINVAL;
+
+	genpd_acquire_lock(genpd);
+
+	cpu_data = genpd->cpu_data;
+	if (!cpu_data) {
+		ret = -ENODEV;
+		goto out;
+	}
+	idle_state = cpu_data->idle_state;
+	if (!idle_state->disabled) {
+		ret = -EAGAIN;
+		goto out;
+	}
+	idle_state->exit_latency = cpu_data->saved_exit_latency;
+	cpuidle_driver_unref();
+	genpd->cpu_data = NULL;
+	kfree(cpu_data);
+
+ out:
+	genpd_release_lock(genpd);
+	return ret;
+}
+
 /* Default device callbacks for generic PM domains. */
 
 /**
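genpd_attach_cpuidle() couples a domain to one state of the registered
cpuidle driver; the state has to be disabled at attach time, and its exit
latency is rewritten to account for the domain's power-on cost. A hedged
sketch of platform setup code (the domain and the state index are made-up
examples, not from the patch):

	static int __init my_platform_couple_domain(void)
	{
		int ret;

		/* assumes state 1 of the cpuidle driver was registered disabled */
		ret = genpd_attach_cpuidle(&my_cpu_domain, 1);
		if (ret)
			pr_err("genpd/cpuidle coupling failed: %d\n", ret);
		return ret;
	}

One observation: on the error paths taken after the kzalloc() succeeds
(state_count check and the disabled check), cpu_data is not freed, so a late
attach failure leaks the small allocation.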
@@ -1615,16 +1793,24 @@ EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
 static int pm_genpd_default_save_state(struct device *dev)
 {
 	int (*cb)(struct device *__dev);
-	struct device_driver *drv = dev->driver;
 
 	cb = dev_gpd_data(dev)->ops.save_state;
 	if (cb)
 		return cb(dev);
 
-	if (drv && drv->pm && drv->pm->runtime_suspend)
-		return drv->pm->runtime_suspend(dev);
+	if (dev->type && dev->type->pm)
+		cb = dev->type->pm->runtime_suspend;
+	else if (dev->class && dev->class->pm)
+		cb = dev->class->pm->runtime_suspend;
+	else if (dev->bus && dev->bus->pm)
+		cb = dev->bus->pm->runtime_suspend;
+	else
+		cb = NULL;
 
-	return 0;
+	if (!cb && dev->driver && dev->driver->pm)
+		cb = dev->driver->pm->runtime_suspend;
+
+	return cb ? cb(dev) : 0;
 }
 
 /**
@@ -1634,16 +1820,24 @@ static int pm_genpd_default_save_state(struct device *dev)
 static int pm_genpd_default_restore_state(struct device *dev)
 {
 	int (*cb)(struct device *__dev);
-	struct device_driver *drv = dev->driver;
 
 	cb = dev_gpd_data(dev)->ops.restore_state;
 	if (cb)
 		return cb(dev);
 
-	if (drv && drv->pm && drv->pm->runtime_resume)
-		return drv->pm->runtime_resume(dev);
+	if (dev->type && dev->type->pm)
+		cb = dev->type->pm->runtime_resume;
+	else if (dev->class && dev->class->pm)
+		cb = dev->class->pm->runtime_resume;
+	else if (dev->bus && dev->bus->pm)
+		cb = dev->bus->pm->runtime_resume;
+	else
+		cb = NULL;
 
-	return 0;
+	if (!cb && dev->driver && dev->driver->pm)
+		cb = dev->driver->pm->runtime_resume;
+
+	return cb ? cb(dev) : 0;
 }
 
 #ifdef CONFIG_PM_SLEEP
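The rewritten defaults follow the PM core's callback precedence: device type
first, then class, then bus, and only if none of those supplies a usable
callback does the driver's own get used. A hypothetical helper (not in the
patch) showing the same selection for the suspend direction:

	/* Illustration only: mirrors the lookup order used above. */
	static int (*pick_runtime_suspend(struct device *dev))(struct device *)
	{
		const struct dev_pm_ops *pm = NULL;

		if (dev->type && dev->type->pm)
			pm = dev->type->pm;
		else if (dev->class && dev->class->pm)
			pm = dev->class->pm;
		else if (dev->bus && dev->bus->pm)
			pm = dev->bus->pm;

		if (pm && pm->runtime_suspend)
			return pm->runtime_suspend;
		if (dev->driver && dev->driver->pm)
			return dev->driver->pm->runtime_suspend;
		return NULL;
	}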