@@ -29,10 +29,20 @@ static struct generic_pm_domain *dev_to_genpd(struct device *dev)
 	return pd_to_genpd(dev->pm_domain);
 }
 
-static void genpd_sd_counter_dec(struct generic_pm_domain *genpd)
+static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
 {
-	if (!WARN_ON(genpd->sd_count == 0))
-		genpd->sd_count--;
+	bool ret = false;
+
+	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
+		ret = !!atomic_dec_and_test(&genpd->sd_count);
+
+	return ret;
+}
+
+static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
+{
+	atomic_inc(&genpd->sd_count);
+	smp_mb__after_atomic_inc();
+}
 
 static void genpd_acquire_lock(struct generic_pm_domain *genpd)
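The subdomain counter is now manipulated without the master's lock held, so the helpers lean on atomic primitives: the decrement reports whether the last reference just went away, and the barrier after the increment orders it against subsequent loads on other CPUs. A minimal userspace analog of that pattern, assuming C11 <stdatomic.h> (the names below are illustrative, not kernel API):

/*
 * Userspace sketch of the sd_count handling above.  The kernel uses
 * atomic_t with atomic_inc()/atomic_dec_and_test(); seq_cst atomics
 * give at least the ordering smp_mb__after_atomic_inc() provides.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int sd_count;

static void sd_counter_inc(void)
{
	atomic_fetch_add(&sd_count, 1);
}

static bool sd_counter_dec(void)
{
	/* True only for the 1 -> 0 transition, like atomic_dec_and_test(). */
	return atomic_fetch_sub(&sd_count, 1) == 1;
}

int main(void)
{
	sd_counter_inc();
	sd_counter_inc();
	printf("%d\n", sd_counter_dec());	/* 0: not the last reference */
	printf("%d\n", sd_counter_dec());	/* 1: last reference dropped */
	return 0;
}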
@@ -71,81 +81,119 @@ static void genpd_set_active(struct generic_pm_domain *genpd)
 }
 
 /**
- * pm_genpd_poweron - Restore power to a given PM domain and its parents.
+ * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
  * @genpd: PM domain to power up.
  *
- * Restore power to @genpd and all of its parents so that it is possible to
+ * Restore power to @genpd and all of its masters so that it is possible to
  * resume a device belonging to it.
  */
-int pm_genpd_poweron(struct generic_pm_domain *genpd)
+int __pm_genpd_poweron(struct generic_pm_domain *genpd)
+	__releases(&genpd->lock) __acquires(&genpd->lock)
 {
-	struct generic_pm_domain *parent = genpd->parent;
+	struct gpd_link *link;
+	DEFINE_WAIT(wait);
 	int ret = 0;
 
- start:
-	if (parent) {
-		genpd_acquire_lock(parent);
-		mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
-	} else {
+	/* If the domain's master is being waited for, we have to wait too. */
+	for (;;) {
+		prepare_to_wait(&genpd->status_wait_queue, &wait,
+				TASK_UNINTERRUPTIBLE);
+		if (genpd->status != GPD_STATE_WAIT_MASTER)
+			break;
+		mutex_unlock(&genpd->lock);
+
+		schedule();
+
 		mutex_lock(&genpd->lock);
 	}
+	finish_wait(&genpd->status_wait_queue, &wait);
 
 	if (genpd->status == GPD_STATE_ACTIVE
 	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))
-		goto out;
+		return 0;
 
 	if (genpd->status != GPD_STATE_POWER_OFF) {
 		genpd_set_active(genpd);
-		goto out;
+		return 0;
 	}
 
-	if (parent && parent->status != GPD_STATE_ACTIVE) {
+	/*
+	 * The list is guaranteed not to change while the loop below is being
+	 * executed, unless one of the masters' .power_on() callbacks fiddles
+	 * with it.
+	 */
+	list_for_each_entry(link, &genpd->slave_links, slave_node) {
+		genpd_sd_counter_inc(link->master);
+		genpd->status = GPD_STATE_WAIT_MASTER;
+
 		mutex_unlock(&genpd->lock);
-		genpd_release_lock(parent);
 
-		ret = pm_genpd_poweron(parent);
-		if (ret)
-			return ret;
+		ret = pm_genpd_poweron(link->master);
 
-		goto start;
+		mutex_lock(&genpd->lock);
+
+		/*
+		 * The "wait for parent" status is guaranteed not to change
+		 * while the master is powering on.
+		 */
+		genpd->status = GPD_STATE_POWER_OFF;
+		wake_up_all(&genpd->status_wait_queue);
+		if (ret) {
+			genpd_sd_counter_dec(link->master);
+			goto err;
+		}
 	}
 
 	if (genpd->power_on) {
 		ret = genpd->power_on(genpd);
 		if (ret)
-			goto out;
+			goto err;
 	}
 
 	genpd_set_active(genpd);
-	if (parent)
-		parent->sd_count++;
 
- out:
-	mutex_unlock(&genpd->lock);
-	if (parent)
-		genpd_release_lock(parent);
+	return 0;
+
+ err:
+	list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
+		genpd_sd_counter_dec(link->master);
 
 	return ret;
 }
 
+/**
+ * pm_genpd_poweron - Restore power to a given PM domain and its masters.
+ * @genpd: PM domain to power up.
+ */
+int pm_genpd_poweron(struct generic_pm_domain *genpd)
+{
+	int ret;
+
+	mutex_lock(&genpd->lock);
+	ret = __pm_genpd_poweron(genpd);
+	mutex_unlock(&genpd->lock);
+	return ret;
+}
+
 #endif /* CONFIG_PM */
 
 #ifdef CONFIG_PM_RUNTIME
 
 /**
  * __pm_genpd_save_device - Save the pre-suspend state of a device.
- * @dle: Device list entry of the device to save the state of.
+ * @pdd: Domain data of the device to save the state of.
  * @genpd: PM domain the device belongs to.
  */
-static int __pm_genpd_save_device(struct dev_list_entry *dle,
+static int __pm_genpd_save_device(struct pm_domain_data *pdd,
 				  struct generic_pm_domain *genpd)
 	__releases(&genpd->lock) __acquires(&genpd->lock)
 {
-	struct device *dev = dle->dev;
+	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+	struct device *dev = pdd->dev;
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
 
-	if (dle->need_restore)
+	if (gpd_data->need_restore)
 		return 0;
 
 	mutex_unlock(&genpd->lock);
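The open-coded loop at the top of __pm_genpd_poweron() is the classic "re-check the condition with the lock held" wait pattern: drop the mutex, sleep, retake the mutex, test again. A rough userspace analog using POSIX condition variables (a sketch with illustrative names, standing in for the kernel's wait queue plus prepare_to_wait()/schedule()):

/* Userspace sketch of the GPD_STATE_WAIT_MASTER handshake. */
#include <pthread.h>

enum gpd_state { GPD_ACTIVE, GPD_WAIT_MASTER, GPD_POWER_OFF };

struct domain {
	pthread_mutex_t lock;
	pthread_cond_t status_wait;	/* plays the role of status_wait_queue */
	enum gpd_state status;
};

static void poweron_locked(struct domain *d)
{
	/* Caller holds d->lock, mirroring __pm_genpd_poweron(). */
	while (d->status == GPD_WAIT_MASTER)
		pthread_cond_wait(&d->status_wait, &d->lock); /* drops and retakes lock */

	/* ... power up the masters here, then: */
	d->status = GPD_ACTIVE;
	pthread_cond_broadcast(&d->status_wait);	/* like wake_up_all() */
}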
@@ -163,24 +211,25 @@ static int __pm_genpd_save_device(struct dev_list_entry *dle,
 	mutex_lock(&genpd->lock);
 
 	if (!ret)
-		dle->need_restore = true;
+		gpd_data->need_restore = true;
 
 	return ret;
 }
 
 /**
  * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
- * @dle: Device list entry of the device to restore the state of.
+ * @pdd: Domain data of the device to restore the state of.
  * @genpd: PM domain the device belongs to.
  */
-static void __pm_genpd_restore_device(struct dev_list_entry *dle,
+static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
 				      struct generic_pm_domain *genpd)
 	__releases(&genpd->lock) __acquires(&genpd->lock)
 {
-	struct device *dev = dle->dev;
+	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+	struct device *dev = pdd->dev;
 	struct device_driver *drv = dev->driver;
 
-	if (!dle->need_restore)
+	if (!gpd_data->need_restore)
 		return;
 
 	mutex_unlock(&genpd->lock);
@@ -197,7 +246,7 @@ static void __pm_genpd_restore_device(struct dev_list_entry *dle,
 	mutex_lock(&genpd->lock);
 
-	dle->need_restore = false;
+	gpd_data->need_restore = false;
 }
 
 /**
 * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
@@ -211,7 +260,8 @@ static void __pm_genpd_restore_device(struct dev_list_entry *dle,
  */
 static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
 {
-	return genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
+	return genpd->status == GPD_STATE_WAIT_MASTER
+	    || genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
 }
 
 /**
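The save/restore pair above tracks per-device state with a single need_restore flag, so a device is never saved twice and never restored without a prior save. A toy model of that handshake (all names and the "context" payload are made up for illustration):

#include <stdbool.h>

struct dev_state {
	bool need_restore;	/* set once a saved context is valid */
	int saved_context;	/* stand-in for driver-saved device state */
	int live_context;
};

static int save_state(struct dev_state *d)
{
	if (d->need_restore)	/* already saved, nothing to do */
		return 0;
	d->saved_context = d->live_context;
	d->need_restore = true;
	return 0;
}

static void restore_state(struct dev_state *d)
{
	if (!d->need_restore)	/* never saved, or already restored */
		return;
	d->live_context = d->saved_context;
	d->need_restore = false;
}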
@@ -238,8 +288,8 @@ void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
 static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
 	__releases(&genpd->lock) __acquires(&genpd->lock)
 {
-	struct generic_pm_domain *parent;
-	struct dev_list_entry *dle;
+	struct pm_domain_data *pdd;
+	struct gpd_link *link;
 	unsigned int not_suspended;
 	int ret = 0;
 
@@ -247,19 +297,22 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
 	/*
 	 * Do not try to power off the domain in the following situations:
 	 * (1) The domain is already in the "power off" state.
-	 * (2) System suspend is in progress.
+	 * (2) The domain is waiting for its master to power up.
 	 * (3) One of the domain's devices is being resumed right now.
+	 * (4) System suspend is in progress.
 	 */
-	if (genpd->status == GPD_STATE_POWER_OFF || genpd->prepared_count > 0
-	    || genpd->resume_count > 0)
+	if (genpd->status == GPD_STATE_POWER_OFF
+	    || genpd->status == GPD_STATE_WAIT_MASTER
+	    || genpd->resume_count > 0 || genpd->prepared_count > 0)
 		return 0;
 
-	if (genpd->sd_count > 0)
+	if (atomic_read(&genpd->sd_count) > 0)
 		return -EBUSY;
 
 	not_suspended = 0;
-	list_for_each_entry(dle, &genpd->dev_list, node)
-		if (dle->dev->driver && !pm_runtime_suspended(dle->dev))
+	list_for_each_entry(pdd, &genpd->dev_list, list_node)
+		if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
+		    || pdd->dev->power.irq_safe))
 			not_suspended++;
 
 	if (not_suspended > genpd->in_progress)
@@ -282,54 +335,50 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
 	genpd->status = GPD_STATE_BUSY;
 	genpd->poweroff_task = current;
 
-	list_for_each_entry_reverse(dle, &genpd->dev_list, node) {
-		ret = __pm_genpd_save_device(dle, genpd);
+	list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
+		ret = atomic_read(&genpd->sd_count) == 0 ?
+			__pm_genpd_save_device(pdd, genpd) : -EBUSY;
+
+		if (genpd_abort_poweroff(genpd))
+			goto out;
+
 		if (ret) {
 			genpd_set_active(genpd);
 			goto out;
 		}
 
-		if (genpd_abort_poweroff(genpd))
-			goto out;
-
 		if (genpd->status == GPD_STATE_REPEAT) {
 			genpd->poweroff_task = NULL;
 			goto start;
 		}
 	}
 
-	parent = genpd->parent;
-	if (parent) {
-		mutex_unlock(&genpd->lock);
-
-		genpd_acquire_lock(parent);
-		mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
-
-		if (genpd_abort_poweroff(genpd)) {
-			genpd_release_lock(parent);
+	if (genpd->power_off) {
+		if (atomic_read(&genpd->sd_count) > 0) {
+			ret = -EBUSY;
 			goto out;
 		}
-	}
 
-	if (genpd->power_off) {
+		/*
+		 * If sd_count > 0 at this point, one of the subdomains hasn't
+		 * managed to call pm_genpd_poweron() for the master yet after
+		 * incrementing it.  In that case pm_genpd_poweron() will wait
+		 * for us to drop the lock, so we can call .power_off() and let
+		 * the pm_genpd_poweron() restore power for us (this shouldn't
+		 * happen very often).
+		 */
 		ret = genpd->power_off(genpd);
 		if (ret == -EBUSY) {
 			genpd_set_active(genpd);
-			if (parent)
-				genpd_release_lock(parent);
-
 			goto out;
 		}
 	}
 
 	genpd->status = GPD_STATE_POWER_OFF;
 
-	if (parent) {
-		genpd_sd_counter_dec(parent);
-		if (parent->sd_count == 0)
-			genpd_queue_power_off_work(parent);
-
-		genpd_release_lock(parent);
+	list_for_each_entry(link, &genpd->slave_links, slave_node) {
+		genpd_sd_counter_dec(link->master);
+		genpd_queue_power_off_work(link->master);
 	}
 
  out:
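Note the reordering in the save loop: the abort condition is now re-checked after every unit of work and before the result is acted on, so a concurrent resume or a newly powered-on subdomain cancels the power-off cleanly rather than being treated as a failure. The shape of that loop, as a sketch with invented names:

#include <stdbool.h>

enum { MAX_DEVS = 8 };

static int save_all(bool (*aborted)(void), int (*save_one)(int dev))
{
	int i, ret;

	for (i = 0; i < MAX_DEVS; i++) {
		ret = save_one(i);
		if (aborted())		/* someone needs the domain on again */
			return 0;	/* bail out without failing */
		if (ret)
			return ret;	/* real error: report it */
	}
	return 0;
}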
@@ -371,12 +420,21 @@ static int pm_genpd_runtime_suspend(struct device *dev)
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
+	might_sleep_if(!genpd->dev_irq_safe);
+
 	if (genpd->stop_device) {
 		int ret = genpd->stop_device(dev);
 		if (ret)
 			return ret;
 	}
 
+	/*
+	 * If power.irq_safe is set, this routine will be run with interrupts
+	 * off, so it can't use mutexes.
+	 */
+	if (dev->power.irq_safe)
+		return 0;
+
 	mutex_lock(&genpd->lock);
 	genpd->in_progress++;
 	pm_genpd_poweroff(genpd);
@@ -386,24 +444,6 @@ static int pm_genpd_runtime_suspend(struct device *dev)
 	return 0;
 }
 
-/**
- * __pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
- * @dev: Device to resume.
- * @genpd: PM domain the device belongs to.
- */
-static void __pm_genpd_runtime_resume(struct device *dev,
-				      struct generic_pm_domain *genpd)
-{
-	struct dev_list_entry *dle;
-
-	list_for_each_entry(dle, &genpd->dev_list, node) {
-		if (dle->dev == dev) {
-			__pm_genpd_restore_device(dle, genpd);
-			break;
-		}
-	}
-}
-
 /**
  * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
  * @dev: Device to resume.
@@ -424,11 +464,18 @@ static int pm_genpd_runtime_resume(struct device *dev)
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
-	ret = pm_genpd_poweron(genpd);
-	if (ret)
-		return ret;
+	might_sleep_if(!genpd->dev_irq_safe);
+
+	/* If power.irq_safe, the PM domain is never powered off. */
+	if (dev->power.irq_safe)
+		goto out;
 
 	mutex_lock(&genpd->lock);
+	ret = __pm_genpd_poweron(genpd);
+	if (ret) {
+		mutex_unlock(&genpd->lock);
+		return ret;
+	}
 	genpd->status = GPD_STATE_BUSY;
 	genpd->resume_count++;
 	for (;;) {
@@ -448,12 +495,13 @@ static int pm_genpd_runtime_resume(struct device *dev)
 		mutex_lock(&genpd->lock);
 	}
 	finish_wait(&genpd->status_wait_queue, &wait);
-	__pm_genpd_runtime_resume(dev, genpd);
+	__pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
 	genpd->resume_count--;
 	genpd_set_active(genpd);
 	wake_up_all(&genpd->status_wait_queue);
 	mutex_unlock(&genpd->lock);
 
+ out:
 	if (genpd->start_device)
 		genpd->start_device(dev);
 
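For devices with power.irq_safe set, the runtime callbacks may run with interrupts disabled, so both paths above bail out before ever touching the domain mutex and only run the stop_device()/start_device() clock operations. The guard shape, rendered as a userspace sketch (illustrative names, not the kernel API):

#include <pthread.h>
#include <stdbool.h>

struct dom {
	pthread_mutex_t lock;
	int in_progress;
};

static int runtime_suspend(struct dom *d, bool irq_safe)
{
	/* stop the device's clocks here; this part is safe in atomic context */

	if (irq_safe)
		return 0;	/* never touch the mutex with IRQs off */

	pthread_mutex_lock(&d->lock);
	d->in_progress++;
	/* ... try to power the whole domain off ... */
	d->in_progress--;
	pthread_mutex_unlock(&d->lock);
	return 0;
}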
@@ -478,8 +526,6 @@ void pm_genpd_poweroff_unused(void)
 #else
 
 static inline void genpd_power_off_work_fn(struct work_struct *work) {}
-static inline void __pm_genpd_runtime_resume(struct device *dev,
-					     struct generic_pm_domain *genpd) {}
 
 #define pm_genpd_runtime_suspend	NULL
 #define pm_genpd_runtime_resume	NULL
@@ -489,11 +535,11 @@ static inline void __pm_genpd_runtime_resume(struct device *dev,
 #ifdef CONFIG_PM_SLEEP
 
 /**
- * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its parents.
+ * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
  * @genpd: PM domain to power off, if possible.
  *
  * Check if the given PM domain can be powered off (during system suspend or
- * hibernation) and do that if so.  Also, in that case propagate to its parent.
+ * hibernation) and do that if so.  Also, in that case propagate to its masters.
  *
 * This function is only called in "noirq" stages of system power transitions,
 * so it need not acquire locks (all of the "noirq" callbacks are executed
@@ -501,21 +547,23 @@ static inline void __pm_genpd_runtime_resume(struct device *dev,
  */
 static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
 {
-	struct generic_pm_domain *parent = genpd->parent;
+	struct gpd_link *link;
 
 	if (genpd->status == GPD_STATE_POWER_OFF)
 		return;
 
-	if (genpd->suspended_count != genpd->device_count || genpd->sd_count > 0)
+	if (genpd->suspended_count != genpd->device_count
+	    || atomic_read(&genpd->sd_count) > 0)
 		return;
 
 	if (genpd->power_off)
 		genpd->power_off(genpd);
 
 	genpd->status = GPD_STATE_POWER_OFF;
-	if (parent) {
-		genpd_sd_counter_dec(parent);
-		pm_genpd_sync_poweroff(parent);
+
+	list_for_each_entry(link, &genpd->slave_links, slave_node) {
+		genpd_sd_counter_dec(link->master);
+		pm_genpd_sync_poweroff(link->master);
 	}
 }
 
@@ -1034,7 +1082,8 @@ static void pm_genpd_complete(struct device *dev)
  */
 int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
 {
-	struct dev_list_entry *dle;
+	struct generic_pm_domain_data *gpd_data;
+	struct pm_domain_data *pdd;
 	int ret = 0;
 
 	dev_dbg(dev, "%s()\n", __func__);
@@ -1054,26 +1103,26 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
 		goto out;
 	}
 
-	list_for_each_entry(dle, &genpd->dev_list, node)
-		if (dle->dev == dev) {
+	list_for_each_entry(pdd, &genpd->dev_list, list_node)
+		if (pdd->dev == dev) {
 			ret = -EINVAL;
 			goto out;
 		}
 
-	dle = kzalloc(sizeof(*dle), GFP_KERNEL);
-	if (!dle) {
+	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
+	if (!gpd_data) {
 		ret = -ENOMEM;
 		goto out;
 	}
 
-	dle->dev = dev;
-	dle->need_restore = false;
-	list_add_tail(&dle->node, &genpd->dev_list);
 	genpd->device_count++;
 
-	spin_lock_irq(&dev->power.lock);
 	dev->pm_domain = &genpd->domain;
-	spin_unlock_irq(&dev->power.lock);
+	dev_pm_get_subsys_data(dev);
+	dev->power.subsys_data->domain_data = &gpd_data->base;
+	gpd_data->base.dev = dev;
+	gpd_data->need_restore = false;
+	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
 
  out:
 	genpd_release_lock(genpd);
@@ -1089,7 +1138,7 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
 int pm_genpd_remove_device(struct generic_pm_domain *genpd,
 			   struct device *dev)
 {
-	struct dev_list_entry *dle;
+	struct pm_domain_data *pdd;
 	int ret = -EINVAL;
 
 	dev_dbg(dev, "%s()\n", __func__);
@@ -1104,17 +1153,17 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
 		goto out;
 	}
 
-	list_for_each_entry(dle, &genpd->dev_list, node) {
-		if (dle->dev != dev)
+	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+		if (pdd->dev != dev)
 			continue;
 
-		spin_lock_irq(&dev->power.lock);
+		list_del_init(&pdd->list_node);
+		pdd->dev = NULL;
+		dev_pm_put_subsys_data(dev);
 		dev->pm_domain = NULL;
-		spin_unlock_irq(&dev->power.lock);
+		kfree(to_gpd_data(pdd));
 
 		genpd->device_count--;
-		list_del(&dle->node);
-		kfree(dle);
 
 		ret = 0;
 		break;
@@ -1129,48 +1178,55 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
 /**
  * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
  * @genpd: Master PM domain to add the subdomain to.
- * @new_subdomain: Subdomain to be added.
+ * @subdomain: Subdomain to be added.
  */
 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
-			   struct generic_pm_domain *new_subdomain)
+			   struct generic_pm_domain *subdomain)
 {
-	struct generic_pm_domain *subdomain;
+	struct gpd_link *link;
 	int ret = 0;
 
-	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(new_subdomain))
+	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
 		return -EINVAL;
 
  start:
 	genpd_acquire_lock(genpd);
-	mutex_lock_nested(&new_subdomain->lock, SINGLE_DEPTH_NESTING);
+	mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
 
-	if (new_subdomain->status != GPD_STATE_POWER_OFF
-	    && new_subdomain->status != GPD_STATE_ACTIVE) {
-		mutex_unlock(&new_subdomain->lock);
+	if (subdomain->status != GPD_STATE_POWER_OFF
+	    && subdomain->status != GPD_STATE_ACTIVE) {
+		mutex_unlock(&subdomain->lock);
 		genpd_release_lock(genpd);
 		goto start;
 	}
 
 	if (genpd->status == GPD_STATE_POWER_OFF
-	    && new_subdomain->status != GPD_STATE_POWER_OFF) {
+	    && subdomain->status != GPD_STATE_POWER_OFF) {
 		ret = -EINVAL;
 		goto out;
 	}
 
-	list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
-		if (subdomain == new_subdomain) {
+	list_for_each_entry(link, &genpd->master_links, master_node) {
+		if (link->slave == subdomain && link->master == genpd) {
 			ret = -EINVAL;
 			goto out;
 		}
 	}
 
-	list_add_tail(&new_subdomain->sd_node, &genpd->sd_list);
-	new_subdomain->parent = genpd;
+	link = kzalloc(sizeof(*link), GFP_KERNEL);
+	if (!link) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	link->master = genpd;
+	list_add_tail(&link->master_node, &genpd->master_links);
+	link->slave = subdomain;
+	list_add_tail(&link->slave_node, &subdomain->slave_links);
 	if (subdomain->status != GPD_STATE_POWER_OFF)
-		genpd->sd_count++;
+		genpd_sd_counter_inc(genpd);
 
  out:
-	mutex_unlock(&new_subdomain->lock);
+	mutex_unlock(&subdomain->lock);
 	genpd_release_lock(genpd);
 
 	return ret;
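The duplicate check walks the master's master_links (links on which genpd is the master); iterating its slave_links here would never match, since those entries all have genpd as the slave. The link object both helpers manipulate represents one (master, slave) edge and sits on two lists at once, as added to include/linux/pm_domain.h by this series:

struct gpd_link {
	struct generic_pm_domain *master;
	struct list_head master_node;	/* entry in master->master_links */
	struct generic_pm_domain *slave;
	struct list_head slave_node;	/* entry in slave->slave_links */
};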
@@ -1179,22 +1235,22 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
 /**
  * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
  * @genpd: Master PM domain to remove the subdomain from.
- * @target: Subdomain to be removed.
+ * @subdomain: Subdomain to be removed.
  */
 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
-			      struct generic_pm_domain *target)
+			      struct generic_pm_domain *subdomain)
 {
-	struct generic_pm_domain *subdomain;
+	struct gpd_link *link;
 	int ret = -EINVAL;
 
-	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(target))
+	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
 		return -EINVAL;
 
  start:
 	genpd_acquire_lock(genpd);
 
-	list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
-		if (subdomain != target)
+	list_for_each_entry(link, &genpd->master_links, master_node) {
+		if (link->slave != subdomain)
 			continue;
 
 		mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
@@ -1206,8 +1262,9 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
 			goto start;
 		}
 
-		list_del(&subdomain->sd_node);
-		subdomain->parent = NULL;
+		list_del(&link->master_node);
+		list_del(&link->slave_node);
+		kfree(link);
 		if (subdomain->status != GPD_STATE_POWER_OFF)
 			genpd_sd_counter_dec(genpd);
 
@@ -1234,15 +1291,14 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
 	if (IS_ERR_OR_NULL(genpd))
 		return;
 
-	INIT_LIST_HEAD(&genpd->sd_node);
-	genpd->parent = NULL;
+	INIT_LIST_HEAD(&genpd->master_links);
+	INIT_LIST_HEAD(&genpd->slave_links);
 	INIT_LIST_HEAD(&genpd->dev_list);
-	INIT_LIST_HEAD(&genpd->sd_list);
 	mutex_init(&genpd->lock);
 	genpd->gov = gov;
 	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
 	genpd->in_progress = 0;
-	genpd->sd_count = 0;
+	atomic_set(&genpd->sd_count, 0);
 	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
 	init_waitqueue_head(&genpd->status_wait_queue);
 	genpd->poweroff_task = NULL;