@@ -32,28 +32,28 @@ DEFINE_MUTEX(pm_mutex);
 /* This is just an arbitrary number */
 #define FREE_PAGE_NUMBER	(100)
 
-struct pm_ops *pm_ops;
+struct platform_suspend_ops *suspend_ops;
 
 /**
- * pm_set_ops - Set the global power method table.
+ * suspend_set_ops - Set the global suspend method table.
  * @ops: Pointer to ops structure.
  */
 
-void pm_set_ops(struct pm_ops * ops)
+void suspend_set_ops(struct platform_suspend_ops *ops)
 {
 	mutex_lock(&pm_mutex);
-	pm_ops = ops;
+	suspend_ops = ops;
 	mutex_unlock(&pm_mutex);
 }
 
 /**
- * pm_valid_only_mem - generic memory-only valid callback
+ * suspend_valid_only_mem - generic memory-only valid callback
  *
- * pm_ops drivers that implement mem suspend only and only need
+ * Platform drivers that implement mem suspend only and only need
  * to check for that in their .valid callback can use this instead
  * of rolling their own .valid callback.
  */
-int pm_valid_only_mem(suspend_state_t state)
+int suspend_valid_only_mem(suspend_state_t state)
 {
 	return state == PM_SUSPEND_MEM;
 }
@@ -61,8 +61,8 @@ int pm_valid_only_mem(suspend_state_t state)
 
 static inline void pm_finish(suspend_state_t state)
 {
-	if (pm_ops->finish)
-		pm_ops->finish(state);
+	if (suspend_ops->finish)
+		suspend_ops->finish(state);
 }
 
 /**
@@ -76,7 +76,7 @@ static int suspend_prepare(void)
 	int error;
 	unsigned int free_pages;
 
-	if (!pm_ops || !pm_ops->enter)
+	if (!suspend_ops || !suspend_ops->enter)
 		return -EPERM;
 
 	error = pm_notifier_call_chain(PM_SUSPEND_PREPARE);
@@ -139,7 +139,7 @@ static int suspend_enter(suspend_state_t state)
 		printk(KERN_ERR "Some devices failed to power down\n");
 		goto Done;
 	}
-	error = pm_ops->enter(state);
+	error = suspend_ops->enter(state);
 	device_power_up();
 Done:
 	arch_suspend_enable_irqs();
@@ -156,11 +156,11 @@ int suspend_devices_and_enter(suspend_state_t state)
 {
 	int error;
 
-	if (!pm_ops)
+	if (!suspend_ops)
 		return -ENOSYS;
 
-	if (pm_ops->set_target) {
-		error = pm_ops->set_target(state);
+	if (suspend_ops->set_target) {
+		error = suspend_ops->set_target(state);
 		if (error)
 			return error;
 	}
@@ -170,8 +170,8 @@ int suspend_devices_and_enter(suspend_state_t state)
 		printk(KERN_ERR "Some devices failed to suspend\n");
 		goto Resume_console;
 	}
-	if (pm_ops->prepare) {
-		error = pm_ops->prepare(state);
+	if (suspend_ops->prepare) {
+		error = suspend_ops->prepare(state);
 		if (error)
 			goto Resume_devices;
 	}
@@ -214,7 +214,7 @@ static inline int valid_state(suspend_state_t state)
 	/* All states need lowlevel support and need to be valid
 	 * to the lowlevel implementation, no valid callback
 	 * implies that none are valid. */
-	if (!pm_ops || !pm_ops->valid || !pm_ops->valid(state))
+	if (!suspend_ops || !suspend_ops->valid || !suspend_ops->valid(state))
 		return 0;
 	return 1;
 }
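
For context, a platform driver's registration site would change along the lines of the sketch below after this rename. This is a minimal illustration, not part of the patch: it assumes the callbacks visible in these hunks (.valid, .set_target, .prepare, .enter, .finish) keep the int (*)(suspend_state_t) prototypes of the old struct pm_ops, and the foo_* names, the header choice, and the initcall level are hypothetical placeholders.

/*
 * Hypothetical platform code, shown only to illustrate the rename.
 * The foo_* handlers stand in for real platform hooks; prototypes
 * are assumed to follow the int (*)(suspend_state_t) pattern of the
 * old struct pm_ops.
 */
#include <linux/init.h>
#include <linux/suspend.h>	/* assumed home of the renamed declarations */

static int foo_suspend_prepare(suspend_state_t state)
{
	/* Arm wakeup sources, quiesce platform glue, etc. */
	return 0;
}

static int foo_suspend_enter(suspend_state_t state)
{
	/* Actually drop the board into the requested low-power state. */
	return 0;
}

static int foo_suspend_finish(suspend_state_t state)
{
	/* Undo whatever foo_suspend_prepare() set up. */
	return 0;
}

static struct platform_suspend_ops foo_suspend_ops = {
	.valid		= suspend_valid_only_mem,	/* mem-only platform */
	.prepare	= foo_suspend_prepare,
	.enter		= foo_suspend_enter,
	.finish		= foo_suspend_finish,
	/* .set_target is optional; suspend_devices_and_enter() checks for it */
};

static int __init foo_pm_init(void)
{
	suspend_set_ops(&foo_suspend_ops);	/* was: pm_set_ops(&foo_pm_ops) */
	return 0;
}
late_initcall(foo_pm_init);

Wiring .valid to the exported suspend_valid_only_mem() helper is exactly the mem-only shortcut the second kernel-doc block above describes, so such a platform does not need to roll its own .valid callback.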