@@ -1,5 +1,5 @@
-#ifndef _LINUX_SWSUSP_H
-#define _LINUX_SWSUSP_H
+#ifndef _LINUX_SUSPEND_H
+#define _LINUX_SUSPEND_H
 
 #if defined(CONFIG_X86) || defined(CONFIG_FRV) || defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
 #include <asm/suspend.h>
@@ -9,6 +9,113 @@
 #include <linux/init.h>
 #include <linux/pm.h>
 #include <linux/mm.h>
+#include <asm/errno.h>
+
+#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE)
+extern int pm_prepare_console(void);
+extern void pm_restore_console(void);
+#else
+static inline int pm_prepare_console(void) { return 0; }
+static inline void pm_restore_console(void) {}
+#endif
+
+typedef int __bitwise suspend_state_t;
+
+#define PM_SUSPEND_ON		((__force suspend_state_t) 0)
+#define PM_SUSPEND_STANDBY	((__force suspend_state_t) 1)
+#define PM_SUSPEND_MEM		((__force suspend_state_t) 3)
+#define PM_SUSPEND_MAX		((__force suspend_state_t) 4)
+
+/**
+ * struct pm_ops - Callbacks for managing platform dependent system sleep
+ *	states.
+ *
+ * @valid: Callback to determine if a given system sleep state is supported by
+ *	the platform.
+ *	Valid (i.e. supported) states are advertised in /sys/power/state. Note
+ *	that it still may be impossible to enter a given system sleep state if
+ *	the conditions aren't right.
+ *	The %pm_valid_only_mem function is available and can be assigned to
+ *	this callback if the platform only supports mem sleep.
+ *
+ * @set_target: Tell the platform which system sleep state is going to be
+ *	entered.
+ *	@set_target() is executed right before suspending devices. The
+ *	information conveyed to the platform code by @set_target() should be
+ *	disregarded by the platform as soon as @finish() is executed, and also
+ *	if @prepare() fails. If @set_target() fails (i.e. returns nonzero),
+ *	@prepare(), @enter() and @finish() will not be called by the PM core.
+ *	This callback is optional. However, if it is implemented, the argument
+ *	passed to @prepare(), @enter() and @finish() is meaningless and should
+ *	be ignored.
+ *
+ * @prepare: Prepare the platform for entering the system sleep state indicated
+ *	by @set_target() or represented by the argument if @set_target() is not
+ *	implemented.
+ *	@prepare() is called right after devices have been suspended (i.e. the
+ *	appropriate .suspend() method has been executed for each device) and
+ *	before the nonboot CPUs are disabled (it is executed with IRQs enabled).
+ *	This callback is optional. It returns 0 on success or a negative
+ *	error code otherwise, in which case the system cannot enter the desired
+ *	sleep state (@enter() and @finish() will not be called in that case).
+ *
+ * @enter: Enter the system sleep state indicated by @set_target() or
+ *	represented by the argument if @set_target() is not implemented.
+ *	This callback is mandatory. It returns 0 on success or a negative
+ *	error code otherwise, in which case the system cannot enter the desired
+ *	sleep state.
+ *
+ * @finish: Called when the system has just left a sleep state, right after
+ *	the nonboot CPUs have been enabled and before devices are resumed (it is
+ *	executed with IRQs enabled). If @set_target() is not implemented, the
+ *	argument represents the sleep state being left.
+ *	This callback is optional, but should be implemented by the platforms
+ *	that implement @prepare(). If implemented, it is always called after
+ *	@enter() (even if @enter() fails).
+ */
+struct pm_ops {
+	int (*valid)(suspend_state_t state);
+	int (*set_target)(suspend_state_t state);
+	int (*prepare)(suspend_state_t state);
+	int (*enter)(suspend_state_t state);
+	int (*finish)(suspend_state_t state);
+};
+
+#ifdef CONFIG_SUSPEND
+extern struct pm_ops *pm_ops;
+
+/**
+ * pm_set_ops - set platform dependent power management ops
+ * @pm_ops: The new power management operations to set.
+ */
+extern void pm_set_ops(struct pm_ops *pm_ops);
+extern int pm_valid_only_mem(suspend_state_t state);
+
+/**
+ * arch_suspend_disable_irqs - disable IRQs for suspend
+ *
+ * Disables IRQs (in the default case). This is a weak symbol in the common
+ * code and thus allows architectures to override it if more needs to be
+ * done. Not called for suspend to disk.
+ */
+extern void arch_suspend_disable_irqs(void);
+
+/**
+ * arch_suspend_enable_irqs - enable IRQs after suspend
+ *
+ * Enables IRQs (in the default case). This is a weak symbol in the common
+ * code and thus allows architectures to override it if more needs to be
+ * done. Not called for suspend to disk.
+ */
+extern void arch_suspend_enable_irqs(void);
+
+extern int pm_suspend(suspend_state_t state);
+#else /* !CONFIG_SUSPEND */
+#define pm_valid_only_mem	NULL
+
+static inline void pm_set_ops(struct pm_ops *pm_ops) {}
+static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
+#endif /* !CONFIG_SUSPEND */
 
 /* struct pbe is used for creating lists of pages that should be restored
  * atomically during the resume from disk, because the page frames they have
@@ -24,14 +131,6 @@ struct pbe {
 extern void drain_local_pages(void);
 extern void mark_free_pages(struct zone *zone);
 
-#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE)
-extern int pm_prepare_console(void);
-extern void pm_restore_console(void);
-#else
-static inline int pm_prepare_console(void) { return 0; }
-static inline void pm_restore_console(void) {}
-#endif
-
 /**
  * struct hibernation_ops - hibernation platform support
  *
@@ -130,4 +229,4 @@ static inline void register_nosave_region_late(unsigned long b, unsigned long e)
 }
 #endif
 
-#endif /* _LINUX_SWSUSP_H */
+#endif /* _LINUX_SUSPEND_H */
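
For illustration only: a minimal sketch of how platform code might plug into
the interface added above. The foo_* names are hypothetical and the callbacks
are stubs; nothing is assumed beyond the declarations in this header
(struct pm_ops, pm_set_ops(), pm_valid_only_mem(), the PM_SUSPEND_* states).

#include <linux/init.h>
#include <linux/suspend.h>

static suspend_state_t foo_target_state = PM_SUSPEND_ON;

/* .set_target is implemented, so the state argument later passed to
 * .prepare/.enter/.finish is meaningless; the recorded value is used. */
static int foo_pm_set_target(suspend_state_t state)
{
	foo_target_state = state;
	return 0;
}

/* Runs after devices have been suspended, before nonboot CPUs go down. */
static int foo_pm_prepare(suspend_state_t state)
{
	return 0;
}

/* Actually put the platform into foo_target_state here. */
static int foo_pm_enter(suspend_state_t state)
{
	return 0;
}

/* Undo the effects of .prepare(); called even if .enter() failed. */
static int foo_pm_finish(suspend_state_t state)
{
	foo_target_state = PM_SUSPEND_ON;
	return 0;
}

static struct pm_ops foo_pm_ops = {
	.valid		= pm_valid_only_mem,	/* only mem sleep is supported */
	.set_target	= foo_pm_set_target,
	.prepare	= foo_pm_prepare,
	.enter		= foo_pm_enter,
	.finish		= foo_pm_finish,
};

static int __init foo_pm_init(void)
{
	pm_set_ops(&foo_pm_ops);
	return 0;
}
late_initcall(foo_pm_init);

Suspend is then driven through pm_suspend(PM_SUSPEND_MEM), for example when
"mem" is written to /sys/power/state, and the registered callbacks are invoked
in the order documented in the struct pm_ops comment.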
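
The arch_suspend_disable_irqs()/arch_suspend_enable_irqs() comments above
describe weak symbols whose defaults live in the suspend core, not in this
header. Below is an illustrative sketch of that pattern, assuming only what
the kernel-doc states; suspend_enter_sketch() is a made-up name, and the real
sequencing (device suspend, nonboot CPU handling, error paths) happens
elsewhere in the suspend core.

#include <linux/kernel.h>
#include <linux/irqflags.h>
#include <linux/suspend.h>

/* Weak defaults; an architecture that needs more than plain IRQ disabling
 * overrides these by defining non-weak functions with the same names. */
void __attribute__ ((weak)) arch_suspend_disable_irqs(void)
{
	local_irq_disable();
}

void __attribute__ ((weak)) arch_suspend_enable_irqs(void)
{
	local_irq_enable();
}

/* Roughly how the core brackets pm_ops->enter() with the hooks above. */
static int suspend_enter_sketch(suspend_state_t state)
{
	int error;

	if (!pm_ops || !pm_ops->enter)
		return -ENOSYS;

	arch_suspend_disable_irqs();
	BUG_ON(!irqs_disabled());

	error = pm_ops->enter(state);

	arch_suspend_enable_irqs();
	BUG_ON(irqs_disabled());

	return error;
}

Because the symbols are weak, no registration call is needed: an architecture
simply provides its own definitions and the linker prefers them over the
defaults. As the kernel-doc notes, these hooks are not used on the suspend to
disk path.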