/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SUSPEND_H
#define _LINUX_SUSPEND_H

#include <linux/swap.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/mm.h>
#include <linux/freezer.h>
#include <asm/errno.h>

#ifdef CONFIG_VT
extern void pm_set_vt_switch(int);
#else
static inline void pm_set_vt_switch(int do_switch)
{
}
#endif

#ifdef CONFIG_VT_CONSOLE_SLEEP
extern void pm_prepare_console(void);
extern void pm_restore_console(void);
#else
static inline void pm_prepare_console(void)
{
}

static inline void pm_restore_console(void)
{
}
#endif

typedef int __bitwise suspend_state_t;

#define PM_SUSPEND_ON		((__force suspend_state_t) 0)
#define PM_SUSPEND_TO_IDLE	((__force suspend_state_t) 1)
#define PM_SUSPEND_STANDBY	((__force suspend_state_t) 2)
#define PM_SUSPEND_MEM		((__force suspend_state_t) 3)
#define PM_SUSPEND_MIN		PM_SUSPEND_TO_IDLE
#define PM_SUSPEND_MAX		((__force suspend_state_t) 4)

/**
 * struct platform_suspend_ops - Callbacks for managing platform dependent
 *	system sleep states.
 *
 * @valid: Callback to determine if the given system sleep state is supported
 *	by the platform.
 *	Valid (i.e. supported) states are advertised in /sys/power/state.  Note
 *	that it may still be impossible to enter the given system sleep state
 *	if the conditions aren't right.
 *	There is the %suspend_valid_only_mem function available that can be
 *	assigned to this if the platform only supports mem sleep.
 *
 * @begin: Initialise a transition to the given system sleep state.
 *	@begin() is executed right before suspending devices.  The information
 *	conveyed to the platform code by @begin() should be disregarded by it as
 *	soon as @end() is executed.  If @begin() fails (i.e. returns nonzero),
 *	@prepare(), @enter() and @finish() will not be called by the PM core.
 *	This callback is optional.  However, if it is implemented, the argument
 *	passed to @enter() is redundant and should be ignored.
 *
 * @prepare: Prepare the platform for entering the system sleep state indicated
 *	by @begin().
 *	@prepare() is called right after devices have been suspended (i.e. the
 *	appropriate .suspend() method has been executed for each device) and
 *	before device drivers' late suspend callbacks are executed.  It returns
 *	0 on success or a negative error code otherwise, in which case the
 *	system cannot enter the desired sleep state (@prepare_late(), @enter(),
 *	and @wake() will not be called in that case).
 *
 * @prepare_late: Finish preparing the platform for entering the system sleep
 *	state indicated by @begin().
 *	@prepare_late() is called before disabling nonboot CPUs and after
 *	device drivers' late suspend callbacks have been executed.  It returns
 *	0 on success or a negative error code otherwise, in which case the
 *	system cannot enter the desired sleep state (@enter() will not be
 *	executed).
 *
 * @enter: Enter the system sleep state indicated by @begin() or represented by
 *	the argument if @begin() is not implemented.
 *	This callback is mandatory.  It returns 0 on success or a negative
 *	error code otherwise, in which case the system cannot enter the desired
 *	sleep state.
 *
 * @wake: Called when the system has just left a sleep state, right after
 *	the nonboot CPUs have been enabled and before device drivers' early
 *	resume callbacks are executed.
 *	This callback is optional, but should be implemented by platforms
 *	that implement @prepare_late().  If implemented, it is always called
 *	after @prepare_late() and @enter(), even if one of them fails.
 *
 * @finish: Finish wake-up of the platform.
 *	@finish() is called right before device drivers' regular resume
 *	callbacks are executed.
 *	This callback is optional, but should be implemented by platforms
 *	that implement @prepare().  If implemented, it is always called after
 *	@enter() and @wake(), even if either of them fails.  It is also
 *	executed after a failing @prepare().
 *
 * @suspend_again: Returns whether the system should suspend again (true) or
 *	not (false).  If the platform wants to poll sensors or execute some
 *	code while suspended, without involving user space or most devices,
 *	@suspend_again() is the place to do it, assuming that a periodic or
 *	alarm wakeup has already been set up.  This allows such code to run
 *	while the system remains suspended from the point of view of user
 *	space and devices.
 *
 * @end: Called by the PM core right after resuming devices, to indicate to
 *	the platform that the system has returned to the working state or
 *	the transition to the sleep state has been aborted.
 *	This callback is optional, but should be implemented by platforms
 *	that implement @begin().  Accordingly, platforms implementing @begin()
 *	should also provide an @end() which cleans up transitions aborted
 *	before @enter().
 *
 * @recover: Recover the platform from a suspend failure.
 *	Called by the PM core if the suspending of devices fails.
 *	This callback is optional and should only be implemented by platforms
 *	which require special recovery actions in that situation.
 */
struct platform_suspend_ops {
	int (*valid)(suspend_state_t state);
	int (*begin)(suspend_state_t state);
	int (*prepare)(void);
	int (*prepare_late)(void);
	int (*enter)(suspend_state_t state);
	void (*wake)(void);
	void (*finish)(void);
	bool (*suspend_again)(void);
	void (*end)(void);
	void (*recover)(void);
};
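
/*
 * Example (illustrative sketch only, not part of this header): a hypothetical
 * platform driver registering its suspend callbacks.  The foo_* names are
 * made up for the example; suspend_set_ops() and suspend_valid_only_mem are
 * the real hooks declared later in this header under CONFIG_SUSPEND.
 *
 *	static int foo_suspend_enter(suspend_state_t state)
 *	{
 *		if (state != PM_SUSPEND_MEM)
 *			return -EINVAL;
 *		// Program the hypothetical platform's sleep entry here and
 *		// return 0 on success or a negative error code on failure.
 *		return 0;
 *	}
 *
 *	static const struct platform_suspend_ops foo_suspend_ops = {
 *		.valid = suspend_valid_only_mem,
 *		.enter = foo_suspend_enter,
 *	};
 *
 *	// From the platform's init code:
 *	//	suspend_set_ops(&foo_suspend_ops);
 */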

struct platform_s2idle_ops {
	int (*begin)(void);
	int (*prepare)(void);
	int (*prepare_late)(void);
	void (*check)(void);
	bool (*wake)(void);
	void (*restore_early)(void);
	void (*restore)(void);
	void (*end)(void);
};

#ifdef CONFIG_SUSPEND
extern suspend_state_t pm_suspend_target_state;
extern suspend_state_t mem_sleep_current;
extern suspend_state_t mem_sleep_default;

/**
 * suspend_set_ops - set platform dependent suspend operations
 * @ops: The new suspend operations to set.
 */
extern void suspend_set_ops(const struct platform_suspend_ops *ops);
extern int suspend_valid_only_mem(suspend_state_t state);

extern unsigned int pm_suspend_global_flags;

#define PM_SUSPEND_FLAG_FW_SUSPEND	BIT(0)
#define PM_SUSPEND_FLAG_FW_RESUME	BIT(1)
#define PM_SUSPEND_FLAG_NO_PLATFORM	BIT(2)

static inline void pm_suspend_clear_flags(void)
{
	pm_suspend_global_flags = 0;
}

static inline void pm_set_suspend_via_firmware(void)
{
	pm_suspend_global_flags |= PM_SUSPEND_FLAG_FW_SUSPEND;
}

static inline void pm_set_resume_via_firmware(void)
{
	pm_suspend_global_flags |= PM_SUSPEND_FLAG_FW_RESUME;
}

static inline void pm_set_suspend_no_platform(void)
{
	pm_suspend_global_flags |= PM_SUSPEND_FLAG_NO_PLATFORM;
}

/**
 * pm_suspend_via_firmware - Check if platform firmware will suspend the system.
 *
 * To be called during system-wide power management transitions to sleep states
 * or during the subsequent system-wide transitions back to the working state.
 *
 * Return 'true' if the platform firmware is going to be invoked at the end of
 * the system-wide power management transition (to a sleep state) in progress in
 * order to complete it, or if the platform firmware has been invoked in order
 * to complete the last (or preceding) transition of the system to a sleep
 * state.
 *
 * This matters if the caller needs or wants to carry out some special actions
 * depending on whether or not control will be passed to the platform firmware
 * subsequently (for example, the device may need to be reset before letting the
 * platform firmware manipulate it, which is not necessary when the platform
 * firmware is not going to be invoked) or when such special actions may have
 * been carried out during the preceding transition of the system to a sleep
 * state (as they may need to be taken into account).
 */
static inline bool pm_suspend_via_firmware(void)
{
	return !!(pm_suspend_global_flags & PM_SUSPEND_FLAG_FW_SUSPEND);
}
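
/*
 * Illustrative sketch only: a hypothetical driver's ->suspend() callback
 * using pm_suspend_via_firmware() to decide whether extra work is needed
 * before control is handed to the platform firmware.  foo_suspend() and
 * foo_dev_reset() are assumptions made up for the example.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		if (pm_suspend_via_firmware()) {
 *			// Firmware will take over: quiesce the device first.
 *			foo_dev_reset(dev);
 *		}
 *		return 0;
 *	}
 */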

/**
 * pm_resume_via_firmware - Check if platform firmware has woken up the system.
 *
 * To be called during system-wide power management transitions from sleep
 * states.
 *
 * Return 'true' if the platform firmware has passed control to the kernel at
 * the beginning of the system-wide power management transition in progress, so
 * the event that woke up the system from sleep has been handled by the platform
 * firmware.
 */
static inline bool pm_resume_via_firmware(void)
{
	return !!(pm_suspend_global_flags & PM_SUSPEND_FLAG_FW_RESUME);
}

/**
 * pm_suspend_no_platform - Check if platform may change device power states.
 *
 * To be called during system-wide power management transitions to sleep states
 * or during the subsequent system-wide transitions back to the working state.
 *
 * Return 'true' if the power states of devices remain under full control of the
 * kernel throughout the system-wide suspend and resume cycle in progress (that
 * is, if a device is put into a certain power state during suspend, it can be
 * expected to remain in that state during resume).
 */
static inline bool pm_suspend_no_platform(void)
{
	return !!(pm_suspend_global_flags & PM_SUSPEND_FLAG_NO_PLATFORM);
}
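
/*
 * Illustrative sketch only: a hypothetical driver resume path that skips
 * re-initialisation when the kernel retained full control of device power
 * states across the suspend/resume cycle.  foo_resume() and foo_reinit()
 * are assumptions made up for the example.
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		if (!pm_suspend_no_platform()) {
 *			// The platform may have changed the device's power
 *			// state behind the kernel's back: reprogram it.
 *			return foo_reinit(dev);
 *		}
 *		return 0;
 *	}
 */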

/* Suspend-to-idle state machine. */
enum s2idle_states {
	S2IDLE_STATE_NONE,      /* Not suspended/suspending. */
	S2IDLE_STATE_ENTER,     /* Enter suspend-to-idle. */
	S2IDLE_STATE_WAKE,      /* Wake up from suspend-to-idle. */
};

extern enum s2idle_states __read_mostly s2idle_state;

static inline bool idle_should_enter_s2idle(void)
{
	return unlikely(s2idle_state == S2IDLE_STATE_ENTER);
}

extern bool pm_suspend_default_s2idle(void);
extern void __init pm_states_init(void);
extern void s2idle_set_ops(const struct platform_s2idle_ops *ops);
extern void s2idle_wake(void);

/**
 * arch_suspend_disable_irqs - disable IRQs for suspend
 *
 * Disables IRQs (in the default case). This is a weak symbol in the common
 * code and thus allows architectures to override it if more needs to be
 * done. Not called for suspend to disk.
 */
extern void arch_suspend_disable_irqs(void);

/**
 * arch_suspend_enable_irqs - enable IRQs after suspend
 *
 * Enables IRQs (in the default case). This is a weak symbol in the common
 * code and thus allows architectures to override it if more needs to be
 * done. Not called for suspend to disk.
 */
extern void arch_suspend_enable_irqs(void);
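
/*
 * Illustrative sketch only: because the default definitions are weak symbols,
 * an architecture that needs extra work around IRQ disabling can provide its
 * own strong definition.  foo_arch_quiesce_irqs() is an assumption made up
 * for the example; local_irq_disable() is the primitive used by the default
 * implementation.
 *
 *	void arch_suspend_disable_irqs(void)
 *	{
 *		foo_arch_quiesce_irqs();
 *		local_irq_disable();
 *	}
 */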

extern int pm_suspend(suspend_state_t state);
extern bool sync_on_suspend_enabled;
#else /* !CONFIG_SUSPEND */
#define suspend_valid_only_mem	NULL

#define pm_suspend_target_state	(PM_SUSPEND_ON)

static inline void pm_suspend_clear_flags(void) {}
static inline void pm_set_suspend_via_firmware(void) {}
static inline void pm_set_resume_via_firmware(void) {}
static inline bool pm_suspend_via_firmware(void) { return false; }
static inline bool pm_resume_via_firmware(void) { return false; }
static inline bool pm_suspend_no_platform(void) { return false; }
static inline bool pm_suspend_default_s2idle(void) { return false; }

static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
static inline bool sync_on_suspend_enabled(void) { return true; }
static inline bool idle_should_enter_s2idle(void) { return false; }
static inline void __init pm_states_init(void) {}
static inline void s2idle_set_ops(const struct platform_s2idle_ops *ops) {}
static inline void s2idle_wake(void) {}
#endif /* !CONFIG_SUSPEND */

static inline bool pm_suspend_in_progress(void)
{
	return pm_suspend_target_state != PM_SUSPEND_ON;
}

/* struct pbe is used for creating lists of pages that should be restored
 * atomically during the resume from disk, because the page frames they have
 * occupied before the suspend are in use.
 */
struct pbe {
	void *address;		/* address of the copy */
	void *orig_address;	/* original address of a page */
	struct pbe *next;
};

/**
 * struct platform_hibernation_ops - hibernation platform support
 *
 * The methods in this structure allow a platform to carry out special
 * operations required by it during a hibernation transition.
 *
 * All the methods below, except for @recover(), must be implemented.
 *
 * @begin: Tell the platform driver that we're starting hibernation.
 *	Called right after shrinking memory and before freezing devices.
 *
 * @end: Called by the PM core right after resuming devices, to indicate to
 *	the platform that the system has returned to the working state.
 *
 * @pre_snapshot: Prepare the platform for creating the hibernation image.
 *	Called right after devices have been frozen and before the nonboot
 *	CPUs are disabled (runs with IRQs on).
 *
 * @finish: Restore the previous state of the platform after the hibernation
 *	image has been created *or* put the platform into the normal operation
 *	mode after the hibernation (the same method is executed in both cases).
 *	Called right after the nonboot CPUs have been enabled and before
 *	thawing devices (runs with IRQs on).
 *
 * @prepare: Prepare the platform for entering the low power state.
 *	Called right after the hibernation image has been saved and before
 *	devices are prepared for entering the low power state.
 *
 * @enter: Put the system into the low power state after the hibernation image
 *	has been saved to disk.
 *	Called after the nonboot CPUs have been disabled and all of the low
 *	level devices have been shut down (runs with IRQs off).
 *
 * @leave: Perform the first stage of the cleanup after the system sleep state
 *	indicated by @begin() has been left.
 *	Called right after control has been passed from the boot kernel to
 *	the image kernel, before the nonboot CPUs are enabled and before devices
 *	are resumed.  Executed with interrupts disabled.
 *
 * @pre_restore: Prepare the system for restoration from a hibernation image.
 *	Called right after devices have been frozen and before the nonboot
 *	CPUs are disabled (runs with IRQs on).
 *
 * @restore_cleanup: Clean up after a failed image restoration.
 *	Called right after the nonboot CPUs have been enabled and before
 *	thawing devices (runs with IRQs on).
 *
 * @recover: Recover the platform from a failure to suspend devices.
 *	Called by the PM core if the suspending of devices during hibernation
 *	fails.  This callback is optional and should only be implemented by
 *	platforms which require special recovery actions in that situation.
 */
struct platform_hibernation_ops {
	int (*begin)(pm_message_t stage);
	void (*end)(void);
	int (*pre_snapshot)(void);
	void (*finish)(void);
	int (*prepare)(void);
	int (*enter)(void);
	void (*leave)(void);
	int (*pre_restore)(void);
	void (*restore_cleanup)(void);
	void (*recover)(void);
};
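
/*
 * Example (illustrative sketch only): a hypothetical platform layer
 * registering hibernation callbacks.  The foo_* functions are made up for
 * the example; hibernation_set_ops() is declared below under
 * CONFIG_HIBERNATION.  Per the kerneldoc above, every callback except
 * @recover must be implemented.
 *
 *	static const struct platform_hibernation_ops foo_hibernation_ops = {
 *		.begin		 = foo_hibernation_begin,
 *		.end		 = foo_hibernation_end,
 *		.pre_snapshot	 = foo_pre_snapshot,
 *		.finish		 = foo_hibernation_finish,
 *		.prepare	 = foo_hibernation_prepare,
 *		.enter		 = foo_hibernation_enter,
 *		.leave		 = foo_hibernation_leave,
 *		.pre_restore	 = foo_pre_restore,
 *		.restore_cleanup = foo_restore_cleanup,
 *	};
 *
 *	// From the platform's init code:
 *	//	hibernation_set_ops(&foo_hibernation_ops);
 */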

#ifdef CONFIG_HIBERNATION
/* kernel/power/snapshot.c */
extern void register_nosave_region(unsigned long b, unsigned long e);
extern int swsusp_page_is_forbidden(struct page *);
extern void swsusp_set_page_free(struct page *);
extern void swsusp_unset_page_free(struct page *);
extern unsigned long get_safe_page(gfp_t gfp_mask);
extern asmlinkage int swsusp_arch_suspend(void);
extern asmlinkage int swsusp_arch_resume(void);

extern u32 swsusp_hardware_signature;
extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
extern int hibernate(void);
extern bool system_entering_hibernation(void);
extern bool hibernation_available(void);
asmlinkage int swsusp_save(void);
extern struct pbe *restore_pblist;
int pfn_is_nosave(unsigned long pfn);

int hibernate_quiet_exec(int (*func)(void *data), void *data);
int hibernate_resume_nonboot_cpu_disable(void);
int arch_hibernation_header_save(void *addr, unsigned int max_size);
int arch_hibernation_header_restore(void *addr);

#else /* CONFIG_HIBERNATION */
static inline void register_nosave_region(unsigned long b, unsigned long e) {}
static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
static inline void swsusp_set_page_free(struct page *p) {}
static inline void swsusp_unset_page_free(struct page *p) {}

static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
static inline int hibernate(void) { return -ENOSYS; }
static inline bool system_entering_hibernation(void) { return false; }
static inline bool hibernation_available(void) { return false; }

static inline int hibernate_quiet_exec(int (*func)(void *data), void *data) {
	return -ENOTSUPP;
}
#endif /* CONFIG_HIBERNATION */

#if defined(CONFIG_HIBERNATION) && defined(CONFIG_SUSPEND)
bool pm_hibernation_mode_is_suspend(void);
#else
static inline bool pm_hibernation_mode_is_suspend(void) { return false; }
#endif

int arch_resume_nosmt(void);

#ifdef CONFIG_HIBERNATION_SNAPSHOT_DEV
int is_hibernate_resume_dev(dev_t dev);
#else
static inline int is_hibernate_resume_dev(dev_t dev) { return 0; }
#endif

/* Hibernation and suspend events */
#define PM_HIBERNATION_PREPARE	0x0001 /* Going to hibernate */
#define PM_POST_HIBERNATION	0x0002 /* Hibernation finished */
#define PM_SUSPEND_PREPARE	0x0003 /* Going to suspend the system */
#define PM_POST_SUSPEND		0x0004 /* Suspend finished */
#define PM_RESTORE_PREPARE	0x0005 /* Going to restore a saved image */
#define PM_POST_RESTORE		0x0006 /* Restore failed */

extern struct mutex system_transition_mutex;

#ifdef CONFIG_PM_SLEEP
void save_processor_state(void);
void restore_processor_state(void);

/* kernel/power/main.c */
extern int register_pm_notifier(struct notifier_block *nb);
extern int unregister_pm_notifier(struct notifier_block *nb);
extern void ksys_sync_helper(void);
extern void pm_report_hw_sleep_time(u64 t);
extern void pm_report_max_hw_sleep(u64 t);
void pm_restrict_gfp_mask(void);
void pm_restore_gfp_mask(void);

#define pm_notifier(fn, pri) {				\
	static struct notifier_block fn##_nb =			\
		{ .notifier_call = fn, .priority = pri };	\
	register_pm_notifier(&fn##_nb);			\
}
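
/*
 * Illustrative sketch only: a hypothetical subsystem reacting to system
 * sleep transitions with a PM notifier.  foo_quiesce() and foo_resume_work()
 * are assumptions made up for the example; the event values are the
 * PM_* constants defined above.
 *
 *	static int foo_pm_notify(struct notifier_block *nb,
 *				 unsigned long action, void *data)
 *	{
 *		switch (action) {
 *		case PM_SUSPEND_PREPARE:
 *		case PM_HIBERNATION_PREPARE:
 *			foo_quiesce();
 *			break;
 *		case PM_POST_SUSPEND:
 *		case PM_POST_HIBERNATION:
 *			foo_resume_work();
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	// Either register an explicit notifier_block with
 *	// register_pm_notifier(), or use the pm_notifier(fn, pri) helper
 *	// above from init code.
 */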

/* drivers/base/power/wakeup.c */
extern bool events_check_enabled;

static inline bool pm_suspended_storage(void)
{
	return !gfp_has_io_fs(gfp_allowed_mask);
}

extern bool pm_wakeup_pending(void);
extern void pm_system_wakeup(void);
extern void pm_system_cancel_wakeup(void);
extern void pm_wakeup_clear(unsigned int irq_number);
extern void pm_system_irq_wakeup(unsigned int irq_number);
extern unsigned int pm_wakeup_irq(void);
extern bool pm_get_wakeup_count(unsigned int *count, bool block);
extern bool pm_save_wakeup_count(unsigned int count);
extern void pm_wakep_autosleep_enabled(bool set);
extern void pm_print_active_wakeup_sources(void);

extern unsigned int lock_system_sleep(void);
extern void unlock_system_sleep(unsigned int);

extern bool pm_sleep_transition_in_progress(void);
bool pm_hibernate_is_recovering(void);

#else /* !CONFIG_PM_SLEEP */

static inline int register_pm_notifier(struct notifier_block *nb)
{
	return 0;
}

static inline int unregister_pm_notifier(struct notifier_block *nb)
{
	return 0;
}

static inline void pm_report_hw_sleep_time(u64 t) {};
static inline void pm_report_max_hw_sleep(u64 t) {};

static inline void pm_restrict_gfp_mask(void) {}
static inline void pm_restore_gfp_mask(void) {}

static inline void ksys_sync_helper(void) {}

#define pm_notifier(fn, pri)	do { (void)(fn); } while (0)

static inline bool pm_suspended_storage(void) { return false; }
static inline bool pm_wakeup_pending(void) { return false; }
static inline void pm_system_wakeup(void) {}
static inline void pm_wakeup_clear(bool reset) {}
static inline void pm_system_irq_wakeup(unsigned int irq_number) {}

static inline unsigned int lock_system_sleep(void) { return 0; }
static inline void unlock_system_sleep(unsigned int flags) {}

static inline bool pm_sleep_transition_in_progress(void) { return false; }
static inline bool pm_hibernate_is_recovering(void) { return false; }

#endif /* !CONFIG_PM_SLEEP */

#ifdef CONFIG_PM_SLEEP_DEBUG
extern bool pm_print_times_enabled;
extern bool pm_debug_messages_on;
extern bool pm_debug_messages_should_print(void);
static inline int pm_dyn_debug_messages_on(void)
{
#ifdef CONFIG_DYNAMIC_DEBUG
	return 1;
#else
	return 0;
#endif
}
#ifndef pr_fmt
#define pr_fmt(fmt) "PM: " fmt
#endif
#define __pm_pr_dbg(fmt, ...)					\
	do {							\
		if (pm_debug_messages_should_print())		\
			printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__);	\
		else if (pm_dyn_debug_messages_on())		\
			pr_debug(fmt, ##__VA_ARGS__);	\
	} while (0)
#define __pm_deferred_pr_dbg(fmt, ...)				\
	do {							\
		if (pm_debug_messages_should_print())		\
			printk_deferred(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__);	\
	} while (0)
#else
#define pm_print_times_enabled	(false)
#define pm_debug_messages_on	(false)

#include <linux/printk.h>

#define __pm_pr_dbg(fmt, ...) \
	no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#define __pm_deferred_pr_dbg(fmt, ...) \
	no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#endif

/**
 * pm_pr_dbg - print PM sleep debug messages
 *
 * If pm_debug_messages_on is enabled and the system is entering/leaving
 *	suspend, print the message.
 * If pm_debug_messages_on is disabled and CONFIG_DYNAMIC_DEBUG is enabled,
 *	print the message only from call sites explicitly enabled via dynamic
 *	debug control.
 * If pm_debug_messages_on is disabled and CONFIG_DYNAMIC_DEBUG is disabled,
 *	don't print the message.
 */
#define pm_pr_dbg(fmt, ...) \
	__pm_pr_dbg(fmt, ##__VA_ARGS__)

#define pm_deferred_pr_dbg(fmt, ...) \
	__pm_deferred_pr_dbg(fmt, ##__VA_ARGS__)
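
/*
 * Illustrative usage sketch (elapsed_ms is a made-up variable): pm_pr_dbg()
 * formats like pr_debug() with the "PM: " prefix, and the message is printed
 * when pm_debug_messages_on is enabled and a sleep transition is in progress,
 * as described above.
 *
 *	pm_pr_dbg("resuming devices took %u ms\n", elapsed_ms);
 */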

#ifdef CONFIG_PM_AUTOSLEEP

/* kernel/power/autosleep.c */
void queue_up_suspend_work(void);

#else /* !CONFIG_PM_AUTOSLEEP */

static inline void queue_up_suspend_work(void) {}

#endif /* !CONFIG_PM_AUTOSLEEP */

enum suspend_stat_step {
	SUSPEND_WORKING = 0,
	SUSPEND_FREEZE,
	SUSPEND_PREPARE,
	SUSPEND_SUSPEND,
	SUSPEND_SUSPEND_LATE,
	SUSPEND_SUSPEND_NOIRQ,
	SUSPEND_RESUME_NOIRQ,
	SUSPEND_RESUME_EARLY,
	SUSPEND_RESUME
};

void dpm_save_failed_dev(const char *name);
void dpm_save_failed_step(enum suspend_stat_step step);

#endif /* _LINUX_SUSPEND_H */