/*
 * pm.h - Power management interface
 *
 * Copyright (C) 2000 Andrew Henroid
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifndef _LINUX_PM_H
#define _LINUX_PM_H

#ifdef __KERNEL__

#include <linux/list.h>
#include <asm/atomic.h>
/*
 * Power management requests... these are passed to pm_send_all() and friends.
 *
 * These functions are old and deprecated; see below.
 */
typedef int __bitwise pm_request_t;

#define PM_SUSPEND	((__force pm_request_t) 1)	/* enter D1-D3 */
#define PM_RESUME	((__force pm_request_t) 2)	/* enter D0 */
/*
 * Device types... these are passed to pm_register
 */
typedef int __bitwise pm_dev_t;

#define PM_UNKNOWN_DEV	((__force pm_dev_t) 0)	/* generic */
#define PM_SYS_DEV	((__force pm_dev_t) 1)	/* system device (fan, KB controller, ...) */
#define PM_PCI_DEV	((__force pm_dev_t) 2)	/* PCI device */
#define PM_USB_DEV	((__force pm_dev_t) 3)	/* USB device */
#define PM_SCSI_DEV	((__force pm_dev_t) 4)	/* SCSI device */
#define PM_ISA_DEV	((__force pm_dev_t) 5)	/* ISA device */
#define PM_MTD_DEV	((__force pm_dev_t) 6)	/* Memory Technology Device */
/*
 * System device hardware ID (PnP) values
 */
enum {
	PM_SYS_UNKNOWN	= 0x00000000, /* generic */
	PM_SYS_KBC	= 0x41d00303, /* keyboard controller */
	PM_SYS_COM	= 0x41d00500, /* serial port */
	PM_SYS_IRDA	= 0x41d00510, /* IRDA controller */
	PM_SYS_FDC	= 0x41d00700, /* floppy controller */
	PM_SYS_VGA	= 0x41d00900, /* VGA controller */
	PM_SYS_PCMCIA	= 0x41d00e00, /* PCMCIA controller */
};
/*
 * Device identifier
 */
#define PM_PCI_ID(dev) ((dev)->bus->number << 16 | (dev)->devfn)
/*
 * Request handler callback
 */
struct pm_dev;

typedef int (*pm_callback)(struct pm_dev *dev, pm_request_t rqst, void *data);
/*
 * Dynamic device information
 */
struct pm_dev {
	pm_dev_t	 type;
	unsigned long	 id;
	pm_callback	 callback;
	void		*data;

	unsigned long	 flags;
	unsigned long	 state;
	unsigned long	 prev_state;

	struct list_head entry;
};
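/*
 * Purely illustrative sketch of how the legacy callback above was typically
 * implemented; it is shown only to document the deprecated interface (see the
 * note below) and should not be copied into new code.  mydrv_stop_hw() and
 * mydrv_start_hw() are hypothetical driver helpers, not part of this header.
 */
#if 0
static int mydrv_pm_callback(struct pm_dev *dev, pm_request_t rqst, void *data)
{
	if (rqst == PM_SUSPEND)
		mydrv_stop_hw(data);	/* quiesce before entering D1-D3 */
	else if (rqst == PM_RESUME)
		mydrv_start_hw(data);	/* restore operation in D0 */
	return 0;
}
#endif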
/* Functions above this comment are list-based old-style power
 * management.  Please avoid using them. */
/*
 * Callbacks for platform drivers to implement.
 */
extern void (*pm_idle)(void);
extern void (*pm_power_off)(void);
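/*
 * Minimal sketch of how platform code might install these hooks during board
 * setup.  The mach_do_idle()/mach_do_power_off() routines and the setup
 * function name are hypothetical; only the assignments to pm_idle and
 * pm_power_off come from this interface.
 */
#if 0
static void mach_do_idle(void)
{
	/* e.g. wait for the next interrupt in a low-power CPU mode */
}

static void mach_do_power_off(void)
{
	/* e.g. assert the board's power-cut line */
}

static void __init mach_pm_setup(void)
{
	pm_idle = mach_do_idle;
	pm_power_off = mach_do_power_off;
}
#endif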
typedef int __bitwise suspend_state_t;

#define PM_SUSPEND_ON		((__force suspend_state_t) 0)
#define PM_SUSPEND_STANDBY	((__force suspend_state_t) 1)
#define PM_SUSPEND_MEM		((__force suspend_state_t) 3)
#define PM_SUSPEND_MAX		((__force suspend_state_t) 4)
/**
 * struct pm_ops - Callbacks for managing platform dependent system sleep
 *	states.
 *
 * @valid: Callback to determine if the given system sleep state is supported
 *	by the platform.
 *	Valid (i.e. supported) states are advertised in /sys/power/state.  Note
 *	that it still may be impossible to enter a given system sleep state if
 *	the conditions aren't right.
 *	There is the %pm_valid_only_mem function available that can be assigned
 *	to this if the platform only supports mem sleep.
 *
 * @set_target: Tell the platform which system sleep state is going to be
 *	entered.
 *	@set_target() is executed right before suspending devices.  The
 *	information conveyed to the platform code by @set_target() should be
 *	disregarded by the platform as soon as @finish() is executed and if
 *	@prepare() fails.  If @set_target() fails (i.e. returns nonzero),
 *	@prepare(), @enter() and @finish() will not be called by the PM core.
 *	This callback is optional.  However, if it is implemented, the argument
 *	passed to @prepare(), @enter() and @finish() is meaningless and should
 *	be ignored.
 *
 * @prepare: Prepare the platform for entering the system sleep state indicated
 *	by @set_target() or represented by the argument if @set_target() is not
 *	implemented.
 *	@prepare() is called right after devices have been suspended (i.e. the
 *	appropriate .suspend() method has been executed for each device) and
 *	before the nonboot CPUs are disabled (it is executed with IRQs enabled).
 *	This callback is optional.  It returns 0 on success or a negative
 *	error code otherwise, in which case the system cannot enter the desired
 *	sleep state (@enter() and @finish() will not be called in that case).
 *
 * @enter: Enter the system sleep state indicated by @set_target() or
 *	represented by the argument if @set_target() is not implemented.
 *	This callback is mandatory.  It returns 0 on success or a negative
 *	error code otherwise, in which case the system cannot enter the desired
 *	sleep state.
 *
 * @finish: Called when the system has just left a sleep state, right after
 *	the nonboot CPUs have been enabled and before devices are resumed (it
 *	is executed with IRQs enabled).  If @set_target() is not implemented,
 *	the argument represents the sleep state being left.
 *	This callback is optional, but should be implemented by the platforms
 *	that implement @prepare().  If implemented, it is always called after
 *	@enter() (even if @enter() fails).
 */
struct pm_ops {
	int (*valid)(suspend_state_t state);
	int (*set_target)(suspend_state_t state);
	int (*prepare)(suspend_state_t state);
	int (*enter)(suspend_state_t state);
	int (*finish)(suspend_state_t state);
};

extern struct pm_ops *pm_ops;
/**
 * pm_set_ops - set platform dependent power management ops
 * @pm_ops: The new power management operations to set.
 */
extern void pm_set_ops(struct pm_ops *pm_ops);
extern int pm_valid_only_mem(suspend_state_t state);
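/*
 * Minimal sketch of a platform registering its sleep support, assuming it
 * only implements mem sleep.  mach_enter_sleep() and the init plumbing are
 * hypothetical; the usual <linux/init.h> and <linux/errno.h> definitions are
 * assumed to be available.
 */
#if 0
static int mach_pm_enter(suspend_state_t state)
{
	if (state != PM_SUSPEND_MEM)
		return -EINVAL;
	return mach_enter_sleep();	/* hypothetical platform routine */
}

static struct pm_ops mach_pm_ops = {
	.valid	= pm_valid_only_mem,	/* advertise only "mem" in /sys/power/state */
	.enter	= mach_pm_enter,	/* .set_target/.prepare/.finish are optional */
};

static int __init mach_pm_register(void)
{
	pm_set_ops(&mach_pm_ops);
	return 0;
}
late_initcall(mach_pm_register);
#endif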
/**
 * arch_suspend_disable_irqs - disable IRQs for suspend
 *
 * Disables IRQs (in the default case). This is a weak symbol in the common
 * code and thus allows architectures to override it if more needs to be
 * done. Not called for suspend to disk.
 */
extern void arch_suspend_disable_irqs(void);

/**
 * arch_suspend_enable_irqs - enable IRQs after suspend
 *
 * Enables IRQs (in the default case). This is a weak symbol in the common
 * code and thus allows architectures to override it if more needs to be
 * done. Not called for suspend to disk.
 */
extern void arch_suspend_enable_irqs(void);
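/*
 * Sketch of an architecture overriding the weak defaults because it needs
 * extra work around the plain local_irq_disable()/local_irq_enable() the
 * common code would otherwise do.  mach_mask_wake_sources() and
 * mach_unmask_wake_sources() are hypothetical chipset helpers.
 */
#if 0
void arch_suspend_disable_irqs(void)
{
	mach_mask_wake_sources();	/* hypothetical extra step */
	local_irq_disable();
}

void arch_suspend_enable_irqs(void)
{
	local_irq_enable();
	mach_unmask_wake_sources();	/* hypothetical extra step */
}
#endif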
extern int pm_suspend(suspend_state_t state);
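/*
 * Entering a sleep state from kernel code is then a single call; the sketch
 * below requests suspend-to-RAM and logs a failure.  (User space normally
 * triggers this by writing "mem" to /sys/power/state; the function name here
 * is hypothetical.)
 */
#if 0
static void example_go_to_sleep(void)
{
	int error = pm_suspend(PM_SUSPEND_MEM);

	if (error)
		printk(KERN_ERR "suspend to RAM failed: %d\n", error);
}
#endif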
/*
 * Device power management
 */

struct device;

typedef struct pm_message {
	int event;
} pm_message_t;
/*
 * Several driver power state transitions are externally visible, affecting
 * the state of pending I/O queues and (for drivers that touch hardware)
 * interrupts, wakeups, DMA, and other hardware state.  There may also be
 * internal transitions to various low power modes, which are transparent
 * to the rest of the driver stack (such as a driver that's ON gating off
 * clocks which are not in active use).
 *
 * One transition is triggered by resume(), after a suspend() call; the
 * message is implicit:
 *
 * ON		Driver starts working again, responding to hardware events
 *		and software requests.  The hardware may have gone through
 *		a power-off reset, or it may have maintained state from the
 *		previous suspend() which the driver will rely on while
 *		resuming.  On most platforms, there are no restrictions on
 *		availability of resources like clocks during resume().
 *
 * Other transitions are triggered by messages sent using suspend().  All
 * these transitions quiesce the driver, so that I/O queues are inactive.
 * That commonly entails turning off IRQs and DMA; there may be rules
 * about how to quiesce that are specific to the bus or the device's type.
 * (For example, network drivers mark the link state.)  Other details may
 * differ according to the message:
 *
 * SUSPEND	Quiesce, enter a low power device state appropriate for
 *		the upcoming system state (such as PCI_D3hot), and enable
 *		wakeup events as appropriate.
 *
 * FREEZE	Quiesce operations so that a consistent image can be saved;
 *		but do NOT otherwise enter a low power device state, and do
 *		NOT emit system wakeup events.
 *
 * PRETHAW	Quiesce as if for FREEZE; additionally, prepare for restoring
 *		the system from a snapshot taken after an earlier FREEZE.
 *		Some drivers will need to reset their hardware state instead
 *		of preserving it, to ensure that it's never mistaken for the
 *		state which that earlier snapshot had set up.
 *
 * A minimally power-aware driver treats all messages as SUSPEND, fully
 * reinitializes its device during resume() -- whether or not it was reset
 * during the suspend/resume cycle -- and can't issue wakeup events.
 *
 * More power-aware drivers may also use low power states at runtime as
 * well as during system sleep states like PM_SUSPEND_STANDBY.  They may
 * be able to use wakeup events to exit from runtime low-power states,
 * or from system low-power states such as standby or suspend-to-RAM.
 */
#define PM_EVENT_ON		0
#define PM_EVENT_FREEZE		1
#define PM_EVENT_SUSPEND	2
#define PM_EVENT_PRETHAW	3

#define PMSG_FREEZE	((struct pm_message){ .event = PM_EVENT_FREEZE, })
#define PMSG_PRETHAW	((struct pm_message){ .event = PM_EVENT_PRETHAW, })
#define PMSG_SUSPEND	((struct pm_message){ .event = PM_EVENT_SUSPEND, })
#define PMSG_ON		((struct pm_message){ .event = PM_EVENT_ON, })
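/*
 * Illustrative sketch of a driver's suspend path distinguishing the messages
 * described above.  The mydev type and the mydev_quiesce()/mydev_reset()/
 * mydev_enter_low_power()/mydev_arm_wakeup() helpers are hypothetical; a real
 * driver implements its bus type's suspend() method with that bus's signature.
 */
#if 0
static int mydev_suspend(struct mydev *md, pm_message_t message)
{
	mydev_quiesce(md);			/* every message quiesces I/O */

	switch (message.event) {
	case PM_EVENT_SUSPEND:
		mydev_arm_wakeup(md);		/* wakeup events, if desired */
		mydev_enter_low_power(md);	/* e.g. PCI_D3hot */
		break;
	case PM_EVENT_FREEZE:
		/* stay at full power, no wakeup events: a snapshot follows */
		break;
	case PM_EVENT_PRETHAW:
		mydev_reset(md);		/* don't confuse restored state */
		break;
	}
	return 0;
}
#endif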
struct dev_pm_info {
	pm_message_t		power_state;
	unsigned		can_wakeup:1;
#ifdef CONFIG_PM
	unsigned		should_wakeup:1;
	struct list_head	entry;
#endif
};
extern int device_power_down(pm_message_t state);
extern void device_power_up(void);
extern void device_resume(void);
#ifdef CONFIG_PM
extern int device_suspend(pm_message_t state);
extern int device_prepare_suspend(pm_message_t state);
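/*
 * Rough sketch of the pairing of these calls when entering a sleep state,
 * with error handling and the platform hooks described above largely
 * omitted.  It only illustrates that device_suspend()/device_power_down()
 * are undone by device_power_up()/device_resume(); the real PM core performs
 * additional steps between them.
 */
#if 0
static int example_suspend_devices_and_enter(void)
{
	int error;

	error = device_suspend(PMSG_SUSPEND);	/* run each device's suspend() */
	if (error)
		return error;

	error = device_power_down(PMSG_SUSPEND);	/* sysdevs, IRQs disabled */
	if (!error) {
		/* ... enter the sleep state, e.g. via pm_ops->enter() ... */
		device_power_up();
	}
	device_resume();			/* run each device's resume() */
	return error;
}
#endif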
#define device_set_wakeup_enable(dev,val) \
	((dev)->power.should_wakeup = !!(val))
#define device_may_wakeup(dev) \
	(device_can_wakeup(dev) && (dev)->power.should_wakeup)
extern int dpm_runtime_suspend(struct device *, pm_message_t);
extern void dpm_runtime_resume(struct device *);
extern void __suspend_report_result(const char *function, void *fn, int ret);
#define suspend_report_result(fn, ret)					\
	do {								\
		__suspend_report_result(__FUNCTION__, fn, ret);		\
	} while (0)
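/*
 * Typical use is in code that invokes a suspend callback and wants any
 * failure attributed to the callback that returned it.  The sketch below is
 * hypothetical and assumes the struct device/struct bus_type definitions
 * from <linux/device.h>.
 */
#if 0
static int example_suspend_one_device(struct device *dev, pm_message_t state)
{
	int error = 0;

	if (dev->bus && dev->bus->suspend) {
		error = dev->bus->suspend(dev, state);
		suspend_report_result(dev->bus->suspend, error);
	}
	return error;
}
#endif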
/*
 * Platform hook to activate device wakeup capability, if that's not already
 * handled by enable_irq_wake() etc.
 * Returns zero on success, else negative errno
 */
extern int (*platform_enable_wakeup)(struct device *dev, int is_on);
static inline int call_platform_enable_wakeup(struct device *dev, int is_on)
{
	if (platform_enable_wakeup)
		return (*platform_enable_wakeup)(dev, is_on);
	return 0;
}
#else /* !CONFIG_PM */

static inline int device_suspend(pm_message_t state)
{
	return 0;
}

#define device_set_wakeup_enable(dev,val)	do { } while (0)
#define device_may_wakeup(dev)			(0)

static inline int dpm_runtime_suspend(struct device * dev, pm_message_t state)
{
	return 0;
}

static inline void dpm_runtime_resume(struct device * dev)
{
}

#define suspend_report_result(fn, ret) do { } while (0)

static inline int call_platform_enable_wakeup(struct device *dev, int is_on)
{
	return 0;
}

#endif /* CONFIG_PM */
/* Changes to device_may_wakeup take effect on the next pm state change.
 * By default, devices should wake up if they can.
 */
#define device_can_wakeup(dev) \
	((dev)->power.can_wakeup)
#define device_init_wakeup(dev,val) \
	do { \
		device_can_wakeup(dev) = !!(val); \
		device_set_wakeup_enable(dev,val); \
	} while (0)
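/*
 * Typical driver usage, sketched for a hypothetical wakeup-capable device:
 * declare the capability at probe() time with device_init_wakeup(), then
 * honour the (user-controllable) policy in suspend().  The wkdev type, its
 * wake_irq field, and the function names are hypothetical.
 */
#if 0
static int wkdev_probe(struct device *dev)
{
	/* ... hardware setup ... */
	device_init_wakeup(dev, 1);	/* can wake up, enabled by default */
	return 0;
}

static int wkdev_suspend(struct device *dev, pm_message_t message)
{
	struct wkdev *wk = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		enable_irq_wake(wk->wake_irq);	/* see enable_irq_wake() above */
	/* ... quiesce and power down ... */
	return 0;
}
#endif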
#endif /* __KERNEL__ */

#endif /* _LINUX_PM_H */