/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 * Copyright (C) 2005  Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *			- Added support for C3 on SMP
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>	/* need_resched() */
#include <linux/pm_qos_params.h>
#include <linux/clockchips.h>
#include <linux/cpuidle.h>
/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#endif

#include <asm/io.h>
#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>
#include <asm/processor.h>
#define ACPI_PROCESSOR_COMPONENT	0x01000000
#define ACPI_PROCESSOR_CLASS		"processor"
#define _COMPONENT			ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");
#define ACPI_PROCESSOR_FILE_POWER	"power"
#define US_TO_PM_TIMER_TICKS(t)		((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
#define PM_TIMER_TICK_NS		(1000000000ULL/PM_TIMER_FREQUENCY)
#ifndef CONFIG_CPU_IDLE
#define C2_OVERHEAD			4	/* 1us (3.579 ticks per us) */
#define C3_OVERHEAD			4	/* 1us (3.579 ticks per us) */
static void (*pm_idle_save) (void) __read_mostly;
#else
#define C2_OVERHEAD			1	/* 1us */
#define C3_OVERHEAD			1	/* 1us */
#endif
#define PM_TIMER_TICKS_TO_US(p)		(((p) * 1000)/(PM_TIMER_FREQUENCY/1000))
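
/*
 * Worked example (illustrative only, not from the original sources):
 * the ACPI PM timer runs at PM_TIMER_FREQUENCY = 3579545 Hz, i.e.
 * roughly 3.579 ticks per microsecond, so:
 *
 *   US_TO_PM_TIMER_TICKS(100) = (100 * 3579) / 1000 ~= 357 ticks
 *   PM_TIMER_TICKS_TO_US(357) = (357 * 1000) / 3579 ~=  99 us
 *   PM_TIMER_TICK_NS          = 1000000000 / 3579545 ~= 279 ns
 *
 * The C2/C3_OVERHEAD constants above are expressed in the same unit as
 * the sleep time they get subtracted from: PM timer ticks without
 * CONFIG_CPU_IDLE, microseconds with it.
 */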
static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
#ifdef CONFIG_CPU_IDLE
module_param(max_cstate, uint, 0000);
#else
module_param(max_cstate, uint, 0644);
#endif
static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);
#ifndef CONFIG_CPU_IDLE
/*
 * bm_history -- bit-mask with a bit per jiffy of bus-master activity
 * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms
 * 800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms
 * 100 HZ: 0x0000000F: 4 jiffies = 40ms
 * reduce history for more aggressive entry into C3
 */
static unsigned int bm_history __read_mostly =
    (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
module_param(bm_history, uint, 0644);
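
/*
 * Illustrative reading of the default mask (not from the original
 * sources): each jiffy the recorded bus-master activity is shifted
 * left one bit, so bit N means "BM activity N jiffies ago".  At
 * HZ == 250 the default is (1U << (250 / 25)) - 1 = 0x3FF, i.e. ten
 * jiffies = 40ms of history; promotion into C3 is vetoed while any
 * bit covered by a state's promotion.threshold.bm mask is set.
 */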
static int acpi_processor_set_power_policy(struct acpi_processor *pr);

#else	/* CONFIG_CPU_IDLE */
static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);
#endif
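
/*
 * Illustrative only: latency_factor scales a C-state's exit latency
 * into the target residency handed to cpuidle.  E.g. a C3 state with
 * a 57us exit latency and the default factor of 2 is only chosen when
 * the governor predicts at least 57 * 2 = 114us of idle time (see
 * state->target_residency in acpi_processor_setup_cpuidle() below).
 */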
/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(const struct dmi_system_id *id)
{
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
	       " Override with \"processor.max_cstate=%d\"\n", id->ident,
	       (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = (long)id->driver_data;

	return 0;
}
/* Actually this shouldn't be __cpuinitdata, would be better to fix the
   callers to only run once -AK */
static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET70WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET43WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET45WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET47WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET50WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET52WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET55WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET56WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET59WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET61WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET62WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET64WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET65WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET68WW")}, (void *)1},
	{ set_max_cstate, "Medion 41700", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"R01-A1J")}, (void *)1},
	{ set_max_cstate, "Clevo 5600D", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
	 (void *)2},
	{},
};
static inline u32 ticks_elapsed(u32 t1, u32 t2)
{
	if (t2 >= t1)
		return (t2 - t1);
	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
		return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
	else
		return ((0xFFFFFFFF - t1) + t2);
}
static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
{
	if (t2 >= t1)
		return PM_TIMER_TICKS_TO_US(t2 - t1);
	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
		return PM_TIMER_TICKS_TO_US(((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
	else
		return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
}
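
/*
 * Wraparound example (illustrative only): with a 24-bit PM timer,
 * t1 = 0x00FFFF00 just before rollover and t2 = 0x00000100 after it
 * give ((0x00FFFFFF - 0x00FFFF00) + 0x00000100) & 0x00FFFFFF = 0x1FF
 * = 511 ticks, i.e. PM_TIMER_TICKS_TO_US(511) ~= 142us, instead of
 * the huge bogus value a naive t2 - t1 subtraction would produce.
 */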
/*
 * Callers should disable interrupts before the call and enable
 * interrupts after return.
 */
static void acpi_safe_halt(void)
{
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we
	 * test NEED_RESCHED:
	 */
	smp_mb();
	if (!need_resched()) {
		safe_halt();
		local_irq_disable();
	}
	current_thread_info()->status |= TS_POLLING;
}
#ifndef CONFIG_CPU_IDLE

static void
acpi_processor_power_activate(struct acpi_processor *pr,
			      struct acpi_processor_cx *new)
{
	struct acpi_processor_cx *old;

	if (!pr || !new)
		return;

	old = pr->power.state;
	if (old)
		old->promotion.count = 0;
	new->demotion.count = 0;

	/* Cleanup from old state. */
	if (old) {
		switch (old->type) {
		case ACPI_STATE_C3:
			/* Disable bus master reload */
			if (new->type != ACPI_STATE_C3 && pr->flags.bm_check)
				acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
			break;
		}
	}

	/* Prepare to use new state. */
	switch (new->type) {
	case ACPI_STATE_C3:
		/* Enable bus master reload */
		if (old->type != ACPI_STATE_C3 && pr->flags.bm_check)
			acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
		break;
	}

	pr->power.state = new;
}
static atomic_t c3_cpu_count;

/* Common C-state entry for C2, C3, .. */
static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
{
	if (cstate->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cstate);
	} else {
		int unused;
		/* IO port based C-state */
		inb(cstate->address);
		/* Dummy wait op - must do something useless after P_LVL2 read
		   because chipsets cannot guarantee that STPCLK# signal
		   gets asserted in time to freeze execution properly. */
		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
}
#endif /* !CONFIG_CPU_IDLE */

#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void acpi_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cx)
{
	struct acpi_processor_power *pwr = &pr->power;
	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

	/*
	 * Check, if one of the previous states already marked the lapic
	 * unstable
	 */
	if (pwr->timer_broadcast_on_state < state)
		return;

	if (cx->type >= type)
		pr->power.timer_broadcast_on_state = state;
}
static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
{
	unsigned long reason;

	reason = pr->power.timer_broadcast_on_state < INT_MAX ?
		CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;

	clockevents_notify(reason, &pr->id);
}

/* Power(C) State timer broadcast control */
static void acpi_state_timer_broadcast(struct acpi_processor *pr,
				       struct acpi_processor_cx *cx,
				       int broadcast)
{
	int state = cx - pr->power.states;

	if (state >= pr->power.timer_broadcast_on_state) {
		unsigned long reason;

		reason = broadcast ? CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
			CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
		clockevents_notify(reason, &pr->id);
	}
}

#else

static void acpi_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cstate) { }
static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { }
static void acpi_state_timer_broadcast(struct acpi_processor *pr,
				       struct acpi_processor_cx *cx,
				       int broadcast)
{
}

#endif
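
/*
 * Illustrative call sequence (assumed, not from the original sources):
 * a CPU whose local APIC timer stops in deep C-states hands its next
 * timer event to the broadcast device around the actual sleep:
 *
 *	acpi_state_timer_broadcast(pr, cx, 1);	BROADCAST_ENTER
 *	acpi_cstate_enter(cx);			sleep in C2/C3
 *	acpi_state_timer_broadcast(pr, cx, 0);	BROADCAST_EXIT
 *
 * which matches the pairing visible in the idle entry paths below.
 */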
/*
 * Suspend / resume control
 */
static int acpi_idle_suspend;

int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
{
	acpi_idle_suspend = 1;
	return 0;
}

int acpi_processor_resume(struct acpi_device * device)
{
	acpi_idle_suspend = 0;
	return 0;
}
#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
static int tsc_halts_in_c(int state)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
		if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
			return 0;
		/*FALL THROUGH*/
	case X86_VENDOR_INTEL:
		/* Several cases known where TSC halts in C2 too */
	default:
		return state > ACPI_STATE_C1;
	}
}
#endif
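
/*
 * Decision summary (illustrative): with the logic above,
 *
 *   AMD + X86_FEATURE_CONSTANT_TSC  -> TSC never considered halted
 *   AMD without CONSTANT_TSC        -> halted for any state deeper
 *   Intel / everything else            than C1 (state > ACPI_STATE_C1)
 *
 * so C1 is always treated as TSC-safe while C2/C3 are not, unless the
 * CPU advertises a constant TSC.
 */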
#ifndef CONFIG_CPU_IDLE
static void acpi_processor_idle(void)
{
	struct acpi_processor *pr = NULL;
	struct acpi_processor_cx *cx = NULL;
	struct acpi_processor_cx *next_state = NULL;
	int sleep_ticks = 0;
	u32 t1, t2 = 0;

	/*
	 * Interrupts must be disabled during bus mastering calculations and
	 * for C2/C3 transitions.
	 */
	local_irq_disable();

	pr = __get_cpu_var(processors);
	if (!pr) {
		local_irq_enable();
		return;
	}

	/*
	 * Check whether we truly need to go idle, or should
	 * reschedule:
	 */
	if (unlikely(need_resched())) {
		local_irq_enable();
		return;
	}

	cx = pr->power.state;
	if (!cx || acpi_idle_suspend) {
		if (pm_idle_save)
			pm_idle_save(); /* enables IRQs */
		else
			acpi_safe_halt();

		if (irqs_disabled())
			local_irq_enable();
		return;
	}

	/*
	 * Check BM Activity
	 * -----------------
	 * Check for bus mastering activity (if required), record, and check
	 * for demotion.
	 */
	if (pr->flags.bm_check) {
		u32 bm_status = 0;
		unsigned long diff = jiffies - pr->power.bm_check_timestamp;

		if (diff > 31)
			diff = 31;

		pr->power.bm_activity <<= diff;

		acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
		if (bm_status) {
			pr->power.bm_activity |= 0x1;
			acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
		}
		/*
		 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
		 * the true state of bus mastering activity; forcing us to
		 * manually check the BMIDEA bit of each IDE channel.
		 */
		else if (errata.piix4.bmisx) {
			if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
			    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
				pr->power.bm_activity |= 0x1;
		}

		pr->power.bm_check_timestamp = jiffies;

		/*
		 * If bus mastering is or was active this jiffy, demote
		 * to avoid a faulty transition.  Note that the processor
		 * won't enter a low-power state during this call (to this
		 * function) but should upon the next.
		 *
		 * TBD: A better policy might be to fallback to the demotion
		 *      state (use it for this quantum only) instead of
		 *      demoting -- and rely on duration as our sole demotion
		 *      qualification.  This may, however, introduce DMA
		 *      issues (e.g. floppy DMA transfer overrun/underrun).
		 */
		if ((pr->power.bm_activity & 0x1) &&
		    cx->demotion.threshold.bm) {
			local_irq_enable();
			next_state = cx->demotion.state;
			goto end;
		}
	}
#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system. We do it here instead of doing it at _CST/P_LVL
	 * detection phase, to work cleanly with logical CPU hotplug.
	 */
	if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
	    !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		cx = &pr->power.states[ACPI_STATE_C1];
#endif
	/*
	 * Sleep:
	 * ------
	 * Invoke the current Cx state to put the processor to sleep.
	 */
	if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we
		 * test NEED_RESCHED:
		 */
		smp_mb();
		if (need_resched()) {
			current_thread_info()->status |= TS_POLLING;
			local_irq_enable();
			return;
		}
	}

	switch (cx->type) {

	case ACPI_STATE_C1:
		/*
		 * Invoke C1.
		 * Use the appropriate idle routine, the one that would
		 * be used without acpi C-states.
		 */
		if (pm_idle_save)
			pm_idle_save(); /* enables IRQs */
		else
			acpi_safe_halt();

		/*
		 * TBD: Can't get time duration while in C1, as resumes
		 *      go to an ISR rather than here.  Need to instrument
		 *      base interrupt handler.
		 *
		 * Note: the TSC better not stop in C1, sched_clock() will
		 *       skew otherwise.
		 */
		sleep_ticks = 0xFFFFFFFF;
		if (irqs_disabled())
			local_irq_enable();

		break;
	case ACPI_STATE_C2:
		/* Get start time (ticks) */
		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		/* Tell the scheduler that we are going deep-idle: */
		sched_clock_idle_sleep_event();
		/* Invoke C2 */
		acpi_state_timer_broadcast(pr, cx, 1);
		acpi_cstate_enter(cx);
		/* Get end time (ticks) */
		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
		/* TSC halts in C2, so notify users */
		if (tsc_halts_in_c(ACPI_STATE_C2))
			mark_tsc_unstable("possible TSC halt in C2");
#endif
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks = ticks_elapsed(t1, t2);

		/* Tell the scheduler how much we idled: */
		sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

		/* Re-enable interrupts */
		local_irq_enable();
		/* Do not account our idle-switching overhead: */
		sleep_ticks -= cx->latency_ticks + C2_OVERHEAD;

		current_thread_info()->status |= TS_POLLING;
		acpi_state_timer_broadcast(pr, cx, 0);
		break;
	case ACPI_STATE_C3:
		acpi_unlazy_tlb(smp_processor_id());
		/*
		 * Must be done before busmaster disable as we might
		 * need to access HPET !
		 */
		acpi_state_timer_broadcast(pr, cx, 1);
		/*
		 * disable bus master
		 * bm_check implies we need ARB_DIS
		 * !bm_check implies we need cache flush
		 * bm_control implies whether we can do ARB_DIS
		 *
		 * That leaves a case where bm_check is set and bm_control is
		 * not set. In that case we cannot do much, we enter C3
		 * without doing anything.
		 */
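		/*
		 * Illustrative decision table for the rules above (not in
		 * the original sources):
		 *
		 *   bm_check  bm_control  ->  action before C3
		 *      0          -           flush caches (WBINVD)
		 *      1          1           disable bus-master arbitration
		 *      1          0           enter C3 with no preparation
		 */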
		if (pr->flags.bm_check && pr->flags.bm_control) {
			if (atomic_inc_return(&c3_cpu_count) ==
			    num_online_cpus()) {
				/*
				 * All CPUs are trying to go to C3
				 * Disable bus master arbitration
				 */
				acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
			}
		} else if (!pr->flags.bm_check) {
			/* SMP with no shared cache... Invalidate cache */
			ACPI_FLUSH_CPU_CACHE();
		}

		/* Get start time (ticks) */
		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		/* Invoke C3 */
		/* Tell the scheduler that we are going deep-idle: */
		sched_clock_idle_sleep_event();
		acpi_cstate_enter(cx);
		/* Get end time (ticks) */
		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		if (pr->flags.bm_check && pr->flags.bm_control) {
			/* Enable bus master arbitration */
			atomic_dec(&c3_cpu_count);
			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
		}

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
		/* TSC halts in C3, so notify users */
		if (tsc_halts_in_c(ACPI_STATE_C3))
			mark_tsc_unstable("TSC halts in C3");
#endif
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks = ticks_elapsed(t1, t2);
		/* Tell the scheduler how much we idled: */
		sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

		/* Re-enable interrupts */
		local_irq_enable();
		/* Do not account our idle-switching overhead: */
		sleep_ticks -= cx->latency_ticks + C3_OVERHEAD;

		current_thread_info()->status |= TS_POLLING;
		acpi_state_timer_broadcast(pr, cx, 0);
		break;
	default:
		local_irq_enable();
		return;
	}
	cx->usage++;
	if ((cx->type != ACPI_STATE_C1) && (sleep_ticks > 0))
		cx->time += sleep_ticks;

	next_state = pr->power.state;

#ifdef CONFIG_HOTPLUG_CPU
	/* Don't do promotion/demotion */
	if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) &&
	    !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) {
		next_state = cx;
		goto end;
	}
#endif
	/*
	 * Promotion?
	 * ----------
	 * Track the number of longs (time asleep is greater than threshold)
	 * and promote when the count threshold is reached.  Note that bus
	 * mastering activity may prevent promotions.
	 * Do not promote above max_cstate.
	 */
	if (cx->promotion.state &&
	    ((cx->promotion.state - pr->power.states) <= max_cstate)) {
		if (sleep_ticks > cx->promotion.threshold.ticks &&
		    cx->promotion.state->latency <=
				pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
			cx->promotion.count++;
			cx->demotion.count = 0;
			if (cx->promotion.count >=
			    cx->promotion.threshold.count) {
				if (pr->flags.bm_check) {
					if (!(pr->power.bm_activity &
					      cx->promotion.threshold.bm)) {
						next_state = cx->promotion.state;
						goto end;
					}
				} else {
					next_state = cx->promotion.state;
					goto end;
				}
			}
		}
	}
	/*
	 * Demotion?
	 * ---------
	 * Track the number of shorts (time asleep is less than time threshold)
	 * and demote when the usage threshold is reached.
	 */
	if (cx->demotion.state) {
		if (sleep_ticks < cx->demotion.threshold.ticks) {
			cx->demotion.count++;
			cx->promotion.count = 0;
			if (cx->demotion.count >= cx->demotion.threshold.count) {
				next_state = cx->demotion.state;
				goto end;
			}
		}
	}
      end:
	/*
	 * Demote if current state exceeds max_cstate
	 * or if the latency of the current state is unacceptable
	 */
	if ((pr->power.state - pr->power.states) > max_cstate ||
	    pr->power.state->latency >
				pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
		if (cx->demotion.state)
			next_state = cx->demotion.state;
	}

	/*
	 * New Cx State?
	 * -------------
	 * If we're going to start using a new Cx state we must clean up
	 * from the previous and prepare to use the new.
	 */
	if (next_state != pr->power.state)
		acpi_processor_power_activate(pr, next_state);
}
static int acpi_processor_set_power_policy(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int state_is_set = 0;
	struct acpi_processor_cx *lower = NULL;
	struct acpi_processor_cx *higher = NULL;
	struct acpi_processor_cx *cx;

	if (!pr)
		return -EINVAL;

	/*
	 * This function sets the default Cx state policy (OS idle handler).
	 * Our scheme is to promote quickly to C2 but more conservatively
	 * to C3.  We're favoring C2 for its characteristics of low latency
	 * (quick response), good power savings, and ability to allow bus
	 * mastering activity.  Note that the Cx state policy is completely
	 * customizable and can be altered dynamically.
	 */

	/* startup state */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (!state_is_set)
			pr->power.state = cx;
		state_is_set++;
		break;
	}

	if (!state_is_set)
		return -ENODEV;

	/* demotion */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (lower) {
			cx->demotion.state = lower;
			cx->demotion.threshold.ticks = cx->latency_ticks;
			cx->demotion.threshold.count = 1;
			if (cx->type == ACPI_STATE_C3)
				cx->demotion.threshold.bm = bm_history;
		}

		lower = cx;
	}

	/* promotion */
	for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (higher) {
			cx->promotion.state = higher;
			cx->promotion.threshold.ticks = cx->latency_ticks;
			if (cx->type >= ACPI_STATE_C2)
				cx->promotion.threshold.count = 4;
			else
				cx->promotion.threshold.count = 10;
			if (higher->type == ACPI_STATE_C3)
				cx->promotion.threshold.bm = bm_history;
		}

		higher = cx;
	}

	return 0;
}

#endif /* !CONFIG_CPU_IDLE */
static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{
	if (!pr)
		return -EINVAL;

	if (!pr->pblk)
		return -ENODEV;

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.
	 */
	if ((num_online_cpus() > 1) &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		return -ENODEV;
#endif

	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address));

	return 0;
}
static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
	if (!pr->power.states[ACPI_STATE_C1].valid) {
		/* set the first C-State to C1 */
		/* all processors need to support C1 */
		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
		pr->power.states[ACPI_STATE_C1].valid = 1;
		pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;
	}
	/* the C0 state only exists as a filler in our array */
	pr->power.states[ACPI_STATE_C0].valid = 1;
	return 0;
}
static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	acpi_status status = 0;
	acpi_integer count;
	int current_count;
	int i;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *cst;

	if (nocst)
		return -ENODEV;

	current_count = 0;

	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
		return -ENODEV;
	}

	cst = buffer.pointer;

	/* There must be at least 2 elements */
	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
		printk(KERN_ERR PREFIX "not enough elements in _CST\n");
		status = -EFAULT;
		goto end;
	}

	count = cst->package.elements[0].integer.value;

	/* Validate number of power states. */
	if (count < 1 || count != cst->package.count - 1) {
		printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
		status = -EFAULT;
		goto end;
	}

	/* Tell driver that at least _CST is supported. */
	pr->flags.has_cst = 1;
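
	/*
	 * Illustrative _CST layout (hypothetical values, following the
	 * ACPI spec format; not taken from any real firmware):
	 *
	 *	Package {			   element 0: C-state count
	 *	    3,
	 *	    Package { Register, 1,   1, 1000 },   C1: type, latency(us), power(mW)
	 *	    Package { Register, 2,  57,  500 },   C2
	 *	    Package { Register, 3, 100,  250 },   C3
	 *	}
	 *
	 * The loop below walks elements 1..count and validates each field.
	 */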
	for (i = 1; i <= count; i++) {
		union acpi_object *element;
		union acpi_object *obj;
		struct acpi_power_register *reg;
		struct acpi_processor_cx cx;

		memset(&cx, 0, sizeof(cx));

		element = &(cst->package.elements[i]);
		if (element->type != ACPI_TYPE_PACKAGE)
			continue;

		if (element->package.count != 4)
			continue;

		obj = &(element->package.elements[0]);

		if (obj->type != ACPI_TYPE_BUFFER)
			continue;

		reg = (struct acpi_power_register *)obj->buffer.pointer;

		if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
		    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
			continue;

		/* There should be an easy way to extract an integer... */
		obj = &(element->package.elements[1]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.type = obj->integer.value;
		/*
		 * Some buggy BIOSes won't list C1 in _CST -
		 * Let acpi_processor_get_power_info_default() handle them later
		 */
		if (i == 1 && cx.type != ACPI_STATE_C1)
			current_count++;

		cx.address = reg->address;
		cx.index = current_count + 1;

		cx.entry_method = ACPI_CSTATE_SYSTEMIO;
		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
			if (acpi_processor_ffh_cstate_probe
					(pr->id, &cx, reg) == 0) {
				cx.entry_method = ACPI_CSTATE_FFH;
			} else if (cx.type == ACPI_STATE_C1) {
				/*
				 * C1 is a special case where FIXED_HARDWARE
				 * can be handled in non-MWAIT way as well.
				 * In that case, save this _CST entry info.
				 * Otherwise, ignore this info and continue.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			} else {
				continue;
			}
			if (cx.type == ACPI_STATE_C1 &&
			    (idle_halt || idle_nomwait)) {
				/*
				 * In most cases the C1 space_id obtained from
				 * _CST object is FIXED_HARDWARE access mode.
				 * But when the option of idle=halt is added,
				 * the entry_method type should be changed from
				 * CSTATE_FFH to CSTATE_HALT.
				 * When the option of idle=nomwait is added,
				 * the C1 entry_method type should also be
				 * CSTATE_HALT.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			}
		} else {
			snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
				 cx.address);
		}

		if (cx.type == ACPI_STATE_C1) {
			cx.valid = 1;
		}

		obj = &(element->package.elements[2]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.latency = obj->integer.value;

		obj = &(element->package.elements[3]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.power = obj->integer.value;

		current_count++;
		memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));
		/*
		 * We support total ACPI_PROCESSOR_MAX_POWER - 1
		 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
		 */
		if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
			printk(KERN_WARNING
			       "Limiting number of power states to max (%d)\n",
			       ACPI_PROCESSOR_MAX_POWER);
			printk(KERN_WARNING
			       "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
			  current_count));

	/* Validate number of power states discovered */
	if (current_count < 2)
		status = -EFAULT;

      end:
	kfree(buffer.pointer);

	return status;
}
static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
{
	if (!cx->address)
		return;

	/*
	 * C2 latency must be less than or equal to 100
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return;
	}

	/*
	 * Otherwise we've met all of our C2 requirements.
	 * Normalize the C2 latency to expedite policy
	 */
	cx->valid = 1;

#ifndef CONFIG_CPU_IDLE
	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
#else
	cx->latency_ticks = cx->latency;
#endif

	return;
}
static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
	static int bm_check_flag;

	if (!cx->address)
		return;

	/*
	 * C3 latency must be less than or equal to 1000
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return;
	}

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	else if (errata.piix4.fdma) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C3 not supported on PIIX4 with Type-F DMA\n"));
		return;
	}

	/* All the logic here assumes flags.bm_check is same across all CPUs */
	if (!bm_check_flag) {
		/* Determine whether bm_check is needed based on CPU  */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
	} else {
		pr->flags.bm_check = bm_check_flag;
	}

	if (pr->flags.bm_check) {
		if (!pr->flags.bm_control) {
			if (pr->flags.has_cst != 1) {
				/* bus mastering control is necessary */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support requires BM control\n"));
				return;
			} else {
				/* Here we enter C3 without bus mastering */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support without BM control\n"));
			}
		}
	} else {
		/*
		 * WBINVD should be set in fadt, for C3 state to be
		 * supported when bm_check is not required.
		 */
		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Cache invalidation should work properly"
					  " for C3 to be enabled on SMP systems\n"));
			return;
		}
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expedite policy.  Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy
	 */
	cx->valid = 1;

#ifndef CONFIG_CPU_IDLE
	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
#else
	cx->latency_ticks = cx->latency;
#endif

	return;
}
static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;

	pr->power.timer_broadcast_on_state = INT_MAX;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			cx->valid = 1;
			break;

		case ACPI_STATE_C2:
			acpi_processor_power_verify_c2(cx);
			if (cx->valid)
				acpi_timer_check_state(i, pr, cx);
			break;

		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			if (cx->valid)
				acpi_timer_check_state(i, pr, cx);
			break;
		}

		if (cx->valid)
			working++;
	}

	acpi_propagate_timer_broadcast(pr);

	return (working);
}
static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	unsigned int i;
	int result;

	/* NOTE: the idle thread may not be running while calling
	 * this function */

	/* Zero initialize all the C-states info. */
	memset(pr->power.states, 0, sizeof(pr->power.states));

	result = acpi_processor_get_power_info_cst(pr);
	if (result == -ENODEV)
		result = acpi_processor_get_power_info_fadt(pr);

	if (result)
		return result;

	acpi_processor_get_power_info_default(pr);

	pr->power.count = acpi_processor_power_verify(pr);

#ifndef CONFIG_CPU_IDLE
	/*
	 * Set Default Policy
	 * ------------------
	 * Now that we know which states are supported, set the default
	 * policy.  Note that this policy can be changed dynamically
	 * (e.g. encourage deeper sleeps to conserve battery life when
	 * not on AC).
	 */
	result = acpi_processor_set_power_policy(pr);
	if (result)
		return result;
#endif

	/*
	 * if one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable"
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			pr->power.count = i;
			if (pr->power.states[i].type >= ACPI_STATE_C2)
				pr->flags.power = 1;
		}
	}

	return 0;
}
static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
{
	struct acpi_processor *pr = seq->private;
	unsigned int i;

	if (!pr)
		goto end;

	seq_printf(seq, "active state:            C%zd\n"
		   "max_cstate:              C%d\n"
		   "bus master activity:     %08x\n"
		   "maximum allowed latency: %d usec\n",
		   pr->power.state ? pr->power.state - pr->power.states : 0,
		   max_cstate, (unsigned)pr->power.bm_activity,
		   pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY));

	seq_puts(seq, "states:\n");

	for (i = 1; i <= pr->power.count; i++) {
		seq_printf(seq, "   %cC%d:                  ",
			   (&pr->power.states[i] ==
			    pr->power.state ? '*' : ' '), i);

		if (!pr->power.states[i].valid) {
			seq_puts(seq, "<not supported>\n");
			continue;
		}

		switch (pr->power.states[i].type) {
		case ACPI_STATE_C1:
			seq_printf(seq, "type[C1] ");
			break;
		case ACPI_STATE_C2:
			seq_printf(seq, "type[C2] ");
			break;
		case ACPI_STATE_C3:
			seq_printf(seq, "type[C3] ");
			break;
		default:
			seq_printf(seq, "type[--] ");
			break;
		}

		if (pr->power.states[i].promotion.state)
			seq_printf(seq, "promotion[C%zd] ",
				   (pr->power.states[i].promotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "promotion[--] ");

		if (pr->power.states[i].demotion.state)
			seq_printf(seq, "demotion[C%zd] ",
				   (pr->power.states[i].demotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "demotion[--] ");

		seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
			   pr->power.states[i].latency,
			   pr->power.states[i].usage,
			   (unsigned long long)pr->power.states[i].time);
	}

      end:
	return 0;
}
static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, acpi_processor_power_seq_show,
			   PDE(inode)->data);
}

static const struct file_operations acpi_processor_power_fops = {
	.owner = THIS_MODULE,
	.open = acpi_processor_power_open_fs,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
#ifndef CONFIG_CPU_IDLE

int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int result = 0;

	if (boot_option_idle_override)
		return 0;

	if (!pr)
		return -EINVAL;

	if (nocst)
		return -ENODEV;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	/* Fall back to the default idle loop */
	pm_idle = pm_idle_save;
	synchronize_sched();	/* Relies on interrupts forcing exit from idle. */

	pr->flags.power = 0;
	result = acpi_processor_get_power_info(pr);
	if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
		pm_idle = acpi_processor_idle;

	return result;
}
#ifdef CONFIG_SMP
static void smp_callback(void *v)
{
	/* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int acpi_processor_latency_notify(struct notifier_block *b,
					 unsigned long l, void *v)
{
	smp_call_function(smp_callback, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block acpi_processor_latency_notifier = {
	.notifier_call = acpi_processor_latency_notify,
};

#endif

#else /* CONFIG_CPU_IDLE */
/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 */
static int acpi_idle_bm_check(void)
{
	u32 bm_status = 0;

	acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
	if (bm_status)
		acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	/*
	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
	 * the true state of bus mastering activity; forcing us to
	 * manually check the BMIDEA bit of each IDE channel.
	 */
	else if (errata.piix4.bmisx) {
		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
			bm_status = 1;
	}
	return bm_status;
}
/**
 * acpi_idle_update_bm_rld - updates the BM_RLD bit depending on target state
 * @pr: the processor
 * @target: the new target state
 */
static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr,
					   struct acpi_processor_cx *target)
{
	if (pr->flags.bm_rld_set && target->type != ACPI_STATE_C3) {
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
		pr->flags.bm_rld_set = 0;
	}

	if (!pr->flags.bm_rld_set && target->type == ACPI_STATE_C3) {
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
		pr->flags.bm_rld_set = 1;
	}
}
/**
 * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
 * @cx: cstate data
 *
 * Caller disables interrupt before call and enables interrupt after return.
 */
static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
	if (cx->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cx);
	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
		acpi_safe_halt();
	} else {
		int unused;
		/* IO port based C-state */
		inb(cx->address);
		/* Dummy wait op - must do something useless after P_LVL2 read
		   because chipsets cannot guarantee that STPCLK# signal
		   gets asserted in time to freeze execution properly. */
		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
}
/**
 * acpi_idle_enter_c1 - enters an ACPI C1 state-type
 * @dev: the target CPU
 * @state: the state data
 *
 * This is equivalent to the HALT instruction.
 */
static int acpi_idle_enter_c1(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	u32 t1, t2;
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);

	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	local_irq_disable();

	/* Do not access any ACPI IO ports in suspend path */
	if (acpi_idle_suspend) {
		acpi_safe_halt();
		local_irq_enable();
		return 0;
	}

	if (pr->flags.bm_check)
		acpi_idle_update_bm_rld(pr, cx);

	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

	local_irq_enable();
	cx->usage++;

	return ticks_elapsed_in_us(t1, t2);
}
/**
 * acpi_idle_enter_simple - enters an ACPI state without BM handling
 * @dev: the target CPU
 * @state: the state data
 */
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
				  struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	u32 t1, t2;
	int sleep_ticks = 0;

	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return(acpi_idle_enter_c1(dev, state));

	local_irq_disable();
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we test
	 * NEED_RESCHED:
	 */
	smp_mb();

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
		local_irq_enable();
		return 0;
	}

	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	acpi_state_timer_broadcast(pr, cx, 1);

	if (pr->flags.bm_check)
		acpi_idle_update_bm_rld(pr, cx);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
	/* TSC could halt in idle, so notify users */
	if (tsc_halts_in_c(cx->type))
		mark_tsc_unstable("TSC halts in idle");
#endif
	sleep_ticks = ticks_elapsed(t1, t2);

	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	acpi_state_timer_broadcast(pr, cx, 0);
	cx->time += sleep_ticks;
	return ticks_elapsed_in_us(t1, t2);
}
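
/*
 * Illustrative only: cpuidle expects ->enter() to return the residency
 * in microseconds.  With the PM timer figures given earlier, a sleep
 * measured as 357 ticks comes back as ticks_elapsed_in_us(t1, t2) =
 * (357 * 1000) / 3579 ~= 99us, which the governor then compares
 * against each state's target_residency on the next idle decision.
 */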
static int c3_cpu_count;
static DEFINE_SPINLOCK(c3_lock);

/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @dev: the target CPU
 * @state: the state data
 *
 * If BM is detected, the deepest non-C3 idle state is entered instead.
 */
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	u32 t1, t2;
	int sleep_ticks = 0;

	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return(acpi_idle_enter_c1(dev, state));

	if (acpi_idle_bm_check()) {
		if (dev->safe_state) {
			return dev->safe_state->enter(dev, dev->safe_state);
		} else {
			local_irq_disable();
			acpi_safe_halt();
			local_irq_enable();
			return 0;
		}
	}

	local_irq_disable();
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we test
	 * NEED_RESCHED:
	 */
	smp_mb();

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
		local_irq_enable();
		return 0;
	}

	acpi_unlazy_tlb(smp_processor_id());

	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	acpi_state_timer_broadcast(pr, cx, 1);

	acpi_idle_update_bm_rld(pr, cx);

	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * !bm_check implies we need cache flush
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is
	 * not set. In that case we cannot do much, we enter C3
	 * without doing anything.
	 */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
		spin_unlock(&c3_lock);
	} else if (!pr->flags.bm_check) {
		ACPI_FLUSH_CPU_CACHE();
	}

	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

	/* Re-enable bus master arbitration */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		spin_unlock(&c3_lock);
	}

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
	/* TSC could halt in idle, so notify users */
	if (tsc_halts_in_c(ACPI_STATE_C3))
		mark_tsc_unstable("TSC halts in idle");
#endif
	sleep_ticks = ticks_elapsed(t1, t2);
	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	acpi_state_timer_broadcast(pr, cx, 0);
	cx->time += sleep_ticks;
	return ticks_elapsed_in_us(t1, t2);
}
struct cpuidle_driver acpi_idle_driver = {
	.name =		"acpi_idle",
	.owner =	THIS_MODULE,
};
/**
 * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
{
	int i, count = CPUIDLE_DRIVER_STATE_START;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_device *dev = &pr->power.dev;

	if (!pr->flags.power_setup_done)
		return -EINVAL;

	if (pr->flags.power == 0) {
		return -EINVAL;
	}

	dev->cpu = pr->id;
	for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
		dev->states[i].name[0] = '\0';
		dev->states[i].desc[0] = '\0';
	}

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];
		state = &dev->states[count];

		if (!cx->valid)
			continue;

#ifdef CONFIG_HOTPLUG_CPU
		if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
		    !pr->flags.has_cst &&
		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
			continue;
#endif
		cpuidle_set_statedata(state, cx);

		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
		strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = cx->latency;
		state->target_residency = cx->latency * latency_factor;
		state->power_usage = cx->power;

		state->flags = 0;
		switch (cx->type) {
		case ACPI_STATE_C1:
			state->flags |= CPUIDLE_FLAG_SHALLOW;
			if (cx->entry_method == ACPI_CSTATE_FFH)
				state->flags |= CPUIDLE_FLAG_TIME_VALID;

			state->enter = acpi_idle_enter_c1;
			dev->safe_state = state;
			break;

		case ACPI_STATE_C2:
			state->flags |= CPUIDLE_FLAG_BALANCED;
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->enter = acpi_idle_enter_simple;
			dev->safe_state = state;
			break;

		case ACPI_STATE_C3:
			state->flags |= CPUIDLE_FLAG_DEEP;
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->flags |= CPUIDLE_FLAG_CHECK_BM;
			state->enter = pr->flags.bm_check ?
					acpi_idle_enter_bm :
					acpi_idle_enter_simple;
			break;
		}

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	dev->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}
int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int ret = 0;

	if (boot_option_idle_override)
		return 0;

	if (!pr)
		return -EINVAL;

	if (nocst)
		return -ENODEV;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	cpuidle_pause_and_lock();
	cpuidle_disable_device(&pr->power.dev);
	acpi_processor_get_power_info(pr);
	if (pr->flags.power) {
		acpi_processor_setup_cpuidle(pr);
		ret = cpuidle_enable_device(&pr->power.dev);
	}
	cpuidle_resume_and_unlock();

	return ret;
}

#endif /* CONFIG_CPU_IDLE */
int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
					struct acpi_device *device)
{
	acpi_status status = 0;
	static int first_run;
	struct proc_dir_entry *entry = NULL;
	unsigned int i;

	if (boot_option_idle_override)
		return 0;

	if (!first_run) {
		if (idle_halt) {
			/*
			 * When the boot option of "idle=halt" is added, halt
			 * is used for CPU IDLE.
			 * In such case C2/C3 is meaningless. So the max_cstate
			 * is set to one.
			 */
			max_cstate = 1;
		}
		dmi_check_system(processor_power_dmi_table);
		max_cstate = acpi_processor_cstate_check(max_cstate);
		if (max_cstate < ACPI_C_STATES_MAX)
			printk(KERN_NOTICE
			       "ACPI: processor limited to max C-state %d\n",
			       max_cstate);
		first_run++;
#if !defined(CONFIG_CPU_IDLE) && defined(CONFIG_SMP)
		pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY,
				&acpi_processor_latency_notifier);
#endif
	}

	if (!pr)
		return -EINVAL;

	if (acpi_gbl_FADT.cst_control && !nocst) {
		status =
		    acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Notifying BIOS of _CST ability failed"));
		}
	}

	acpi_processor_get_power_info(pr);
	pr->flags.power_setup_done = 1;

	/*
	 * Install the idle handler if processor power management is supported.
	 * Note that the previously set idle handler will be used on
	 * platforms that only support C1.
	 */
	if (pr->flags.power) {
#ifdef CONFIG_CPU_IDLE
		acpi_processor_setup_cpuidle(pr);
		if (cpuidle_register_device(&pr->power.dev))
			return -EIO;
#endif

		printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
		for (i = 1; i <= pr->power.count; i++)
			if (pr->power.states[i].valid)
				printk(" C%d[C%d]", i,
				       pr->power.states[i].type);
		printk(")\n");

#ifndef CONFIG_CPU_IDLE
		if (pr->id == 0) {
			pm_idle_save = pm_idle;
			pm_idle = acpi_processor_idle;
		}
#endif
	}

	/* 'power' [R] */
	entry = proc_create_data(ACPI_PROCESSOR_FILE_POWER,
				 S_IRUGO, acpi_device_dir(device),
				 &acpi_processor_power_fops,
				 acpi_driver_data(device));
	if (!entry)
		return -EIO;
	return 0;
}
int acpi_processor_power_exit(struct acpi_processor *pr,
			      struct acpi_device *device)
{
	if (boot_option_idle_override)
		return 0;

#ifdef CONFIG_CPU_IDLE
	cpuidle_unregister_device(&pr->power.dev);
#endif
	pr->flags.power_setup_done = 0;

	if (acpi_device_dir(device))
		remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
				  acpi_device_dir(device));

#ifndef CONFIG_CPU_IDLE
	/* Unregister the idle handler when processor #0 is removed. */
	if (pr->id == 0) {
		pm_idle = pm_idle_save;

		/*
		 * We are about to unload the current idle thread pm callback
		 * (pm_idle).  Wait for all processors to update cached/local
		 * copies of pm_idle before proceeding.
		 */
		cpu_idle_wait();
#ifdef CONFIG_SMP
		pm_qos_remove_notifier(PM_QOS_CPU_DMA_LATENCY,
				&acpi_processor_latency_notifier);
#endif
	}
#endif

	return 0;
}