2 * menu.c - the menu idle governor
4 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
6 * This code is licensed under the GPL.
9 #include <linux/kernel.h>
10 #include <linux/cpuidle.h>
11 #include <linux/pm_qos_params.h>
12 #include <linux/time.h>
13 #include <linux/ktime.h>
14 #include <linux/hrtimer.h>
15 #include <linux/tick.h>
17 #define BREAK_FUZZ 4 /* 4 us */
/* Per-CPU governor state. NOTE(review): the `struct menu_device {` header
 * and the last_state_idx member are not visible in this extract — confirm
 * against the upstream file. */
22 unsigned int expected_us; /* sleep length predicted by the tick framework (see menu_select) */
23 unsigned int predicted_us; /* our own prediction of time until the next break event */
24 unsigned int last_measured_us; /* most recent completed idle-period measurement */
25 unsigned int elapsed_us; /* cumulative idle time since the last interrupt wakeup */
/* One menu_device instance per CPU; looked up with __get_cpu_var()/per_cpu(). */
28 static DEFINE_PER_CPU(struct menu_device, menu_devices);
31 * menu_select - selects the next idle state to enter
34 static int menu_select(struct cpuidle_device *dev)
36 struct menu_device *data = &__get_cpu_var(menu_devices);
/* Current system-wide DMA latency tolerance in microseconds (pm_qos). */
37 int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
40 /* Special case when user has set very strict latency requirement */
41 if (unlikely(latency_req == 0)) {
/* Zero tolerance: stay in the shallowest state. NOTE(review): the early
 * return for this branch is not visible in this extract — confirm upstream. */
42 data->last_state_idx = 0;
46 /* determine the expected residency time */
/* ns -> us conversion of the tick framework's predicted sleep length.
 * NOTE(review): the `data->expected_us =` left-hand side of this
 * assignment is not visible in this extract. */
48 (u32) ktime_to_ns(tick_nohz_get_sleep_length()) / 1000;
50 /* find the deepest idle state that satisfies our constraints */
/* Walk states from shallow to deep; the loop body (not all of it visible
 * here) bails out at the first state whose residency requirement exceeds
 * either prediction, or whose exit latency exceeds the QoS limit. */
51 for (i = CPUIDLE_DRIVER_STATE_START + 1; i < dev->state_count; i++) {
52 struct cpuidle_state *s = &dev->states[i];
54 if (s->target_residency > data->expected_us)
56 if (s->target_residency > data->predicted_us)
58 if (s->exit_latency > latency_req)
/* The loop index overshoots by one past the last acceptable state,
 * so back up to the deepest state that passed every check. */
62 data->last_state_idx = i - 1;
67 * menu_reflect - attempts to guess what happened after entry
70 * NOTE: it's important to be fast here because this operation will add to
71 * the overall exit latency.
73 static void menu_reflect(struct cpuidle_device *dev)
75 struct menu_device *data = &__get_cpu_var(menu_devices);
/* State we actually entered on the last idle period (set by menu_select). */
76 int last_idx = data->last_state_idx;
77 unsigned int last_idle_us = cpuidle_get_last_residency(dev);
78 struct cpuidle_state *target = &dev->states[last_idx];
79 unsigned int measured_us;
82 * Ugh, this idle state doesn't support residency measurements, so we
83 * are basically lost in the dark. As a compromise, assume we slept
84 * for one full standard timer tick. However, be aware that this
85 * could potentially result in a suboptimal state transition.
87 if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID)))
88 last_idle_us = USEC_PER_SEC / HZ;
91 * measured_us and elapsed_us are the cumulative idle time, since the
92 * last time we were woken out of idle by an interrupt.
/* This comparison only fails when the unsigned addition wraps around;
 * it guards the accumulator against overflow. NOTE(review): the else
 * branch that handles the wrapped case is not visible in this extract —
 * confirm against the upstream file. */
94 if (data->elapsed_us <= data->elapsed_us + last_idle_us)
95 measured_us = data->elapsed_us + last_idle_us;
99 /* Predict time until next break event */
100 data->predicted_us = max(measured_us, data->last_measured_us);
/* We slept noticeably shorter (by more than BREAK_FUZZ us) than the
 * predicted sleep length minus the state's exit latency — presumably a
 * real break event woke us, so record the measurement and restart the
 * accumulator; otherwise keep accumulating idle time. */
102 if (last_idle_us + BREAK_FUZZ <
103 data->expected_us - target->exit_latency) {
104 data->last_measured_us = measured_us;
105 data->elapsed_us = 0;
107 data->elapsed_us = measured_us;
112 * menu_enable_device - scans a CPU's states and does setup
115 static int menu_enable_device(struct cpuidle_device *dev)
117 struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
/* Reset this CPU's governor state to all zeros on (re-)enable.
 * NOTE(review): the `return 0;` is not visible in this extract. */
119 memset(data, 0, sizeof(struct menu_device));
/* Governor descriptor registered with the cpuidle core. NOTE(review): the
 * .name and .rating initializers present upstream are not visible in this
 * extract. */
124 static struct cpuidle_governor menu_governor = {
127 .enable = menu_enable_device,
128 .select = menu_select,
129 .reflect = menu_reflect,
130 .owner = THIS_MODULE,
134 * init_menu - initializes the governor
136 static int __init init_menu(void)
/* Register the governor with the cpuidle core; propagate its result. */
138 return cpuidle_register_governor(&menu_governor);
142 * exit_menu - exits the governor
144 static void __exit exit_menu(void)
/* Unregister on module unload; mirror of init_menu(). */
146 cpuidle_unregister_governor(&menu_governor);
/* Standard kernel module metadata and entry/exit hookup. */
149 MODULE_LICENSE("GPL");
150 module_init(init_menu);
151 module_exit(exit_menu);