/*
 * drivers/cpufreq/cpufreq_ondemand.c
 *
 * Copyright (C) 2001 Russell King
 * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *          Jun Nakajima <jun.nakajima@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/ktime.h>
#include <linux/sched.h>
/*
 * dbs is used in this file as a shorthand for demand-based switching.
 * It helps to keep variable names smaller and simpler.
 */

#define DEF_FREQUENCY_DOWN_DIFFERENTIAL		(10)
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL	(3)
#define MICRO_FREQUENCY_UP_THRESHOLD		(95)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
#define MIN_FREQUENCY_UP_THRESHOLD		(11)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)
/*
 * The polling frequency of this governor depends on the capability of
 * the processor. The default polling frequency is 1000 times the transition
 * latency of the processor. The governor will work on any processor with
 * transition latency <= 10 ms, using an appropriate sampling rate.
 * For CPUs with transition latency > 10 ms (mostly drivers with
 * CPUFREQ_ETERNAL), this governor will not work.
 * All times here are in us (microseconds).
 */
#define MIN_SAMPLING_RATE_RATIO			(2)

static unsigned int min_sampling_rate;

#define LATENCY_MULTIPLIER			(1000)
#define MIN_LATENCY_MULTIPLIER			(100)
#define TRANSITION_LATENCY_LIMIT		(10 * 1000 * 1000)

static void do_dbs_timer(struct work_struct *work);
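
/*
 * Sampling types: a DBS_NORMAL_SAMPLE evaluates the load and picks a target
 * frequency. When powersave_bias is non-zero, the sampling window is split in
 * two, and a DBS_SUB_SAMPLE drops to the precomputed freq_lo for the rest of
 * the window, so the average frequency approximates the biased target.
 */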
enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};

struct cpu_dbs_info_s {
	cputime64_t prev_cpu_idle;
	cputime64_t prev_cpu_wall;
	cputime64_t prev_cpu_nice;
	struct cpufreq_policy *cur_policy;
	struct delayed_work work;
	struct cpufreq_frequency_table *freq_table;
	unsigned int freq_lo;
	unsigned int freq_lo_jiffies;
	unsigned int freq_hi_jiffies;
	int cpu;
	unsigned int enable:1,
		     sample_type:1;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
static unsigned int dbs_enable;	/* number of CPUs using this policy */

/*
 * DEADLOCK ALERT! There is an ordering requirement between the cpu_hotplug
 * lock and dbs_mutex. The cpu_hotplug lock should always be held before
 * dbs_mutex. If any function that can potentially take the cpu_hotplug lock
 * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
 * the cpu_hotplug lock should be taken before that. Note that the cpu_hotplug
 * lock is recursive for the same process. -Venki
 * DEADLOCK ALERT! (2): do_dbs_timer() must not take dbs_mutex, because it
 * would deadlock with cancel_delayed_work_sync(), which is needed for
 * raceless workqueue teardown.
 */
static DEFINE_MUTEX(dbs_mutex);

static struct workqueue_struct *kondemand_wq;
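
/*
 * Tunable units, as used by the code below: sampling_rate is in us,
 * up_threshold and down_differential are in percent of CPU busy time,
 * ignore_nice is a boolean, and powersave_bias is in units of 0.1%
 * (note the division by 1000 in powersave_bias_target()).
 */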
static struct dbs_tuners {
	unsigned int sampling_rate;
	unsigned int up_threshold;
	unsigned int down_differential;
	unsigned int ignore_nice;
	unsigned int powersave_bias;
} dbs_tuners_ins = {
	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
	.down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
	.ignore_nice = 0,
	.powersave_bias = 0,
};
static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
						  cputime64_t *wall)
{
	cputime64_t idle_time;
	cputime64_t cur_wall_time;
	cputime64_t busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
	busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
			kstat_cpu(cpu).cpustat.system);

	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);

	idle_time = cputime64_sub(cur_wall_time, busy_time);

	if (wall)
		*wall = cur_wall_time;

	return idle_time;
}

static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, wall);
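
	/*
	 * get_cpu_idle_time_us() returns -1ULL when microsecond-resolution
	 * (NO_HZ) idle accounting is unavailable; fall back to the coarser
	 * jiffy-based bookkeeping above in that case.
	 */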
	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);

	return idle_time;
}
/*
 * Find the right frequency to be set now with powersave_bias on.
 * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
 * freq_lo, and freq_lo_jiffies in the per-cpu area for averaging freqs.
 */
static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
					  unsigned int freq_next,
					  unsigned int relation)
{
	unsigned int freq_req, freq_reduc, freq_avg;
	unsigned int freq_hi, freq_lo;
	unsigned int index = 0;
	unsigned int jiffies_total, jiffies_hi, jiffies_lo;
	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, policy->cpu);

	if (!dbs_info->freq_table) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_next;
	}

	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
			relation, &index);
	freq_req = dbs_info->freq_table[index].frequency;
	freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000;
	freq_avg = freq_req - freq_reduc;

	/* Find freq bounds for freq_avg in freq_table */
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_H, &index);
	freq_lo = dbs_info->freq_table[index].frequency;
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_L, &index);
	freq_hi = dbs_info->freq_table[index].frequency;

	/* Find out how long we have to be in hi and lo freqs */
	if (freq_hi == freq_lo) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_lo;
	}
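	/*
	 * Split the sampling window so that the time-weighted average of
	 * freq_hi and freq_lo approximates freq_avg:
	 *   jiffies_hi / jiffies_total = (freq_avg - freq_lo) / (freq_hi - freq_lo)
	 * E.g. (hypothetical numbers): powersave_bias = 100 (10%) and
	 * freq_req = 2000000 kHz give freq_avg = 1800000 kHz; between table
	 * entries 1600000 and 2000000 kHz the window is split 50/50.
	 */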
	jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
	jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
	jiffies_hi += ((freq_hi - freq_lo) / 2);
	jiffies_hi /= (freq_hi - freq_lo);
	jiffies_lo = jiffies_total - jiffies_hi;
	dbs_info->freq_lo = freq_lo;
	dbs_info->freq_lo_jiffies = jiffies_lo;
	dbs_info->freq_hi_jiffies = jiffies_hi;
	return freq_hi;
}

static void ondemand_powersave_bias_init(void)
{
	int i;
	for_each_online_cpu(i) {
		struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, i);
		dbs_info->freq_table = cpufreq_frequency_get_table(i);
		dbs_info->freq_lo = 0;
	}
}

/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
{
	static int print_once;

	if (!print_once) {
		printk(KERN_INFO "CPUFREQ: ondemand sampling_rate_max "
		       "sysfs file is deprecated - used by: %s\n",
		       current->comm);
		print_once = 1;
	}
	return sprintf(buf, "%u\n", -1U);
}

static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
{
	static int print_once;

	if (!print_once) {
		printk(KERN_INFO "CPUFREQ: ondemand sampling_rate_min "
		       "sysfs file is deprecated - used by: %s\n",
		       current->comm);
		print_once = 1;
	}
	return sprintf(buf, "%u\n", min_sampling_rate);
}

#define define_one_ro(_name)		\
static struct freq_attr _name =		\
__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(sampling_rate_max);
define_one_ro(sampling_rate_min);

/* cpufreq_ondemand Governor Tunables */
#define show_one(file_name, object)					\
static ssize_t show_##file_name						\
(struct cpufreq_policy *unused, char *buf)				\
{									\
	return sprintf(buf, "%u\n", dbs_tuners_ins.object);		\
}
show_one(sampling_rate, sampling_rate);
show_one(up_threshold, up_threshold);
show_one(ignore_nice_load, ignore_nice);
show_one(powersave_bias, powersave_bias);

static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	mutex_lock(&dbs_mutex);
	if (ret != 1) {
		mutex_unlock(&dbs_mutex);
		return -EINVAL;
	}
	dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_up_threshold(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	mutex_lock(&dbs_mutex);
	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD) {
		mutex_unlock(&dbs_mutex);
		return -EINVAL;
	}

	dbs_tuners_ins.up_threshold = input;
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	mutex_lock(&dbs_mutex);
	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
		mutex_unlock(&dbs_mutex);
		return count;
	}
	dbs_tuners_ins.ignore_nice = input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(cpu_dbs_info, j);
		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
						&dbs_info->prev_cpu_wall);
		if (dbs_tuners_ins.ignore_nice)
			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
	}
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_powersave_bias(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	if (input > 1000)
		input = 1000;

	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.powersave_bias = input;
	ondemand_powersave_bias_init();
	mutex_unlock(&dbs_mutex);

	return count;
}

#define define_one_rw(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_rw(sampling_rate);
define_one_rw(up_threshold);
define_one_rw(ignore_nice_load);
define_one_rw(powersave_bias);
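
/*
 * These attributes are exported under each policy's kobject; with the group
 * name below they show up as /sys/devices/system/cpu/cpuN/cpufreq/ondemand/
 * (path given for illustration; it depends on the cpufreq sysfs layout).
 */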
static struct attribute *dbs_attributes[] = {
	&sampling_rate_max.attr,
	&sampling_rate_min.attr,
	&sampling_rate.attr,
	&up_threshold.attr,
	&ignore_nice_load.attr,
	&powersave_bias.attr,
	NULL
};

static struct attribute_group dbs_attr_group = {
	.attrs = dbs_attributes,
	.name = "ondemand",
};

/************************** sysfs end ************************/

static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
	unsigned int max_load_freq;

	struct cpufreq_policy *policy;
	unsigned int j;

	if (!this_dbs_info->enable)
		return;

	this_dbs_info->freq_lo = 0;
	policy = this_dbs_info->cur_policy;

	/*
	 * Every sampling_rate we check whether the current idle time is less
	 * than 20% (default). If it is, we try to increase the frequency.
	 * Every sampling_rate we also look for the lowest frequency which can
	 * sustain the load while keeping idle time over 30%. If such a
	 * frequency exists, we try to decrease to this frequency.
	 *
	 * Any frequency increase takes it to the maximum frequency.
	 * Frequency reduction happens at minimum steps of 5% (default)
	 * of the current frequency.
	 */

	/* Get absolute load, in terms of frequency */
	max_load_freq = 0;

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info_s *j_dbs_info;
		cputime64_t cur_wall_time, cur_idle_time;
		unsigned int idle_time, wall_time;
		unsigned int load, load_freq;
		int freq_avg;

		j_dbs_info = &per_cpu(cpu_dbs_info, j);

		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);

		wall_time = (unsigned int) cputime64_sub(cur_wall_time,
				j_dbs_info->prev_cpu_wall);
		j_dbs_info->prev_cpu_wall = cur_wall_time;

		idle_time = (unsigned int) cputime64_sub(cur_idle_time,
				j_dbs_info->prev_cpu_idle);
		j_dbs_info->prev_cpu_idle = cur_idle_time;

		if (dbs_tuners_ins.ignore_nice) {
			cputime64_t cur_nice;
			unsigned long cur_nice_jiffies;

			cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
					j_dbs_info->prev_cpu_nice);
			/*
			 * Assumption: nice time between sampling periods will
			 * be less than 2^32 jiffies on 32-bit systems
			 */
			cur_nice_jiffies = (unsigned long)
					cputime64_to_jiffies64(cur_nice);

			j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
			idle_time += jiffies_to_usecs(cur_nice_jiffies);
		}

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		load = 100 * (wall_time - idle_time) / wall_time;

		freq_avg = __cpufreq_driver_getavg(policy, j);
		if (freq_avg <= 0)
			freq_avg = policy->cur;

		load_freq = load * freq_avg;
		if (load_freq > max_load_freq)
			max_load_freq = load_freq;
	}
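
	/*
	 * Ramp up when load * freq exceeds up_threshold percent of the
	 * current frequency. E.g. (hypothetical numbers): at 1600000 kHz
	 * with 90% load, 90 * 1600000 > 80 * 1600000, so the governor jumps
	 * straight to policy->max.
	 */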
	/* Check for frequency increase */
	if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
		/* if we are already at full speed then break out early */
		if (!dbs_tuners_ins.powersave_bias) {
			if (policy->cur == policy->max)
				return;

			__cpufreq_driver_target(policy, policy->max,
				CPUFREQ_RELATION_H);
		} else {
			int freq = powersave_bias_target(policy, policy->max,
					CPUFREQ_RELATION_H);
			__cpufreq_driver_target(policy, freq,
				CPUFREQ_RELATION_L);
		}
		return;
	}

	/* Check for frequency decrease */
	/* if we cannot reduce the frequency anymore, break out early */
	if (policy->cur == policy->min)
		return;

	/*
	 * The optimal frequency is the lowest frequency that can support the
	 * current CPU usage without triggering the up policy. To be safe, we
	 * aim 10 points under the threshold.
	 */
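	/*
	 * E.g. (hypothetical numbers): 30% load at 1600000 kHz gives
	 * max_load_freq = 48000000; with up_threshold = 80 and
	 * down_differential = 10, freq_next = 48000000 / 70 ~= 685714 kHz,
	 * and CPUFREQ_RELATION_L picks the nearest supported frequency at
	 * or above that.
	 */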
	if (max_load_freq <
	    (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) *
	    policy->cur) {
		unsigned int freq_next;
		freq_next = max_load_freq /
				(dbs_tuners_ins.up_threshold -
				 dbs_tuners_ins.down_differential);

		if (!dbs_tuners_ins.powersave_bias) {
			__cpufreq_driver_target(policy, freq_next,
					CPUFREQ_RELATION_L);
		} else {
			int freq = powersave_bias_target(policy, freq_next,
					CPUFREQ_RELATION_L);
			__cpufreq_driver_target(policy, freq,
					CPUFREQ_RELATION_L);
		}
	}
}
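
/*
 * Timer callback. A NORMAL sample runs dbs_check_cpu(); if powersave_bias
 * split the sampling window, the callback reschedules itself after
 * freq_hi_jiffies as a SUB sample, which then drops to freq_lo for the
 * remainder of the window.
 */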
static void do_dbs_timer(struct work_struct *work)
{
	struct cpu_dbs_info_s *dbs_info =
		container_of(work, struct cpu_dbs_info_s, work.work);
	unsigned int cpu = dbs_info->cpu;
	int sample_type = dbs_info->sample_type;

	/* We want all CPUs to do sampling nearly on the same jiffy */
	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

	delay -= jiffies % delay;

	if (lock_policy_rwsem_write(cpu) < 0)
		return;

	if (!dbs_info->enable) {
		unlock_policy_rwsem_write(cpu);
		return;
	}

	/* Common NORMAL_SAMPLE setup */
	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
	if (!dbs_tuners_ins.powersave_bias ||
	    sample_type == DBS_NORMAL_SAMPLE) {
		dbs_check_cpu(dbs_info);
		if (dbs_info->freq_lo) {
			/* Setup timer for SUB_SAMPLE */
			dbs_info->sample_type = DBS_SUB_SAMPLE;
			delay = dbs_info->freq_hi_jiffies;
		}
	} else {
		__cpufreq_driver_target(dbs_info->cur_policy,
			dbs_info->freq_lo, CPUFREQ_RELATION_H);
	}
	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
	unlock_policy_rwsem_write(cpu);
}

static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
{
	/* We want all CPUs to do sampling nearly on the same jiffy */
	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
	delay -= jiffies % delay;

	dbs_info->enable = 1;
	ondemand_powersave_bias_init();
	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
	queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work,
		delay);
}

static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
{
	dbs_info->enable = 0;
	cancel_delayed_work_sync(&dbs_info->work);
}
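
/*
 * Governor event callback: GOV_START wires the policy into the per-cpu
 * state and starts the sampling timer, GOV_STOP tears it down, and
 * GOV_LIMITS clamps the current frequency into the new [min, max] range.
 */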
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				unsigned int event)
{
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info_s *this_dbs_info;
	unsigned int j;
	int rc;

	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);

	switch (event) {
	case CPUFREQ_GOV_START:
		if ((!cpu_online(cpu)) || (!policy->cur))
			return -EINVAL;

		if (this_dbs_info->enable) /* Already enabled */
			break;

		mutex_lock(&dbs_mutex);
		dbs_enable++;

		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
		if (rc) {
			dbs_enable--;
			mutex_unlock(&dbs_mutex);
			return rc;
		}

		for_each_cpu(j, policy->cpus) {
			struct cpu_dbs_info_s *j_dbs_info;
			j_dbs_info = &per_cpu(cpu_dbs_info, j);
			j_dbs_info->cur_policy = policy;

			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
						&j_dbs_info->prev_cpu_wall);
			if (dbs_tuners_ins.ignore_nice) {
				j_dbs_info->prev_cpu_nice =
						kstat_cpu(j).cpustat.nice;
			}
		}
		this_dbs_info->cpu = cpu;
		/*
		 * Start the timer-scheduled work when this governor is used
		 * for the first time
		 */
		if (dbs_enable == 1) {
			unsigned int latency;
			/* policy latency is in nS. Convert it to uS first */
			latency = policy->cpuinfo.transition_latency / 1000;
			if (latency == 0)
				latency = 1;
			/* Bring kernel and HW constraints together */
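			/*
			 * E.g. (hypothetical numbers): a driver reporting a
			 * 100000 nS transition latency gives latency = 100 uS,
			 * so sampling_rate defaults to 100 * 1000 = 100000 uS
			 * (100 ms), and user-set rates are floored at
			 * max(min_sampling_rate, 100 * 100 = 10000 uS).
			 */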
			min_sampling_rate = max(min_sampling_rate,
					MIN_LATENCY_MULTIPLIER * latency);
			dbs_tuners_ins.sampling_rate =
				max(min_sampling_rate,
				    latency * LATENCY_MULTIPLIER);
		}
		dbs_timer_init(this_dbs_info);

		mutex_unlock(&dbs_mutex);
		break;

	case CPUFREQ_GOV_STOP:
		mutex_lock(&dbs_mutex);
		dbs_timer_exit(this_dbs_info);
		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
		dbs_enable--;
		mutex_unlock(&dbs_mutex);

		break;

	case CPUFREQ_GOV_LIMITS:
		mutex_lock(&dbs_mutex);
		if (policy->max < this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(this_dbs_info->cur_policy,
				policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(this_dbs_info->cur_policy,
				policy->min, CPUFREQ_RELATION_L);
		mutex_unlock(&dbs_mutex);
		break;
	}
	return 0;
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
static
#endif
struct cpufreq_governor cpufreq_gov_ondemand = {
	.name			= "ondemand",
	.governor		= cpufreq_governor_dbs,
	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
	.owner			= THIS_MODULE,
};

static int __init cpufreq_gov_dbs_init(void)
{
	int err;
	cputime64_t wall;
	u64 idle_time;
	int cpu = get_cpu();

	idle_time = get_cpu_idle_time_us(cpu, &wall);
	put_cpu();
	if (idle_time != -1ULL) {
		/* Idle micro accounting is supported. Use finer thresholds */
		dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
		dbs_tuners_ins.down_differential =
					MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
		/*
		 * In the no_hz/micro accounting case we set the minimum
		 * sampling rate not depending on HZ, but fixed (very low).
		 * The deferrable timer might skip some samples if
		 * idle/sleeping, as needed.
		 */
		min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
	} else {
		/* For correct statistics, we need 10 ticks for each measure */
		min_sampling_rate =
			MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
	}

	kondemand_wq = create_workqueue("kondemand");
	if (!kondemand_wq) {
		printk(KERN_ERR "Creation of kondemand failed\n");
		return -EFAULT;
	}
	err = cpufreq_register_governor(&cpufreq_gov_ondemand);
	if (err)
		destroy_workqueue(kondemand_wq);

	return err;
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_ondemand);
	destroy_workqueue(kondemand_wq);
}

MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
	"Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);