/*
 *  drivers/cpufreq/cpufreq_ondemand.c
 *
 *  Copyright (C)  2001 Russell King
 *            (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                      Jun Nakajima <jun.nakajima@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/ktime.h>
#include <linux/sched.h>

/*
 * dbs is used in this file as a shorthand for demand-based switching.
 * It helps to keep variable names smaller and simpler.
 */

#define DEF_FREQUENCY_DOWN_DIFFERENTIAL         (10)
#define DEF_FREQUENCY_UP_THRESHOLD              (80)
#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL       (3)
#define MICRO_FREQUENCY_UP_THRESHOLD            (95)
#define MIN_FREQUENCY_UP_THRESHOLD              (11)
#define MAX_FREQUENCY_UP_THRESHOLD              (100)

/*
 * The polling frequency of this governor depends on the capability of
 * the processor. The default polling frequency is 1000 times the transition
 * latency of the processor. The governor will work on any processor with
 * a transition latency <= 10 ms, using an appropriate sampling rate.
 * For CPUs with a transition latency > 10 ms (mostly drivers with
 * CPUFREQ_ETERNAL), this governor will not work.
 * All times here are in us.
 */
static unsigned int def_sampling_rate;
#define MIN_SAMPLING_RATE_RATIO                 (2)
/* for correct statistics, we need at least 10 ticks between each measure */
#define MIN_STAT_SAMPLING_RATE                  \
                        (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
#define MIN_SAMPLING_RATE                       \
                        (def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
/* MIN_SAMPLING_RATE above will vanish along with its sysfs file soon.
 * Define the minimal settable sampling rate as the greater of:
 *   - "HW transition latency" * 100 (same as the default sampling rate / 10)
 *   - MIN_STAT_SAMPLING_RATE
 * so that userspace cannot shoot itself in the foot.
 */
static unsigned int minimum_sampling_rate(void)
{
        return max(def_sampling_rate / 10, MIN_STAT_SAMPLING_RATE);
}

/* This will also vanish soon, along with sampling_rate_max */
#define MAX_SAMPLING_RATE                       (500 * def_sampling_rate)
#define LATENCY_MULTIPLIER                      (1000)
#define TRANSITION_LATENCY_LIMIT                (10 * 1000 * 1000)
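/*
 * Illustrative example (assumed numbers): a CPU reporting a 100 us transition
 * latency gets def_sampling_rate = 100 * LATENCY_MULTIPLIER = 100000 us
 * (100 ms), unless MIN_STAT_SAMPLING_RATE is larger. See CPUFREQ_GOV_START
 * below, where def_sampling_rate is actually computed.
 */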

static void do_dbs_timer(struct work_struct *work);

/* Sampling types */
enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
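/*
 * A DBS_NORMAL_SAMPLE re-evaluates load in dbs_check_cpu(). When
 * powersave_bias is set, a DBS_SUB_SAMPLE follows it and drops to freq_lo
 * for the remainder of the sampling period (see do_dbs_timer() and
 * powersave_bias_target() below).
 */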

struct cpu_dbs_info_s {
        cputime64_t prev_cpu_idle;
        cputime64_t prev_cpu_wall;
        cputime64_t prev_cpu_nice;
        struct cpufreq_policy *cur_policy;
        struct delayed_work work;
        struct cpufreq_frequency_table *freq_table;
        unsigned int freq_lo;
        unsigned int freq_lo_jiffies;
        unsigned int freq_hi_jiffies;
        int cpu;
        unsigned int enable:1,
                sample_type:1;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);

static unsigned int dbs_enable; /* number of CPUs using this policy */

/*
 * DEADLOCK ALERT! There is an ordering requirement between the cpu_hotplug
 * lock and dbs_mutex. The cpu_hotplug lock should always be held before
 * dbs_mutex. If any function that can potentially take the cpu_hotplug lock
 * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
 * the cpu_hotplug lock should be taken before that. Note that the cpu_hotplug
 * lock is recursive for the same process. -Venki
 */
static DEFINE_MUTEX(dbs_mutex);

static struct workqueue_struct  *kondemand_wq;

static struct dbs_tuners {
        unsigned int sampling_rate;
        unsigned int up_threshold;
        unsigned int down_differential;
        unsigned int ignore_nice;
        unsigned int powersave_bias;
} dbs_tuners_ins = {
        .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
        .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
        .ignore_nice = 0,
        .powersave_bias = 0,
};

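/*
 * Idle time accounting: get_cpu_idle_time_jiffy() derives idle time from the
 * jiffy-based cpustat counters (wall time minus busy time), while
 * get_cpu_idle_time() prefers the finer-grained NO_HZ idle counter from
 * get_cpu_idle_time_us() and falls back to the jiffy-based value when that
 * counter is unavailable (it returns -1ULL).
 */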
static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
                                                        cputime64_t *wall)
{
        cputime64_t idle_time;
        cputime64_t cur_wall_time;
        cputime64_t busy_time;

        cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
        busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
                        kstat_cpu(cpu).cpustat.system);

        busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
        busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
        busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
        busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);

        idle_time = cputime64_sub(cur_wall_time, busy_time);
        if (wall)
                *wall = cur_wall_time;

        return idle_time;
}

static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
{
        u64 idle_time = get_cpu_idle_time_us(cpu, wall);

        if (idle_time == -1ULL)
                return get_cpu_idle_time_jiffy(cpu, wall);

        return idle_time;
}


/*
 * Find right freq to be set now with powersave_bias on.
 * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
 * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
 */
static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
                                          unsigned int freq_next,
                                          unsigned int relation)
{
        unsigned int freq_req, freq_reduc, freq_avg;
        unsigned int freq_hi, freq_lo;
        unsigned int index = 0;
        unsigned int jiffies_total, jiffies_hi, jiffies_lo;
        struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, policy->cpu);

        if (!dbs_info->freq_table) {
                dbs_info->freq_lo = 0;
                dbs_info->freq_lo_jiffies = 0;
                return freq_next;
        }

        cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
                        relation, &index);
        freq_req = dbs_info->freq_table[index].frequency;
        freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000;
        freq_avg = freq_req - freq_reduc;

        /* Find freq bounds for freq_avg in freq_table */
        index = 0;
        cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
                        CPUFREQ_RELATION_H, &index);
        freq_lo = dbs_info->freq_table[index].frequency;
        index = 0;
        cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
                        CPUFREQ_RELATION_L, &index);
        freq_hi = dbs_info->freq_table[index].frequency;

        /* Find out how long we have to be in hi and lo freqs */
        if (freq_hi == freq_lo) {
                dbs_info->freq_lo = 0;
                dbs_info->freq_lo_jiffies = 0;
                return freq_lo;
        }
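        /*
         * Illustrative example (assumed numbers): with freq_lo = 1600000 kHz,
         * freq_hi = 2000000 kHz and freq_avg = 1900000 kHz, jiffies_hi below
         * works out to (1900000 - 1600000) / (2000000 - 1600000) = 3/4 of the
         * sampling interval, leaving jiffies_lo the remaining 1/4.
         */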
        jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
        jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
        jiffies_hi += ((freq_hi - freq_lo) / 2);
        jiffies_hi /= (freq_hi - freq_lo);
        jiffies_lo = jiffies_total - jiffies_hi;
        dbs_info->freq_lo = freq_lo;
        dbs_info->freq_lo_jiffies = jiffies_lo;
        dbs_info->freq_hi_jiffies = jiffies_hi;
        return freq_hi;
}

static void ondemand_powersave_bias_init(void)
{
        int i;
        for_each_online_cpu(i) {
                struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, i);
                dbs_info->freq_table = cpufreq_frequency_get_table(i);
                dbs_info->freq_lo = 0;
        }
}

/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
{
        static int print_once;

        if (!print_once) {
                printk(KERN_INFO "CPUFREQ: ondemand sampling_rate_max "
                       "sysfs file is deprecated - used by: %s\n",
                       current->comm);
                print_once = 1;
        }
        return sprintf(buf, "%u\n", MAX_SAMPLING_RATE);
}

static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
{
        static int print_once;

        if (!print_once) {
                printk(KERN_INFO "CPUFREQ: ondemand sampling_rate_min "
                       "sysfs file is deprecated - used by: %s\n",
                       current->comm);
                print_once = 1;
        }
        return sprintf(buf, "%u\n", MIN_SAMPLING_RATE);
}

#define define_one_ro(_name)            \
static struct freq_attr _name =         \
__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(sampling_rate_max);
define_one_ro(sampling_rate_min);

/* cpufreq_ondemand Governor Tunables */
#define show_one(file_name, object)                                     \
static ssize_t show_##file_name                                         \
(struct cpufreq_policy *unused, char *buf)                              \
{                                                                       \
        return sprintf(buf, "%u\n", dbs_tuners_ins.object);             \
}
show_one(sampling_rate, sampling_rate);
show_one(up_threshold, up_threshold);
show_one(ignore_nice_load, ignore_nice);
show_one(powersave_bias, powersave_bias);

static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
                const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        mutex_lock(&dbs_mutex);
        if (ret != 1) {
                mutex_unlock(&dbs_mutex);
                return -EINVAL;
        }
        dbs_tuners_ins.sampling_rate = max(input, minimum_sampling_rate());
        mutex_unlock(&dbs_mutex);

        return count;
}

static ssize_t store_up_threshold(struct cpufreq_policy *unused,
                const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        mutex_lock(&dbs_mutex);
        if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
                        input < MIN_FREQUENCY_UP_THRESHOLD) {
                mutex_unlock(&dbs_mutex);
                return -EINVAL;
        }

        dbs_tuners_ins.up_threshold = input;
        mutex_unlock(&dbs_mutex);

        return count;
}

static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
                const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        unsigned int j;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        if (input > 1)
                input = 1;

        mutex_lock(&dbs_mutex);
        if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
                mutex_unlock(&dbs_mutex);
                return count;
        }
        dbs_tuners_ins.ignore_nice = input;

        /* we need to re-evaluate prev_cpu_idle */
        for_each_online_cpu(j) {
                struct cpu_dbs_info_s *dbs_info;
                dbs_info = &per_cpu(cpu_dbs_info, j);
                dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
                                                &dbs_info->prev_cpu_wall);
                if (dbs_tuners_ins.ignore_nice)
                        dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
        }
        mutex_unlock(&dbs_mutex);

        return count;
}

static ssize_t store_powersave_bias(struct cpufreq_policy *unused,
                const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        if (ret != 1)
                return -EINVAL;

        if (input > 1000)
                input = 1000;

        mutex_lock(&dbs_mutex);
        dbs_tuners_ins.powersave_bias = input;
        ondemand_powersave_bias_init();
        mutex_unlock(&dbs_mutex);

        return count;
}

#define define_one_rw(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_rw(sampling_rate);
define_one_rw(up_threshold);
define_one_rw(ignore_nice_load);
define_one_rw(powersave_bias);

static struct attribute *dbs_attributes[] = {
        &sampling_rate_max.attr,
        &sampling_rate_min.attr,
        &sampling_rate.attr,
        &up_threshold.attr,
        &ignore_nice_load.attr,
        &powersave_bias.attr,
        NULL
};

static struct attribute_group dbs_attr_group = {
        .attrs = dbs_attributes,
        .name = "ondemand",
};

/************************** sysfs end ************************/

static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
        unsigned int max_load_freq;

        struct cpufreq_policy *policy;
        unsigned int j;

        if (!this_dbs_info->enable)
                return;

        this_dbs_info->freq_lo = 0;
        policy = this_dbs_info->cur_policy;

        /*
         * Every sampling_rate, we check whether the current idle time is less
         * than 20% (default); if it is, we try to increase the frequency.
         * Every sampling_rate, we also look for the lowest frequency which
         * can sustain the load while keeping idle time over 30%. If such a
         * frequency exists, we try to decrease to this frequency.
         *
         * Any frequency increase takes it to the maximum frequency.
         * Frequency reduction happens in minimum steps of
         * 5% (default) of the current frequency.
         */

        /* Get absolute load - in terms of freq */
        max_load_freq = 0;

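        /*
         * Note: per-CPU load below is weighted by the average frequency the
         * CPU actually ran at during the last sample (falling back to
         * policy->cur), so max_load_freq expresses demand in frequency terms
         * and can be compared directly against thresholds scaled by
         * policy->cur further down.
         */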
        for_each_cpu(j, policy->cpus) {
                struct cpu_dbs_info_s *j_dbs_info;
                cputime64_t cur_wall_time, cur_idle_time;
                unsigned int idle_time, wall_time;
                unsigned int load, load_freq;
                int freq_avg;

                j_dbs_info = &per_cpu(cpu_dbs_info, j);

                cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);

                wall_time = (unsigned int) cputime64_sub(cur_wall_time,
                                j_dbs_info->prev_cpu_wall);
                j_dbs_info->prev_cpu_wall = cur_wall_time;

                idle_time = (unsigned int) cputime64_sub(cur_idle_time,
                                j_dbs_info->prev_cpu_idle);
                j_dbs_info->prev_cpu_idle = cur_idle_time;

                if (dbs_tuners_ins.ignore_nice) {
                        cputime64_t cur_nice;
                        unsigned long cur_nice_jiffies;

                        cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
                                         j_dbs_info->prev_cpu_nice);
                        /*
                         * Assumption: nice time between sampling periods will
                         * be less than 2^32 jiffies on 32-bit systems
                         */
                        cur_nice_jiffies = (unsigned long)
                                        cputime64_to_jiffies64(cur_nice);

                        j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
                        idle_time += jiffies_to_usecs(cur_nice_jiffies);
                }

                if (unlikely(!wall_time || wall_time < idle_time))
                        continue;

                load = 100 * (wall_time - idle_time) / wall_time;

                freq_avg = __cpufreq_driver_getavg(policy, j);
                if (freq_avg <= 0)
                        freq_avg = policy->cur;

                load_freq = load * freq_avg;
                if (load_freq > max_load_freq)
                        max_load_freq = load_freq;
        }

        /* Check for frequency increase */
        if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
                /* if we are already at full speed then break out early */
                if (!dbs_tuners_ins.powersave_bias) {
                        if (policy->cur == policy->max)
                                return;

                        __cpufreq_driver_target(policy, policy->max,
                                CPUFREQ_RELATION_H);
                } else {
                        int freq = powersave_bias_target(policy, policy->max,
                                        CPUFREQ_RELATION_H);
                        __cpufreq_driver_target(policy, freq,
                                CPUFREQ_RELATION_L);
                }
                return;
        }

        /* Check for frequency decrease */
        /* if we cannot reduce the frequency anymore, break out early */
        if (policy->cur == policy->min)
                return;

        /*
         * The optimal frequency is the lowest frequency that can support
         * the current CPU usage without triggering the up policy. To be
         * safe, we keep a margin of 10 points under the threshold.
         */
        if (max_load_freq <
            (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) *
             policy->cur) {
                unsigned int freq_next;
                freq_next = max_load_freq /
                                (dbs_tuners_ins.up_threshold -
                                 dbs_tuners_ins.down_differential);

                if (!dbs_tuners_ins.powersave_bias) {
                        __cpufreq_driver_target(policy, freq_next,
                                        CPUFREQ_RELATION_L);
                } else {
                        int freq = powersave_bias_target(policy, freq_next,
                                        CPUFREQ_RELATION_L);
                        __cpufreq_driver_target(policy, freq,
                                CPUFREQ_RELATION_L);
                }
        }
}

static void do_dbs_timer(struct work_struct *work)
{
        struct cpu_dbs_info_s *dbs_info =
                container_of(work, struct cpu_dbs_info_s, work.work);
        unsigned int cpu = dbs_info->cpu;
        int sample_type = dbs_info->sample_type;

        /*
         * We want all CPUs to do sampling on nearly the same jiffy, so
         * align the timer expiry to a multiple of the sampling period.
         */
        int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

        delay -= jiffies % delay;

        if (lock_policy_rwsem_write(cpu) < 0)
                return;

        if (!dbs_info->enable) {
                unlock_policy_rwsem_write(cpu);
                return;
        }

        /* Common NORMAL_SAMPLE setup */
        dbs_info->sample_type = DBS_NORMAL_SAMPLE;
        if (!dbs_tuners_ins.powersave_bias ||
            sample_type == DBS_NORMAL_SAMPLE) {
                dbs_check_cpu(dbs_info);
                if (dbs_info->freq_lo) {
                        /* Setup timer for SUB_SAMPLE */
                        dbs_info->sample_type = DBS_SUB_SAMPLE;
                        delay = dbs_info->freq_hi_jiffies;
                }
        } else {
                __cpufreq_driver_target(dbs_info->cur_policy,
                        dbs_info->freq_lo, CPUFREQ_RELATION_H);
        }
        queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
        unlock_policy_rwsem_write(cpu);
}

static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
{
        /* We want all CPUs to do sampling on nearly the same jiffy */
        int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
        delay -= jiffies % delay;

        dbs_info->enable = 1;
        ondemand_powersave_bias_init();
        dbs_info->sample_type = DBS_NORMAL_SAMPLE;
        INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
        queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work,
                delay);
}

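/*
 * Note: cancel_delayed_work() below does not wait for an already-running
 * work item; clearing ->enable first makes a concurrently executing
 * do_dbs_timer() bail out (it re-checks ->enable under the policy rwsem)
 * instead of re-arming itself.
 */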
static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
{
        dbs_info->enable = 0;
        cancel_delayed_work(&dbs_info->work);
}

static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                                   unsigned int event)
{
        unsigned int cpu = policy->cpu;
        struct cpu_dbs_info_s *this_dbs_info;
        unsigned int j;
        int rc;

        this_dbs_info = &per_cpu(cpu_dbs_info, cpu);

        switch (event) {
        case CPUFREQ_GOV_START:
                if ((!cpu_online(cpu)) || (!policy->cur))
                        return -EINVAL;

                if (this_dbs_info->enable) /* Already enabled */
                        break;

                mutex_lock(&dbs_mutex);
                dbs_enable++;

                rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
                if (rc) {
                        dbs_enable--;
                        mutex_unlock(&dbs_mutex);
                        return rc;
                }

                for_each_cpu(j, policy->cpus) {
                        struct cpu_dbs_info_s *j_dbs_info;
                        j_dbs_info = &per_cpu(cpu_dbs_info, j);
                        j_dbs_info->cur_policy = policy;

                        j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
                                                &j_dbs_info->prev_cpu_wall);
                        if (dbs_tuners_ins.ignore_nice) {
                                j_dbs_info->prev_cpu_nice =
                                                kstat_cpu(j).cpustat.nice;
                        }
                }
                this_dbs_info->cpu = cpu;
                /*
                 * Start the timer (schedule the work) when this governor
                 * is used for the first time
                 */
                if (dbs_enable == 1) {
                        unsigned int latency;
                        /* policy latency is in ns. Convert it to us first */
                        latency = policy->cpuinfo.transition_latency / 1000;
                        if (latency == 0)
                                latency = 1;

                        def_sampling_rate =
                                max(latency * LATENCY_MULTIPLIER,
                                    MIN_STAT_SAMPLING_RATE);

                        dbs_tuners_ins.sampling_rate = def_sampling_rate;
                }
                dbs_timer_init(this_dbs_info);

                mutex_unlock(&dbs_mutex);
                break;

        case CPUFREQ_GOV_STOP:
                mutex_lock(&dbs_mutex);
                dbs_timer_exit(this_dbs_info);
                sysfs_remove_group(&policy->kobj, &dbs_attr_group);
                dbs_enable--;
                mutex_unlock(&dbs_mutex);

                break;

        case CPUFREQ_GOV_LIMITS:
                mutex_lock(&dbs_mutex);
                if (policy->max < this_dbs_info->cur_policy->cur)
                        __cpufreq_driver_target(this_dbs_info->cur_policy,
                                policy->max, CPUFREQ_RELATION_H);
                else if (policy->min > this_dbs_info->cur_policy->cur)
                        __cpufreq_driver_target(this_dbs_info->cur_policy,
                                policy->min, CPUFREQ_RELATION_L);
                mutex_unlock(&dbs_mutex);
                break;
        }
        return 0;
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
static
#endif
struct cpufreq_governor cpufreq_gov_ondemand = {
        .name                   = "ondemand",
        .governor               = cpufreq_governor_dbs,
        .max_transition_latency = TRANSITION_LATENCY_LIMIT,
        .owner                  = THIS_MODULE,
};

static int __init cpufreq_gov_dbs_init(void)
{
        int err;
        cputime64_t wall;
        u64 idle_time;
        int cpu = get_cpu();

        idle_time = get_cpu_idle_time_us(cpu, &wall);
        put_cpu();
        if (idle_time != -1ULL) {
                /* Idle micro accounting is supported. Use finer thresholds */
                dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
                dbs_tuners_ins.down_differential =
                                        MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
        }

        kondemand_wq = create_workqueue("kondemand");
        if (!kondemand_wq) {
                printk(KERN_ERR "Creation of kondemand failed\n");
                return -EFAULT;
        }
        err = cpufreq_register_governor(&cpufreq_gov_ondemand);
        if (err)
                destroy_workqueue(kondemand_wq);

        return err;
}

static void __exit cpufreq_gov_dbs_exit(void)
{
        cpufreq_unregister_governor(&cpufreq_gov_ondemand);
        destroy_workqueue(kondemand_wq);
}

MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
        "Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);