/*
 *  drivers/cpufreq/cpufreq_conservative.c
 *
 *  Copyright (C)  2001 Russell King
 *            (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                      Jun Nakajima <jun.nakajima@intel.com>
 *            (C)  2009 Alexander Clouter <alex@digriz.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/ktime.h>
#include <linux/sched.h>

/*
 * dbs is used in this file as a shorthand for demand-based switching.
 * It helps to keep variable names smaller and simpler.
 */

#define DEF_FREQUENCY_UP_THRESHOLD              (80)
#define DEF_FREQUENCY_DOWN_THRESHOLD            (20)

/*
 * The polling frequency of this governor depends on the capability of
 * the processor. The default polling frequency is 1000 times the transition
 * latency of the processor. The governor will work on any processor with
 * transition latency <= 10 ms, using an appropriate sampling rate.
 * For CPUs with transition latency > 10 ms (mostly drivers with
 * CPUFREQ_ETERNAL) this governor will not work.
 * All times here are in us.
 */
#define MIN_SAMPLING_RATE_RATIO                 (2)

static unsigned int min_sampling_rate;

#define LATENCY_MULTIPLIER                      (1000)
#define MIN_LATENCY_MULTIPLIER                  (100)
#define DEF_SAMPLING_DOWN_FACTOR                (1)
#define MAX_SAMPLING_DOWN_FACTOR                (10)
#define TRANSITION_LATENCY_LIMIT                (10 * 1000 * 1000)
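
/*
 * Editor's note (illustrative, values assumed): for a driver that reports
 * a transition latency of 500 us, the default sampling rate computed in
 * the CPUFREQ_GOV_START handling below is 500 * LATENCY_MULTIPLIER =
 * 500,000 us (0.5 s). The smallest rate a user may then set is
 * min_sampling_rate, i.e. the larger of
 * MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10) and
 * 500 * MIN_LATENCY_MULTIPLIER = 50,000 us.
 */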

static void do_dbs_timer(struct work_struct *work);

struct cpu_dbs_info_s {
        cputime64_t prev_cpu_idle;
        cputime64_t prev_cpu_wall;
        cputime64_t prev_cpu_nice;
        struct cpufreq_policy *cur_policy;
        struct delayed_work work;
        unsigned int down_skip;
        unsigned int requested_freq;
        int cpu;
        unsigned int enable:1;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);

static unsigned int dbs_enable; /* number of CPUs using this policy */

/*
 * DEADLOCK ALERT! There is an ordering requirement between the cpu_hotplug
 * lock and dbs_mutex. The cpu_hotplug lock should always be held before
 * dbs_mutex. If any function that can potentially take the cpu_hotplug lock
 * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
 * the cpu_hotplug lock should be taken before that. Note that the cpu_hotplug
 * lock is recursive for the same process. -Venki
 * DEADLOCK ALERT! (2): do_dbs_timer() must not take dbs_mutex, because it
 * would deadlock with cancel_delayed_work_sync(), which is needed for proper
 * raceless workqueue teardown.
 */
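/*
 * Editor's sketch (assumed usage, not from the original source) of the
 * ordering described above:
 *
 *      get_online_cpus();              // cpu_hotplug lock first
 *      mutex_lock(&dbs_mutex);
 *      __cpufreq_driver_target(...);   // may take cpu_hotplug internally
 *      mutex_unlock(&dbs_mutex);
 *      put_online_cpus();
 */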
static DEFINE_MUTEX(dbs_mutex);

static struct workqueue_struct  *kconservative_wq;

static struct dbs_tuners {
        unsigned int sampling_rate;
        unsigned int sampling_down_factor;
        unsigned int up_threshold;
        unsigned int down_threshold;
        unsigned int ignore_nice;
        unsigned int freq_step;
} dbs_tuners_ins = {
        .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
        .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
        .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
        .ignore_nice = 0,
        .freq_step = 5,
};

static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
                                                        cputime64_t *wall)
{
        cputime64_t idle_time;
        cputime64_t cur_wall_time;
        cputime64_t busy_time;

        cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
        busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
                        kstat_cpu(cpu).cpustat.system);

        busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
        busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
        busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
        busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);

        idle_time = cputime64_sub(cur_wall_time, busy_time);
        if (wall)
                *wall = cur_wall_time;

        return idle_time;
}

static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
{
        u64 idle_time = get_cpu_idle_time_us(cpu, wall);

        if (idle_time == -1ULL)
                return get_cpu_idle_time_jiffy(cpu, wall);

        return idle_time;
}
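
/*
 * Editor's note (descriptive, added for clarity): get_cpu_idle_time()
 * prefers the tickless idle accounting from get_cpu_idle_time_us() and
 * only falls back to the jiffy-based estimate above when that interface
 * reports -1ULL (e.g. when NO_HZ accounting is unavailable).
 */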

/* keep track of frequency transitions */
static int
dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
                     void *data)
{
        struct cpufreq_freqs *freq = data;
        struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info,
                                                        freq->cpu);

        struct cpufreq_policy *policy;

        if (!this_dbs_info->enable)
                return 0;

        policy = this_dbs_info->cur_policy;

        /*
         * we only care if our internally tracked freq moves outside
         * the 'valid' range of frequencies available to us, otherwise
         * we do not change it
         */
        if (this_dbs_info->requested_freq > policy->max
                        || this_dbs_info->requested_freq < policy->min)
                this_dbs_info->requested_freq = freq->new;

        return 0;
}

static struct notifier_block dbs_cpufreq_notifier_block = {
        .notifier_call = dbs_cpufreq_notifier
};

/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
{
        printk_once(KERN_INFO "CPUFREQ: conservative sampling_rate_max "
                    "sysfs file is deprecated - used by: %s\n", current->comm);
        return sprintf(buf, "%u\n", -1U);
}

static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
{
        return sprintf(buf, "%u\n", min_sampling_rate);
}

#define define_one_ro(_name)            \
static struct freq_attr _name =         \
__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(sampling_rate_max);
define_one_ro(sampling_rate_min);

/* cpufreq_conservative Governor Tunables */
#define show_one(file_name, object)                                     \
static ssize_t show_##file_name                                         \
(struct cpufreq_policy *unused, char *buf)                              \
{                                                                       \
        return sprintf(buf, "%u\n", dbs_tuners_ins.object);             \
}
show_one(sampling_rate, sampling_rate);
show_one(sampling_down_factor, sampling_down_factor);
show_one(up_threshold, up_threshold);
show_one(down_threshold, down_threshold);
show_one(ignore_nice_load, ignore_nice);
show_one(freq_step, freq_step);

static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
                const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
                return -EINVAL;

        mutex_lock(&dbs_mutex);
        dbs_tuners_ins.sampling_down_factor = input;
        mutex_unlock(&dbs_mutex);

        return count;
}

static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
                const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        if (ret != 1)
                return -EINVAL;

        mutex_lock(&dbs_mutex);
        dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
        mutex_unlock(&dbs_mutex);

        return count;
}

static ssize_t store_up_threshold(struct cpufreq_policy *unused,
                const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        mutex_lock(&dbs_mutex);
        if (ret != 1 || input > 100 ||
                        input <= dbs_tuners_ins.down_threshold) {
                mutex_unlock(&dbs_mutex);
                return -EINVAL;
        }

        dbs_tuners_ins.up_threshold = input;
        mutex_unlock(&dbs_mutex);

        return count;
}

static ssize_t store_down_threshold(struct cpufreq_policy *unused,
                const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        mutex_lock(&dbs_mutex);
        /* cannot be lower than 11 otherwise freq will not fall */
        if (ret != 1 || input < 11 || input > 100 ||
                        input >= dbs_tuners_ins.up_threshold) {
                mutex_unlock(&dbs_mutex);
                return -EINVAL;
        }

        dbs_tuners_ins.down_threshold = input;
        mutex_unlock(&dbs_mutex);

        return count;
}

static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
                const char *buf, size_t count)
{
        unsigned int input;
        int ret;

        unsigned int j;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        if (input > 1)
                input = 1;

        mutex_lock(&dbs_mutex);
        if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
                mutex_unlock(&dbs_mutex);
                return count;
        }
        dbs_tuners_ins.ignore_nice = input;

        /* we need to re-evaluate prev_cpu_idle */
        for_each_online_cpu(j) {
                struct cpu_dbs_info_s *dbs_info;
                dbs_info = &per_cpu(cpu_dbs_info, j);
                dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
                                                &dbs_info->prev_cpu_wall);
                if (dbs_tuners_ins.ignore_nice)
                        dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
        }
        mutex_unlock(&dbs_mutex);

        return count;
}

static ssize_t store_freq_step(struct cpufreq_policy *policy,
                const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        if (ret != 1)
                return -EINVAL;

        if (input > 100)
                input = 100;

        /* no need to test here if freq_step is zero as the user might actually
         * want this, they would be crazy though :) */
        mutex_lock(&dbs_mutex);
        dbs_tuners_ins.freq_step = input;
        mutex_unlock(&dbs_mutex);

        return count;
}

#define define_one_rw(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_rw(sampling_rate);
define_one_rw(sampling_down_factor);
define_one_rw(up_threshold);
define_one_rw(down_threshold);
define_one_rw(ignore_nice_load);
define_one_rw(freq_step);

static struct attribute *dbs_attributes[] = {
        &sampling_rate_max.attr,
        &sampling_rate_min.attr,
        &sampling_rate.attr,
        &sampling_down_factor.attr,
        &up_threshold.attr,
        &down_threshold.attr,
        &ignore_nice_load.attr,
        &freq_step.attr,
        NULL
};

static struct attribute_group dbs_attr_group = {
        .attrs = dbs_attributes,
        .name = "conservative",
};

/************************** sysfs end ************************/
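
/*
 * Editor's note (illustrative usage, paths assumed from the attribute
 * group name above): the tunables defined in this section appear under
 * /sys/devices/system/cpu/cpuN/cpufreq/conservative/, so for example
 * "echo 95 > .../up_threshold" raises the load level at which the
 * frequency is stepped up, subject to the checks in store_up_threshold().
 */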

static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
        unsigned int load = 0;
        unsigned int freq_target;

        struct cpufreq_policy *policy;
        unsigned int j;

        policy = this_dbs_info->cur_policy;

        /*
         * Every sampling_rate, we check if the current load is above
         * up_threshold (80% by default); if it is, we try to increase the
         * frequency. If the load falls sufficiently below down_threshold,
         * we try to decrease the frequency.
         *
         * Frequency increases and reductions both happen in steps of
         * freq_step, 5% (by default) of the maximum frequency.
         */
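
        /*
         * Editor's worked example (values assumed): with policy->max of
         * 2,000,000 kHz and freq_step = 5, freq_target below is
         * 5 * 2,000,000 / 100 = 100,000 kHz. A sampled load of 85 exceeds
         * the default up_threshold of 80, so requested_freq is raised by
         * 100,000 kHz (capped at policy->max).
         */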

        /* Get Absolute Load */
        for_each_cpu(j, policy->cpus) {
                struct cpu_dbs_info_s *j_dbs_info;
                cputime64_t cur_wall_time, cur_idle_time;
                unsigned int idle_time, wall_time;

                j_dbs_info = &per_cpu(cpu_dbs_info, j);

                cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);

                wall_time = (unsigned int) cputime64_sub(cur_wall_time,
                                j_dbs_info->prev_cpu_wall);
                j_dbs_info->prev_cpu_wall = cur_wall_time;

                idle_time = (unsigned int) cputime64_sub(cur_idle_time,
                                j_dbs_info->prev_cpu_idle);
                j_dbs_info->prev_cpu_idle = cur_idle_time;

                if (dbs_tuners_ins.ignore_nice) {
                        cputime64_t cur_nice;
                        unsigned long cur_nice_jiffies;

                        cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
                                         j_dbs_info->prev_cpu_nice);
                        /*
                         * Assumption: nice time between sampling periods will
                         * be less than 2^32 jiffies for 32 bit sys
                         */
                        cur_nice_jiffies = (unsigned long)
                                        cputime64_to_jiffies64(cur_nice);

                        j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
                        idle_time += jiffies_to_usecs(cur_nice_jiffies);
                }

                if (unlikely(!wall_time || wall_time < idle_time))
                        continue;

                load = 100 * (wall_time - idle_time) / wall_time;
        }

        /*
         * break out if we 'cannot' reduce the speed as the user might
         * want freq_step to be zero
         */
        if (dbs_tuners_ins.freq_step == 0)
                return;

        /* Check for frequency increase */
        if (load > dbs_tuners_ins.up_threshold) {
                this_dbs_info->down_skip = 0;

                /* if we are already at full speed then break out early */
                if (this_dbs_info->requested_freq == policy->max)
                        return;

                freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;

                /* max freq cannot be less than 100. But who knows.... */
                if (unlikely(freq_target == 0))
                        freq_target = 5;

                this_dbs_info->requested_freq += freq_target;
                if (this_dbs_info->requested_freq > policy->max)
                        this_dbs_info->requested_freq = policy->max;

                __cpufreq_driver_target(policy, this_dbs_info->requested_freq,
                        CPUFREQ_RELATION_H);
                return;
        }

        /*
         * The optimal frequency is the lowest frequency that can support
         * the current CPU usage without triggering the up policy. To be
         * safe, we stay 10 points below the down threshold.
         */
        if (load < (dbs_tuners_ins.down_threshold - 10)) {
                freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;

                this_dbs_info->requested_freq -= freq_target;
                if (this_dbs_info->requested_freq < policy->min)
                        this_dbs_info->requested_freq = policy->min;

                /*
                 * if we cannot reduce the frequency anymore, break out early
                 */
                if (policy->cur == policy->min)
                        return;

                __cpufreq_driver_target(policy, this_dbs_info->requested_freq,
                                CPUFREQ_RELATION_H);
                return;
        }
}

static void do_dbs_timer(struct work_struct *work)
{
        struct cpu_dbs_info_s *dbs_info =
                container_of(work, struct cpu_dbs_info_s, work.work);
        unsigned int cpu = dbs_info->cpu;

        /* We want all CPUs to do sampling nearly on the same jiffy */
        int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

        delay -= jiffies % delay;
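
        /*
         * Editor's worked example (values assumed): with HZ = 250 and a
         * sampling_rate of 80,000 us, usecs_to_jiffies() yields a delay of
         * 20 jiffies; subtracting "jiffies % delay" aligns the next run to
         * a multiple of 20 jiffies, so every CPU's work item fires near the
         * same tick.
         */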

        if (lock_policy_rwsem_write(cpu) < 0)
                return;

        if (!dbs_info->enable) {
                unlock_policy_rwsem_write(cpu);
                return;
        }

        dbs_check_cpu(dbs_info);

        queue_delayed_work_on(cpu, kconservative_wq, &dbs_info->work, delay);
        unlock_policy_rwsem_write(cpu);
}

static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
{
        /* We want all CPUs to do sampling nearly on the same jiffy */
        int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
        delay -= jiffies % delay;

        dbs_info->enable = 1;
        INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
        queue_delayed_work_on(dbs_info->cpu, kconservative_wq, &dbs_info->work,
                                delay);
}

static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
{
        dbs_info->enable = 0;
        cancel_delayed_work_sync(&dbs_info->work);
}

static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                                   unsigned int event)
{
        unsigned int cpu = policy->cpu;
        struct cpu_dbs_info_s *this_dbs_info;
        unsigned int j;
        int rc;

        this_dbs_info = &per_cpu(cpu_dbs_info, cpu);

        switch (event) {
        case CPUFREQ_GOV_START:
                if ((!cpu_online(cpu)) || (!policy->cur))
                        return -EINVAL;

                if (this_dbs_info->enable) /* Already enabled */
                        break;

                mutex_lock(&dbs_mutex);

                rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
                if (rc) {
                        mutex_unlock(&dbs_mutex);
                        return rc;
                }

                for_each_cpu(j, policy->cpus) {
                        struct cpu_dbs_info_s *j_dbs_info;
                        j_dbs_info = &per_cpu(cpu_dbs_info, j);
                        j_dbs_info->cur_policy = policy;

                        j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
                                                &j_dbs_info->prev_cpu_wall);
                        if (dbs_tuners_ins.ignore_nice) {
                                j_dbs_info->prev_cpu_nice =
                                                kstat_cpu(j).cpustat.nice;
                        }
                }
                this_dbs_info->down_skip = 0;
                this_dbs_info->requested_freq = policy->cur;

                dbs_enable++;
                /*
                 * When this governor is used for the first time, set up
                 * the sampling rate and register the transition notifier
                 */
                if (dbs_enable == 1) {
                        unsigned int latency;
                        /* policy latency is in ns. Convert it to us first */
                        latency = policy->cpuinfo.transition_latency / 1000;
                        if (latency == 0)
                                latency = 1;

                        /*
                         * conservative does not implement micro-accounting
                         * like the ondemand governor, thus we are bound to
                         * jiffies/HZ
                         */
                        min_sampling_rate =
                                MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
                        /* Bring kernel and HW constraints together */
                        min_sampling_rate = max(min_sampling_rate,
                                        MIN_LATENCY_MULTIPLIER * latency);
                        dbs_tuners_ins.sampling_rate =
                                max(min_sampling_rate,
                                    latency * LATENCY_MULTIPLIER);

                        cpufreq_register_notifier(
                                        &dbs_cpufreq_notifier_block,
                                        CPUFREQ_TRANSITION_NOTIFIER);
                }
                dbs_timer_init(this_dbs_info);

                mutex_unlock(&dbs_mutex);

                break;

        case CPUFREQ_GOV_STOP:
                mutex_lock(&dbs_mutex);
                dbs_timer_exit(this_dbs_info);
                sysfs_remove_group(&policy->kobj, &dbs_attr_group);
                dbs_enable--;

                /*
                 * Unregister the transition notifier when this governor
                 * is no longer used on any CPU
                 */
                if (dbs_enable == 0)
                        cpufreq_unregister_notifier(
                                        &dbs_cpufreq_notifier_block,
                                        CPUFREQ_TRANSITION_NOTIFIER);

                mutex_unlock(&dbs_mutex);

                break;

        case CPUFREQ_GOV_LIMITS:
                mutex_lock(&dbs_mutex);
                if (policy->max < this_dbs_info->cur_policy->cur)
                        __cpufreq_driver_target(
                                        this_dbs_info->cur_policy,
                                        policy->max, CPUFREQ_RELATION_H);
                else if (policy->min > this_dbs_info->cur_policy->cur)
                        __cpufreq_driver_target(
                                        this_dbs_info->cur_policy,
                                        policy->min, CPUFREQ_RELATION_L);
                mutex_unlock(&dbs_mutex);

                break;
        }
        return 0;
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
static
#endif
struct cpufreq_governor cpufreq_gov_conservative = {
        .name                   = "conservative",
        .governor               = cpufreq_governor_dbs,
        .max_transition_latency = TRANSITION_LATENCY_LIMIT,
        .owner                  = THIS_MODULE,
};
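
/*
 * Editor's note (illustrative usage): once cpufreq_register_governor()
 * succeeds below, "conservative" can be selected at run time, e.g.
 *   echo conservative > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
 * provided the governor is built in or the module is loaded.
 */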

static int __init cpufreq_gov_dbs_init(void)
{
        int err;

        kconservative_wq = create_workqueue("kconservative");
        if (!kconservative_wq) {
                printk(KERN_ERR "Creation of kconservative failed\n");
                return -EFAULT;
        }

        err = cpufreq_register_governor(&cpufreq_gov_conservative);
        if (err)
                destroy_workqueue(kconservative_wq);

        return err;
}

static void __exit cpufreq_gov_dbs_exit(void)
{
        cpufreq_unregister_governor(&cpufreq_gov_conservative);
        destroy_workqueue(kconservative_wq);
}


MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
                "Low Latency Frequency Transition capable processors "
                "optimised for use in a battery environment");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);