/* Merge branch 'core/xen' into x86/urgent */
/* [linux-2.6] / arch / powerpc / platforms / cell / cpufreq_spudemand.c */
1 /*
2  * spu aware cpufreq governor for the cell processor
3  *
4  * © Copyright IBM Corporation 2006-2008
5  *
6  * Author: Christian Krafft <krafft@de.ibm.com>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2, or (at your option)
11  * any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21  */
22
23 #include <linux/cpufreq.h>
24 #include <linux/sched.h>
25 #include <linux/timer.h>
26 #include <linux/workqueue.h>
27 #include <asm/atomic.h>
28 #include <asm/machdep.h>
29 #include <asm/spu.h>
30
#define POLL_TIME       100000          /* sampling interval, in µs */
#define EXP             753             /* exp(-1) in fixed-point, scaled by FIXED_1 */

/* Per-cpu governor state; one instance per possible cpu. */
struct spu_gov_info_struct {
        unsigned long busy_spus;        /* decayed busy-SPU average, fixed-point (FIXED_1 scale) */
        struct cpufreq_policy *policy;  /* governed policy; NULL while the governor is stopped */
        struct delayed_work work;       /* periodic re-evaluation work item */
        unsigned int poll_int;          /* polling interval, in µs */
};
static DEFINE_PER_CPU(struct spu_gov_info_struct, spu_gov_info);

/* Workqueue on which the periodic polling work runs. */
static struct workqueue_struct *kspugov_wq;
43
44 static int calc_freq(struct spu_gov_info_struct *info)
45 {
46         int cpu;
47         int busy_spus;
48
49         cpu = info->policy->cpu;
50         busy_spus = atomic_read(&cbe_spu_info[cpu_to_node(cpu)].busy_spus);
51
52         CALC_LOAD(info->busy_spus, EXP, busy_spus * FIXED_1);
53         pr_debug("cpu %d: busy_spus=%d, info->busy_spus=%ld\n",
54                         cpu, busy_spus, info->busy_spus);
55
56         return info->policy->max * info->busy_spus / FIXED_1;
57 }
58
59 static void spu_gov_work(struct work_struct *work)
60 {
61         struct spu_gov_info_struct *info;
62         int delay;
63         unsigned long target_freq;
64
65         info = container_of(work, struct spu_gov_info_struct, work.work);
66
67         /* after cancel_delayed_work_sync we unset info->policy */
68         BUG_ON(info->policy == NULL);
69
70         target_freq = calc_freq(info);
71         __cpufreq_driver_target(info->policy, target_freq, CPUFREQ_RELATION_H);
72
73         delay = usecs_to_jiffies(info->poll_int);
74         queue_delayed_work_on(info->policy->cpu, kspugov_wq, &info->work, delay);
75 }
76
77 static void spu_gov_init_work(struct spu_gov_info_struct *info)
78 {
79         int delay = usecs_to_jiffies(info->poll_int);
80         INIT_DELAYED_WORK_DEFERRABLE(&info->work, spu_gov_work);
81         queue_delayed_work_on(info->policy->cpu, kspugov_wq, &info->work, delay);
82 }
83
/* Synchronously cancel the periodic polling work for this policy. */
static void spu_gov_cancel_work(struct spu_gov_info_struct *info)
{
	cancel_delayed_work_sync(&info->work);
}
88
89 static int spu_gov_govern(struct cpufreq_policy *policy, unsigned int event)
90 {
91         unsigned int cpu = policy->cpu;
92         struct spu_gov_info_struct *info, *affected_info;
93         int i;
94         int ret = 0;
95
96         info = &per_cpu(spu_gov_info, cpu);
97
98         switch (event) {
99         case CPUFREQ_GOV_START:
100                 if (!cpu_online(cpu)) {
101                         printk(KERN_ERR "cpu %d is not online\n", cpu);
102                         ret = -EINVAL;
103                         break;
104                 }
105
106                 if (!policy->cur) {
107                         printk(KERN_ERR "no cpu specified in policy\n");
108                         ret = -EINVAL;
109                         break;
110                 }
111
112                 /* initialize spu_gov_info for all affected cpus */
113                 for_each_cpu(i, policy->cpus) {
114                         affected_info = &per_cpu(spu_gov_info, i);
115                         affected_info->policy = policy;
116                 }
117
118                 info->poll_int = POLL_TIME;
119
120                 /* setup timer */
121                 spu_gov_init_work(info);
122
123                 break;
124
125         case CPUFREQ_GOV_STOP:
126                 /* cancel timer */
127                 spu_gov_cancel_work(info);
128
129                 /* clean spu_gov_info for all affected cpus */
130                 for_each_cpu (i, policy->cpus) {
131                         info = &per_cpu(spu_gov_info, i);
132                         info->policy = NULL;
133                 }
134
135                 break;
136         }
137
138         return ret;
139 }
140
/* Governor descriptor registered with the cpufreq core as "spudemand". */
static struct cpufreq_governor spu_governor = {
	.name = "spudemand",
	.governor = spu_gov_govern,
	.owner = THIS_MODULE,
};
146
/*
 * module init and destroy
 */
150
151 static int __init spu_gov_init(void)
152 {
153         int ret;
154
155         kspugov_wq = create_workqueue("kspugov");
156         if (!kspugov_wq) {
157                 printk(KERN_ERR "creation of kspugov failed\n");
158                 ret = -EFAULT;
159                 goto out;
160         }
161
162         ret = cpufreq_register_governor(&spu_governor);
163         if (ret) {
164                 printk(KERN_ERR "registration of governor failed\n");
165                 destroy_workqueue(kspugov_wq);
166                 goto out;
167         }
168 out:
169         return ret;
170 }
171
/* Module exit: unregister the governor, then tear down the workqueue. */
static void __exit spu_gov_exit(void)
{
	cpufreq_unregister_governor(&spu_governor);
	destroy_workqueue(kspugov_wq);
}
177
178
/* Module entry/exit hooks and metadata. */
module_init(spu_gov_init);
module_exit(spu_gov_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
184