arch/mips/oprofile/op_model_mipsxx.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004, 05, 06 by Ralf Baechle
 * Copyright (C) 2005 by MIPS Technologies, Inc.
 */
#include <linux/cpumask.h>
#include <linux/oprofile.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <asm/irq_regs.h>

#include "op_impl.h"

#define M_PERFCTL_EXL                   (1UL      <<  0)
#define M_PERFCTL_KERNEL                (1UL      <<  1)
#define M_PERFCTL_SUPERVISOR            (1UL      <<  2)
#define M_PERFCTL_USER                  (1UL      <<  3)
#define M_PERFCTL_INTERRUPT_ENABLE      (1UL      <<  4)
#define M_PERFCTL_EVENT(event)          (((event) & 0x3ff)  << 5)
#define M_PERFCTL_VPEID(vpe)            ((vpe)    << 16)
#define M_PERFCTL_MT_EN(filter)         ((filter) << 20)
#define    M_TC_EN_ALL                  M_PERFCTL_MT_EN(0)
#define    M_TC_EN_VPE                  M_PERFCTL_MT_EN(1)
#define    M_TC_EN_TC                   M_PERFCTL_MT_EN(2)
#define M_PERFCTL_TCID(tcid)            ((tcid)   << 22)
#define M_PERFCTL_WIDE                  (1UL      << 30)
#define M_PERFCTL_MORE                  (1UL      << 31)

#define M_COUNTER_OVERFLOW              (1UL      << 31)

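/*
 * A counter control word is assembled from the fields above.  For example,
 * counting event 5 (an arbitrary event number, for illustration only) in
 * kernel and user mode with overflow interrupts enabled would be
 * M_PERFCTL_EVENT(5) | M_PERFCTL_KERNEL | M_PERFCTL_USER |
 * M_PERFCTL_INTERRUPT_ENABLE, which is how mipsxx_reg_setup() below builds
 * reg.control[].
 */
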
#ifdef CONFIG_MIPS_MT_SMP
#define WHAT            (M_TC_EN_VPE | M_PERFCTL_VPEID(smp_processor_id()))
#define vpe_id()        smp_processor_id()

/*
 * The number of bits to shift to convert between counters per core and
 * counters per VPE.  There is currently no reasonable interface to obtain
 * the number of VPEs used by Linux, and on the 34K that number is fixed at
 * two anyway, so a few things are hardcoded here for the moment.  Doing it
 * this way ensures that an oprofile VSMP kernel also runs correctly on a
 * lesser core such as a 24K, or with maxcpus=1.
 */
static inline unsigned int vpe_shift(void)
{
        if (num_possible_cpus() > 1)
                return 1;

        return 0;
}

#else

#define WHAT            0
#define vpe_id()        0

static inline unsigned int vpe_shift(void)
{
        return 0;
}

#endif

static inline unsigned int counters_total_to_per_cpu(unsigned int counters)
{
        return counters >> vpe_shift();
}

static inline unsigned int counters_per_cpu_to_total(unsigned int counters)
{
        return counters << vpe_shift();
}

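/*
 * Example: on an MT SMP kernel with more than one CPU (e.g. a 34K with its
 * two VPEs), counters_total_to_per_cpu(4) yields 2, so each VPE is handed
 * half of the core's counters; with maxcpus=1 or a non-MT kernel the shift
 * is zero and all counters stay with the single CPU.
 */
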
#define __define_perf_accessors(r, n, np)                               \
                                                                        \
static inline unsigned int r_c0_ ## r ## n(void)                        \
{                                                                       \
        unsigned int cpu = vpe_id();                                    \
                                                                        \
        switch (cpu) {                                                  \
        case 0:                                                         \
                return read_c0_ ## r ## n();                            \
        case 1:                                                         \
                return read_c0_ ## r ## np();                           \
        default:                                                        \
                BUG();                                                  \
        }                                                               \
        return 0;                                                       \
}                                                                       \
                                                                        \
static inline void w_c0_ ## r ## n(unsigned int value)                  \
{                                                                       \
        unsigned int cpu = vpe_id();                                    \
                                                                        \
        switch (cpu) {                                                  \
        case 0:                                                         \
                write_c0_ ## r ## n(value);                             \
                return;                                                 \
        case 1:                                                         \
                write_c0_ ## r ## np(value);                            \
                return;                                                 \
        default:                                                        \
                BUG();                                                  \
        }                                                               \
        return;                                                         \
}                                                                       \

__define_perf_accessors(perfcntr, 0, 2)
__define_perf_accessors(perfcntr, 1, 3)
__define_perf_accessors(perfcntr, 2, 0)
__define_perf_accessors(perfcntr, 3, 1)

__define_perf_accessors(perfctrl, 0, 2)
__define_perf_accessors(perfctrl, 1, 3)
__define_perf_accessors(perfctrl, 2, 0)
__define_perf_accessors(perfctrl, 3, 1)

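/*
 * The pairings above make r_c0_perfcntrN()/w_c0_perfcntrN() (and the
 * perfctrl equivalents) operate on counter N when running on VPE 0 and on
 * counter (N + 2) mod 4 when running on VPE 1, so each VPE owns half of
 * the four core counters.  On non-MT kernels vpe_id() is always 0 and only
 * the first variant is ever used.
 */
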
struct op_mips_model op_model_mipsxx_ops;

static struct mipsxx_register_config {
        unsigned int control[4];
        unsigned int counter[4];
} reg;

/* Compute all of the registers in preparation for enabling profiling.  */

static void mipsxx_reg_setup(struct op_counter_config *ctr)
{
        unsigned int counters = op_model_mipsxx_ops.num_counters;
        int i;

        /* Compute the performance counter control word.  */
        for (i = 0; i < counters; i++) {
                reg.control[i] = 0;
                reg.counter[i] = 0;

                if (!ctr[i].enabled)
                        continue;

                reg.control[i] = M_PERFCTL_EVENT(ctr[i].event) |
                                 M_PERFCTL_INTERRUPT_ENABLE;
                if (ctr[i].kernel)
                        reg.control[i] |= M_PERFCTL_KERNEL;
                if (ctr[i].user)
                        reg.control[i] |= M_PERFCTL_USER;
                if (ctr[i].exl)
                        reg.control[i] |= M_PERFCTL_EXL;
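                /*
                 * Preload the counter so that bit 31 (M_COUNTER_OVERFLOW)
                 * becomes set, and the overflow interrupt fires, after
                 * ctr[i].count events.
                 */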
                reg.counter[i] = 0x80000000 - ctr[i].count;
        }
}

/* Program all of the registers in preparation for enabling profiling.  */

static void mipsxx_cpu_setup(void *args)
{
        unsigned int counters = op_model_mipsxx_ops.num_counters;

        /*
         * The cases below fall through deliberately, so all implemented
         * counters from the highest down to 0 are programmed.  The same
         * pattern is used by cpu_start/cpu_stop/reset_counters.
         */
        switch (counters) {
        case 4:
                w_c0_perfctrl3(0);
                w_c0_perfcntr3(reg.counter[3]);
        case 3:
                w_c0_perfctrl2(0);
                w_c0_perfcntr2(reg.counter[2]);
        case 2:
                w_c0_perfctrl1(0);
                w_c0_perfcntr1(reg.counter[1]);
        case 1:
                w_c0_perfctrl0(0);
                w_c0_perfcntr0(reg.counter[0]);
        }
}

/* Start all counters on current CPU */
static void mipsxx_cpu_start(void *args)
{
        unsigned int counters = op_model_mipsxx_ops.num_counters;

        switch (counters) {
        case 4:
                w_c0_perfctrl3(WHAT | reg.control[3]);
        case 3:
                w_c0_perfctrl2(WHAT | reg.control[2]);
        case 2:
                w_c0_perfctrl1(WHAT | reg.control[1]);
        case 1:
                w_c0_perfctrl0(WHAT | reg.control[0]);
        }
}

/* Stop all counters on current CPU */
static void mipsxx_cpu_stop(void *args)
{
        unsigned int counters = op_model_mipsxx_ops.num_counters;

        switch (counters) {
        case 4:
                w_c0_perfctrl3(0);
        case 3:
                w_c0_perfctrl2(0);
        case 2:
                w_c0_perfctrl1(0);
        case 1:
                w_c0_perfctrl0(0);
        }
}

static int mipsxx_perfcount_handler(void)
{
        unsigned int counters = op_model_mipsxx_ops.num_counters;
        unsigned int control;
        unsigned int counter;
        int handled = IRQ_NONE;

        /*
         * On MIPS R2 cores, Cause bit 26 reports whether a performance
         * counter interrupt is actually pending; bail out early if not.
         */
        if (cpu_has_mips_r2 && !(read_c0_cause() & (1 << 26)))
                return handled;

        switch (counters) {
#define HANDLE_COUNTER(n)                                               \
        case n + 1:                                                     \
                control = r_c0_perfctrl ## n();                         \
                counter = r_c0_perfcntr ## n();                         \
                if ((control & M_PERFCTL_INTERRUPT_ENABLE) &&           \
                    (counter & M_COUNTER_OVERFLOW)) {                   \
                        oprofile_add_sample(get_irq_regs(), n);         \
                        w_c0_perfcntr ## n(reg.counter[n]);             \
                        handled = IRQ_HANDLED;                          \
                }
        HANDLE_COUNTER(3)
        HANDLE_COUNTER(2)
        HANDLE_COUNTER(1)
        HANDLE_COUNTER(0)
        }

        return handled;
}

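/*
 * Config1.PC (bit 4) indicates whether the CPU implements performance
 * counters at all.  When it does, the M bit (M_PERFCTL_MORE) in each
 * control register says whether a further counter follows, which is how
 * __n_counters() below probes for the number implemented.
 */
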
#define M_CONFIG1_PC    (1 << 4)

static inline int __n_counters(void)
{
        if (!(read_c0_config1() & M_CONFIG1_PC))
                return 0;
        if (!(r_c0_perfctrl0() & M_PERFCTL_MORE))
                return 1;
        if (!(r_c0_perfctrl1() & M_PERFCTL_MORE))
                return 2;
        if (!(r_c0_perfctrl2() & M_PERFCTL_MORE))
                return 3;

        return 4;
}

static inline int n_counters(void)
{
        int counters;

        switch (current_cpu_type()) {
        case CPU_R10000:
                counters = 2;
                break;

        case CPU_R12000:
        case CPU_R14000:
                counters = 4;
                break;

        default:
                counters = __n_counters();
        }

        return counters;
}

static inline void reset_counters(int counters)
{
        switch (counters) {
        case 4:
                w_c0_perfctrl3(0);
                w_c0_perfcntr3(0);
        case 3:
                w_c0_perfctrl2(0);
                w_c0_perfcntr2(0);
        case 2:
                w_c0_perfctrl1(0);
                w_c0_perfcntr1(0);
        case 1:
                w_c0_perfctrl0(0);
                w_c0_perfcntr0(0);
        }
}

static int __init mipsxx_init(void)
{
        int counters;

        counters = n_counters();
        if (counters == 0) {
                printk(KERN_ERR "Oprofile: CPU has no performance counters\n");
                return -ENODEV;
        }

        reset_counters(counters);

        counters = counters_total_to_per_cpu(counters);

        op_model_mipsxx_ops.num_counters = counters;
        switch (current_cpu_type()) {
        case CPU_20KC:
                op_model_mipsxx_ops.cpu_type = "mips/20K";
                break;

        case CPU_24K:
                op_model_mipsxx_ops.cpu_type = "mips/24K";
                break;

        case CPU_25KF:
                op_model_mipsxx_ops.cpu_type = "mips/25K";
                break;

        case CPU_34K:
                op_model_mipsxx_ops.cpu_type = "mips/34K";
                break;

        case CPU_74K:
                op_model_mipsxx_ops.cpu_type = "mips/74K";
                break;

        case CPU_5KC:
                op_model_mipsxx_ops.cpu_type = "mips/5K";
                break;

        case CPU_R10000:
                if ((current_cpu_data.processor_id & 0xff) == 0x20)
                        op_model_mipsxx_ops.cpu_type = "mips/r10000-v2.x";
                else
                        op_model_mipsxx_ops.cpu_type = "mips/r10000";
                break;

        case CPU_R12000:
        case CPU_R14000:
                op_model_mipsxx_ops.cpu_type = "mips/r12000";
                break;

        case CPU_SB1:
        case CPU_SB1A:
                op_model_mipsxx_ops.cpu_type = "mips/sb1";
                break;

        default:
                printk(KERN_ERR "Profiling unsupported for this CPU\n");

                return -ENODEV;
        }

        perf_irq = mipsxx_perfcount_handler;

        return 0;
}

static void mipsxx_exit(void)
{
        int counters = op_model_mipsxx_ops.num_counters;

        counters = counters_per_cpu_to_total(counters);
        reset_counters(counters);

        perf_irq = null_perf_irq;
}

struct op_mips_model op_model_mipsxx_ops = {
        .reg_setup      = mipsxx_reg_setup,
        .cpu_setup      = mipsxx_cpu_setup,
        .init           = mipsxx_init,
        .exit           = mipsxx_exit,
        .cpu_start      = mipsxx_cpu_start,
        .cpu_stop       = mipsxx_cpu_stop,
};
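
/*
 * Nothing in this file calls these hooks directly; the shared MIPS oprofile
 * glue (common.c in this directory) selects op_model_mipsxx_ops for the CPU
 * types handled above and invokes reg_setup/cpu_setup/cpu_start/cpu_stop
 * around the generic oprofile start/stop paths.
 */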