arch/ia64/include/asm/paravirt_privop.h
/******************************************************************************
 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#ifndef _ASM_IA64_PARAVIRT_PRIVOP_H
#define _ASM_IA64_PARAVIRT_PRIVOP_H

#ifdef CONFIG_PARAVIRT

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <asm/kregs.h> /* for IA64_PSR_I */

/******************************************************************************
 * replacement of intrinsic operations.
 */

struct pv_cpu_ops {
        void (*fc)(void *addr);
        unsigned long (*thash)(unsigned long addr);
        unsigned long (*get_cpuid)(int index);
        unsigned long (*get_pmd)(int index);
        unsigned long (*getreg)(int reg);
        void (*setreg)(int reg, unsigned long val);
        void (*ptcga)(unsigned long addr, unsigned long size);
        unsigned long (*get_rr)(unsigned long index);
        void (*set_rr)(unsigned long index, unsigned long val);
        void (*set_rr0_to_rr4)(unsigned long val0, unsigned long val1,
                               unsigned long val2, unsigned long val3,
                               unsigned long val4);
        void (*ssm_i)(void);
        void (*rsm_i)(void);
        unsigned long (*get_psr_i)(void);
        void (*intrin_local_irq_restore)(unsigned long flags);
};
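/*
 * Illustrative only (not part of the original header): a guest port is
 * expected to provide its own table and install it during early setup,
 * roughly like this (xen_thash and xen_ptcga are hypothetical names):
 *
 *	static const struct pv_cpu_ops xen_cpu_ops __initconst = {
 *		.thash	= xen_thash,
 *		.ptcga	= xen_ptcga,
 *		...
 *	};
 *
 *	pv_cpu_ops = xen_cpu_ops;
 */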

extern struct pv_cpu_ops pv_cpu_ops;

extern void ia64_native_setreg_func(int regnum, unsigned long val);
extern unsigned long ia64_native_getreg_func(int regnum);

/************************************************/
/* Instructions paravirtualized for performance */
/************************************************/

#ifndef ASM_SUPPORTED
#define paravirt_ssm_i()        pv_cpu_ops.ssm_i()
#define paravirt_rsm_i()        pv_cpu_ops.rsm_i()
#define __paravirt_getreg(reg)  pv_cpu_ops.getreg(reg)
#endif

/* the mask for ia64_native_ssm/rsm() must be a constant ("i" constraint);
 * a static inline function can't satisfy that, so these are macros. */
#define paravirt_ssm(mask)                      \
        do {                                    \
                if ((mask) == IA64_PSR_I)       \
                        paravirt_ssm_i();       \
                else                            \
                        ia64_native_ssm(mask);  \
        } while (0)

#define paravirt_rsm(mask)                      \
        do {                                    \
                if ((mask) == IA64_PSR_I)       \
                        paravirt_rsm_i();       \
                else                            \
                        ia64_native_rsm(mask);  \
        } while (0)
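/*
 * Illustrative only (not part of the original header): the interrupt
 * enable/disable paths are the intended users, e.g.
 *
 *	paravirt_rsm(IA64_PSR_I);	routed to pv_cpu_ops.rsm_i()
 *	... critical section ...
 *	paravirt_ssm(IA64_PSR_I);	routed back through pv_cpu_ops.ssm_i()
 *
 * Any other constant mask falls straight through to ia64_native_ssm/rsm().
 */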

/* the returned ip value should be the one in the caller,
 * not the one inside __paravirt_getreg() */
#define paravirt_getreg(reg)                                    \
        ({                                                      \
                unsigned long res;                              \
                if ((reg) == _IA64_REG_IP)                      \
                        res = ia64_native_getreg(_IA64_REG_IP); \
                else                                            \
                        res = __paravirt_getreg(reg);           \
                res;                                            \
        })
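/*
 * Illustrative only (not part of the original header): callers pass the
 * usual _IA64_REG_xxx selectors, e.g. reading the interval time counter
 * (assuming _IA64_REG_AR_ITC from <asm/ia64regs.h>):
 *
 *	unsigned long itc = paravirt_getreg(_IA64_REG_AR_ITC);
 *
 * Only _IA64_REG_IP is special-cased above, because the ip has to be
 * sampled in the caller rather than inside the pv_cpu_ops stub.
 */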

/******************************************************************************
 * replacement of hand-written assembly code.
 */
struct pv_cpu_asm_switch {
        unsigned long switch_to;
        unsigned long leave_syscall;
        unsigned long work_processed_syscall;
        unsigned long leave_kernel;
};
void paravirt_cpu_asm_init(const struct pv_cpu_asm_switch *cpu_asm_switch);
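/*
 * Illustrative only (not part of the original header): a port fills a
 * pv_cpu_asm_switch with the addresses of its assembly entry points and
 * registers it once during boot.  Using the native entry names that
 * IA64_PARAVIRT_ASM_FUNC() below resolves to, that would look roughly like:
 *
 *	static const struct pv_cpu_asm_switch ia64_native_cpu_asm_switch = {
 *		.switch_to              = (unsigned long)ia64_native_switch_to,
 *		.leave_syscall          = (unsigned long)ia64_native_leave_syscall,
 *		.work_processed_syscall =
 *			(unsigned long)ia64_native_work_processed_syscall,
 *		.leave_kernel           = (unsigned long)ia64_native_leave_kernel,
 *	};
 *
 *	paravirt_cpu_asm_init(&ia64_native_cpu_asm_switch);
 */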

#endif /* __ASSEMBLY__ */

#define IA64_PARAVIRT_ASM_FUNC(name)    paravirt_ ## name

#else

/* fallback for native case */
#define IA64_PARAVIRT_ASM_FUNC(name)    ia64_native_ ## name

#endif /* CONFIG_PARAVIRT */

#if defined(CONFIG_PARAVIRT) && defined(ASM_SUPPORTED)
#define paravirt_dv_serialize_data()    ia64_dv_serialize_data()
#else
#define paravirt_dv_serialize_data()    /* nothing */
#endif

/* these routines utilize privilege-sensitive or performance-sensitive
 * privileged instructions, so the code must be replaced with
 * paravirtualized versions */
#define ia64_switch_to                  IA64_PARAVIRT_ASM_FUNC(switch_to)
#define ia64_leave_syscall              IA64_PARAVIRT_ASM_FUNC(leave_syscall)
#define ia64_work_processed_syscall     \
        IA64_PARAVIRT_ASM_FUNC(work_processed_syscall)
#define ia64_leave_kernel               IA64_PARAVIRT_ASM_FUNC(leave_kernel)


#if defined(CONFIG_PARAVIRT)
/******************************************************************************
 * binary patching infrastructure
 */
#define PARAVIRT_PATCH_TYPE_FC                          1
#define PARAVIRT_PATCH_TYPE_THASH                       2
#define PARAVIRT_PATCH_TYPE_GET_CPUID                   3
#define PARAVIRT_PATCH_TYPE_GET_PMD                     4
#define PARAVIRT_PATCH_TYPE_PTCGA                       5
#define PARAVIRT_PATCH_TYPE_GET_RR                      6
#define PARAVIRT_PATCH_TYPE_SET_RR                      7
#define PARAVIRT_PATCH_TYPE_SET_RR0_TO_RR4              8
#define PARAVIRT_PATCH_TYPE_SSM_I                       9
#define PARAVIRT_PATCH_TYPE_RSM_I                       10
#define PARAVIRT_PATCH_TYPE_GET_PSR_I                   11
#define PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE    12

/* PARAVIRT_PATCH_TYPE_[GS]ETREG + _IA64_REG_xxx */
#define PARAVIRT_PATCH_TYPE_GETREG                      0x10000000
#define PARAVIRT_PATCH_TYPE_SETREG                      0x20000000

/*
 * struct task_struct* (*ia64_switch_to)(void* next_task);
 * void *ia64_leave_syscall;
 * void *ia64_work_processed_syscall;
 * void *ia64_leave_kernel;
 */

#define PARAVIRT_PATCH_TYPE_BR_START                    0x30000000
#define PARAVIRT_PATCH_TYPE_BR_SWITCH_TO                \
        (PARAVIRT_PATCH_TYPE_BR_START + 0)
#define PARAVIRT_PATCH_TYPE_BR_LEAVE_SYSCALL            \
        (PARAVIRT_PATCH_TYPE_BR_START + 1)
#define PARAVIRT_PATCH_TYPE_BR_WORK_PROCESSED_SYSCALL   \
        (PARAVIRT_PATCH_TYPE_BR_START + 2)
#define PARAVIRT_PATCH_TYPE_BR_LEAVE_KERNEL             \
        (PARAVIRT_PATCH_TYPE_BR_START + 3)

#ifdef ASM_SUPPORTED
#include <asm/paravirt_patch.h>

/*
 * pv_cpu_ops calling stub.
 * the normal function calling convention can't be expressed with gcc
 * inline assembly.
 *
 * from the caller's point of view,
 * the following registers will be clobbered.
 * r2, r3
 * r8-r15
 * r16, r17
 * b6, b7
 * p6-p15
 * ar.ccv
 *
 * from the callee's point of view,
 * the following registers can be used.
 * r2, r3: scratch
 * r8: scratch, input argument0 and return value
 * r9-r15: scratch, input argument1-5
 * b6: return pointer
 * b7: scratch
 * p6-p15: scratch
 * ar.ccv: scratch
 *
 * other registers must not be changed. especially
 * b0: rp: preserved. gcc ignores b0 in the clobber list.
 * r16: saved gp
 */
/* 5 bundles */
#define __PARAVIRT_BR                                                   \
        ";;\n"                                                          \
        "{ .mlx\n"                                                      \
        "nop 0\n"                                                       \
        "movl r2 = %[op_addr]\n"/* get function pointer address */      \
        ";;\n"                                                          \
        "}\n"                                                           \
        "1:\n"                                                          \
        "{ .mii\n"                                                      \
        "ld8 r2 = [r2]\n"       /* load function descriptor address */  \
        "mov r17 = ip\n"        /* get ip to calc return address */     \
        "mov r16 = gp\n"        /* save gp */                           \
        ";;\n"                                                          \
        "}\n"                                                           \
        "{ .mii\n"                                                      \
        "ld8 r3 = [r2], 8\n"    /* load entry address */                \
        "adds r17 =  1f - 1b, r17\n"    /* calculate return address */  \
        ";;\n"                                                          \
        "mov b7 = r3\n"         /* set entry address */                 \
        "}\n"                                                           \
        "{ .mib\n"                                                      \
        "ld8 gp = [r2]\n"       /* load gp value */                     \
        "mov b6 = r17\n"        /* set return address */                \
        "br.cond.sptk.few b7\n" /* intrinsics are very short insns */   \
        "}\n"                                                           \
        "1:\n"                                                          \
        "{ .mii\n"                                                      \
        "mov gp = r16\n"        /* restore gp value */                  \
        "nop 0\n"                                                       \
        "nop 0\n"                                                       \
        ";;\n"                                                          \
        "}\n"

#define PARAVIRT_OP(op)                         \
        [op_addr] "i"(&pv_cpu_ops.op)

#define PARAVIRT_TYPE(type)                     \
        PARAVIRT_PATCH_TYPE_ ## type

#define PARAVIRT_REG_CLOBBERS0                                  \
        "r2", "r3", /*"r8",*/ "r9", "r10", "r11", "r14",        \
                "r15", "r16", "r17"

#define PARAVIRT_REG_CLOBBERS1                                  \
        "r2", "r3", /*"r8",*/ "r9", "r10", "r11", "r14",        \
                "r15", "r16", "r17"

#define PARAVIRT_REG_CLOBBERS2                                  \
        "r2", "r3", /*"r8", "r9",*/ "r10", "r11", "r14",        \
                "r15", "r16", "r17"

#define PARAVIRT_REG_CLOBBERS5                                  \
        "r2", "r3", /*"r8", "r9", "r10", "r11", "r14",*/        \
                "r15", "r16", "r17"

#define PARAVIRT_BR_CLOBBERS                    \
        "b6", "b7"

#define PARAVIRT_PR_CLOBBERS                                            \
        "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15"

#define PARAVIRT_AR_CLOBBERS                    \
        "ar.ccv"

#define PARAVIRT_CLOBBERS0                      \
                PARAVIRT_REG_CLOBBERS0,         \
                PARAVIRT_BR_CLOBBERS,           \
                PARAVIRT_PR_CLOBBERS,           \
                PARAVIRT_AR_CLOBBERS,           \
                "memory"

#define PARAVIRT_CLOBBERS1                      \
                PARAVIRT_REG_CLOBBERS1,         \
                PARAVIRT_BR_CLOBBERS,           \
                PARAVIRT_PR_CLOBBERS,           \
                PARAVIRT_AR_CLOBBERS,           \
                "memory"

#define PARAVIRT_CLOBBERS2                      \
                PARAVIRT_REG_CLOBBERS2,         \
                PARAVIRT_BR_CLOBBERS,           \
                PARAVIRT_PR_CLOBBERS,           \
                PARAVIRT_AR_CLOBBERS,           \
                "memory"

#define PARAVIRT_CLOBBERS5                      \
                PARAVIRT_REG_CLOBBERS5,         \
                PARAVIRT_BR_CLOBBERS,           \
                PARAVIRT_PR_CLOBBERS,           \
                PARAVIRT_AR_CLOBBERS,           \
                "memory"

298
299 #define PARAVIRT_BR0(op, type)                                  \
300         register unsigned long ia64_clobber asm ("r8");         \
301         asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,        \
302                                           PARAVIRT_TYPE(type))  \
303                       : "=r"(ia64_clobber)                      \
304                       : PARAVIRT_OP(op)                         \
305                       : PARAVIRT_CLOBBERS0)
306
307 #define PARAVIRT_BR0_RET(op, type)                              \
308         register unsigned long ia64_intri_res asm ("r8");       \
309         asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,        \
310                                           PARAVIRT_TYPE(type))  \
311                       : "=r"(ia64_intri_res)                    \
312                       : PARAVIRT_OP(op)                         \
313                       : PARAVIRT_CLOBBERS0)
314
315 #define PARAVIRT_BR1(op, type, arg1)                            \
316         register unsigned long __##arg1 asm ("r8") = arg1;      \
317         register unsigned long ia64_clobber asm ("r8");         \
318         asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,        \
319                                           PARAVIRT_TYPE(type))  \
320                       : "=r"(ia64_clobber)                      \
321                       : PARAVIRT_OP(op), "0"(__##arg1)          \
322                       : PARAVIRT_CLOBBERS1)
323
324 #define PARAVIRT_BR1_RET(op, type, arg1)                        \
325         register unsigned long ia64_intri_res asm ("r8");       \
326         register unsigned long __##arg1 asm ("r8") = arg1;      \
327         asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,        \
328                                           PARAVIRT_TYPE(type))  \
329                       : "=r"(ia64_intri_res)                    \
330                       : PARAVIRT_OP(op), "0"(__##arg1)          \
331                       : PARAVIRT_CLOBBERS1)
332
333 #define PARAVIRT_BR1_VOID(op, type, arg1)                       \
334         register void *__##arg1 asm ("r8") = arg1;              \
335         register unsigned long ia64_clobber asm ("r8");         \
336         asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,        \
337                                           PARAVIRT_TYPE(type))  \
338                       : "=r"(ia64_clobber)                      \
339                       : PARAVIRT_OP(op), "0"(__##arg1)          \
340                       : PARAVIRT_CLOBBERS1)
341
342 #define PARAVIRT_BR2(op, type, arg1, arg2)                              \
343         register unsigned long __##arg1 asm ("r8") = arg1;              \
344         register unsigned long __##arg2 asm ("r9") = arg2;              \
345         register unsigned long ia64_clobber1 asm ("r8");                \
346         register unsigned long ia64_clobber2 asm ("r9");                \
347         asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,                \
348                                           PARAVIRT_TYPE(type))          \
349                       : "=r"(ia64_clobber1), "=r"(ia64_clobber2)        \
350                       : PARAVIRT_OP(op), "0"(__##arg1), "1"(__##arg2)   \
351                       : PARAVIRT_CLOBBERS2)
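/*
 * The ia64_clobber* dummies share r8/r9 with the input arguments: the
 * callee trashes those registers, but they cannot simply be named in the
 * clobber list while also being used as operands (note the commented-out
 * "r8"/"r9" in PARAVIRT_REG_CLOBBERS*), so the clobbering is expressed
 * through tied dummy outputs instead.
 */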


#define PARAVIRT_DEFINE_CPU_OP0(op, type)               \
        static inline void                              \
        paravirt_ ## op (void)                          \
        {                                               \
                PARAVIRT_BR0(op, type);                 \
        }

#define PARAVIRT_DEFINE_CPU_OP0_RET(op, type)           \
        static inline unsigned long                     \
        paravirt_ ## op (void)                          \
        {                                               \
                PARAVIRT_BR0_RET(op, type);             \
                return ia64_intri_res;                  \
        }

#define PARAVIRT_DEFINE_CPU_OP1_VOID(op, type)          \
        static inline void                              \
        paravirt_ ## op (void *arg1)                    \
        {                                               \
                PARAVIRT_BR1_VOID(op, type, arg1);      \
        }

#define PARAVIRT_DEFINE_CPU_OP1(op, type)               \
        static inline void                              \
        paravirt_ ## op (unsigned long arg1)            \
        {                                               \
                PARAVIRT_BR1(op, type, arg1);           \
        }

#define PARAVIRT_DEFINE_CPU_OP1_RET(op, type)           \
        static inline unsigned long                     \
        paravirt_ ## op (unsigned long arg1)            \
        {                                               \
                PARAVIRT_BR1_RET(op, type, arg1);       \
                return ia64_intri_res;                  \
        }

#define PARAVIRT_DEFINE_CPU_OP2(op, type)               \
        static inline void                              \
        paravirt_ ## op (unsigned long arg1,            \
                         unsigned long arg2)            \
        {                                               \
                PARAVIRT_BR2(op, type, arg1, arg2);     \
        }
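/*
 * Illustrative only (not part of the original header): for example,
 * PARAVIRT_DEFINE_CPU_OP1_RET(thash, THASH) below expands to roughly
 *
 *	static inline unsigned long
 *	paravirt_thash(unsigned long arg1)
 *	{
 *		register unsigned long ia64_intri_res asm ("r8");
 *		register unsigned long __arg1 asm ("r8") = arg1;
 *		asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,
 *					PARAVIRT_PATCH_TYPE_THASH)
 *			: "=r"(ia64_intri_res)
 *			: [op_addr] "i"(&pv_cpu_ops.thash), "0"(__arg1)
 *			: PARAVIRT_CLOBBERS1);
 *		return ia64_intri_res;
 *	}
 */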


PARAVIRT_DEFINE_CPU_OP1_VOID(fc, FC);
PARAVIRT_DEFINE_CPU_OP1_RET(thash, THASH)
PARAVIRT_DEFINE_CPU_OP1_RET(get_cpuid, GET_CPUID)
PARAVIRT_DEFINE_CPU_OP1_RET(get_pmd, GET_PMD)
PARAVIRT_DEFINE_CPU_OP2(ptcga, PTCGA)
PARAVIRT_DEFINE_CPU_OP1_RET(get_rr, GET_RR)
PARAVIRT_DEFINE_CPU_OP2(set_rr, SET_RR)
PARAVIRT_DEFINE_CPU_OP0(ssm_i, SSM_I)
PARAVIRT_DEFINE_CPU_OP0(rsm_i, RSM_I)
PARAVIRT_DEFINE_CPU_OP0_RET(get_psr_i, GET_PSR_I)
PARAVIRT_DEFINE_CPU_OP1(intrin_local_irq_restore, INTRIN_LOCAL_IRQ_RESTORE)

static inline void
paravirt_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
                        unsigned long val2, unsigned long val3,
                        unsigned long val4)
{
        register unsigned long __val0 asm ("r8") = val0;
        register unsigned long __val1 asm ("r9") = val1;
        register unsigned long __val2 asm ("r10") = val2;
        register unsigned long __val3 asm ("r11") = val3;
        register unsigned long __val4 asm ("r14") = val4;

        register unsigned long ia64_clobber0 asm ("r8");
        register unsigned long ia64_clobber1 asm ("r9");
        register unsigned long ia64_clobber2 asm ("r10");
        register unsigned long ia64_clobber3 asm ("r11");
        register unsigned long ia64_clobber4 asm ("r14");

        asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,
                                          PARAVIRT_TYPE(SET_RR0_TO_RR4))
                      : "=r"(ia64_clobber0),
                        "=r"(ia64_clobber1),
                        "=r"(ia64_clobber2),
                        "=r"(ia64_clobber3),
                        "=r"(ia64_clobber4)
                      : PARAVIRT_OP(set_rr0_to_rr4),
                        "0"(__val0), "1"(__val1), "2"(__val2),
                        "3"(__val3), "4"(__val4)
                      : PARAVIRT_CLOBBERS5);
}

/* unsigned long paravirt_getreg(int reg) */
#define __paravirt_getreg(reg)                                          \
        ({                                                              \
                register unsigned long ia64_intri_res asm ("r8");       \
                register unsigned long __reg asm ("r8") = (reg);        \
                                                                        \
                BUILD_BUG_ON(!__builtin_constant_p(reg));               \
                asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,        \
                                                  PARAVIRT_TYPE(GETREG) \
                                                  + (reg))              \
                              : "=r"(ia64_intri_res)                    \
                              : PARAVIRT_OP(getreg), "0"(__reg)         \
                              : PARAVIRT_CLOBBERS1);                    \
                                                                        \
                ia64_intri_res;                                         \
        })

/* void paravirt_setreg(int reg, unsigned long val) */
#define paravirt_setreg(reg, val)                                       \
        do {                                                            \
                register unsigned long __val asm ("r8") = val;          \
                register unsigned long __reg asm ("r9") = reg;          \
                register unsigned long ia64_clobber1 asm ("r8");        \
                register unsigned long ia64_clobber2 asm ("r9");        \
                                                                        \
                BUILD_BUG_ON(!__builtin_constant_p(reg));               \
                asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,        \
                                                  PARAVIRT_TYPE(SETREG) \
                                                  + (reg))              \
                              : "=r"(ia64_clobber1),                    \
                                "=r"(ia64_clobber2)                     \
                              : PARAVIRT_OP(setreg),                    \
                                "1"(__reg), "0"(__val)                  \
                              : PARAVIRT_CLOBBERS2);                    \
        } while (0)
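/*
 * Illustrative only (not part of the original header): reg must be an
 * _IA64_REG_xxx compile-time constant, since it is folded into the patch
 * type above.  For instance, saving and restoring a kernel register
 * (assuming _IA64_REG_AR_KR0 from <asm/ia64regs.h>):
 *
 *	unsigned long kr0 = __paravirt_getreg(_IA64_REG_AR_KR0);
 *	paravirt_setreg(_IA64_REG_AR_KR0, kr0);
 */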

#endif /* ASM_SUPPORTED */
#endif /* CONFIG_PARAVIRT */

#endif /* _ASM_IA64_PARAVIRT_PRIVOP_H */