/*
 *  include/asm-s390/system.h
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *
 *  Derived from "include/asm-i386/system.h"
 */

#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/kernel.h>
#include <asm/types.h>
#include <asm/ptrace.h>
#include <asm/setup.h>
#include <asm/processor.h>

#ifdef __KERNEL__

struct task_struct;

extern struct task_struct *__switch_to(void *, void *);

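/*
 * Save/restore the floating point register file. The registers
 * f0/f2/f4/f6 exist on every machine; the full set of sixteen
 * registers and the floating point control register are only
 * present when the machine has the IEEE facility.
 */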
static inline void save_fp_regs(s390_fp_regs *fpregs)
{
        asm volatile(
                "       std     0,8(%1)\n"
                "       std     2,24(%1)\n"
                "       std     4,40(%1)\n"
                "       std     6,56(%1)"
                : "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory");
        if (!MACHINE_HAS_IEEE)
                return;
        asm volatile(
                "       stfpc   0(%1)\n"
                "       std     1,16(%1)\n"
                "       std     3,32(%1)\n"
                "       std     5,48(%1)\n"
                "       std     7,64(%1)\n"
                "       std     8,72(%1)\n"
                "       std     9,80(%1)\n"
                "       std     10,88(%1)\n"
                "       std     11,96(%1)\n"
                "       std     12,104(%1)\n"
                "       std     13,112(%1)\n"
                "       std     14,120(%1)\n"
                "       std     15,128(%1)\n"
                : "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory");
}

static inline void restore_fp_regs(s390_fp_regs *fpregs)
{
        asm volatile(
                "       ld      0,8(%0)\n"
                "       ld      2,24(%0)\n"
                "       ld      4,40(%0)\n"
                "       ld      6,56(%0)"
                : : "a" (fpregs), "m" (*fpregs));
        if (!MACHINE_HAS_IEEE)
                return;
        asm volatile(
                "       lfpc    0(%0)\n"
                "       ld      1,16(%0)\n"
                "       ld      3,32(%0)\n"
                "       ld      5,48(%0)\n"
                "       ld      7,64(%0)\n"
                "       ld      8,72(%0)\n"
                "       ld      9,80(%0)\n"
                "       ld      10,88(%0)\n"
                "       ld      11,96(%0)\n"
                "       ld      12,104(%0)\n"
                "       ld      13,112(%0)\n"
                "       ld      14,120(%0)\n"
                "       ld      15,128(%0)\n"
                : : "a" (fpregs), "m" (*fpregs));
}

/* Store access registers a0-a15 into the array at acrs. */
static inline void save_access_regs(unsigned int *acrs)
{
        asm volatile("stam 0,15,0(%0)" : : "a" (acrs) : "memory");
}

/* Load access registers a0-a15 from the array at acrs. */
static inline void restore_access_regs(unsigned int *acrs)
{
        asm volatile("lam 0,15,0(%0)" : : "a" (acrs));
}

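/*
 * switch_to() hands the CPU over from task prev to task next: the
 * floating point and access registers are switched here, the general
 * purpose registers and kernel stack are switched by __switch_to().
 * Switching to the current task is a no-op.
 */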
#define switch_to(prev,next,last) do {                                       \
        if (prev == next)                                                    \
                break;                                                       \
        save_fp_regs(&prev->thread.fp_regs);                                 \
        restore_fp_regs(&next->thread.fp_regs);                              \
        save_access_regs(&prev->thread.acrs[0]);                             \
        restore_access_regs(&next->thread.acrs[0]);                          \
        prev = __switch_to(prev,next);                                       \
} while (0)

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
extern void account_vtime(struct task_struct *);
extern void account_tick_vtime(struct task_struct *);
extern void account_system_vtime(struct task_struct *);
#else
#define account_vtime(x) do { /* empty */ } while (0)
#endif

#ifdef CONFIG_PFAULT
extern void pfault_irq_init(void);
extern int pfault_init(void);
extern void pfault_fini(void);
#else /* CONFIG_PFAULT */
#define pfault_irq_init()       do { } while (0)
#define pfault_init()           ({-1;})
#define pfault_fini()           do { } while (0)
#endif /* CONFIG_PFAULT */

#define finish_arch_switch(prev) do {                                        \
        set_fs(current->thread.mm_segment);                                  \
        account_vtime(prev);                                                 \
} while (0)

#define nop() asm volatile("nop")

#define xchg(ptr,x)                                                       \
({                                                                        \
        __typeof__(*(ptr)) __ret;                                         \
        __ret = (__typeof__(*(ptr)))                                      \
                __xchg((unsigned long)(x), (void *)(ptr),sizeof(*(ptr))); \
        __ret;                                                            \
})
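
/*
 * A minimal usage sketch (illustrative only, not part of this header):
 * atomically set a flag word to 1 and obtain its previous value; a
 * return value of 0 means the caller is the one that set it.
 *
 *      static inline unsigned int claim_flag(unsigned int *flag)
 *      {
 *              return xchg(flag, 1);
 *      }
 */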

extern void __xchg_called_with_bad_pointer(void);

static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
{
        unsigned long addr, old;
        int shift;

        switch (size) {
        case 1:
                /* byte exchange: operate on the containing aligned word */
                addr = (unsigned long) ptr;
                shift = (3 ^ (addr & 3)) << 3;
                addr ^= addr & 3;
                asm volatile(
                        "       l       %0,0(%4)\n"
                        "0:     lr      0,%0\n"
                        "       nr      0,%3\n"
                        "       or      0,%2\n"
                        "       cs      %0,0,0(%4)\n"
                        "       jl      0b\n"
                        : "=&d" (old), "=m" (*(int *) addr)
                        : "d" (x << shift), "d" (~(255 << shift)), "a" (addr),
                          "m" (*(int *) addr) : "memory", "cc", "0");
                return old >> shift;
        case 2:
                /* halfword exchange: operate on the containing aligned word */
                addr = (unsigned long) ptr;
                shift = (2 ^ (addr & 2)) << 3;
                addr ^= addr & 2;
                asm volatile(
                        "       l       %0,0(%4)\n"
                        "0:     lr      0,%0\n"
                        "       nr      0,%3\n"
                        "       or      0,%2\n"
                        "       cs      %0,0,0(%4)\n"
                        "       jl      0b\n"
                        : "=&d" (old), "=m" (*(int *) addr)
                        : "d" (x << shift), "d" (~(65535 << shift)), "a" (addr),
                          "m" (*(int *) addr) : "memory", "cc", "0");
                return old >> shift;
        case 4:
                asm volatile(
                        "       l       %0,0(%3)\n"
                        "0:     cs      %0,%2,0(%3)\n"
                        "       jl      0b\n"
                        : "=&d" (old), "=m" (*(int *) ptr)
                        : "d" (x), "a" (ptr), "m" (*(int *) ptr)
                        : "memory", "cc");
                return old;
#ifdef __s390x__
        case 8:
                asm volatile(
                        "       lg      %0,0(%3)\n"
                        "0:     csg     %0,%2,0(%3)\n"
                        "       jl      0b\n"
                        : "=&d" (old), "=m" (*(long *) ptr)
                        : "d" (x), "a" (ptr), "m" (*(long *) ptr)
                        : "memory", "cc");
                return old;
#endif /* __s390x__ */
        }
        __xchg_called_with_bad_pointer();
        return x;
}

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1

#define cmpxchg(ptr, o, n)                                              \
        ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o),       \
                                        (unsigned long)(n), sizeof(*(ptr))))
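
/*
 * A minimal usage sketch (illustrative only, not part of this header):
 * a lock-free increment built on cmpxchg(). The loop retries until no
 * other CPU has modified the word between the load and the
 * compare-and-swap.
 *
 *      static inline void add_sketch(unsigned int *v, unsigned int i)
 *      {
 *              unsigned int old;
 *
 *              do {
 *                      old = *v;
 *              } while (cmpxchg(v, old, old + i) != old);
 *      }
 */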

extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
        unsigned long addr, prev, tmp;
        int shift;

        switch (size) {
        case 1:
                /* byte cmpxchg: operate on the containing aligned word */
                addr = (unsigned long) ptr;
                shift = (3 ^ (addr & 3)) << 3;
                addr ^= addr & 3;
                asm volatile(
                        "       l       %0,0(%4)\n"
                        "0:     nr      %0,%5\n"
                        "       lr      %1,%0\n"
                        "       or      %0,%2\n"
                        "       or      %1,%3\n"
                        "       cs      %0,%1,0(%4)\n"
                        "       jnl     1f\n"
                        "       xr      %1,%0\n"
                        "       nr      %1,%5\n"
                        "       jnz     0b\n"
                        "1:"
                        : "=&d" (prev), "=&d" (tmp)
                        : "d" (old << shift), "d" (new << shift), "a" (addr),
                          "d" (~(255 << shift))
                        : "memory", "cc");
                return prev >> shift;
        case 2:
                /* halfword cmpxchg: operate on the containing aligned word */
                addr = (unsigned long) ptr;
                shift = (2 ^ (addr & 2)) << 3;
                addr ^= addr & 2;
                asm volatile(
                        "       l       %0,0(%4)\n"
                        "0:     nr      %0,%5\n"
                        "       lr      %1,%0\n"
                        "       or      %0,%2\n"
                        "       or      %1,%3\n"
                        "       cs      %0,%1,0(%4)\n"
                        "       jnl     1f\n"
                        "       xr      %1,%0\n"
                        "       nr      %1,%5\n"
                        "       jnz     0b\n"
                        "1:"
                        : "=&d" (prev), "=&d" (tmp)
                        : "d" (old << shift), "d" (new << shift), "a" (addr),
                          "d" (~(65535 << shift))
                        : "memory", "cc");
                return prev >> shift;
        case 4:
                asm volatile(
                        "       cs      %0,%2,0(%3)\n"
                        : "=&d" (prev) : "0" (old), "d" (new), "a" (ptr)
                        : "memory", "cc");
                return prev;
#ifdef __s390x__
        case 8:
                asm volatile(
                        "       csg     %0,%2,0(%3)\n"
                        : "=&d" (prev) : "0" (old), "d" (new), "a" (ptr)
                        : "memory", "cc");
                return prev;
#endif /* __s390x__ */
        }
        __cmpxchg_called_with_bad_pointer();
        return old;
}

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * This is very similar to the ppc eieio/sync instruction in that it
 * does a checkpoint synchronisation and makes sure that
 * all memory ops have completed wrt other CPUs (see 7-15 POP DJB).
 */

#define eieio() asm volatile("bcr 15,0" : : : "memory")
#define SYNC_OTHER_CORES(x)   eieio()
#define mb()    eieio()
#define rmb()   eieio()
#define wmb()   eieio()
#define read_barrier_depends() do { } while(0)
#define smp_mb()       mb()
#define smp_rmb()      rmb()
#define smp_wmb()      wmb()
#define smp_read_barrier_depends()    read_barrier_depends()
#define smp_mb__before_clear_bit()     smp_mb()
#define smp_mb__after_clear_bit()      smp_mb()

#define set_mb(var, value)      do { var = value; mb(); } while (0)
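
/*
 * Typical barrier pairing (illustrative only, not part of this header):
 * the producer orders its data store before the flag store, the
 * consumer orders the flag load before the data load.
 *
 *      producer:                       consumer:
 *              data = val;                     while (!ready)
 *              smp_wmb();                              ;
 *              ready = 1;                      smp_rmb();
 *                                              use(data);
 */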

#ifdef __s390x__

#define __ctl_load(array, low, high) ({                         \
        typedef struct { char _[sizeof(array)]; } addrtype;     \
        asm volatile(                                           \
                "       lctlg   %1,%2,0(%0)\n"                  \
                : : "a" (&array), "i" (low), "i" (high),        \
                    "m" (*(addrtype *)(array)));                \
        })

#define __ctl_store(array, low, high) ({                        \
        typedef struct { char _[sizeof(array)]; } addrtype;     \
        asm volatile(                                           \
                "       stctg   %2,%3,0(%1)\n"                  \
                : "=m" (*(addrtype *)(array))                   \
                : "a" (&array), "i" (low), "i" (high));         \
        })

#else /* __s390x__ */

#define __ctl_load(array, low, high) ({                         \
        typedef struct { char _[sizeof(array)]; } addrtype;     \
        asm volatile(                                           \
                "       lctl    %1,%2,0(%0)\n"                  \
                : : "a" (&array), "i" (low), "i" (high),        \
                    "m" (*(addrtype *)(array)));                \
        })

#define __ctl_store(array, low, high) ({                        \
        typedef struct { char _[sizeof(array)]; } addrtype;     \
        asm volatile(                                           \
                "       stctl   %2,%3,0(%1)\n"                  \
                : "=m" (*(addrtype *)(array))                   \
                : "a" (&array), "i" (low), "i" (high));         \
        })

#endif /* __s390x__ */

#define __ctl_set_bit(cr, bit) ({       \
        unsigned long __dummy;          \
        __ctl_store(__dummy, cr, cr);   \
        __dummy |= 1UL << (bit);        \
        __ctl_load(__dummy, cr, cr);    \
})

#define __ctl_clear_bit(cr, bit) ({     \
        unsigned long __dummy;          \
        __ctl_store(__dummy, cr, cr);   \
        __dummy &= ~(1UL << (bit));     \
        __ctl_load(__dummy, cr, cr);    \
})
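
/*
 * Usage sketch (illustrative only, not part of this header): set and
 * clear a single control register bit on the local CPU. As the shifts
 * above imply, the bit number counts from the least significant bit.
 *
 *      __ctl_set_bit(0, 17);
 *      __ctl_clear_bit(0, 17);
 */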

#include <linux/irqflags.h>

#include <asm-generic/cmpxchg-local.h>

static inline unsigned long __cmpxchg_local(volatile void *ptr,
                                      unsigned long old,
                                      unsigned long new, int size)
{
        switch (size) {
        case 1:
        case 2:
        case 4:
#ifdef __s390x__
        case 8:
#endif
                return __cmpxchg(ptr, old, new, size);
        default:
                return __cmpxchg_local_generic(ptr, old, new, size);
        }

        return old;
}

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)                                        \
        ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
                        (unsigned long)(n), sizeof(*(ptr))))
#ifdef __s390x__
#define cmpxchg64_local(ptr, o, n)                                      \
  ({                                                                    \
        BUILD_BUG_ON(sizeof(*(ptr)) != 8);                              \
        cmpxchg_local((ptr), (o), (n));                                 \
  })
#else
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif
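
/*
 * Usage sketch (illustrative only, not part of this header):
 * cmpxchg_local() takes the same arguments as cmpxchg() but is only
 * guaranteed to be atomic with respect to the current CPU, which is
 * sufficient for data never touched by other CPUs:
 *
 *      old = cmpxchg_local(&counter, old, old + 1);
 */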

/*
 * Set the PSW mask, except for the first byte, which is
 * left unchanged by this function.
 */
static inline void
__set_psw_mask(unsigned long mask)
{
        __load_psw_mask(mask | (__raw_local_irq_stosm(0x00) & ~(-1UL >> 8)));
}

#define local_mcck_enable()  __set_psw_mask(psw_kernel_bits)
#define local_mcck_disable() __set_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK)
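
/*
 * Sketch (illustrative only, not part of this header): bracket a
 * section that must not be interrupted by machine checks on this CPU.
 *
 *      local_mcck_disable();
 *      ...code that must not see a machine check...
 *      local_mcck_enable();
 */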

/* Store the extended facility list into "list" (doublewords entries). */
int stfle(unsigned long long *list, int doublewords);

#ifdef CONFIG_SMP

extern void smp_ctl_set_bit(int cr, int bit);
extern void smp_ctl_clear_bit(int cr, int bit);
#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)

#else

#define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)

#endif /* CONFIG_SMP */

extern void (*_machine_restart)(char *command);
extern void (*_machine_halt)(void);
extern void (*_machine_power_off)(void);

#define arch_align_stack(x) (x)

#ifdef CONFIG_TRACE_IRQFLAGS
extern psw_t sysc_restore_trace_psw;
extern psw_t io_restore_trace_psw;
#endif

#endif /* __KERNEL__ */

#endif /* __ASM_SYSTEM_H */