[SPARC]: Fix robust futex syscalls and wire up migrate_pages.
include/asm-i386/system.h (linux-2.6)
#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/kernel.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <linux/bitops.h> /* for LOCK_PREFIX */

#ifdef __KERNEL__

struct task_struct;	/* one of the stranger aspects of C forward declarations.. */
extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));

/*
 * Saving eflags is important. It not only switches IOPL between tasks,
 * it also protects other tasks from NT leaking in through sysenter etc.
 */
#define switch_to(prev,next,last) do {                                  \
        unsigned long esi,edi;                                          \
        asm volatile("pushfl\n\t"               /* Save flags */        \
                     "pushl %%ebp\n\t"                                  \
                     "movl %%esp,%0\n\t"        /* save ESP */          \
                     "movl %5,%%esp\n\t"        /* restore ESP */       \
                     "movl $1f,%1\n\t"          /* save EIP */          \
                     "pushl %6\n\t"             /* restore EIP */       \
                     "jmp __switch_to\n"                                \
                     "1:\t"                                             \
                     "popl %%ebp\n\t"                                   \
                     "popfl"                                            \
                     :"=m" (prev->thread.esp),"=m" (prev->thread.eip),  \
                      "=a" (last),"=S" (esi),"=D" (edi)                 \
                     :"m" (next->thread.esp),"m" (next->thread.eip),    \
                      "2" (prev), "d" (next));                          \
} while (0)

#define _set_base(addr,base) do { unsigned long __pr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
        "rorl $16,%%edx\n\t" \
        "movb %%dl,%2\n\t" \
        "movb %%dh,%3" \
        :"=&d" (__pr) \
        :"m" (*((addr)+2)), \
         "m" (*((addr)+4)), \
         "m" (*((addr)+7)), \
         "0" (base) \
        ); } while(0)

#define _set_limit(addr,limit) do { unsigned long __lr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
        "rorl $16,%%edx\n\t" \
        "movb %2,%%dh\n\t" \
        "andb $0xf0,%%dh\n\t" \
        "orb %%dh,%%dl\n\t" \
        "movb %%dl,%2" \
        :"=&d" (__lr) \
        :"m" (*(addr)), \
         "m" (*((addr)+6)), \
         "0" (limit) \
        ); } while(0)

#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1) )

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg,value)                  \
        asm volatile("\n"                       \
                "1:\t"                          \
                "mov %0,%%" #seg "\n"           \
                "2:\n"                          \
                ".section .fixup,\"ax\"\n"      \
                "3:\t"                          \
                "pushl $0\n\t"                  \
                "popl %%" #seg "\n\t"           \
                "jmp 2b\n"                      \
                ".previous\n"                   \
                ".section __ex_table,\"a\"\n\t" \
                ".align 4\n\t"                  \
                ".long 1b,3b\n"                 \
                ".previous"                     \
                : :"rm" (value))

/*
 * Save a segment register away
 */
#define savesegment(seg, value) \
        asm volatile("mov %%" #seg ",%0":"=rm" (value))
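
/*
 * Hedged usage sketch (illustrative only, not part of the original
 * header; example_swap_fs() and new_sel are hypothetical names):
 * stash a segment register with savesegment() and install a new
 * selector with loadsegment(), which falls back to the null selector
 * via the fixup section above if the value is bad.
 */
static inline unsigned int example_swap_fs(unsigned int new_sel)
{
        unsigned int old_sel;
        savesegment(fs, old_sel);       /* remember the current %fs */
        loadsegment(fs, new_sel);       /* fixed up to 0 on a faulting load */
        return old_sel;
}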

#define read_cr0() ({ \
        unsigned int __dummy; \
        __asm__ __volatile__( \
                "movl %%cr0,%0\n\t" \
                :"=r" (__dummy)); \
        __dummy; \
})
#define write_cr0(x) \
        __asm__ __volatile__("movl %0,%%cr0": :"r" (x))

#define read_cr2() ({ \
        unsigned int __dummy; \
        __asm__ __volatile__( \
                "movl %%cr2,%0\n\t" \
                :"=r" (__dummy)); \
        __dummy; \
})
#define write_cr2(x) \
        __asm__ __volatile__("movl %0,%%cr2": :"r" (x))

#define read_cr3() ({ \
        unsigned int __dummy; \
        __asm__ ( \
                "movl %%cr3,%0\n\t" \
                :"=r" (__dummy)); \
        __dummy; \
})
#define write_cr3(x) \
        __asm__ __volatile__("movl %0,%%cr3": :"r" (x))

#define read_cr4() ({ \
        unsigned int __dummy; \
        __asm__( \
                "movl %%cr4,%0\n\t" \
                :"=r" (__dummy)); \
        __dummy; \
})
#define read_cr4_safe() ({                            \
        unsigned int __dummy;                         \
        /* This could fault if %cr4 does not exist */ \
        __asm__("1: movl %%cr4, %0              \n"   \
                "2:                             \n"   \
                ".section __ex_table,\"a\"      \n"   \
                ".long 1b,2b                    \n"   \
                ".previous                      \n"   \
                : "=r" (__dummy): "0" (0));           \
        __dummy;                                      \
})
#define write_cr4(x) \
        __asm__ __volatile__("movl %0,%%cr4": :"r" (x))

/*
 * Clear and set 'TS' bit respectively
 */
#define clts() __asm__ __volatile__ ("clts")
#define stts() write_cr0(8 | read_cr0())
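
/*
 * Hedged sketch of the lazy-FPU pattern these two helpers serve
 * (illustrative only; example_use_fpu() is a hypothetical name):
 * clts() clears CR0.TS so FPU instructions stop trapping, stts()
 * sets TS again (bit 3 of CR0, hence the 8 above) so the next FPU
 * use raises #NM and the state can be reloaded lazily.
 */
static inline void example_use_fpu(void)
{
        clts();                 /* allow FPU/SSE use without trapping */
        /* ... touch FPU state here ... */
        stts();                 /* re-arm the device-not-available trap */
}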

#endif  /* __KERNEL__ */

#define wbinvd() \
        __asm__ __volatile__ ("wbinvd": : :"memory")

static inline unsigned long get_limit(unsigned long segment)
{
        unsigned long __limit;
        __asm__("lsll %1,%0"
                :"=r" (__limit):"r" (segment));
        return __limit+1;
}

#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))


#ifdef CONFIG_X86_CMPXCHG64

/*
 * The semantics of CMPXCHG8B are a bit strange, this is why
 * there is a loop and the loading of %%eax and %%edx has to
 * be inside. This inlines well in most cases, the cached
 * cost is around ~38 cycles. (in the future we might want
 * to do a SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
 * might have an implicit FPU-save as a cost, so it's not
 * clear which path to go.)
 *
 * cmpxchg8b must be used with the lock prefix here to allow
 * the instruction to be executed atomically, see page 3-102
 * of the instruction set reference 24319102.pdf. We need
 * the reader side to see the coherent 64bit value.
 */
static inline void __set_64bit (unsigned long long * ptr,
                unsigned int low, unsigned int high)
{
        __asm__ __volatile__ (
                "\n1:\t"
                "movl (%0), %%eax\n\t"
                "movl 4(%0), %%edx\n\t"
                "lock cmpxchg8b (%0)\n\t"
                "jnz 1b"
                : /* no outputs */
                :       "D"(ptr),
                        "b"(low),
                        "c"(high)
                :       "ax","dx","memory");
}

static inline void __set_64bit_constant (unsigned long long *ptr,
                                                 unsigned long long value)
{
        __set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
}
#define ll_low(x)       *(((unsigned int*)&(x))+0)
#define ll_high(x)      *(((unsigned int*)&(x))+1)

static inline void __set_64bit_var (unsigned long long *ptr,
                         unsigned long long value)
{
        __set_64bit(ptr,ll_low(value), ll_high(value));
}

#define set_64bit(ptr,value) \
(__builtin_constant_p(value) ? \
 __set_64bit_constant(ptr, value) : \
 __set_64bit_var(ptr, value) )

#define _set_64bit(ptr,value) \
(__builtin_constant_p(value) ? \
 __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
 __set_64bit(ptr, ll_low(value), ll_high(value)) )
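
/*
 * Hedged usage sketch (illustrative only; example_publish_stamp() and
 * shared_stamp are hypothetical names): set_64bit() stores all 64 bits
 * in one atomic cmpxchg8b loop, so a concurrent reader never observes
 * a torn half-written value.
 */
static inline void example_publish_stamp(unsigned long long *shared_stamp,
                                         unsigned long long stamp)
{
        set_64bit(shared_stamp, stamp); /* single atomic 64-bit store */
}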

#endif

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has a side effect, so the volatile attribute is necessary;
 *        strictly the constraints are wrong, since *ptr is really an
 *        output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
        switch (size) {
                case 1:
                        __asm__ __volatile__("xchgb %b0,%1"
                                :"=q" (x)
                                :"m" (*__xg(ptr)), "0" (x)
                                :"memory");
                        break;
                case 2:
                        __asm__ __volatile__("xchgw %w0,%1"
                                :"=r" (x)
                                :"m" (*__xg(ptr)), "0" (x)
                                :"memory");
                        break;
                case 4:
                        __asm__ __volatile__("xchgl %0,%1"
                                :"=r" (x)
                                :"m" (*__xg(ptr)), "0" (x)
                                :"memory");
                        break;
        }
        return x;
}
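
/*
 * Hedged sketch of a test-and-set lock built on tas()/xchg() above
 * (illustrative only; example_lock()/example_unlock() are hypothetical
 * helpers, not kernel APIs):
 */
static inline void example_lock(volatile int *lock)
{
        while (tas(lock))               /* atomically swap in 1 */
                while (*lock)           /* spin on plain reads while held */
                        ;
}

static inline void example_unlock(volatile int *lock)
{
        *lock = 0;                      /* i386 stores are strongly ordered */
}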

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1
#define cmpxchg(ptr,o,n)\
        ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
                                        (unsigned long)(n),sizeof(*(ptr))))
#define sync_cmpxchg(ptr,o,n)\
        ((__typeof__(*(ptr)))__sync_cmpxchg((ptr),(unsigned long)(o),\
                                        (unsigned long)(n),sizeof(*(ptr))))
#endif

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
                                      unsigned long new, int size)
{
        unsigned long prev;
        switch (size) {
        case 1:
                __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
                                     : "=a"(prev)
                                     : "q"(new), "m"(*__xg(ptr)), "0"(old)
                                     : "memory");
                return prev;
        case 2:
                __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
                                     : "=a"(prev)
                                     : "r"(new), "m"(*__xg(ptr)), "0"(old)
                                     : "memory");
                return prev;
        case 4:
                __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
                                     : "=a"(prev)
                                     : "r"(new), "m"(*__xg(ptr)), "0"(old)
                                     : "memory");
                return prev;
        }
        return old;
}
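
/*
 * Hedged sketch of the canonical compare-and-exchange retry loop
 * (illustrative only; example_atomic_add() is a hypothetical helper).
 * __cmpxchg() is called directly here; the cmpxchg() macro wraps the
 * same operation with the type-based size selection.
 */
static inline void example_atomic_add(volatile unsigned int *v, unsigned int n)
{
        unsigned int old;
        do {
                old = *v;               /* snapshot the current value */
        } while (__cmpxchg(v, old, old + n, sizeof(*v)) != old);
}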

/*
 * Always use locked operations when touching memory shared with a
 * hypervisor, since the system may be SMP even if the guest kernel
 * isn't.
 */
static inline unsigned long __sync_cmpxchg(volatile void *ptr,
                                            unsigned long old,
                                            unsigned long new, int size)
{
        unsigned long prev;
        switch (size) {
        case 1:
                __asm__ __volatile__("lock; cmpxchgb %b1,%2"
                                     : "=a"(prev)
                                     : "q"(new), "m"(*__xg(ptr)), "0"(old)
                                     : "memory");
                return prev;
        case 2:
                __asm__ __volatile__("lock; cmpxchgw %w1,%2"
                                     : "=a"(prev)
                                     : "r"(new), "m"(*__xg(ptr)), "0"(old)
                                     : "memory");
                return prev;
        case 4:
                __asm__ __volatile__("lock; cmpxchgl %1,%2"
                                     : "=a"(prev)
                                     : "r"(new), "m"(*__xg(ptr)), "0"(old)
                                     : "memory");
                return prev;
        }
        return old;
}
#ifndef CONFIG_X86_CMPXCHG
/*
 * Building a kernel capable of running on an 80386. It may be necessary
 * to simulate cmpxchg on the 80386 CPU. For that purpose we define
 * a function for each of the sizes we support.
 */

extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);

static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
                                      unsigned long new, int size)
{
        switch (size) {
        case 1:
                return cmpxchg_386_u8(ptr, old, new);
        case 2:
                return cmpxchg_386_u16(ptr, old, new);
        case 4:
                return cmpxchg_386_u32(ptr, old, new);
        }
        return old;
}

#define cmpxchg(ptr,o,n)                                                \
({                                                                      \
        __typeof__(*(ptr)) __ret;                                       \
        if (likely(boot_cpu_data.x86 > 3))                              \
                __ret = __cmpxchg((ptr), (unsigned long)(o),            \
                                        (unsigned long)(n), sizeof(*(ptr))); \
        else                                                            \
                __ret = cmpxchg_386((ptr), (unsigned long)(o),          \
                                        (unsigned long)(n), sizeof(*(ptr))); \
        __ret;                                                          \
})
#endif

#ifdef CONFIG_X86_CMPXCHG64

static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
                                      unsigned long long new)
{
        unsigned long long prev;
        __asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
                             : "=A"(prev)
                             : "b"((unsigned long)new),
                               "c"((unsigned long)(new >> 32)),
                               "m"(*__xg(ptr)),
                               "0"(old)
                             : "memory");
        return prev;
}

#define cmpxchg64(ptr,o,n)\
        ((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\
                                        (unsigned long long)(n)))
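
/*
 * Hedged sketch of a 64-bit retry loop on top of cmpxchg64()
 * (illustrative only; example_add64() is a hypothetical helper).
 * The plain load may be torn on a 32-bit CPU, but cmpxchg64() only
 * succeeds if the full 64-bit snapshot was consistent.
 */
static inline void example_add64(volatile unsigned long long *v,
                                 unsigned long long n)
{
        unsigned long long old;
        do {
                old = *v;
        } while (cmpxchg64(v, old, old + n) != old);
}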

#endif

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPUs follow what Intel calls a *Processor Order*,
 * in which all writes are seen in program order even
 * outside the CPU.
 *
 * I expect future Intel CPUs to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non-Intel clones support out-of-order stores. wmb()
 * ceases to be a no-op for these.
 */


/*
 * Actually only lfence would be needed for mb() because all stores done
 * by the kernel should already be ordered. But keep a full barrier for now.
 */

#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)

/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight
 * than rmb() on most CPUs, and is never heavier weight than rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *      CPU 0                           CPU 1
 *
 *      b = 2;
 *      memory_barrier();
 *      p = &b;                         q = p;
 *                                      read_barrier_depends();
 *                                      d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *      CPU 0                           CPU 1
 *
 *      a = 2;
 *      memory_barrier();
 *      b = 3;                          y = b;
 *                                      read_barrier_depends();
 *                                      x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.  (A hedged
 * C sketch of the first pattern follows the smp_*() definitions below.)
 **/

#define read_barrier_depends()  do { } while(0)

#ifdef CONFIG_X86_OOSTORE
/* Actually there are no OOO store capable CPUs for now that do SSE,
   but already make it a possibility. */
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define wmb()   __asm__ __volatile__ ("": : :"memory")
#endif

#ifdef CONFIG_SMP
#define smp_mb()        mb()
#define smp_rmb()       rmb()
#define smp_wmb()       wmb()
#define smp_read_barrier_depends()      read_barrier_depends()
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
#else
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#define smp_read_barrier_depends()      do { } while(0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
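
/*
 * Hedged C rendering of the pointer-publish pattern documented above
 * read_barrier_depends() (illustrative only; the example_* names are
 * hypothetical): the writer orders its stores with smp_wmb(), and the
 * reader needs only a data-dependency barrier before dereferencing
 * the pointer it loaded.
 */
static int example_b = 1;
static int *example_p;

static inline void example_publish(void)        /* runs on CPU 0 */
{
        example_b = 2;
        smp_wmb();                      /* order the store to b before p */
        example_p = &example_b;
}

static inline int example_consume(void)         /* runs on CPU 1 */
{
        int *q = example_p;
        smp_read_barrier_depends();     /* no-op on i386, real on Alpha */
        return q ? *q : -1;             /* dependent read sees b == 2 */
}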

#include <linux/irqflags.h>

/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);

extern int es7000_plat;
void cpu_idle_wait(void);

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible:
 */
static inline void sched_cacheflush(void)
{
        wbinvd();
}

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);

#endif