#ifndef __SPARC_SYSTEM_H
#define __SPARC_SYSTEM_H

#include <linux/kernel.h>
#include <linux/threads.h>	/* NR_CPUS */
#include <linux/thread_info.h>

#include <asm/page.h>
#include <asm/psr.h>
#include <asm/ptrace.h>
#include <asm/btfixup.h>
#include <asm/smp.h>

#ifndef __ASSEMBLY__

#include <linux/irqflags.h>

/*
 * Sparc (general) CPU types
 */
enum sparc_cpu {
	sun4        = 0x00,
	sun4c       = 0x01,
	sun4m       = 0x02,
	sun4d       = 0x03,
	sun4e       = 0x04,
	sun4u       = 0x05, /* V8 ploos ploos */
	sun_unknown = 0x06,
	ap1000      = 0x07, /* almost a sun4m */
};

/* Really, userland should not be looking at any of this... */
#ifdef __KERNEL__

extern enum sparc_cpu sparc_cpu_model;

#ifndef CONFIG_SUN4
#define ARCH_SUN4C_SUN4 (sparc_cpu_model==sun4c)
#define ARCH_SUN4 0
#else
#define ARCH_SUN4C_SUN4 1
#define ARCH_SUN4 1
#endif

#define SUN4M_NCPUS	4	/* Architectural limit of sun4m. */

extern char reboot_command[];

extern struct thread_info *current_set[NR_CPUS];

extern unsigned long empty_bad_page;
extern unsigned long empty_bad_page_table;
extern unsigned long empty_zero_page;

extern void sun_do_break(void);
extern int serial_console;
extern int stop_a_enabled;

static inline int con_is_present(void)
{
	return serial_console ? 0 : 1;
}

/* When a context switch happens we must flush all user windows so that
 * the windows of the current process are flushed onto its stack. This
 * way the windows are all clean for the next process and the stack
 * frames are up to date.
 */
extern void flush_user_windows(void);
extern void kill_user_windows(void);
extern void synchronize_user_stack(void);
extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
		   void *fpqueue, unsigned long *fpqdepth);

#ifdef CONFIG_SMP
#define SWITCH_ENTER(prv) \
	do {			\
	if (test_tsk_thread_flag(prv, TIF_USEDFPU)) { \
		put_psr(get_psr() | PSR_EF); \
		fpsave(&(prv)->thread.float_regs[0], &(prv)->thread.fsr, \
		       &(prv)->thread.fpqueue[0], &(prv)->thread.fpqdepth); \
		clear_tsk_thread_flag(prv, TIF_USEDFPU); \
		(prv)->thread.kregs->psr &= ~PSR_EF; \
	} \
	} while(0)

#define SWITCH_DO_LAZY_FPU(next)	/* */
#else
#define SWITCH_ENTER(prv)		/* */
#define SWITCH_DO_LAZY_FPU(nxt) \
	do {			\
	if (last_task_used_math != (nxt))		\
		(nxt)->thread.kregs->psr &= ~PSR_EF;	\
	} while(0)
#endif

/*
 * Flush windows so that the VM switch which follows
 * would not pull the stack from under us.
 *
 * SWITCH_ENTER and SWITCH_DO_LAZY_FPU do not work yet (e.g. SMP does not work)
 * XXX WTF is the above comment? Found in late teen 2.4.x.
 */
#define prepare_arch_switch(next) do { \
	__asm__ __volatile__( \
	".globl\tflush_patch_switch\nflush_patch_switch:\n\t" \
	"save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
	"save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
	"save %sp, -0x40, %sp\n\t" \
	"restore; restore; restore; restore; restore; restore; restore"); \
} while(0)

	/* Much care has gone into this code, do not touch it.
	 *
	 * We need to load up regs l0/l1 for the newly forked child
	 * case because the trap return path relies on those registers
	 * holding certain values, gcc is told that they are clobbered.
	 * Gcc needs registers for 3 values in and 1 value out, so we
	 * clobber every non-fixed-usage register besides l2/l3/o4/o5.  -DaveM
	 *
	 * Hey Dave, that do not touch sign is too much of an incentive
	 * - Anton & Pete
	 */
#define switch_to(prev, next, last) do {						\
	SWITCH_ENTER(prev);								\
	SWITCH_DO_LAZY_FPU(next);							\
	cpu_set(smp_processor_id(), next->active_mm->cpu_vm_mask);			\
	__asm__ __volatile__(								\
	"sethi	%%hi(here - 0x8), %%o7\n\t"						\
	"mov	%%g6, %%g3\n\t"								\
	"or	%%o7, %%lo(here - 0x8), %%o7\n\t"					\
	"rd	%%psr, %%g4\n\t"							\
	"std	%%sp, [%%g6 + %4]\n\t"							\
	"rd	%%wim, %%g5\n\t"							\
	"wr	%%g4, 0x20, %%psr\n\t"							\
	"nop\n\t"									\
	"std	%%g4, [%%g6 + %3]\n\t"							\
	"ldd	[%2 + %3], %%g4\n\t"							\
	"mov	%2, %%g6\n\t"								\
	".globl	patchme_store_new_current\n"						\
"patchme_store_new_current:\n\t"							\
	"st	%2, [%1]\n\t"								\
	"wr	%%g4, 0x20, %%psr\n\t"							\
	"nop\n\t"									\
	"nop\n\t"									\
	"nop\n\t"	/* LEON needs all 3 nops: load to %sp depends on CWP. */	\
	"ldd	[%%g6 + %4], %%sp\n\t"							\
	"wr	%%g5, 0x0, %%wim\n\t"							\
	"ldd	[%%sp + 0x00], %%l0\n\t"						\
	"ldd	[%%sp + 0x38], %%i6\n\t"						\
	"wr	%%g4, 0x0, %%psr\n\t"							\
	"nop\n\t"									\
	"nop\n\t"									\
	"jmpl	%%o7 + 0x8, %%g0\n\t"							\
	" ld	[%%g3 + %5], %0\n\t"							\
	"here:\n"									\
	: "=&r" (last)									\
	: "r" (&(current_set[hard_smp_processor_id()])),	\
	  "r" (task_thread_info(next)),				\
	  "i" (TI_KPSR),					\
	  "i" (TI_KSP),						\
	  "i" (TI_TASK)						\
	:       "g1", "g2", "g3", "g4", "g5",       "g7",	\
	  "l0", "l1",       "l3", "l4", "l5", "l6", "l7",	\
	  "i0", "i1", "i2", "i3", "i4", "i5",			\
	  "o0", "o1", "o2", "o3",                   "o7");	\
	} while(0)

/* XXX Change this if we ever use a PSO mode kernel. */
#define mb()	__asm__ __volatile__ ("" : : : "memory")
#define rmb()	mb()
#define wmb()	mb()
#define read_barrier_depends()	do { } while(0)
#define set_mb(__var, __value)  do { __var = __value; mb(); } while(0)
#define smp_mb()	__asm__ __volatile__("":::"memory")
#define smp_rmb()	__asm__ __volatile__("":::"memory")
#define smp_wmb()	__asm__ __volatile__("":::"memory")
#define smp_read_barrier_depends()	do { } while(0)
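
/*
 * Illustrative sketch only (not part of the original header): the classic
 * message-passing pattern these barriers are meant to order.  'data' and
 * 'flag' are hypothetical variables.  Since the kernel does not run in PSO
 * mode (see the XXX note above), compiler-only barriers suffice here.
 *
 *	CPU 0				CPU 1
 *	-----				-----
 *	data = 42;			while (flag == 0)
 *	smp_wmb();				cpu_relax();
 *	flag = 1;			smp_rmb();
 *					BUG_ON(data != 42);
 */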

#define nop() __asm__ __volatile__ ("nop")

/* This has special calling conventions */
#ifndef CONFIG_SMP
BTFIXUPDEF_CALL(void, ___xchg32, void)
#endif

static inline unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val)
{
#ifdef CONFIG_SMP
	__asm__ __volatile__("swap [%2], %0"
			     : "=&r" (val)
			     : "0" (val), "r" (m)
			     : "memory");
	return val;
#else
	register unsigned long *ptr asm("g1");
	register unsigned long ret asm("g2");

	ptr = (unsigned long *) m;
	ret = val;

	/* Note: this is magic and the nop there is
	   really needed. */
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___f____xchg32\n\t"
	" nop\n\t"
	: "=&r" (ret)
	: "0" (ret), "r" (ptr)
	: "g3", "g4", "g7", "memory", "cc");

	return ret;
#endif
}

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

extern void __xchg_called_with_bad_pointer(void);

static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int size)
{
	switch (size) {
	case 4:
		return xchg_u32(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}
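
/*
 * Illustrative sketch only: xchg() atomically swaps in a new word and hands
 * back the old one, e.g. to drain a word of pending flags in one shot.
 * 'take_pending' and 'pending' are hypothetical names, not part of this
 * header.
 *
 *	static inline unsigned long take_pending(unsigned long *pending)
 *	{
 *		return xchg(pending, 0UL);
 *	}
 *
 * The return value is the old contents; the word is left zeroed.
 */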

/* Emulate cmpxchg() the same way we emulate atomics,
 * by hashing the object address and indexing into an array
 * of spinlocks to get a bit of performance...
 *
 * See arch/sparc/lib/atomic32.c for implementation.
 *
 * Cribbed from <asm-parisc/atomic.h>
 */
#define __HAVE_ARCH_CMPXCHG	1

/* bug catcher for when unsupported size is used - won't link */
extern void __cmpxchg_called_with_bad_pointer(void);
/* we only need to support cmpxchg of a u32 on sparc */
extern unsigned long __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);
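
/*
 * For illustration only, a simplified sketch of the hashed-spinlock
 * emulation described above; the real code lives in
 * arch/sparc/lib/atomic32.c and may differ in detail (the hash size,
 * shift, and __atomic_hash name here are assumptions):
 *
 *	extern spinlock_t __atomic_hash[4];
 *	#define ATOMIC_HASH(ptr) \
 *		(&__atomic_hash[(((unsigned long)(ptr)) >> 8) & 3])
 *
 *	unsigned long __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_)
 *	{
 *		unsigned long flags;
 *		u32 prev;
 *
 *		spin_lock_irqsave(ATOMIC_HASH(m), flags);
 *		if ((prev = *m) == old)
 *			*m = new_;
 *		spin_unlock_irqrestore(ATOMIC_HASH(m), flags);
 *
 *		return (unsigned long)prev;
 *	}
 */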

/* don't worry...optimizer will get rid of most of this */
static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32((u32 *)ptr, (u32)old, (u32)new_);
	default:
		__cmpxchg_called_with_bad_pointer();
		break;
	}
	return old;
}

#define cmpxchg(ptr, o, n)						\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
			(unsigned long)_n_, sizeof(*(ptr)));		\
})
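
/*
 * Illustrative sketch only: the standard compare-and-swap retry loop,
 * here adding to a counter without a lock.  'atomic_add_u32' and 'ctr'
 * are hypothetical names, not part of this header.
 *
 *	static inline void atomic_add_u32(u32 *ctr, u32 delta)
 *	{
 *		u32 old;
 *
 *		do {
 *			old = *ctr;
 *		} while (cmpxchg(ctr, old, old + delta) != old);
 *	}
 */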

#include <asm-generic/cmpxchg-local.h>

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)					       \
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
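
/*
 * Illustrative sketch only: cmpxchg_local() is atomic with respect to code
 * on the current CPU (e.g. for per-cpu state) but, unlike cmpxchg(), makes
 * no guarantee against other CPUs.  'pcpu_bump' and 'cnt' are hypothetical
 * names.
 *
 *	static inline void pcpu_bump(unsigned long *cnt)
 *	{
 *		unsigned long old;
 *
 *		do {
 *			old = *cnt;
 *		} while (cmpxchg_local(cnt, old, old + 1) != old);
 *	}
 */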

extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noreturn));

#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* !(__SPARC_SYSTEM_H) */