/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_AVR32_SYSTEM_H
#define __ASM_AVR32_SYSTEM_H

#include <linux/compiler.h>
#include <linux/linkage.h>
#include <linux/types.h>

#include <asm/ptrace.h>
#include <asm/sysreg.h>

#define xchg(ptr,x) \
        ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
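
/*
 * Usage sketch (illustrative only; "owner" is a made-up variable):
 *
 *      static unsigned long owner;
 *      unsigned long prev = xchg(&owner, 1UL);
 *
 * The cast through __typeof__(*(ptr)) makes xchg() return the same
 * type as the object being exchanged; only 32-bit objects are
 * supported (see __xchg() below).
 */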

#define nop() asm volatile("nop")

#define mb()                    asm volatile("" : : : "memory")
#define rmb()                   mb()
#define wmb()                   asm volatile("sync 0" : : : "memory")
#define read_barrier_depends()  do { } while(0)
#define set_mb(var, value)      do { var = value; mb(); } while(0)

/*
 * Help PathFinder and other Nexus-compliant debuggers keep track of
 * the current PID by emitting an Ownership Trace Message each time we
 * switch task.
 */
#ifdef CONFIG_OWNERSHIP_TRACE
#include <asm/ocd.h>
#define finish_arch_switch(prev)                        \
        do {                                            \
                __mtdr(DBGREG_PID, prev->pid);          \
                __mtdr(DBGREG_PID, current->pid);       \
        } while(0)
#endif

/*
 * switch_to(prev, next, last) should switch from task `prev' to task
 * `next'. `prev' will never be the same as `next'.
 *
 * We just delegate everything to the __switch_to assembly function,
 * which is implemented in arch/avr32/kernel/switch_to.S.
 *
 * mb() tells GCC not to cache `current' across this call.
 */
struct cpu_context;
struct task_struct;
extern struct task_struct *__switch_to(struct task_struct *,
                                       struct cpu_context *,
                                       struct cpu_context *);
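
/*
 * Note that the second argument to __switch_to() points one element
 * past prev's cpu_context, presumably so that the assembly in
 * switch_to.S can save the outgoing context with pre-decrement stores,
 * filling the structure from the end.
 */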
#define switch_to(prev, next, last)                                     \
        do {                                                            \
                last = __switch_to(prev, &prev->thread.cpu_context + 1, \
                                   &next->thread.cpu_context);          \
        } while (0)
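
/*
 * This port is uniprocessor-only (see the #error below), so the smp_*
 * barriers never have to order accesses against another CPU; a plain
 * compiler barrier is all they need to be.
 */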
#ifdef CONFIG_SMP
# error "The AVR32 port does not support SMP"
#else
# define smp_mb()               barrier()
# define smp_rmb()              barrier()
# define smp_wmb()              barrier()
# define smp_read_barrier_depends() do { } while(0)
#endif

#include <linux/irqflags.h>
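
/*
 * Deliberately never defined anywhere: if __xchg() is expanded with an
 * unsupported operand size, the call survives into the final link and
 * fails there, rather than silently becoming a non-atomic exchange.
 * (The same trick is used for cmpxchg() further down.)
 */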
extern void __xchg_called_with_bad_pointer(void);
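
/*
 * Atomically exchange *m and val using the AVR32 "xchg" instruction,
 * returning the previous contents of *m.
 */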
static inline unsigned long xchg_u32(u32 val, volatile u32 *m)
{
        u32 ret;

        asm volatile("xchg %[ret], %[m], %[val]"
                        : [ret] "=&r"(ret), "=m"(*m)
                        : "m"(*m), [m] "r"(m), [val] "r"(val)
                        : "memory");
        return ret;
}

static inline unsigned long __xchg(unsigned long x,
                                       volatile void *ptr,
                                       int size)
{
        switch (size) {
        case 4:
                return xchg_u32(x, ptr);
        default:
                __xchg_called_with_bad_pointer();
                return x;
        }
}
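
/*
 * Compare-and-exchange using AVR32's locked-load/store-conditional
 * sequence: "ssrf 5" sets the L (lock) flag, "stcond" performs the
 * store only if the flag is still set, and the final branch retries
 * whenever the sequence was disturbed.  The value actually observed
 * in *m is returned, so the exchange happened iff that value equals
 * old.
 */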
static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
                                          unsigned long new)
{
        __u32 ret;

        asm volatile(
                "1:     ssrf    5\n"
                "       ld.w    %[ret], %[m]\n"
                "       cp.w    %[ret], %[old]\n"
                "       brne    2f\n"
                "       stcond  %[m], %[new]\n"
                "       brne    1b\n"
                "2:\n"
                : [ret] "=&r"(ret), [m] "=m"(*m)
                : "m"(*m), [old] "ir"(old), [new] "r"(new)
                : "memory", "cc");
        return ret;
}

extern unsigned long __cmpxchg_u64_unsupported_on_32bit_kernels(
        volatile int * m, unsigned long old, unsigned long new);
#define __cmpxchg_u64 __cmpxchg_u64_unsupported_on_32bit_kernels

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg().
 */
extern void __cmpxchg_called_with_bad_pointer(void);

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
                                      unsigned long new, int size)
{
        switch (size) {
        case 4:
                return __cmpxchg_u32(ptr, old, new);
        case 8:
                return __cmpxchg_u64(ptr, old, new);
        }

        __cmpxchg_called_with_bad_pointer();
        return old;
}

#define cmpxchg(ptr, old, new)                                  \
        ((typeof(*(ptr)))__cmpxchg((ptr), (unsigned long)(old), \
                                   (unsigned long)(new),        \
                                   sizeof(*(ptr))))
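
/*
 * Usage sketch (illustrative only; the variable and function names are
 * made up):
 *
 *      static int lock;
 *
 *      if (cmpxchg(&lock, 0, 1) == 0)
 *              enter_critical_section();
 *
 * cmpxchg() returns the value it actually observed, so comparing the
 * result against the expected old value tells the caller whether the
 * update took place.
 */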

struct pt_regs;
void NORET_TYPE die(const char *str, struct pt_regs *regs, long err);
void _exception(long signr, struct pt_regs *regs, int code,
                unsigned long addr);

#define arch_align_stack(x)     (x)

#endif /* __ASM_AVR32_SYSTEM_H */