[BLOCK] Fix bad sharing of tag busy list on queues with shared tag maps
[linux-2.6] / include / asm-sh64 / system.h
1 #ifndef __ASM_SH64_SYSTEM_H
2 #define __ASM_SH64_SYSTEM_H
3
4 /*
5  * This file is subject to the terms and conditions of the GNU General Public
6  * License.  See the file "COPYING" in the main directory of this archive
7  * for more details.
8  *
9  * include/asm-sh64/system.h
10  *
11  * Copyright (C) 2000, 2001  Paolo Alberelli
12  * Copyright (C) 2003  Paul Mundt
13  * Copyright (C) 2004  Richard Curnow
14  *
15  */
16
17 #include <asm/registers.h>
18 #include <asm/processor.h>
19
/*
 * switch_to(prev, next, last): switch execution from task 'prev' to
 * task 'next'; see the macro definition below.
 */
23
/*
 * Address-limit cookie for user/kernel memory access checks.
 * Presumably read/written by get_fs()/set_fs()-style uaccess helpers,
 * which are not visible in this header — confirm against uaccess.h.
 */
typedef struct {
        unsigned long seg;
} mm_segment_t;
27
/*
 * Low-level context switch, implemented in arch assembly elsewhere.
 * Returns the task that was running before the switch (assigned to
 * 'last' by the switch_to() macro below).
 */
extern struct task_struct *sh64_switch_to(struct task_struct *prev,
                                          struct thread_struct *prev_thread,
                                          struct task_struct *next,
                                          struct thread_struct *next_thread);
32
/*
 * switch_to(prev, next, last): context-switch from prev to next.
 *
 * If 'next' is not the task that last used the FPU, set SR_FD in
 * next's saved status register — presumably the FPU-disable bit, so
 * next's first FP instruction traps and FPU state can be handled
 * lazily (TODO confirm SR_FD semantics against the SH-5 manual).
 * NOTE(review): last_task_used_math is assumed declared elsewhere.
 */
#define switch_to(prev,next,last) \
        do {\
                if (last_task_used_math != next) {\
                        struct pt_regs *regs = next->thread.uregs;\
                        if (regs) regs->sr |= SR_FD;\
                }\
                last = sh64_switch_to(prev, &prev->thread, next, &next->thread);\
        } while(0)
41
/* Emit a single no-op instruction. */
#define nop() __asm__ __volatile__ ("nop")

/*
 * xchg(ptr, x): exchange *ptr with x, returning the old value.
 * Dispatches on sizeof(*ptr) via __xchg(); the cast restores the
 * pointed-to type on the value returned as unsigned long.
 */
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

/*
 * Deliberately never defined: a reference to it from __xchg() for an
 * unsupported operand size produces a link-time error.
 */
extern void __xchg_called_with_bad_pointer(void);
47
/*
 * Memory barriers.  "synco" is the SH-5 synchronisation instruction;
 * both the full (mb) and write (wmb) barriers use it, and the read
 * barrier aliases the full one.  The "memory" clobber stops the
 * compiler reordering accesses across the barrier.
 */
#define mb()    __asm__ __volatile__ ("synco": : :"memory")
#define rmb()   mb()
#define wmb()   __asm__ __volatile__ ("synco": : :"memory")
#define read_barrier_depends()  do { } while (0)

/* SMP variants: real hardware barriers on SMP, compiler-only on UP. */
#ifdef CONFIG_SMP
#define smp_mb()        mb()
#define smp_rmb()       rmb()
#define smp_wmb()       wmb()
#define smp_read_barrier_depends()      read_barrier_depends()
#else
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#define smp_read_barrier_depends()      do { } while (0)
#endif /* CONFIG_SMP */

/* Store 'value' into 'var' with barrier semantics (via xchg). */
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
66
/* Interrupt Control */
/*
 * Status-register bits used to mask interrupts.
 *  - default:  bits 4-7 (0xf0) — presumably SR.IMASK, the interrupt
 *    priority mask (TODO confirm against the SH-5 manual);
 *  - HARD_CLI: bit 28 — SR.BL per the comments in the irq helpers
 *    below, blocking interrupts entirely.
 * _L is the 32-bit form, _LL the 64-bit form used in the asm below.
 */
#ifndef HARD_CLI
#define SR_MASK_L 0x000000f0L
#define SR_MASK_LL 0x00000000000000f0LL
#else
#define SR_MASK_L 0x10000000L
#define SR_MASK_LL 0x0000000010000000LL
#endif
75
/* Enable local interrupts: SR &= ~SR_MASK_LL (clear the mask bits). */
static __inline__ void local_irq_enable(void)
{
        /* cli/sti based on SR.BL */
        unsigned long long __dummy0, __dummy1=~SR_MASK_LL;

        /* read SR, AND out the mask bits, write SR back */
        __asm__ __volatile__("getcon    " __SR ", %0\n\t"
                             "and       %0, %1, %0\n\t"
                             "putcon    %0, " __SR "\n\t"
                             : "=&r" (__dummy0)
                             : "r" (__dummy1));
}
87
/* Disable local interrupts: SR |= SR_MASK_LL (set the mask bits). */
static __inline__ void local_irq_disable(void)
{
        /* cli/sti based on SR.BL */
        unsigned long long __dummy0, __dummy1=SR_MASK_LL;
        /* read SR, OR in the mask bits, write SR back */
        __asm__ __volatile__("getcon    " __SR ", %0\n\t"
                             "or        %0, %1, %0\n\t"
                             "putcon    %0, " __SR "\n\t"
                             : "=&r" (__dummy0)
                             : "r" (__dummy1));
}
98
/*
 * local_save_flags(x): x = SR & SR_MASK_LL.  Non-zero means
 * interrupts are currently masked.  SR itself is not modified.
 */
#define local_save_flags(x)                                             \
(__extension__ ({       unsigned long long __dummy=SR_MASK_LL;          \
        __asm__ __volatile__(                                           \
                "getcon " __SR ", %0\n\t"                               \
                "and    %0, %1, %0"                                     \
                : "=&r" (x)                                             \
                : "r" (__dummy));}))
106
/*
 * local_irq_save(x): save the current interrupt-mask state into x
 * (masked to the SR_MASK_LL bits) and disable interrupts by setting
 * those bits in SR.  Pair with local_irq_restore(x).
 *
 * asm operands: %0 = x (saved copy), %1 = scratch SR value,
 * %2 = SR_MASK_LL; "or %1, r63, %0" copies SR into x (r63 is the
 * zero register per SH-5 convention — TODO confirm).
 *
 * Fix: the expansion previously ended in a stray ';', making the
 * macro expand to two statements — "if (c) local_irq_save(f); else"
 * would not compile.  The trailing semicolon is removed so the macro
 * is a single expression statement at the call site.
 */
#define local_irq_save(x)                                               \
(__extension__ ({       unsigned long long __d2=SR_MASK_LL, __d1;       \
        __asm__ __volatile__(                                           \
                "getcon " __SR ", %1\n\t"                               \
                "or     %1, r63, %0\n\t"                                \
                "or     %1, %2, %1\n\t"                                 \
                "putcon %1, " __SR "\n\t"                               \
                "and    %0, %2, %0"                                     \
                : "=&r" (x), "=&r" (__d1)                               \
                : "r" (__d2));}))
117
/*
 * local_irq_restore(x): restore state saved by local_irq_save().
 * Only the re-enable direction is handled: local_irq_save() always
 * leaves interrupts disabled, so if the saved mask bits were clear
 * we enable, otherwise we simply stay disabled.
 */
#define local_irq_restore(x) do {                                       \
        if ( ((x) & SR_MASK_L) == 0 )           /* dropping to 0 ? */   \
                local_irq_enable();             /* yes...re-enable */   \
} while (0)
122
/* True iff interrupts are currently masked (any SR mask bit set). */
#define irqs_disabled()                 \
({                                      \
        unsigned long flags;            \
        local_save_flags(flags);        \
        (flags != 0);                   \
})
129
/*
 * Exchange the 32-bit value at *m with val, returning the previous
 * contents.  Atomicity is obtained by masking local interrupts
 * around the read-modify-write (uniprocessor-style).
 */
static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
{
        unsigned long old, irqstate;

        local_irq_save(irqstate);
        old = *m;
        *m = val;
        local_irq_restore(irqstate);

        return old;
}
140
/*
 * Exchange the byte at *m with the low 8 bits of val, returning the
 * previous contents.  Atomic with respect to local interrupts only.
 */
static inline unsigned long xchg_u8(volatile unsigned char * m, unsigned long val)
{
        unsigned long old, irqstate;

        local_irq_save(irqstate);
        old = *m;
        *m = val & 0xff;
        local_irq_restore(irqstate);

        return old;
}
151
/*
 * Size dispatcher behind the xchg() macro.  Only 4- and 1-byte
 * operands are supported; any other size references the undefined
 * __xchg_called_with_bad_pointer(), failing at link time.
 */
static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
        if (size == 4)
                return xchg_u32(ptr, x);
        if (size == 1)
                return xchg_u8(ptr, x);

        __xchg_called_with_bad_pointer();
        return x;
}
165
/* XXX
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
/* Implemented in arch code; presumably gate the idle loop's sleep
 * instruction — confirm against the definitions. */
void disable_hlt(void);
void enable_hlt(void);
172
173
/*
 * NOTE: duplicate unconditional definitions of smp_mb()/smp_rmb()/
 * smp_wmb() as barrier() were removed here.  They redefined the
 * CONFIG_SMP-dependent macros earlier in this header with a different
 * expansion under SMP (an invalid macro redefinition that also
 * silently downgraded real barriers to compiler barriers).  The
 * single set of definitions above is authoritative.
 */
177
#ifdef CONFIG_SH_ALPHANUMERIC
/* This is only used for debugging. */
extern void print_seg(char *file,int line);
/* Print the current segment state tagged with file/line. */
#define PLS() print_seg(__FILE__,__LINE__)
#else   /* CONFIG_SH_ALPHANUMERIC */
/* No-op when the alphanumeric debug display is not configured. */
#define PLS()
#endif  /* CONFIG_SH_ALPHANUMERIC */
185
/* Debug trace: print current file, function and line via printk. */
#define PL() printk("@ <%s,%s:%d>\n",__FILE__,__FUNCTION__,__LINE__)

/* No stack randomization on this architecture: identity mapping. */
#define arch_align_stack(x) (x)
189
190 #endif /* __ASM_SH64_SYSTEM_H */