[PATCH] ia64: re-implement dma_get_cache_alignment to avoid EXPORT_SYMBOL
linux-2.6: include/asm-ia64/intrinsics.h
#ifndef _ASM_IA64_INTRINSICS_H
#define _ASM_IA64_INTRINSICS_H

/*
 * Compiler-dependent intrinsics.
 *
 * Copyright (C) 2002-2003 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 */

#ifndef __ASSEMBLY__
#include <linux/config.h>

/* include compiler specific intrinsics */
#include <asm/ia64regs.h>
#ifdef __INTEL_COMPILER
# include <asm/intel_intrin.h>
#else
# include <asm/gcc_intrin.h>
#endif

/*
 * Force an unresolved reference if someone tries to use
 * ia64_fetch_and_add() with a bad value.
 */
extern unsigned long __bad_size_for_ia64_fetch_and_add (void);
extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);

#define IA64_FETCHADD(tmp,v,n,sz,sem)                                           \
({                                                                              \
        switch (sz) {                                                           \
              case 4:                                                           \
                tmp = ia64_fetchadd4_##sem((unsigned int *) v, n);              \
                break;                                                          \
                                                                                \
              case 8:                                                           \
                tmp = ia64_fetchadd8_##sem((unsigned long *) v, n);             \
                break;                                                          \
                                                                                \
              default:                                                          \
                __bad_size_for_ia64_fetch_and_add();                            \
        }                                                                       \
})

#define ia64_fetchadd(i,v,sem)                                                          \
({                                                                                      \
        __u64 _tmp;                                                                     \
        volatile __typeof__(*(v)) *_v = (v);                                            \
        /* Can't use a switch () here: gcc isn't always smart enough for that... */     \
        if ((i) == -16)                                                                 \
                IA64_FETCHADD(_tmp, _v, -16, sizeof(*(v)), sem);                        \
        else if ((i) == -8)                                                             \
                IA64_FETCHADD(_tmp, _v, -8, sizeof(*(v)), sem);                         \
        else if ((i) == -4)                                                             \
                IA64_FETCHADD(_tmp, _v, -4, sizeof(*(v)), sem);                         \
        else if ((i) == -1)                                                             \
                IA64_FETCHADD(_tmp, _v, -1, sizeof(*(v)), sem);                         \
        else if ((i) == 1)                                                              \
                IA64_FETCHADD(_tmp, _v, 1, sizeof(*(v)), sem);                          \
        else if ((i) == 4)                                                              \
                IA64_FETCHADD(_tmp, _v, 4, sizeof(*(v)), sem);                          \
        else if ((i) == 8)                                                              \
                IA64_FETCHADD(_tmp, _v, 8, sizeof(*(v)), sem);                          \
        else if ((i) == 16)                                                             \
                IA64_FETCHADD(_tmp, _v, 16, sizeof(*(v)), sem);                         \
        else                                                                            \
                _tmp = __bad_increment_for_ia64_fetch_and_add();                        \
        (__typeof__(*(v))) (_tmp);      /* return old value */                          \
})

#define ia64_fetch_and_add(i,v) (ia64_fetchadd(i, v, rel) + (i)) /* return new value */
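
/*
 * Illustrative sketch (not part of the original header) of how the two
 * macros differ.  "counter" is a hypothetical variable; the object must be
 * 4 or 8 bytes wide and the increment must be a compile-time constant
 * handled above (+/-1, 4, 8, 16), otherwise the link fails with an
 * unresolved reference to one of the __bad_* functions:
 *
 *	static long counter;
 *
 *	long prev = ia64_fetchadd(1, &counter, rel);	// returns the old value
 *	long now  = ia64_fetch_and_add(1, &counter);	// returns the new value
 */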

/*
 * This function doesn't exist, so you'll get a linker error if
 * something tries to do an invalid xchg().
 */
extern void ia64_xchg_called_with_bad_pointer (void);

#define __xchg(x,ptr,size)                                              \
({                                                                      \
        unsigned long __xchg_result;                                    \
                                                                        \
        switch (size) {                                                 \
              case 1:                                                   \
                __xchg_result = ia64_xchg1((__u8 *)ptr, x);             \
                break;                                                  \
                                                                        \
              case 2:                                                   \
                __xchg_result = ia64_xchg2((__u16 *)ptr, x);            \
                break;                                                  \
                                                                        \
              case 4:                                                   \
                __xchg_result = ia64_xchg4((__u32 *)ptr, x);            \
                break;                                                  \
                                                                        \
              case 8:                                                   \
                __xchg_result = ia64_xchg8((__u64 *)ptr, x);            \
                break;                                                  \
              default:                                                  \
                ia64_xchg_called_with_bad_pointer();                    \
        }                                                               \
        __xchg_result;                                                  \
})

#define xchg(ptr,x)                                                          \
  ((__typeof__(*(ptr))) __xchg ((unsigned long) (x), (ptr), sizeof(*(ptr))))
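
/*
 * Illustrative sketch (not part of the original header): xchg() handles
 * 1-, 2-, 4- and 8-byte objects; any other size trips the deliberately
 * undefined ia64_xchg_called_with_bad_pointer() at link time.
 * "lock_word" is a hypothetical variable:
 *
 *	static unsigned int lock_word;
 *
 *	// atomically store 1 and observe the previous contents
 *	unsigned int was_locked = xchg(&lock_word, 1);
 */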

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg().
 */
extern long ia64_cmpxchg_called_with_bad_pointer (void);

#define ia64_cmpxchg(sem,ptr,old,new,size)                                              \
({                                                                                      \
        __u64 _o_, _r_;                                                                 \
                                                                                        \
        switch (size) {                                                                 \
              case 1: _o_ = (__u8 ) (long) (old); break;                                \
              case 2: _o_ = (__u16) (long) (old); break;                                \
              case 4: _o_ = (__u32) (long) (old); break;                                \
              case 8: _o_ = (__u64) (long) (old); break;                                \
              default: break;                                                           \
        }                                                                               \
        switch (size) {                                                                 \
              case 1:                                                                   \
                _r_ = ia64_cmpxchg1_##sem((__u8 *) ptr, new, _o_);                      \
                break;                                                                  \
                                                                                        \
              case 2:                                                                   \
                _r_ = ia64_cmpxchg2_##sem((__u16 *) ptr, new, _o_);                     \
                break;                                                                  \
                                                                                        \
              case 4:                                                                   \
                _r_ = ia64_cmpxchg4_##sem((__u32 *) ptr, new, _o_);                     \
                break;                                                                  \
                                                                                        \
              case 8:                                                                   \
                _r_ = ia64_cmpxchg8_##sem((__u64 *) ptr, new, _o_);                     \
                break;                                                                  \
                                                                                        \
              default:                                                                  \
                _r_ = ia64_cmpxchg_called_with_bad_pointer();                           \
                break;                                                                  \
        }                                                                               \
        (__typeof__(old)) _r_;                                                          \
})

#define cmpxchg_acq(ptr,o,n)    ia64_cmpxchg(acq, (ptr), (o), (n), sizeof(*(ptr)))
#define cmpxchg_rel(ptr,o,n)    ia64_cmpxchg(rel, (ptr), (o), (n), sizeof(*(ptr)))

/* for compatibility with other platforms: */
#define cmpxchg(ptr,o,n)        cmpxchg_acq(ptr,o,n)
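
/*
 * Illustrative sketch (not part of the original header): the macro returns
 * the prior contents of *ptr, so success is detected by comparing the
 * return value with the expected old value.  "val" is a hypothetical
 * variable:
 *
 *	static unsigned long val;
 *
 *	unsigned long old = val;
 *	if (cmpxchg(&val, old, old + 1) == old) {
 *		// the update happened atomically
 *	}
 */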

#ifdef CONFIG_IA64_DEBUG_CMPXCHG
# define CMPXCHG_BUGCHECK_DECL  int _cmpxchg_bugcheck_count = 128;
# define CMPXCHG_BUGCHECK(v)                                                    \
  do {                                                                          \
        if (_cmpxchg_bugcheck_count-- <= 0) {                                   \
                void *ip;                                                       \
                extern int printk(const char *fmt, ...);                        \
                ip = (void *) ia64_getreg(_IA64_REG_IP);                        \
                printk("CMPXCHG_BUGCHECK: stuck at %p on word %p\n", ip, (v));  \
                break;                                                          \
        }                                                                       \
  } while (0)
#else /* !CONFIG_IA64_DEBUG_CMPXCHG */
# define CMPXCHG_BUGCHECK_DECL
# define CMPXCHG_BUGCHECK(v)
#endif /* !CONFIG_IA64_DEBUG_CMPXCHG */
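
/*
 * Illustrative sketch (not part of the original header): the typical caller
 * pattern, in the style of the ia64 atomic operations.  The bugcheck macros
 * expand to nothing unless CONFIG_IA64_DEBUG_CMPXCHG is set, in which case
 * they flag a cmpxchg loop that spins far longer than expected.
 * "example_add" is a hypothetical helper, not part of this header:
 *
 *	static inline int
 *	example_add (int i, volatile int *v)
 *	{
 *		int old, new;
 *		CMPXCHG_BUGCHECK_DECL
 *
 *		do {
 *			CMPXCHG_BUGCHECK(v);
 *			old = *v;
 *			new = old + i;
 *		} while (cmpxchg_acq(v, old, new) != old);
 *		return new;
 *	}
 */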

#endif
#endif /* _ASM_IA64_INTRINSICS_H */