linux-2.6: include/asm-i386/percpu.h
#ifndef __ARCH_I386_PERCPU__
#define __ARCH_I386_PERCPU__

#ifdef __ASSEMBLY__

/*
 * PER_CPU finds an address of a per-cpu variable.
 *
 * Args:
 *    var - variable name
 *    reg - 32bit register
 *
 * The resulting address is stored in the "reg" argument.
 *
 * Example:
 *    PER_CPU(cpu_gdt_descr, %ebx)
 */
#ifdef CONFIG_SMP
#define PER_CPU(var, reg)                               \
        movl %fs:per_cpu__##this_cpu_off, reg;          \
        lea per_cpu__##var(reg), reg
#define PER_CPU_VAR(var)        %fs:per_cpu__##var
#else /* ! SMP */
#define PER_CPU(var, reg)                       \
        movl $per_cpu__##var, reg
#define PER_CPU_VAR(var)        per_cpu__##var
#endif  /* SMP */
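
/*
 * Illustrative expansion (SMP case) for a hypothetical per-cpu
 * variable "foo": PER_CPU(foo, %eax) becomes roughly
 *
 *      movl %fs:per_cpu__this_cpu_off, %eax
 *      lea per_cpu__foo(%eax), %eax
 *
 * leaving the address of this CPU's copy of "foo" in %eax.  On UP the
 * same invocation is just "movl $per_cpu__foo, %eax".
 */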

#else /* ...!ASSEMBLY */

#ifdef CONFIG_SMP
/* Same as generic implementation except for optimized local access. */
#define __GENERIC_PER_CPU

/* This is used for other cpus to find our section. */
extern unsigned long __per_cpu_offset[];

#define per_cpu_offset(x) (__per_cpu_offset[x])

/* Separate out the type, so (int[3], foo) works. */
#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
#define DEFINE_PER_CPU(type, name) \
    __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
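
/*
 * Sketch of intended use, with a hypothetical variable "my_count":
 * one translation unit provides the storage with
 *
 *      DEFINE_PER_CPU(int, my_count);
 *
 * and every other user sees it via
 *
 *      DECLARE_PER_CPU(int, my_count);
 *
 * Both expand around the real symbol per_cpu__my_count, placed in the
 * .data.percpu template section that is replicated for each CPU.
 */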

/* We can use this directly for local CPU (faster). */
DECLARE_PER_CPU(unsigned long, this_cpu_off);

/* var is in discarded region: offset to particular copy we want */
#define per_cpu(var, cpu) (*({                          \
        extern int simple_identifier_##var(void);       \
        RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]); }))

#define __raw_get_cpu_var(var) (*({                                     \
        extern int simple_identifier_##var(void);                       \
        RELOC_HIDE(&per_cpu__##var, x86_read_percpu(this_cpu_off));     \
}))

#define __get_cpu_var(var) __raw_get_cpu_var(var)
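
/*
 * Usage sketch (same hypothetical "my_count"): per_cpu() reaches any
 * CPU's copy through __per_cpu_offset[], while __get_cpu_var() reaches
 * the current CPU's copy through the %fs-based this_cpu_off:
 *
 *      per_cpu(my_count, 2) = 0;       (CPU 2's copy, by index)
 *      __get_cpu_var(my_count)++;      (this CPU's copy)
 *
 * Both yield lvalues, unlike the single-insn x86_*_percpu() ops below.
 */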

/* A macro to avoid #include hell... */
#define percpu_modcopy(pcpudst, src, size)                      \
do {                                                            \
        unsigned int __i;                                       \
        for_each_possible_cpu(__i)                              \
                memcpy((pcpudst)+__per_cpu_offset[__i],         \
                       (src), (size));                          \
} while (0)
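
/*
 * percpu_modcopy() is what the module loader uses to seed every CPU's
 * copy of a freshly loaded module's per-cpu data from the module's
 * .data.percpu section; a sketch of the call (names illustrative):
 *
 *      percpu_modcopy(mod->percpu, pcpu_section_addr, pcpu_section_size);
 */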

#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)

/* fs segment starts at (positive) offset == __per_cpu_offset[cpu] */
#define __percpu_seg "%%fs:"
#else  /* !SMP */
#include <asm-generic/percpu.h>
#define __percpu_seg ""
#endif  /* SMP */

/* For arch-specific code, we can use direct single-insn ops (they
 * don't give an lvalue though). */
extern void __bad_percpu_size(void);
#define percpu_to_op(op,var,val)                                \
        do {                                                    \
                typedef typeof(var) T__;                        \
                if (0) { T__ tmp__; tmp__ = (val); }            \
                switch (sizeof(var)) {                          \
                case 1:                                         \
                        asm(op "b %1,"__percpu_seg"%0"          \
                            : "+m" (var)                        \
                            :"ri" ((T__)val));                  \
                        break;                                  \
                case 2:                                         \
                        asm(op "w %1,"__percpu_seg"%0"          \
                            : "+m" (var)                        \
                            :"ri" ((T__)val));                  \
                        break;                                  \
                case 4:                                         \
                        asm(op "l %1,"__percpu_seg"%0"          \
                            : "+m" (var)                        \
                            :"ri" ((T__)val));                  \
                        break;                                  \
                default: __bad_percpu_size();                   \
                }                                               \
        } while (0)
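
/*
 * Illustrative expansion for a hypothetical 4-byte variable:
 * percpu_to_op("add", per_cpu__foo, 1) selects the "l"-suffixed case
 * and emits a single instruction on SMP:
 *
 *      addl $1, %fs:per_cpu__foo
 *
 * The dead "if (0)" assignment never executes; it exists only so the
 * compiler type-checks val against var.
 */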

#define percpu_from_op(op,var)                                  \
        ({                                                      \
                typeof(var) ret__;                              \
                switch (sizeof(var)) {                          \
                case 1:                                         \
                        asm(op "b "__percpu_seg"%1,%0"          \
                            : "=r" (ret__)                      \
                            : "m" (var));                       \
                        break;                                  \
                case 2:                                         \
                        asm(op "w "__percpu_seg"%1,%0"          \
                            : "=r" (ret__)                      \
                            : "m" (var));                       \
                        break;                                  \
                case 4:                                         \
                        asm(op "l "__percpu_seg"%1,%0"          \
                            : "=r" (ret__)                      \
                            : "m" (var));                       \
                        break;                                  \
                default: __bad_percpu_size();                   \
                }                                               \
                ret__; })
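
/*
 * Illustrative expansion, same hypothetical variable: reading it via
 * percpu_from_op("mov", per_cpu__foo) emits one load on SMP, e.g.
 *
 *      movl %fs:per_cpu__foo, %eax
 *
 * with the destination register chosen by the compiler ("=r").
 */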

#define x86_read_percpu(var) percpu_from_op("mov", per_cpu__##var)
#define x86_write_percpu(var,val) percpu_to_op("mov", per_cpu__##var, val)
#define x86_add_percpu(var,val) percpu_to_op("add", per_cpu__##var, val)
#define x86_sub_percpu(var,val) percpu_to_op("sub", per_cpu__##var, val)
#define x86_or_percpu(var,val) percpu_to_op("or", per_cpu__##var, val)
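
/*
 * Usage sketch (hypothetical variable "my_count"):
 *
 *      x86_write_percpu(my_count, 0);     -> movl $0, %fs:per_cpu__my_count
 *      x86_add_percpu(my_count, 1);       -> addl $1, %fs:per_cpu__my_count
 *      n = x86_read_percpu(my_count);     -> movl %fs:per_cpu__my_count, <reg>
 *
 * Each op is one %fs-relative instruction, so a read-modify-write like
 * the add cannot be torn apart by preemption on the local CPU.
 */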
#endif /* !__ASSEMBLY__ */

#endif /* __ARCH_I386_PERCPU__ */