/*
 * arch/x86/include/asm/pda.h (linux-2.6)
 * State after merge of branch 'core/percpu' into stackprotector.
 */
#ifndef _ASM_X86_PDA_H
#define _ASM_X86_PDA_H

#ifndef __ASSEMBLY__
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <asm/page.h>
#include <asm/percpu.h>

/*
 * Per-processor data structure ("PDA").  %gs points at the current
 * CPU's copy while the kernel runs.
 *
 * Most former members have been migrated to ordinary per-cpu
 * variables; the unused* fields remain only as padding so that
 * stack_canary keeps its ABI-mandated offset.
 */
struct x8664_pda {
	unsigned long unused1;		/* offset  0: padding */
	unsigned long unused2;		/* offset  8: padding */
	unsigned long unused3;		/* offset 16: padding */
	unsigned long unused4;		/* offset 24: padding */
	int unused5;			/* offset 32: padding */
	unsigned int unused6;		/* offset 36: was cpunumber */
	unsigned long stack_canary;	/* offset 40: stack canary value */
					/* gcc-ABI: this canary MUST be at
					   offset 40!!!  gcc's stack-protector
					   sequence hard-codes the canary
					   location, so the padding above may
					   not shrink or grow. */
	short in_bootmem;		/* pda lives in bootmem */
} ____cacheline_aligned_in_smp;
25
/* The PDA is now an ordinary per-cpu variable. */
DECLARE_PER_CPU(struct x8664_pda, __pda);

/* Early setup of the pda for the given CPU number. */
extern void pda_init(int);

/* Address of a given CPU's pda. */
#define cpu_pda(cpu)            (&per_cpu(__pda, cpu))

/*
 * Accessors for fields of the *current* CPU's pda, implemented on top
 * of the generic percpu_* operations.  Field names are passed by
 * token, so these only work with real struct x8664_pda members.
 */
#define read_pda(field)         percpu_read(__pda.field)
#define write_pda(field, val)   percpu_write(__pda.field, val)
#define add_pda(field, val)     percpu_add(__pda.field, val)
#define sub_pda(field, val)     percpu_sub(__pda.field, val)
#define or_pda(field, val)      percpu_or(__pda.field, val)

/* This is not atomic against other CPUs -- CPU preemption needs to be off */
#define test_and_clear_bit_pda(bit, field)                              \
        x86_test_and_clear_bit_percpu(bit, __pda.field)
40
#endif	/* __ASSEMBLY__ */

/*
 * Copy the current task's canary into the pda's stack_canary slot so
 * that subsequent stack-protector checks (which read the pda canary at
 * its fixed offset) see this task's value.
 *
 * NOTE(review): this is defined outside the !__ASSEMBLY__ block above;
 * that is harmless as long as no assembly source ever expands it, but
 * confirm the placement is intentional.
 */
#define refresh_stack_canary() write_pda(stack_canary, current->stack_canary)

#endif /* _ASM_X86_PDA_H */