5 #include <linux/stddef.h>
6 #include <linux/types.h>
7 #include <linux/cache.h>
8 #include <linux/threads.h>
10 #include <asm/percpu.h>
12 /* Per processor data structure. %gs points to it while the kernel runs */
/*
 * NOTE(review): the opening "struct x8664_pda {" line is not visible in
 * this chunk (only the closing brace below is); the fields here are the
 * interior of that per-CPU structure. The numbers in the trailing field
 * comments look like byte offsets from the start of the PDA --
 * presumably kept stable because assembly code addresses fields via
 * fixed %gs offsets; confirm against asm-offsets before reordering.
 */
14 unsigned long unused1; /* retired fields kept as placeholders,
                            presumably to preserve the offsets of the
                            live fields below -- TODO confirm */
15 unsigned long unused2;
16 unsigned long unused3;
17 unsigned long unused4;
19 unsigned int unused6; /* 36 was cpunumber */
20 #ifdef CONFIG_CC_STACKPROTECTOR
21 unsigned long stack_canary; /* 40 stack canary value */
22 /* gcc-ABI: this canary MUST be at
 * a fixed %gs-relative offset, since gcc emits hardcoded accesses to
 * it when stack protection is on. The continuation of this comment and
 * the matching #endif are outside the visible chunk. */
25 short nodenumber; /* number of current node (32k max) */
26 short in_bootmem; /* pda lives in bootmem */
28 } ____cacheline_aligned_in_smp;
/* Per-CPU instance of the PDA; the matching DEFINE_PER_CPU lives in a .c file. */
30 DECLARE_PER_CPU(struct x8664_pda, __pda);
/* Set up the PDA for the given CPU number (definition not in this chunk). */
31 extern void pda_init(int);
/* Address of an arbitrary CPU's PDA -- usable for remote CPUs, unlike the
   current-CPU accessors below. */
33 #define cpu_pda(cpu) (&per_cpu(__pda, cpu))
/*
 * Field accessors for the *current* CPU's PDA. These expand to percpu
 * ops (presumably %gs-relative on x86-64, per the comment on the struct
 * above), so they only ever touch the local CPU's copy. Not safe across
 * a CPU migration point unless preemption is disabled.
 */
35 #define read_pda(field) percpu_read(__pda.field)
36 #define write_pda(field, val) percpu_write(__pda.field, val)
37 #define add_pda(field, val) percpu_add(__pda.field, val)
38 #define sub_pda(field, val) percpu_sub(__pda.field, val)
39 #define or_pda(field, val) percpu_or(__pda.field, val)
41 /* This is not atomic against other CPUs -- CPU preemption needs to be off */
/* Test-and-clear a bit in a PDA field of the current CPU; by its name it
   returns the bit's previous value -- confirm against the
   x86_test_and_clear_bit_percpu implementation. */
42 #define test_and_clear_bit_pda(bit, field) \
43 x86_test_and_clear_bit_percpu(bit, __pda.field)
47 #endif /* _ASM_X86_PDA_H */