#ifndef __LINUX_PERCPU_H
#define __LINUX_PERCPU_H

#include <linux/preempt.h>
#include <linux/slab.h> /* For kmalloc() */
#include <linux/smp.h>
#include <linux/cpumask.h>

#include <asm/percpu.h>

#ifdef CONFIG_SMP
#define DEFINE_PER_CPU(type, name) \
	__attribute__((__section__(".data.percpu"))) \
	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name

#ifdef MODULE
#define SHARED_ALIGNED_SECTION ".data.percpu"
#else
#define SHARED_ALIGNED_SECTION ".data.percpu.shared_aligned"
#endif

#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
	__attribute__((__section__(SHARED_ALIGNED_SECTION))) \
	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name \
	____cacheline_aligned_in_smp
#else
#define DEFINE_PER_CPU(type, name) \
	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name

#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
	DEFINE_PER_CPU(type, name)
#endif

#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
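/*
 * A minimal usage sketch (not part of this header): defining a statically
 * allocated per-CPU counter and exporting it so modules can reference it.
 * The name "prof_counter" is a hypothetical placeholder.
 *
 *	DEFINE_PER_CPU(unsigned long, prof_counter);
 *	EXPORT_PER_CPU_SYMBOL(prof_counter);
 *
 * On SMP builds the definition is placed in the .data.percpu section; on UP
 * builds it degenerates to an ordinary variable definition.
 */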
/* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */
#ifndef PERCPU_ENOUGH_ROOM
#ifdef CONFIG_MODULES
#define PERCPU_MODULE_RESERVE	8192
#else
#define PERCPU_MODULE_RESERVE	0
#endif

#define PERCPU_ENOUGH_ROOM \
	(__per_cpu_end - __per_cpu_start + PERCPU_MODULE_RESERVE)
#endif	/* PERCPU_ENOUGH_ROOM */

/*
 * Must be an lvalue. Since @var must be a simple identifier,
 * we force a syntax error here if it isn't.
 */
#define get_cpu_var(var) (*({				\
	extern int simple_identifier_##var(void);	\
	preempt_disable();				\
	&__get_cpu_var(var); }))
#define put_cpu_var(var) preempt_enable()
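/*
 * A minimal usage sketch (not part of this header), assuming the
 * hypothetical per-CPU variable prof_counter from the example above.
 * get_cpu_var() disables preemption and evaluates to an lvalue for the
 * current CPU's copy, so the update cannot race with migration to another
 * CPU; put_cpu_var() re-enables preemption.
 *
 *	get_cpu_var(prof_counter)++;
 *	put_cpu_var(prof_counter);
 */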
#ifdef CONFIG_SMP

struct percpu_data {
	void *ptrs[1];
};

#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
/*
 * Use this to get to a cpu's version of the per-cpu object dynamically
 * allocated. Non-atomic access to the current CPU's version should
 * probably be combined with get_cpu()/put_cpu().
 */
#define percpu_ptr(ptr, cpu)					\
({								\
	struct percpu_data *__p = __percpu_disguise(ptr);	\
	(__typeof__(ptr))__p->ptrs[(cpu)];			\
})
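/*
 * A usage sketch (not part of this header) for dynamically allocated
 * per-CPU data, following the comment above: pin the task to the current
 * CPU with get_cpu() while touching that CPU's copy non-atomically.
 * "counters" stands for a pointer obtained from alloc_percpu() below.
 *
 *	int cpu = get_cpu();
 *	(*percpu_ptr(counters, cpu))++;
 *	put_cpu();
 */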
extern void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu);
extern void percpu_depopulate(void *__pdata, int cpu);
extern int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
				  cpumask_t *mask);
extern void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask);
extern void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask);
extern void percpu_free(void *__pdata);

#else /* CONFIG_SMP */

#define percpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })

static inline void percpu_depopulate(void *__pdata, int cpu)
{
}

static inline void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask)
{
}

static inline void *percpu_populate(void *__pdata, size_t size, gfp_t gfp,
				    int cpu)
{
	return percpu_ptr(__pdata, cpu);
}

static inline int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
					 cpumask_t *mask)
{
	return 0;
}

static __always_inline void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
{
	return kzalloc(size, gfp);
}

static inline void percpu_free(void *__pdata)
{
	kfree(__pdata);
}

#endif /* CONFIG_SMP */
#define percpu_populate_mask(__pdata, size, gfp, mask) \
	__percpu_populate_mask((__pdata), (size), (gfp), &(mask))
#define percpu_depopulate_mask(__pdata, mask) \
	__percpu_depopulate_mask((__pdata), &(mask))
#define percpu_alloc_mask(size, gfp, mask) \
	__percpu_alloc_mask((size), (gfp), &(mask))

#define percpu_alloc(size, gfp) percpu_alloc_mask((size), (gfp), cpu_online_map)

/* (legacy) interface for use without CPU hotplug handling */

#define __alloc_percpu(size)	percpu_alloc_mask((size), GFP_KERNEL, \
						  cpu_possible_map)
#define alloc_percpu(type)	(type *)__alloc_percpu(sizeof(type))
#define free_percpu(ptr)	percpu_free((ptr))
#define per_cpu_ptr(ptr, cpu)	percpu_ptr((ptr), (cpu))
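/*
 * A usage sketch (not part of this header) for the legacy dynamic
 * interface: allocate one zero-initialized object per possible CPU, sum
 * the copies, then release them.  The variable names are illustrative only.
 *
 *	unsigned long *counters = alloc_percpu(unsigned long);
 *	unsigned long sum = 0;
 *	int cpu;
 *
 *	if (!counters)
 *		return -ENOMEM;	/* allocation failed *|
 *	for_each_possible_cpu(cpu)
 *		sum += *per_cpu_ptr(counters, cpu);
 *	free_percpu(counters);
 */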
#endif /* __LINUX_PERCPU_H */