#ifndef __LINUX_PERCPU_H
#define __LINUX_PERCPU_H

#include <linux/preempt.h>
#include <linux/slab.h> /* For kmalloc() */
#include <linux/smp.h>
#include <linux/cpumask.h>

#include <asm/percpu.h>

#ifndef PER_CPU_BASE_SECTION
#ifdef CONFIG_SMP
#define PER_CPU_BASE_SECTION ".data.percpu"
#else
#define PER_CPU_BASE_SECTION ".data"
#endif
#endif

#ifdef CONFIG_SMP

#ifdef MODULE
#define PER_CPU_SHARED_ALIGNED_SECTION ""
#else
#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned"
#endif
#define PER_CPU_FIRST_SECTION ".first"

#else

#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_FIRST_SECTION ""

#endif

#define DEFINE_PER_CPU_SECTION(type, name, section)                     \
        __attribute__((__section__(PER_CPU_BASE_SECTION section)))      \
        PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name

#define DEFINE_PER_CPU(type, name)                                      \
        DEFINE_PER_CPU_SECTION(type, name, "")

#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)                       \
        DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
        ____cacheline_aligned_in_smp

#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name)                         \
        DEFINE_PER_CPU_SECTION(type, name, ".page_aligned")

#define DEFINE_PER_CPU_FIRST(type, name)                                \
        DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)

#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
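
/*
 * Illustrative usage (the variable name is hypothetical): define a
 * per-cpu variable and export it so modules can reference it.
 *
 *        DEFINE_PER_CPU(int, my_counter);
 *        EXPORT_PER_CPU_SYMBOL(my_counter);
 */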

/* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */
#ifndef PERCPU_ENOUGH_ROOM
#ifdef CONFIG_MODULES
#define PERCPU_MODULE_RESERVE   8192
#else
#define PERCPU_MODULE_RESERVE   0
#endif

#define PERCPU_ENOUGH_ROOM                                              \
        (__per_cpu_end - __per_cpu_start + PERCPU_MODULE_RESERVE)
#endif  /* PERCPU_ENOUGH_ROOM */
/*
 * Must be an lvalue. Since @var must be a simple identifier,
 * we force a syntax error here if it isn't.
 */
#define get_cpu_var(var) (*({                           \
        extern int simple_identifier_##var(void);       \
        preempt_disable();                              \
        &__get_cpu_var(var); }))
#define put_cpu_var(var) preempt_enable()
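
/*
 * Illustrative usage (hypothetical variable): get_cpu_var() disables
 * preemption and yields the current CPU's copy as an lvalue; every
 * get_cpu_var() must be paired with a put_cpu_var().
 *
 *        get_cpu_var(my_counter)++;
 *        put_cpu_var(my_counter);
 */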

#ifdef CONFIG_SMP

#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA

/* minimum unit size, also the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE              (16UL << PAGE_SHIFT)

/*
 * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
 * back on the first chunk if the arch is manually allocating and
 * mapping it for faster access (as a part of large page mapping for
 * example).  Note that the dynamic percpu allocator covers both
 * static and dynamic areas, so these values are bigger than
 * PERCPU_MODULE_RESERVE.
 *
 * On a typical configuration with modules, the following values leave
 * about 8k of free space on the first chunk after boot on both x86_32
 * and 64 when module support is enabled.  When module support is
 * disabled, it's much tighter.
 */
#ifndef PERCPU_DYNAMIC_RESERVE
#  if BITS_PER_LONG > 32
#    ifdef CONFIG_MODULES
#      define PERCPU_DYNAMIC_RESERVE    (6 << PAGE_SHIFT)
#    else
#      define PERCPU_DYNAMIC_RESERVE    (4 << PAGE_SHIFT)
#    endif
#  else
#    ifdef CONFIG_MODULES
#      define PERCPU_DYNAMIC_RESERVE    (4 << PAGE_SHIFT)
#    else
#      define PERCPU_DYNAMIC_RESERVE    (2 << PAGE_SHIFT)
#    endif
#  endif
#endif  /* PERCPU_DYNAMIC_RESERVE */
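
/*
 * For example, with 4k pages (PAGE_SHIFT == 12) the reserve works out
 * to 24k on 64-bit with modules (6 << 12) and 8k on 32-bit without
 * modules (2 << 12).
 */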

extern void *pcpu_base_addr;

typedef struct page * (*pcpu_get_page_fn_t)(unsigned int cpu, int pageno);
typedef void (*pcpu_populate_pte_fn_t)(unsigned long addr);

extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
                                        size_t static_size, size_t unit_size,
                                        size_t free_size, void *base_addr,
                                        pcpu_populate_pte_fn_t populate_pte_fn);
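
/*
 * Rough sketch of the callback contract (mm/percpu.c carries the
 * authoritative documentation): @get_page_fn should return the page
 * backing page @pageno of @cpu's unit in the first chunk, and
 * @populate_pte_fn should populate the kernel page table for @addr so
 * the first chunk can be accessed through it.
 */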

/*
 * Use this to get to a given cpu's instance of a dynamically
 * allocated per-cpu object.  Non-atomic access to the current CPU's
 * instance should probably be combined with get_cpu()/put_cpu().
 */
#define per_cpu_ptr(ptr, cpu)   SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
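
/*
 * Illustrative usage (hypothetical pointer, obtained from the
 * alloc_percpu() defined at the end of this header), pinning to the
 * current CPU with get_cpu()/put_cpu():
 *
 *        unsigned int *counts = alloc_percpu(unsigned int);
 *        int cpu = get_cpu();
 *        (*per_cpu_ptr(counts, cpu))++;
 *        put_cpu();
 */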

#else /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */

struct percpu_data {
        void *ptrs[1];
};

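/*
 * The handle handed to users is the bitwise complement of the real
 * struct percpu_data address, so a buggy direct dereference of the
 * "pointer" faults instead of silently reading ptrs[].
 */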
#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)

#define per_cpu_ptr(ptr, cpu)                                           \
({                                                                      \
        struct percpu_data *__p = __percpu_disguise(ptr);               \
        (__typeof__(ptr))__p->ptrs[(cpu)];                              \
})

#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */

extern void *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void *__pdata);

#else /* CONFIG_SMP */

#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })

static inline void *__alloc_percpu(size_t size, size_t align)
{
        /*
         * Can't easily make larger alignment work with kmalloc.  WARN
         * on it.  Larger alignment should only be used for module
         * percpu sections on SMP for which this path isn't used.
         */
        WARN_ON_ONCE(align > SMP_CACHE_BYTES);
        return kzalloc(size, GFP_KERNEL);
}

static inline void free_percpu(void *p)
{
        kfree(p);
}

#endif /* CONFIG_SMP */

#define alloc_percpu(type)      (type *)__alloc_percpu(sizeof(type), \
                                                       __alignof__(type))
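
/*
 * Illustrative usage (hypothetical struct and field names): allocate
 * one instance per CPU, update a given cpu's copy through
 * per_cpu_ptr(), then release all copies at once.
 *
 *        struct my_stats *stats = alloc_percpu(struct my_stats);
 *        if (!stats)
 *                return -ENOMEM;
 *        per_cpu_ptr(stats, cpu)->packets++;
 *        free_percpu(stats);
 */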

#endif /* __LINUX_PERCPU_H */