percpu: use negative for auto for pcpu_setup_first_chunk() arguments
include/linux/percpu.h (linux-2.6)
#ifndef __LINUX_PERCPU_H
#define __LINUX_PERCPU_H

#include <linux/preempt.h>
#include <linux/slab.h> /* For kmalloc() */
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/pfn.h>

#include <asm/percpu.h>

#ifndef PER_CPU_BASE_SECTION
#ifdef CONFIG_SMP
#define PER_CPU_BASE_SECTION ".data.percpu"
#else
#define PER_CPU_BASE_SECTION ".data"
#endif
#endif

#ifdef CONFIG_SMP

#ifdef MODULE
#define PER_CPU_SHARED_ALIGNED_SECTION ""
#else
#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned"
#endif
#define PER_CPU_FIRST_SECTION ".first"

#else

#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_FIRST_SECTION ""

#endif

#define DEFINE_PER_CPU_SECTION(type, name, section)                     \
        __attribute__((__section__(PER_CPU_BASE_SECTION section)))      \
        PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name

#define DEFINE_PER_CPU(type, name)                                      \
        DEFINE_PER_CPU_SECTION(type, name, "")

#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)                       \
        DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
        ____cacheline_aligned_in_smp

#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name)                         \
        DEFINE_PER_CPU_SECTION(type, name, ".page_aligned")

#define DEFINE_PER_CPU_FIRST(type, name)                                \
        DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)

#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)

/* enough to cover all DEFINE_PER_CPUs in modules */
#ifdef CONFIG_MODULES
#define PERCPU_MODULE_RESERVE           (8 << 10)
#else
#define PERCPU_MODULE_RESERVE           0
#endif

#ifndef PERCPU_ENOUGH_ROOM
#define PERCPU_ENOUGH_ROOM                                              \
        (ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) +      \
         PERCPU_MODULE_RESERVE)
#endif

/*
 * Must be an lvalue. Since @var must be a simple identifier,
 * we force a syntax error here if it isn't.
 */
#define get_cpu_var(var) (*({                           \
        extern int simple_identifier_##var(void);       \
        preempt_disable();                              \
        &__get_cpu_var(var); }))
#define put_cpu_var(var) preempt_enable()
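
/*
 * Example (illustrative sketch only, not part of this header): a static
 * per-cpu counter defined with DEFINE_PER_CPU() and updated through the
 * get_cpu_var()/put_cpu_var() pair, which disables and re-enables
 * preemption around the access.  The names pkt_count and count_packet()
 * are hypothetical.
 */
#if 0   /* usage sketch */
static DEFINE_PER_CPU(unsigned long, pkt_count);

static void count_packet(void)
{
        get_cpu_var(pkt_count)++;       /* preemption disabled here */
        put_cpu_var(pkt_count);         /* preemption enabled again */
}
#endif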

#ifdef CONFIG_SMP

#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA

/* minimum unit size, also is the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE              PFN_ALIGN(64 << 10)

/*
 * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
 * back on the first chunk if arch is manually allocating and mapping
 * it for faster access (as a part of large page mapping for example).
 * Note that dynamic percpu allocator covers both static and dynamic
 * areas, so these values are bigger than PERCPU_MODULE_RESERVE.
 *
 * On typical configuration with modules, the following values leave
 * about 8k of free space on the first chunk after boot on both x86_32
 * and 64 when module support is enabled.  When module support is
 * disabled, it's much tighter.
 */
#ifndef PERCPU_DYNAMIC_RESERVE
#  if BITS_PER_LONG > 32
#    ifdef CONFIG_MODULES
#      define PERCPU_DYNAMIC_RESERVE    (24 << 10)
#    else
#      define PERCPU_DYNAMIC_RESERVE    (16 << 10)
#    endif
#  else
#    ifdef CONFIG_MODULES
#      define PERCPU_DYNAMIC_RESERVE    (16 << 10)
#    else
#      define PERCPU_DYNAMIC_RESERVE    (8 << 10)
#    endif
#  endif
#endif  /* PERCPU_DYNAMIC_RESERVE */

extern void *pcpu_base_addr;

typedef struct page * (*pcpu_get_page_fn_t)(unsigned int cpu, int pageno);
typedef void (*pcpu_populate_pte_fn_t)(unsigned long addr);

extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
                                        size_t static_size,
                                        ssize_t unit_size, ssize_t dyn_size,
                                        void *base_addr,
                                        pcpu_populate_pte_fn_t populate_pte_fn);
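
/*
 * Example (illustrative sketch only, not part of this header): as the
 * commit title above notes, a negative unit_size or dyn_size asks
 * pcpu_setup_first_chunk() to pick the value automatically.  An arch's
 * setup_per_cpu_areas() might call it roughly like this; my_get_page()
 * and my_populate_pte() are hypothetical callbacks.
 */
#if 0   /* usage sketch */
        size_t static_size = __per_cpu_end - __per_cpu_start;
        size_t unit_size;

        unit_size = pcpu_setup_first_chunk(my_get_page, static_size,
                                           -1, -1,      /* auto unit/dyn size */
                                           NULL,        /* no pre-mapped base */
                                           my_populate_pte);
#endif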

/*
 * Use this to get to a cpu's version of the per-cpu object
 * dynamically allocated. Non-atomic access to the current CPU's
 * version should probably be combined with get_cpu()/put_cpu().
 */
#define per_cpu_ptr(ptr, cpu)   SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))

#else /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */

struct percpu_data {
        void *ptrs[1];
};

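/*
 * In this configuration __alloc_percpu() hands back a disguised (bitwise
 * negated) pointer to a struct percpu_data holding one pointer per cpu,
 * so the returned cookie cannot be dereferenced directly; per_cpu_ptr()
 * applies the same disguise to recover the real structure.
 */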
#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)

#define per_cpu_ptr(ptr, cpu)                                           \
({                                                                      \
        struct percpu_data *__p = __percpu_disguise(ptr);               \
        (__typeof__(ptr))__p->ptrs[(cpu)];                              \
})

#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */

extern void *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void *__pdata);

#else /* CONFIG_SMP */

#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })

static inline void *__alloc_percpu(size_t size, size_t align)
{
        /*
         * Can't easily make larger alignment work with kmalloc.  WARN
         * on it.  Larger alignment should only be used for module
         * percpu sections on SMP for which this path isn't used.
         */
        WARN_ON_ONCE(align > SMP_CACHE_BYTES);
        return kzalloc(size, GFP_KERNEL);
}

static inline void free_percpu(void *p)
{
        kfree(p);
}

#endif /* CONFIG_SMP */

#define alloc_percpu(type)      (type *)__alloc_percpu(sizeof(type), \
                                                       __alignof__(type))
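
/*
 * Example (illustrative sketch only, not part of this header): dynamic
 * per-cpu data is allocated with alloc_percpu(), each cpu's copy is
 * reached through per_cpu_ptr(), and the whole object is released with
 * free_percpu().  struct my_stats and my_stats_demo() are hypothetical.
 */
#if 0   /* usage sketch */
struct my_stats {
        unsigned long hits;
};

static int my_stats_demo(void)
{
        struct my_stats *stats = alloc_percpu(struct my_stats);
        int cpu;

        if (!stats)
                return -ENOMEM;

        for_each_possible_cpu(cpu)
                per_cpu_ptr(stats, cpu)->hits = 0;

        cpu = get_cpu();                /* pin this cpu for non-atomic access */
        per_cpu_ptr(stats, cpu)->hits++;
        put_cpu();

        free_percpu(stats);
        return 0;
}
#endif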

#endif /* __LINUX_PERCPU_H */