Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * workqueue.h --- work queue handling for Linux. | |
3 | */ | |
4 | ||
5 | #ifndef _LINUX_WORKQUEUE_H | |
6 | #define _LINUX_WORKQUEUE_H | |
7 | ||
8 | #include <linux/timer.h> | |
9 | #include <linux/linkage.h> | |
10 | #include <linux/bitops.h> | |
4e6045f1 | 11 | #include <linux/lockdep.h> |
a08727ba | 12 | #include <asm/atomic.h> |
1da177e4 LT |
13 | |
14 | struct workqueue_struct; | |
15 | ||
65f27f38 DH |
16 | struct work_struct; |
17 | typedef void (*work_func_t)(struct work_struct *work); | |
6bb49e59 | 18 | |
a08727ba LT |
19 | /* |
20 | * The first word is the work queue pointer and the flags rolled into | |
21 | * one | |
22 | */ | |
23 | #define work_data_bits(work) ((unsigned long *)(&(work)->data)) | |
24 | ||
1da177e4 | 25 | struct work_struct { |
a08727ba | 26 | atomic_long_t data; |
365970a1 DH |
27 | #define WORK_STRUCT_PENDING 0 /* T if work item pending execution */ |
28 | #define WORK_STRUCT_FLAG_MASK (3UL) | |
29 | #define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK) | |
1da177e4 | 30 | struct list_head entry; |
6bb49e59 | 31 | work_func_t func; |
4e6045f1 JB |
32 | #ifdef CONFIG_LOCKDEP |
33 | struct lockdep_map lockdep_map; | |
34 | #endif | |
52bad64d DH |
35 | }; |
36 | ||
23b2e599 | 37 | #define WORK_DATA_INIT() ATOMIC_LONG_INIT(0) |
a08727ba | 38 | |
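As the comment above explains, the queue pointer and the flag bits share the single `data` word; this works because the queue structure is word-aligned, leaving the low two bits free for flags. A minimal illustration of the packing (a sketch only, not the kernel's internal code; `pack_wq_data()`/`unpack_wq_data()` are hypothetical names):

```c
/* Sketch only: show how a queue pointer and the pending flag share one word. */
static inline void pack_wq_data(struct work_struct *work, void *queue)
{
	unsigned long word = (unsigned long)queue | (1UL << WORK_STRUCT_PENDING);

	/* preserve any flag bits already set, replace only the pointer part */
	word |= *work_data_bits(work) & WORK_STRUCT_FLAG_MASK;
	atomic_long_set(&work->data, word);
}

static inline void *unpack_wq_data(struct work_struct *work)
{
	/* mask off the low flag bits to recover the aligned pointer */
	return (void *)(atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}
```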
52bad64d DH |
39 | struct delayed_work { |
40 | struct work_struct work; | |
1da177e4 LT |
41 | struct timer_list timer; |
42 | }; | |
43 | ||
bf6aede7 JD |
44 | static inline struct delayed_work *to_delayed_work(struct work_struct *work) |
45 | { | |
46 | return container_of(work, struct delayed_work, work); | |
47 | } | |
48 | ||
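to_delayed_work() lets a handler that only receives a `struct work_struct *` recover the enclosing delayed_work, and from there (via container_of()) the driver object that embeds it. A hypothetical sketch; `struct my_device` and `my_poll()` are not part of this header:

```c
struct my_device {
	struct delayed_work poll_work;
	int poll_count;
};

static void my_poll(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct my_device *dev = container_of(dwork, struct my_device, poll_work);

	dev->poll_count++;
	/* re-arm: run again in one second */
	schedule_delayed_work(dwork, HZ);
}
```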
1fa44eca JB |
49 | struct execute_work { |
50 | struct work_struct work; | |
51 | }; | |
52 | ||
4e6045f1 JB |
53 | #ifdef CONFIG_LOCKDEP |
54 | /* | |
55 | * NB: because we have to copy the lockdep_map, setting _key | |
56 | * here is required, otherwise it could get initialised to the | |
57 | * copy of the lockdep_map! | |
58 | */ | |
59 | #define __WORK_INIT_LOCKDEP_MAP(n, k) \ | |
60 | .lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k), | |
61 | #else | |
62 | #define __WORK_INIT_LOCKDEP_MAP(n, k) | |
63 | #endif | |
64 | ||
65f27f38 | 65 | #define __WORK_INITIALIZER(n, f) { \ |
23b2e599 ON |
66 | .data = WORK_DATA_INIT(), \ |
67 | .entry = { &(n).entry, &(n).entry }, \ | |
65f27f38 | 68 | .func = (f), \ |
4e6045f1 | 69 | __WORK_INIT_LOCKDEP_MAP(#n, &(n)) \ |
65f27f38 DH |
70 | } |
71 | ||
72 | #define __DELAYED_WORK_INITIALIZER(n, f) { \ | |
73 | .work = __WORK_INITIALIZER((n).work, (f)), \ | |
74 | .timer = TIMER_INITIALIZER(NULL, 0, 0), \ | |
75 | } | |
76 | ||
65f27f38 DH |
77 | #define DECLARE_WORK(n, f) \ |
78 | struct work_struct n = __WORK_INITIALIZER(n, f) | |
79 | ||
65f27f38 DH |
80 | #define DECLARE_DELAYED_WORK(n, f) \ |
81 | struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f) | |
82 | ||
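DECLARE_WORK() and DECLARE_DELAYED_WORK() build fully initialised, statically allocated work items, so no runtime INIT_WORK() call is needed before queueing them. A hypothetical sketch; `cache_flush()` and the item names are illustrative:

```c
static void cache_flush(struct work_struct *unused)
{
	/* runs later in process context, from a workqueue thread */
}

static DECLARE_WORK(cache_flush_work, cache_flush);
static DECLARE_DELAYED_WORK(cache_flush_dwork, cache_flush);

/* e.g. from an interrupt handler:
 *	schedule_work(&cache_flush_work);
 *	schedule_delayed_work(&cache_flush_dwork, 5 * HZ);
 */
```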
1da177e4 | 83 | /* |
65f27f38 | 84 | * initialize a work item's function pointer |
1da177e4 | 85 | */ |
65f27f38 | 86 | #define PREPARE_WORK(_work, _func) \ |
1da177e4 | 87 | do { \ |
52bad64d | 88 | (_work)->func = (_func); \ |
1da177e4 LT |
89 | } while (0) |
90 | ||
65f27f38 DH |
91 | #define PREPARE_DELAYED_WORK(_work, _func) \ |
92 | PREPARE_WORK(&(_work)->work, (_func)) | |
52bad64d | 93 | |
1da177e4 | 94 | /* |
52bad64d | 95 | * initialize all of a work item in one go |
a08727ba LT |
96 | * |
97 | * NOTE! No point in using "atomic_long_set()": using a direct | |
98 | * assignment of the work data initializer allows the compiler | |
99 | * to generate better code. | |
1da177e4 | 100 | */ |
4e6045f1 | 101 | #ifdef CONFIG_LOCKDEP |
23b2e599 | 102 | #define INIT_WORK(_work, _func) \ |
65f27f38 | 103 | do { \ |
4e6045f1 JB |
104 | static struct lock_class_key __key; \ |
105 | \ | |
23b2e599 | 106 | (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \ |
4e6045f1 | 107 | lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0);\ |
65f27f38 DH |
108 | INIT_LIST_HEAD(&(_work)->entry); \ |
109 | PREPARE_WORK((_work), (_func)); \ | |
110 | } while (0) | |
4e6045f1 JB |
111 | #else |
112 | #define INIT_WORK(_work, _func) \ | |
113 | do { \ | |
114 | (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \ | |
115 | INIT_LIST_HEAD(&(_work)->entry); \ | |
116 | PREPARE_WORK((_work), (_func)); \ | |
117 | } while (0) | |
118 | #endif | |
65f27f38 DH |
119 | |
120 | #define INIT_DELAYED_WORK(_work, _func) \ | |
121 | do { \ | |
122 | INIT_WORK(&(_work)->work, (_func)); \ | |
123 | init_timer(&(_work)->timer); \ | |
52bad64d DH |
124 | } while (0) |
125 | ||
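For work items embedded in dynamically allocated objects, INIT_WORK()/INIT_DELAYED_WORK() do the same job at runtime. A hypothetical sketch; `struct my_ctx`, `my_io_fn()` and `my_retry_fn()` are not part of this header, and kzalloc() needs `<linux/slab.h>`:

```c
struct my_ctx {
	struct work_struct io_work;
	struct delayed_work retry_work;
};

static void my_io_fn(struct work_struct *work)
{
	/* handle queued I/O in process context */
}

static void my_retry_fn(struct work_struct *work)
{
	/* retry path, runs after the delayed_work's timer fires */
}

static struct my_ctx *my_ctx_alloc(void)
{
	struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return NULL;
	INIT_WORK(&ctx->io_work, my_io_fn);
	INIT_DELAYED_WORK(&ctx->retry_work, my_retry_fn);
	return ctx;
}
```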
6d612b0f PZ |
126 | #define INIT_DELAYED_WORK_ON_STACK(_work, _func) \ |
127 | do { \ | |
128 | INIT_WORK(&(_work)->work, (_func)); \ | |
129 | init_timer_on_stack(&(_work)->timer); \ | |
130 | } while (0) | |
131 | ||
28287033 VP |
132 | #define INIT_DELAYED_WORK_DEFERRABLE(_work, _func) \ |
133 | do { \ | |
134 | INIT_WORK(&(_work)->work, (_func)); \ | |
135 | init_timer_deferrable(&(_work)->timer); \ | |
136 | } while (0) | |
137 | ||
365970a1 DH |
144 | /** |
145 | * work_pending - Find out whether a work item is currently pending | |
146 | * @work: The work item in question | |
147 | */ | |
148 | #define work_pending(work) \ | |
a08727ba | 149 | test_bit(WORK_STRUCT_PENDING, work_data_bits(work)) |
365970a1 DH |
150 | |
151 | /** | |
152 | * delayed_work_pending - Find out whether a delayable work item is currently | |
153 | * pending | |
154 | * @w: The work item in question | |
155 | */ | |
0221872a LT |
156 | #define delayed_work_pending(w) \ |
157 | work_pending(&(w)->work) | |
365970a1 | 158 | |
65f27f38 | 159 | /** |
23b2e599 ON |
160 | * work_clear_pending - for internal use only, mark a work item as not pending |
161 | * @work: The work item in question | |
65f27f38 | 162 | */ |
23b2e599 | 163 | #define work_clear_pending(work) \ |
a08727ba | 164 | clear_bit(WORK_STRUCT_PENDING, work_data_bits(work)) |
65f27f38 | 165 | |
52bad64d | 166 | |
4e6045f1 JB |
167 | extern struct workqueue_struct * |
168 | __create_workqueue_key(const char *name, int singlethread, | |
0d557dc9 | 169 | int freezeable, int rt, struct lock_class_key *key, |
eb13ba87 | 170 | const char *lock_name); |
4e6045f1 JB |
171 | |
172 | #ifdef CONFIG_LOCKDEP | |
0d557dc9 | 173 | #define __create_workqueue(name, singlethread, freezeable, rt) \ |
4e6045f1 JB |
174 | ({ \ |
175 | static struct lock_class_key __key; \ | |
eb13ba87 JB |
176 | const char *__lock_name; \ |
177 | \ | |
178 | if (__builtin_constant_p(name)) \ | |
179 | __lock_name = (name); \ | |
180 | else \ | |
181 | __lock_name = #name; \ | |
4e6045f1 JB |
182 | \ |
183 | __create_workqueue_key((name), (singlethread), \ | |
0d557dc9 | 184 | (freezeable), (rt), &__key, \ |
eb13ba87 | 185 | __lock_name); \ |
4e6045f1 JB |
186 | }) |
187 | #else | |
0d557dc9 HC |
188 | #define __create_workqueue(name, singlethread, freezeable, rt) \ |
189 | __create_workqueue_key((name), (singlethread), (freezeable), (rt), \ | |
190 | NULL, NULL) | |
4e6045f1 JB |
191 | #endif |
192 | ||
0d557dc9 HC |
193 | #define create_workqueue(name) __create_workqueue((name), 0, 0, 0) |
194 | #define create_rt_workqueue(name) __create_workqueue((name), 0, 0, 1) | |
195 | #define create_freezeable_workqueue(name) __create_workqueue((name), 1, 1, 0) | |
196 | #define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0, 0) | |
1da177e4 LT |
197 | |
198 | extern void destroy_workqueue(struct workqueue_struct *wq); | |
199 | ||
b3c97528 | 200 | extern int queue_work(struct workqueue_struct *wq, struct work_struct *work); |
c1a220e7 ZR |
201 | extern int queue_work_on(int cpu, struct workqueue_struct *wq, |
202 | struct work_struct *work); | |
b3c97528 HH |
203 | extern int queue_delayed_work(struct workqueue_struct *wq, |
204 | struct delayed_work *work, unsigned long delay); | |
7a6bc1cd | 205 | extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, |
28e53bdd ON |
206 | struct delayed_work *work, unsigned long delay); |
207 | ||
b3c97528 | 208 | extern void flush_workqueue(struct workqueue_struct *wq); |
28e53bdd | 209 | extern void flush_scheduled_work(void); |
1da177e4 | 210 | |
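Taken together, a driver typically creates its queue once, queues work on it as events arrive, and flushes and destroys it on teardown. A hypothetical sketch; `my_wq`, `my_work` and `my_work_fn()` are illustrative names:

```c
static void my_work_fn(struct work_struct *work)
{
	/* deferred, process-context handling goes here */
}

static DECLARE_WORK(my_work, my_work_fn);
static struct workqueue_struct *my_wq;

static int my_init(void)
{
	my_wq = create_singlethread_workqueue("my_wq");
	if (!my_wq)
		return -ENOMEM;
	queue_work(my_wq, &my_work);
	return 0;
}

static void my_exit(void)
{
	flush_workqueue(my_wq);		/* wait for anything still pending */
	destroy_workqueue(my_wq);
}
```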
b3c97528 | 211 | extern int schedule_work(struct work_struct *work); |
c1a220e7 | 212 | extern int schedule_work_on(int cpu, struct work_struct *work); |
b3c97528 | 213 | extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay); |
28e53bdd ON |
214 | extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, |
215 | unsigned long delay); | |
65f27f38 | 216 | extern int schedule_on_each_cpu(work_func_t func); |
1da177e4 LT |
217 | extern int current_is_keventd(void); |
218 | extern int keventd_up(void); | |
219 | ||
220 | extern void init_workqueues(void); | |
65f27f38 | 221 | int execute_in_process_context(work_func_t fn, struct execute_work *); |
1da177e4 | 222 | |
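execute_in_process_context() either calls the function immediately (when the caller is not in interrupt context) or defers it via the shared keventd queue, using the caller-supplied execute_work as storage. A hypothetical sketch; `struct my_obj` and the release helpers are illustrative, and kfree() needs `<linux/slab.h>`:

```c
struct my_obj {
	struct execute_work ew;
	/* ... payload ... */
};

static void my_obj_release(struct work_struct *work)
{
	struct my_obj *obj = container_of(work, struct my_obj, ew.work);

	kfree(obj);
}

static void my_obj_free(struct my_obj *obj)
{
	/* runs my_obj_release() now, or via keventd if called from interrupt context */
	execute_in_process_context(my_obj_release, &obj->ew);
}
```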
db700897 ON |
223 | extern int flush_work(struct work_struct *work); |
224 | ||
1f1f642e | 225 | extern int cancel_work_sync(struct work_struct *work); |
28e53bdd | 226 | |
1da177e4 LT |
227 | /* |
228 | * Kill off a pending schedule_delayed_work(). Note that the work callback | |
071b6386 ON |
229 | * function may still be running on return from cancel_delayed_work(), unless |
230 | * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or | |
28e53bdd | 231 | * cancel_work_sync() to wait on it. |
1da177e4 | 232 | */ |
52bad64d | 233 | static inline int cancel_delayed_work(struct delayed_work *work) |
1da177e4 LT |
234 | { |
235 | int ret; | |
236 | ||
223a10a9 | 237 | ret = del_timer_sync(&work->timer); |
1da177e4 | 238 | if (ret) |
23b2e599 | 239 | work_clear_pending(&work->work); |
1da177e4 LT |
240 | return ret; |
241 | } | |
242 | ||
1f1f642e | 243 | extern int cancel_delayed_work_sync(struct delayed_work *work); |
1634c48f | 244 | |
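Per the comment above cancel_delayed_work(), a return value of 0 means the timer had already fired, so the handler may be queued or running. A hypothetical teardown sketch, assuming the item was queued with schedule_delayed_work() (i.e. on the shared keventd queue):

```c
static void my_stop(struct delayed_work *dwork)
{
	if (!cancel_delayed_work(dwork)) {
		/* timer already fired: wait for the queued/running handler */
		flush_scheduled_work();
	}

	/* Simpler alternative (may sleep): */
	/* cancel_delayed_work_sync(dwork); */
}
```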
f5a421a4 | 245 | /* Obsolete. Use cancel_delayed_work_sync() */
1634c48f ON |
246 | static inline |
247 | void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq, | |
248 | struct delayed_work *work) | |
249 | { | |
f5a421a4 ON |
250 | cancel_delayed_work_sync(work); |
251 | } | |
252 | ||
253 | /* Obsolete. Use cancel_delayed_work_sync() */ | |
254 | static inline | |
255 | void cancel_rearming_delayed_work(struct delayed_work *work) | |
256 | { | |
257 | cancel_delayed_work_sync(work); | |
1634c48f ON |
258 | } |
259 | ||
2d3854a3 RR |
260 | #ifndef CONFIG_SMP |
261 | static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) | |
262 | { | |
263 | return fn(arg); | |
264 | } | |
265 | #else | |
266 | long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg); | |
267 | #endif /* CONFIG_SMP */ | |
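work_on_cpu() runs fn(arg) in process context on the requested CPU and returns fn's return value; on !SMP builds it degenerates to a direct call, as shown above. A hypothetical sketch; `probe_node()` and `node_of_cpu()` are illustrative, and numa_node_id() comes from `<linux/topology.h>`:

```c
static long probe_node(void *arg)
{
	int *node = arg;

	*node = numa_node_id();		/* evaluated on the target CPU */
	return 0;
}

static int node_of_cpu(unsigned int cpu)
{
	int node = -1;

	work_on_cpu(cpu, probe_node, &node);
	return node;
}
```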
1da177e4 | 268 | #endif |