include/linux/workqueue.h

/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <asm/atomic.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

struct work_struct {
        atomic_long_t data;
#define WORK_STRUCT_PENDING 0           /* T if work item pending execution */
#define WORK_STRUCT_FLAG_MASK (3UL)
#define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK)
        struct list_head entry;
        work_func_t func;
#ifdef CONFIG_LOCKDEP
        struct lockdep_map lockdep_map;
#endif
};

#define WORK_DATA_INIT()        ATOMIC_LONG_INIT(0)

struct delayed_work {
        struct work_struct work;
        struct timer_list timer;
};

struct execute_work {
        struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
        .lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {                              \
        .data = WORK_DATA_INIT(),                               \
        .entry  = { &(n).entry, &(n).entry },                   \
        .func = (f),                                            \
        __WORK_INIT_LOCKDEP_MAP(#n, &(n))                       \
        }

#define __DELAYED_WORK_INITIALIZER(n, f) {                      \
        .work = __WORK_INITIALIZER((n).work, (f)),              \
        .timer = TIMER_INITIALIZER(NULL, 0, 0),                 \
        }

#define DECLARE_WORK(n, f)                                      \
        struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)                              \
        struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)

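/*
 * Usage sketch (illustrative only, not part of this header): work items
 * can be declared and initialised at compile time with the macros above.
 * The names my_work_fn, my_work, my_poll_fn and my_poll are hypothetical.
 *
 *      static void my_work_fn(struct work_struct *work)
 *      {
 *              printk(KERN_INFO "my_work ran\n");
 *      }
 *
 *      static void my_poll_fn(struct work_struct *work)
 *      {
 *              printk(KERN_INFO "my_poll ran\n");
 *      }
 *
 *      static DECLARE_WORK(my_work, my_work_fn);
 *      static DECLARE_DELAYED_WORK(my_poll, my_poll_fn);
 */
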
/*
 * initialize a work item's function pointer
 */
#define PREPARE_WORK(_work, _func)                              \
        do {                                                    \
                (_work)->func = (_func);                        \
        } while (0)

#define PREPARE_DELAYED_WORK(_work, _func)                      \
        PREPARE_WORK(&(_work)->work, (_func))

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define INIT_WORK(_work, _func)                                         \
        do {                                                            \
                static struct lock_class_key __key;                     \
                                                                        \
                (_work)->data = (atomic_long_t) WORK_DATA_INIT();       \
                lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0);\
                INIT_LIST_HEAD(&(_work)->entry);                        \
                PREPARE_WORK((_work), (_func));                         \
        } while (0)
#else
#define INIT_WORK(_work, _func)                                         \
        do {                                                            \
                (_work)->data = (atomic_long_t) WORK_DATA_INIT();       \
                INIT_LIST_HEAD(&(_work)->entry);                        \
                PREPARE_WORK((_work), (_func));                         \
        } while (0)
#endif

#define INIT_DELAYED_WORK(_work, _func)                         \
        do {                                                    \
                INIT_WORK(&(_work)->work, (_func));             \
                init_timer(&(_work)->timer);                    \
        } while (0)

#define INIT_DELAYED_WORK_DEFERRABLE(_work, _func)              \
        do {                                                    \
                INIT_WORK(&(_work)->work, (_func));             \
                init_timer_deferrable(&(_work)->timer);         \
        } while (0)

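/*
 * Usage sketch (illustrative only, not part of this header): work items
 * embedded in a dynamically allocated object are set up at runtime with
 * INIT_WORK()/INIT_DELAYED_WORK().  struct my_dev, my_reset_fn and
 * my_poll_fn are hypothetical.
 *
 *      struct my_dev {
 *              struct work_struct reset_work;
 *              struct delayed_work poll_work;
 *      };
 *
 *      static void my_dev_setup(struct my_dev *dev)
 *      {
 *              INIT_WORK(&dev->reset_work, my_reset_fn);
 *              INIT_DELAYED_WORK(&dev->poll_work, my_poll_fn);
 *      }
 */
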
/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
        test_bit(WORK_STRUCT_PENDING, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @work: The work item in question
 */
#define delayed_work_pending(w) \
        work_pending(&(w)->work)

/**
 * work_clear_pending - for internal use only, mark a work item as not pending
 * @work: The work item in question
 */
#define work_clear_pending(work) \
        clear_bit(WORK_STRUCT_PENDING, work_data_bits(work))


extern struct workqueue_struct *
__create_workqueue_key(const char *name, int singlethread,
                       int freezeable, struct lock_class_key *key,
                       const char *lock_name);

#ifdef CONFIG_LOCKDEP
#define __create_workqueue(name, singlethread, freezeable)      \
({                                                              \
        static struct lock_class_key __key;                     \
        const char *__lock_name;                                \
                                                                \
        if (__builtin_constant_p(name))                         \
                __lock_name = (name);                           \
        else                                                    \
                __lock_name = #name;                            \
                                                                \
        __create_workqueue_key((name), (singlethread),          \
                               (freezeable), &__key,            \
                               __lock_name);                    \
})
#else
#define __create_workqueue(name, singlethread, freezeable)      \
        __create_workqueue_key((name), (singlethread), (freezeable), NULL, NULL)
#endif

#define create_workqueue(name) __create_workqueue((name), 0, 0)
#define create_freezeable_workqueue(name) __create_workqueue((name), 1, 1)
#define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0)

extern void destroy_workqueue(struct workqueue_struct *wq);

extern int queue_work(struct workqueue_struct *wq, struct work_struct *work);
extern int queue_work_on(int cpu, struct workqueue_struct *wq,
                        struct work_struct *work);
extern int queue_delayed_work(struct workqueue_struct *wq,
                        struct delayed_work *work, unsigned long delay);
extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
                        struct delayed_work *work, unsigned long delay);

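/*
 * Usage sketch (illustrative only, not part of this header): a subsystem
 * that wants its own worker thread creates a dedicated queue and submits
 * work to it.  my_wq, my_work and my_poll are hypothetical; my_poll is
 * assumed to be a struct delayed_work, and a delay of HZ jiffies is
 * roughly one second.
 *
 *      static struct workqueue_struct *my_wq;
 *
 *      my_wq = create_singlethread_workqueue("my_wq");
 *      if (!my_wq)
 *              return -ENOMEM;
 *
 *      queue_work(my_wq, &my_work);
 *      queue_delayed_work(my_wq, &my_poll, HZ);
 *
 *      flush_workqueue(my_wq);
 *      destroy_workqueue(my_wq);
 */
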
extern void flush_workqueue(struct workqueue_struct *wq);
extern void flush_scheduled_work(void);

extern int schedule_work(struct work_struct *work);
extern int schedule_work_on(int cpu, struct work_struct *work);
extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay);
extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
                                        unsigned long delay);
extern int schedule_on_each_cpu(work_func_t func);
extern int current_is_keventd(void);
extern int keventd_up(void);

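/*
 * Usage sketch (illustrative only, not part of this header): code with no
 * special ordering or latency requirements can use the shared kernel-global
 * (keventd) queue instead of creating its own workqueue.  my_work and
 * my_poll are hypothetical.
 *
 *      schedule_work(&my_work);                  (runs soon, on any CPU)
 *      schedule_work_on(0, &my_work);            (runs on CPU 0)
 *      schedule_delayed_work(&my_poll, 5 * HZ);  (runs ~5 seconds later)
 *
 *      flush_scheduled_work();                   (wait for them to finish)
 */
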
extern void init_workqueues(void);
int execute_in_process_context(work_func_t fn, struct execute_work *);

extern int flush_work(struct work_struct *work);

extern int cancel_work_sync(struct work_struct *work);

/*
 * Kill off a pending schedule_delayed_work().  Note that the work callback
 * function may still be running on return from cancel_delayed_work(), unless
 * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or
 * cancel_work_sync() to wait on it.
 */
static inline int cancel_delayed_work(struct delayed_work *work)
{
        int ret;

        ret = del_timer_sync(&work->timer);
        if (ret)
                work_clear_pending(&work->work);
        return ret;
}

extern int cancel_delayed_work_sync(struct delayed_work *work);

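/*
 * Usage sketch (illustrative only, not part of this header): typical
 * teardown of a self-rearming delayed work item.  my_poll is hypothetical.
 *
 *      cancel_delayed_work(&my_poll);       (stop the pending timer; the
 *                                            handler may still be running)
 *      cancel_delayed_work_sync(&my_poll);  (also wait for a running handler
 *                                            and catch it if it re-arms)
 */
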
/* Obsolete. Use cancel_delayed_work_sync() */
static inline
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
                                        struct delayed_work *work)
{
        cancel_delayed_work_sync(work);
}

/* Obsolete. Use cancel_delayed_work_sync() */
static inline
void cancel_rearming_delayed_work(struct delayed_work *work)
{
        cancel_delayed_work_sync(work);
}

#endif  /* _LINUX_WORKQUEUE_H */