/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

        spinlock_t lock;

        struct list_head worklist;
        wait_queue_head_t more_work;
        struct work_struct *current_work;

        struct workqueue_struct *wq;
        struct task_struct *thread;

        int run_depth;          /* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
        struct cpu_workqueue_struct *cpu_wq;
        struct list_head list;
        const char *name;
        int singlethread;
        int freezeable;         /* Freeze threads during suspend */
};

/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static cpumask_t cpu_singlethread_map __read_mostly;
/*
 * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
 * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
 * which comes in between can't use for_each_online_cpu(). We could
 * use cpu_possible_map, so the cpumask below is more documentation
 * than an optimization.
 */
static cpumask_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
        return wq->singlethread;
}

static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
{
        return is_single_threaded(wq)
                ? &cpu_singlethread_map : &cpu_populated_map;
}

static
struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
{
        if (unlikely(is_single_threaded(wq)))
                cpu = singlethread_cpu;
        return per_cpu_ptr(wq->cpu_wq, cpu);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
                                struct cpu_workqueue_struct *cwq)
{
        unsigned long new;

        BUG_ON(!work_pending(work));

        new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
        new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
        atomic_long_set(&work->data, new);
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
        return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}

static void insert_work(struct cpu_workqueue_struct *cwq,
                                struct work_struct *work, int tail)
{
        set_wq_data(work, cwq);
        /*
         * Ensure that we get the right work->data if we see the
         * result of list_add() below, see try_to_grab_pending().
         */
        smp_wmb();
        if (tail)
                list_add_tail(&work->entry, &cwq->worklist);
        else
                list_add(&work->entry, &cwq->worklist);
        wake_up(&cwq->more_work);
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
                         struct work_struct *work)
{
        unsigned long flags;

        spin_lock_irqsave(&cwq->lock, flags);
        insert_work(cwq, work, 1);
        spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
        int ret = 0;

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                BUG_ON(!list_empty(&work->entry));
                __queue_work(wq_per_cpu(wq, get_cpu()), work);
                put_cpu();
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
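
/*
 * Example usage (an illustrative sketch, not part of this file; the
 * "frobnicator" names and the frob_wq workqueue are hypothetical):
 *
 *      struct frobnicator {
 *              struct work_struct work;
 *              ...
 *      };
 *
 *      static void frob_work_fn(struct work_struct *work)
 *      {
 *              struct frobnicator *frob =
 *                      container_of(work, struct frobnicator, work);
 *              // runs in process context, may sleep
 *      }
 *
 *      INIT_WORK(&frob->work, frob_work_fn);
 *      queue_work(frob_wq, &frob->work);       // returns 0 if already pending
 */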

void delayed_work_timer_fn(unsigned long __data)
{
        struct delayed_work *dwork = (struct delayed_work *)__data;
        struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
        struct workqueue_struct *wq = cwq->wq;

        __queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay)
{
        timer_stats_timer_set_start_info(&dwork->timer);
        if (delay == 0)
                return queue_work(wq, &dwork->work);

        return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay)
{
        int ret = 0;
        struct timer_list *timer = &dwork->timer;
        struct work_struct *work = &dwork->work;

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));

                /* This stores cwq for the moment, for the timer_fn */
                set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
                timer->expires = jiffies + delay;
                timer->data = (unsigned long)dwork;
                timer->function = delayed_work_timer_fn;

                if (unlikely(cpu >= 0))
                        add_timer_on(timer, cpu);
                else
                        add_timer(timer);
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
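
/*
 * Example usage (an illustrative sketch, not part of this file; the
 * poll_dwork/poll_fn names and the my_wq workqueue are hypothetical):
 *
 *      static void poll_fn(struct work_struct *work);
 *      static DECLARE_DELAYED_WORK(poll_dwork, poll_fn);
 *
 *      // run poll_fn() on my_wq roughly one second from now
 *      queue_delayed_work(my_wq, &poll_dwork, msecs_to_jiffies(1000));
 *
 *      // or pin the delayed execution to CPU 0
 *      queue_delayed_work_on(0, my_wq, &poll_dwork, HZ);
 */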

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
        spin_lock_irq(&cwq->lock);
        cwq->run_depth++;
        if (cwq->run_depth > 3) {
                /* morton gets to eat his hat */
                printk("%s: recursion depth exceeded: %d\n",
                        __FUNCTION__, cwq->run_depth);
                dump_stack();
        }
        while (!list_empty(&cwq->worklist)) {
                struct work_struct *work = list_entry(cwq->worklist.next,
                                                struct work_struct, entry);
                work_func_t f = work->func;

                cwq->current_work = work;
                list_del_init(cwq->worklist.next);
                spin_unlock_irq(&cwq->lock);

                BUG_ON(get_wq_data(work) != cwq);
                work_clear_pending(work);
                f(work);

                if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
                        printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
                                        "%s/0x%08x/%d\n",
                                        current->comm, preempt_count(),
                                        current->pid);
                        printk(KERN_ERR "    last function: ");
                        print_symbol("%s\n", (unsigned long)f);
                        debug_show_held_locks(current);
                        dump_stack();
                }

                spin_lock_irq(&cwq->lock);
                cwq->current_work = NULL;
        }
        cwq->run_depth--;
        spin_unlock_irq(&cwq->lock);
}

static int worker_thread(void *__cwq)
{
        struct cpu_workqueue_struct *cwq = __cwq;
        DEFINE_WAIT(wait);

        if (!cwq->wq->freezeable)
                current->flags |= PF_NOFREEZE;

        set_user_nice(current, -5);

        for (;;) {
                prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
                if (!freezing(current) &&
                    !kthread_should_stop() &&
                    list_empty(&cwq->worklist))
                        schedule();
                finish_wait(&cwq->more_work, &wait);

                try_to_freeze();

                if (kthread_should_stop())
                        break;

                run_workqueue(cwq);
        }

        return 0;
}

struct wq_barrier {
        struct work_struct      work;
        struct completion       done;
};

static void wq_barrier_func(struct work_struct *work)
{
        struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
        complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
                                        struct wq_barrier *barr, int tail)
{
        INIT_WORK(&barr->work, wq_barrier_func);
        __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

        init_completion(&barr->done);

        insert_work(cwq, &barr->work, tail);
}

static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
        int active;

        if (cwq->thread == current) {
                /*
                 * Probably keventd trying to flush its own queue. So simply run
                 * it by hand rather than deadlocking.
                 */
                run_workqueue(cwq);
                active = 1;
        } else {
                struct wq_barrier barr;

                active = 0;
                spin_lock_irq(&cwq->lock);
                if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
                        insert_wq_barrier(cwq, &barr, 1);
                        active = 1;
                }
                spin_unlock_irq(&cwq->lock);

                if (active)
                        wait_for_completion(&barr.done);
        }

        return active;
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
        const cpumask_t *cpu_map = wq_cpu_map(wq);
        int cpu;

        might_sleep();
        for_each_cpu_mask(cpu, *cpu_map)
                flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);
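
/*
 * Example usage (an illustrative sketch, not part of this file; my_wq,
 * struct my_device and my_driver_remove() are hypothetical):
 *
 *      static void my_driver_remove(struct my_device *dev)
 *      {
 *              // make sure no work queued by this driver is still running
 *              // before its data structures go away
 *              flush_workqueue(my_wq);
 *              destroy_workqueue(my_wq);
 *      }
 */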

/*
 * Upon a successful return, the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
        struct cpu_workqueue_struct *cwq;
        int ret = 0;

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
                return 1;

        /*
         * The queueing is in progress, or it is already queued. Try to
         * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
         */

        cwq = get_wq_data(work);
        if (!cwq)
                return ret;

        spin_lock_irq(&cwq->lock);
        if (!list_empty(&work->entry)) {
                /*
                 * This work is queued, but perhaps we locked the wrong cwq.
                 * In that case we must see the new value after rmb(), see
                 * insert_work()->wmb().
                 */
                smp_rmb();
                if (cwq == get_wq_data(work)) {
                        list_del_init(&work->entry);
                        ret = 1;
                }
        }
        spin_unlock_irq(&cwq->lock);

        return ret;
}

static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
                                struct work_struct *work)
{
        struct wq_barrier barr;
        int running = 0;

        spin_lock_irq(&cwq->lock);
        if (unlikely(cwq->current_work == work)) {
                insert_wq_barrier(cwq, &barr, 0);
                running = 1;
        }
        spin_unlock_irq(&cwq->lock);

        if (unlikely(running))
                wait_for_completion(&barr.done);
}

static void wait_on_work(struct work_struct *work)
{
        struct cpu_workqueue_struct *cwq;
        struct workqueue_struct *wq;
        const cpumask_t *cpu_map;
        int cpu;

        might_sleep();

        cwq = get_wq_data(work);
        if (!cwq)
                return;

        wq = cwq->wq;
        cpu_map = wq_cpu_map(wq);

        for_each_cpu_mask(cpu, *cpu_map)
                wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}

/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that the workqueue_struct on which this work was
 * last queued can't be destroyed before this function returns.
 */
void cancel_work_sync(struct work_struct *work)
{
        while (!try_to_grab_pending(work))
                cpu_relax();
        wait_on_work(work);
        work_clear_pending(work);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);
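
/*
 * Example usage (an illustrative sketch, not part of this file; the
 * frobnicator names are hypothetical):
 *
 *      static void frob_teardown(struct frobnicator *frob)
 *      {
 *              // drop any pending instance and wait for a running one
 *              cancel_work_sync(&frob->work);
 *              kfree(frob);    // now safe: frob_work_fn() can no longer run
 *      }
 */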

/**
 * cancel_rearming_delayed_work - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
void cancel_rearming_delayed_work(struct delayed_work *dwork)
{
        while (!del_timer(&dwork->timer) &&
               !try_to_grab_pending(&dwork->work))
                cpu_relax();
        wait_on_work(&dwork->work);
        work_clear_pending(&dwork->work);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);

static struct workqueue_struct *keventd_wq __read_mostly;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
        return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
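
/*
 * Example usage (an illustrative sketch, not part of this file; my_irq_fn()
 * and my_bh_fn() are hypothetical):
 *
 *      static void my_bh_fn(struct work_struct *work);
 *      static DECLARE_WORK(my_bh_work, my_bh_fn);
 *
 *      static irqreturn_t my_irq_fn(int irq, void *dev_id)
 *      {
 *              // defer the sleepable part of interrupt handling to keventd
 *              schedule_work(&my_bh_work);
 *              return IRQ_HANDLED;
 *      }
 */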

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct delayed_work *dwork,
                                        unsigned long delay)
{
        timer_stats_timer_set_start_info(&dwork->timer);
        return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);
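
/*
 * Example usage (an illustrative sketch, not part of this file; the
 * led_blink names are hypothetical):
 *
 *      static void led_blink_fn(struct work_struct *work);
 *      static DECLARE_DELAYED_WORK(led_blink_dwork, led_blink_fn);
 *
 *      static void led_blink_fn(struct work_struct *work)
 *      {
 *              // toggle the LED, then re-arm ourselves
 *              schedule_delayed_work(&led_blink_dwork, HZ / 2);
 *      }
 *
 *      schedule_delayed_work(&led_blink_dwork, HZ / 2);        // start blinking
 *      ...
 *      cancel_rearming_delayed_work(&led_blink_dwork);         // stop it again
 */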

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
                        struct delayed_work *dwork, unsigned long delay)
{
        return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns a negative errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
        int cpu;
        struct work_struct *works;

        works = alloc_percpu(struct work_struct);
        if (!works)
                return -ENOMEM;

        preempt_disable();              /* CPU hotplug */
        for_each_online_cpu(cpu) {
                struct work_struct *work = per_cpu_ptr(works, cpu);

                INIT_WORK(work, func);
                set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
                __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
        }
        preempt_enable();
        flush_workqueue(keventd_wq);
        free_percpu(works);
        return 0;
}

void flush_scheduled_work(void)
{
        flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:         the function to execute
 * @ew:         guaranteed storage for the execute work structure (must
 *              be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:     0 - function was executed
 *              1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
        if (!in_interrupt()) {
                fn(&ew->work);
                return 0;
        }

        INIT_WORK(&ew->work, fn);
        schedule_work(&ew->work);

        return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
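
/*
 * Example usage (an illustrative sketch, not part of this file;
 * my_release_fn() and struct my_obj are hypothetical):
 *
 *      static void my_release_fn(struct work_struct *work)
 *      {
 *              struct my_obj *obj = container_of(work, struct my_obj, ew.work);
 *              kfree(obj);     // teardown that may sleep goes here
 *      }
 *
 *      // callable from any context: runs my_release_fn() right away when in
 *      // process context, or defers it to keventd from interrupt context
 *      execute_in_process_context(my_release_fn, &obj->ew);
 */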

int keventd_up(void)
{
        return keventd_wq != NULL;
}

int current_is_keventd(void)
{
        struct cpu_workqueue_struct *cwq;
        int cpu = smp_processor_id();   /* preempt-safe: keventd is per-cpu */
        int ret = 0;

        BUG_ON(!keventd_wq);

        cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
        if (current == cwq->thread)
                ret = 1;

        return ret;
}

static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

        cwq->wq = wq;
        spin_lock_init(&cwq->lock);
        INIT_LIST_HEAD(&cwq->worklist);
        init_waitqueue_head(&cwq->more_work);

        return cwq;
}

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
        struct workqueue_struct *wq = cwq->wq;
        const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
        struct task_struct *p;

        p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
        /*
         * Nobody can add the work_struct to this cwq,
         *      if (caller is __create_workqueue)
         *              nobody should see this wq
         *      else // caller is CPU_UP_PREPARE
         *              cpu is not on cpu_online_map
         * so we can abort safely.
         */
        if (IS_ERR(p))
                return PTR_ERR(p);

        cwq->thread = p;

        return 0;
}

static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
        struct task_struct *p = cwq->thread;

        if (p != NULL) {
                if (cpu >= 0)
                        kthread_bind(p, cpu);
                wake_up_process(p);
        }
}

struct workqueue_struct *__create_workqueue(const char *name,
                                            int singlethread, int freezeable)
{
        struct workqueue_struct *wq;
        struct cpu_workqueue_struct *cwq;
        int err = 0, cpu;

        wq = kzalloc(sizeof(*wq), GFP_KERNEL);
        if (!wq)
                return NULL;

        wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
        if (!wq->cpu_wq) {
                kfree(wq);
                return NULL;
        }

        wq->name = name;
        wq->singlethread = singlethread;
        wq->freezeable = freezeable;
        INIT_LIST_HEAD(&wq->list);

        if (singlethread) {
                cwq = init_cpu_workqueue(wq, singlethread_cpu);
                err = create_workqueue_thread(cwq, singlethread_cpu);
                start_workqueue_thread(cwq, -1);
        } else {
                mutex_lock(&workqueue_mutex);
                list_add(&wq->list, &workqueues);

                for_each_possible_cpu(cpu) {
                        cwq = init_cpu_workqueue(wq, cpu);
                        if (err || !cpu_online(cpu))
                                continue;
                        err = create_workqueue_thread(cwq, cpu);
                        start_workqueue_thread(cwq, cpu);
                }
                mutex_unlock(&workqueue_mutex);
        }

        if (err) {
                destroy_workqueue(wq);
                wq = NULL;
        }
        return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);

static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
        /*
         * Our caller is either destroy_workqueue() or CPU_DEAD,
         * workqueue_mutex protects cwq->thread
         */
        if (cwq->thread == NULL)
                return;

        /*
         * If the caller is CPU_DEAD the single flush_cpu_workqueue()
         * is not enough, a concurrent flush_workqueue() can insert a
         * barrier after us.
         * When ->worklist becomes empty it is safe to exit because no
         * more work_structs can be queued on this cwq: flush_workqueue
         * checks list_empty(), and a "normal" queue_work() can't use
         * a dead CPU.
         */
        while (flush_cpu_workqueue(cwq))
                ;

        kthread_stop(cwq->thread);
        cwq->thread = NULL;
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
        const cpumask_t *cpu_map = wq_cpu_map(wq);
        struct cpu_workqueue_struct *cwq;
        int cpu;

        mutex_lock(&workqueue_mutex);
        list_del(&wq->list);
        mutex_unlock(&workqueue_mutex);

        for_each_cpu_mask(cpu, *cpu_map) {
                cwq = per_cpu_ptr(wq->cpu_wq, cpu);
                cleanup_workqueue_thread(cwq, cpu);
        }

        free_percpu(wq->cpu_wq);
        kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);
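
/*
 * Example usage (an illustrative sketch, not part of this file; my_wq is
 * hypothetical; create_workqueue() is the usual wrapper around
 * __create_workqueue() provided by <linux/workqueue.h>):
 *
 *      static struct workqueue_struct *my_wq;
 *
 *      // module init: one worker thread per CPU, named "my_wq/<cpu>"
 *      my_wq = create_workqueue("my_wq");
 *      if (!my_wq)
 *              return -ENOMEM;
 *
 *      // module exit: finish pending work, stop the threads, free the wq
 *      destroy_workqueue(my_wq);
 */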

static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                                                unsigned long action,
                                                void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        struct cpu_workqueue_struct *cwq;
        struct workqueue_struct *wq;

        action &= ~CPU_TASKS_FROZEN;

        switch (action) {
        case CPU_LOCK_ACQUIRE:
                mutex_lock(&workqueue_mutex);
                return NOTIFY_OK;

        case CPU_LOCK_RELEASE:
                mutex_unlock(&workqueue_mutex);
                return NOTIFY_OK;

        case CPU_UP_PREPARE:
                cpu_set(cpu, cpu_populated_map);
        }

        list_for_each_entry(wq, &workqueues, list) {
                cwq = per_cpu_ptr(wq->cpu_wq, cpu);

                switch (action) {
                case CPU_UP_PREPARE:
                        if (!create_workqueue_thread(cwq, cpu))
                                break;
                        printk(KERN_ERR "workqueue for %i failed\n", cpu);
                        return NOTIFY_BAD;

                case CPU_ONLINE:
                        start_workqueue_thread(cwq, cpu);
                        break;

                case CPU_UP_CANCELED:
                        start_workqueue_thread(cwq, -1);
                case CPU_DEAD:
                        cleanup_workqueue_thread(cwq, cpu);
                        break;
                }
        }

        return NOTIFY_OK;
}

void __init init_workqueues(void)
{
        cpu_populated_map = cpu_online_map;
        singlethread_cpu = first_cpu(cpu_possible_map);
        cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
        hotcpu_notifier(workqueue_cpu_callback, 0);
        keventd_wq = create_workqueue("events");
        BUG_ON(!keventd_wq);
}