/*
 *      linux/kernel/softirq.c
 *
 *      Copyright (C) 1992 Linus Torvalds
 *
 *      Distribute under GPLv2.
 *
 *      Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 *
 *      Remote softirq infrastructure is by Jens Axboe.
 */

#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/tick.h>

#include <asm/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether it will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized with respect to themselves.
 */

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

/*
 * We cannot loop indefinitely here (that would starve userspace),
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static inline void wakeup_softirqd(void)
{
        /* Interrupts are disabled: no need to stop preemption */
        struct task_struct *tsk = __get_cpu_var(ksoftirqd);

        if (tsk && tsk->state != TASK_RUNNING)
                wake_up_process(tsk);
}

/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip)
{
        unsigned long flags;

        WARN_ON_ONCE(in_irq());

        raw_local_irq_save(flags);
        /*
         * The preempt tracer hooks into add_preempt_count and will break
         * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
         * is set and before current->softirq_enabled is cleared.
         * We must manually increment preempt_count here and manually
         * call the trace_preempt_off later.
         */
        preempt_count() += SOFTIRQ_OFFSET;
        /*
         * Were softirqs turned off above:
         */
        if (softirq_count() == SOFTIRQ_OFFSET)
                trace_softirqs_off(ip);
        raw_local_irq_restore(flags);

        if (preempt_count() == SOFTIRQ_OFFSET)
                trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip)
{
        add_preempt_count(SOFTIRQ_OFFSET);
        barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */

void local_bh_disable(void)
{
        __local_bh_disable((unsigned long)__builtin_return_address(0));
}

EXPORT_SYMBOL(local_bh_disable);

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
        WARN_ON_ONCE(in_irq());
        WARN_ON_ONCE(!irqs_disabled());

        if (softirq_count() == SOFTIRQ_OFFSET)
                trace_softirqs_on((unsigned long)__builtin_return_address(0));
        sub_preempt_count(SOFTIRQ_OFFSET);
}

EXPORT_SYMBOL(_local_bh_enable);

static inline void _local_bh_enable_ip(unsigned long ip)
{
        WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_disable();
#endif
        /*
         * Are softirqs going to be turned on now:
         */
        if (softirq_count() == SOFTIRQ_OFFSET)
                trace_softirqs_on(ip);
        /*
         * Keep preemption disabled until we are done with
         * softirq processing:
         */
        sub_preempt_count(SOFTIRQ_OFFSET - 1);

        if (unlikely(!in_interrupt() && local_softirq_pending()))
                do_softirq();

        dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_enable();
#endif
        preempt_check_resched();
}

void local_bh_enable(void)
{
        _local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(local_bh_enable);

void local_bh_enable_ip(unsigned long ip)
{
        _local_bh_enable_ip(ip);
}
EXPORT_SYMBOL(local_bh_enable_ip);

/*
 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
 * and we fall back to softirqd after that.
 *
 * This number has been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_RESTART 10

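/*
 * Core softirq loop: run every pending handler with interrupts enabled,
 * restarting the scan up to MAX_SOFTIRQ_RESTART times before deferring
 * whatever is still pending to ksoftirqd.
 */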
asmlinkage void __do_softirq(void)
{
        struct softirq_action *h;
        __u32 pending;
        int max_restart = MAX_SOFTIRQ_RESTART;
        int cpu;

        pending = local_softirq_pending();
        account_system_vtime(current);

        __local_bh_disable((unsigned long)__builtin_return_address(0));
        trace_softirq_enter();

        cpu = smp_processor_id();
restart:
        /* Reset the pending bitmask before enabling irqs */
        set_softirq_pending(0);

        local_irq_enable();

        h = softirq_vec;

        do {
                if (pending & 1) {
                        int prev_count = preempt_count();

                        h->action(h);

                        if (unlikely(prev_count != preempt_count())) {
                                printk(KERN_ERR "huh, entered softirq %td %p "
                                       "with preempt_count %08x,"
                                       " exited with %08x?\n", h - softirq_vec,
                                       h->action, prev_count, preempt_count());
                                preempt_count() = prev_count;
                        }

                        rcu_bh_qsctr_inc(cpu);
                }
                h++;
                pending >>= 1;
        } while (pending);

        local_irq_disable();

        pending = local_softirq_pending();
        if (pending && --max_restart)
                goto restart;

        if (pending)
                wakeup_softirqd();

        trace_softirq_exit();

        account_system_vtime(current);
        _local_bh_enable();
}

#ifndef __ARCH_HAS_DO_SOFTIRQ

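/*
 * Run pending softirqs from non-interrupt context. This is a no-op if we
 * are already inside hard or soft interrupt context; the pending work is
 * picked up when that context exits.
 */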
asmlinkage void do_softirq(void)
{
        __u32 pending;
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        pending = local_softirq_pending();

        if (pending)
                __do_softirq();

        local_irq_restore(flags);
}

#endif

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
        int cpu = smp_processor_id();

        rcu_irq_enter();
        if (idle_cpu(cpu) && !in_interrupt()) {
                __irq_enter();
                tick_check_idle(cpu);
        } else
                __irq_enter();
}

#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
# define invoke_softirq()       __do_softirq()
#else
# define invoke_softirq()       do_softirq()
#endif

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
        account_system_vtime(current);
        trace_hardirq_exit();
        sub_preempt_count(IRQ_EXIT_OFFSET);
        if (!in_interrupt() && local_softirq_pending())
                invoke_softirq();

#ifdef CONFIG_NO_HZ
        /* Make sure that timer wheel updates are propagated */
        rcu_irq_exit();
        if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
                tick_nohz_stop_sched_tick(0);
#endif
        preempt_enable_no_resched();
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
        __raise_softirq_irqoff(nr);

        /*
         * If we're in an interrupt or softirq, we're done
         * (this also catches softirq-disabled code). We will
         * actually run the softirq once we return from
         * the irq or softirq.
         *
         * Otherwise we wake up ksoftirqd to make sure we
         * schedule the softirq soon.
         */
        if (!in_interrupt())
                wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
        unsigned long flags;

        local_irq_save(flags);
        raise_softirq_irqoff(nr);
        local_irq_restore(flags);
}

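/*
 * Install the handler for softirq @nr. There is no locking here: handlers
 * are expected to be registered once, at init time, before the softirq
 * can be raised.
 */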
void open_softirq(int nr, void (*action)(struct softirq_action *))
{
        softirq_vec[nr].action = action;
}

/* Tasklets */
struct tasklet_head
{
        struct tasklet_struct *head;
        struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

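/*
 * Append @t to this CPU's tasklet list and raise TASKLET_SOFTIRQ. The
 * TASKLET_STATE_SCHED bit has already been set by tasklet_schedule().
 */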
void __tasklet_schedule(struct tasklet_struct *t)
{
        unsigned long flags;

        local_irq_save(flags);
        t->next = NULL;
        *__get_cpu_var(tasklet_vec).tail = t;
        __get_cpu_var(tasklet_vec).tail = &(t->next);
        raise_softirq_irqoff(TASKLET_SOFTIRQ);
        local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
        unsigned long flags;

        local_irq_save(flags);
        t->next = NULL;
        *__get_cpu_var(tasklet_hi_vec).tail = t;
        __get_cpu_var(tasklet_hi_vec).tail = &(t->next);
        raise_softirq_irqoff(HI_SOFTIRQ);
        local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);

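/*
 * Softirq handler for TASKLET_SOFTIRQ: atomically take over this CPU's
 * list, then run each tasklet that is enabled and not running on another
 * CPU. Anything that cannot run right now is re-queued and the softirq
 * is raised again.
 */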
static void tasklet_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = __get_cpu_var(tasklet_vec).head;
        __get_cpu_var(tasklet_vec).head = NULL;
        __get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = NULL;
                *__get_cpu_var(tasklet_vec).tail = t;
                __get_cpu_var(tasklet_vec).tail = &(t->next);
                __raise_softirq_irqoff(TASKLET_SOFTIRQ);
                local_irq_enable();
        }
}

static void tasklet_hi_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = __get_cpu_var(tasklet_hi_vec).head;
        __get_cpu_var(tasklet_hi_vec).head = NULL;
        __get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = NULL;
                *__get_cpu_var(tasklet_hi_vec).tail = t;
                __get_cpu_var(tasklet_hi_vec).tail = &(t->next);
                __raise_softirq_irqoff(HI_SOFTIRQ);
                local_irq_enable();
        }
}


void tasklet_init(struct tasklet_struct *t,
                  void (*func)(unsigned long), unsigned long data)
{
        t->next = NULL;
        t->state = 0;
        atomic_set(&t->count, 0);
        t->func = func;
        t->data = data;
}

EXPORT_SYMBOL(tasklet_init);

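/*
 * Illustrative sketch only (my_dev and my_tasklet_fn are placeholder names,
 * not part of this file): a driver typically pairs tasklet_init() with
 * tasklet_schedule() from its interrupt handler, roughly:
 *
 *      static void my_tasklet_fn(unsigned long data)
 *      {
 *              struct my_dev *dev = (struct my_dev *)data;
 *              ... bottom-half work for dev ...
 *      }
 *
 *      tasklet_init(&dev->tasklet, my_tasklet_fn, (unsigned long)dev);
 *      tasklet_schedule(&dev->tasklet);
 */

/*
 * Wait for a scheduled tasklet to run and for any in-progress execution
 * to finish, then make sure it stays unscheduled. May sleep, so it must
 * not be called from interrupt context.
 */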
void tasklet_kill(struct tasklet_struct *t)
{
        if (in_interrupt())
                printk("Attempt to kill tasklet from interrupt\n");

        while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
                do
                        yield();
                while (test_bit(TASKLET_STATE_SCHED, &t->state));
        }
        tasklet_unlock_wait(t);
        clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);

DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
EXPORT_PER_CPU_SYMBOL(softirq_work_list);

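/*
 * Queue @cp on this CPU's work list for @softirq. The softirq is raised
 * only on the empty-to-non-empty transition; an already-pending run will
 * drain everything that is on the list.
 */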
static void __local_trigger(struct call_single_data *cp, int softirq)
{
        struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);

        list_add_tail(&cp->list, head);

        /* Trigger the softirq only if the list was previously empty.  */
        if (head->next == &cp->list)
                raise_softirq_irqoff(softirq);
}

#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
static void remote_softirq_receive(void *data)
{
        struct call_single_data *cp = data;
        unsigned long flags;
        int softirq;

        softirq = cp->priv;

        local_irq_save(flags);
        __local_trigger(cp, softirq);
        local_irq_restore(flags);
}

static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
        if (cpu_online(cpu)) {
                cp->func = remote_softirq_receive;
                cp->info = cp;
                cp->flags = 0;
                cp->priv = softirq;

                __smp_call_function_single(cpu, cp);
                return 0;
        }
        return 1;
}
#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
        return 1;
}
#endif

/**
 * __send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @this_cpu: the currently executing cpu
 * @softirq: the softirq for the work
 *
 * Attempt to schedule softirq work on a remote cpu.  If this cannot be
 * done, the work is instead queued up on the local cpu.
 *
 * Interrupts must be disabled.
 */
void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
{
        if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
                __local_trigger(cp, softirq);
}
EXPORT_SYMBOL(__send_remote_softirq);

/**
 * send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @softirq: the softirq for the work
 *
 * Like __send_remote_softirq except that disabling interrupts and
 * computing the current cpu is done for the caller.
 */
void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
        unsigned long flags;
        int this_cpu;

        local_irq_save(flags);
        this_cpu = smp_processor_id();
        __send_remote_softirq(cp, cpu, this_cpu, softirq);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(send_remote_softirq);
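
/*
 * Illustrative sketch only (struct my_req, its submit_cpu field and the
 * BLOCK_SOFTIRQ choice are just an example, not something this file
 * defines): a subsystem that embeds a call_single_data in its per-request
 * structure can push completion work back to the submitting CPU roughly
 * like this:
 *
 *      struct my_req {
 *              struct call_single_data csd;
 *              int submit_cpu;
 *              ...
 *      };
 *
 *      send_remote_softirq(&req->csd, req->submit_cpu, BLOCK_SOFTIRQ);
 *
 * The softirq handler on the target CPU then walks its entry of
 * softirq_work_list[] and uses container_of() to get back from the queued
 * csd list node to the request.
 */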

static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
                                               unsigned long action, void *hcpu)
{
        /*
         * If a CPU goes away, splice its entries to the current CPU
         * and trigger a run of the softirq
         */
        if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
                int cpu = (unsigned long) hcpu;
                int i;

                local_irq_disable();
                for (i = 0; i < NR_SOFTIRQS; i++) {
                        struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
                        struct list_head *local_head;

                        if (list_empty(head))
                                continue;

                        local_head = &__get_cpu_var(softirq_work_list[i]);
                        list_splice_init(head, local_head);
                        raise_softirq_irqoff(i);
                }
                local_irq_enable();
        }

        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
        .notifier_call  = remote_softirq_cpu_notify,
};

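/*
 * Early init: point every CPU's tasklet tails at their list heads, set up
 * the remote-softirq work lists, register the hotplug notifier and open
 * the two tasklet softirqs.
 */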
void __init softirq_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                int i;

                per_cpu(tasklet_vec, cpu).tail =
                        &per_cpu(tasklet_vec, cpu).head;
                per_cpu(tasklet_hi_vec, cpu).tail =
                        &per_cpu(tasklet_hi_vec, cpu).head;
                for (i = 0; i < NR_SOFTIRQS; i++)
                        INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
        }

        register_hotcpu_notifier(&remote_softirq_cpu_notifier);

        open_softirq(TASKLET_SOFTIRQ, tasklet_action);
        open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

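/*
 * Per-CPU softirq thread. It runs whatever __do_softirq() left pending
 * after MAX_SOFTIRQ_RESTART rounds, and softirqs raised from process
 * context, without monopolizing the CPU.
 */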
static int ksoftirqd(void * __bind_cpu)
{
        set_current_state(TASK_INTERRUPTIBLE);

        while (!kthread_should_stop()) {
                preempt_disable();
                if (!local_softirq_pending()) {
                        preempt_enable_no_resched();
                        schedule();
                        preempt_disable();
                }

                __set_current_state(TASK_RUNNING);

                while (local_softirq_pending()) {
                        /* Preempt disable stops cpu going offline.
                           If already offline, we'll be on the wrong CPU:
                           don't process */
                        if (cpu_is_offline((long)__bind_cpu))
                                goto wait_to_die;
                        do_softirq();
                        preempt_enable_no_resched();
                        cond_resched();
                        preempt_disable();
                }
                preempt_enable();
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;

wait_to_die:
        preempt_enable();
        /* Wait for kthread_stop */
        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                schedule();
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which may already
 * be scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
        struct tasklet_struct **i;

        BUG_ON(cpu_online(cpu));
        BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

        if (!test_bit(TASKLET_STATE_SCHED, &t->state))
                return;

        /* CPU is dead, so no lock needed. */
        for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
                if (*i == t) {
                        *i = t->next;
                        /* If this was the tail element, move the tail ptr */
                        if (*i == NULL)
                                per_cpu(tasklet_vec, cpu).tail = i;
                        return;
                }
        }
        BUG();
}

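/*
 * Splice a dead CPU's pending tasklet lists onto the current CPU so that
 * nothing queued there is lost across a CPU hot-unplug.
 */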
static void takeover_tasklets(unsigned int cpu)
{
        /* CPU is dead, so no lock needed. */
        local_irq_disable();

        /* Find end, append list for that CPU. */
        if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
                *(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
                __get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
                per_cpu(tasklet_vec, cpu).head = NULL;
                per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
        }
        raise_softirq_irqoff(TASKLET_SOFTIRQ);

        if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
                *__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
                __get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
                per_cpu(tasklet_hi_vec, cpu).head = NULL;
                per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
        }
        raise_softirq_irqoff(HI_SOFTIRQ);

        local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */

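/*
 * CPU hotplug callback: create and bind a ksoftirqd thread when a CPU is
 * being brought up, wake it once the CPU is online, and stop it (after
 * rescuing its tasklets) when the CPU goes away.
 */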
static int __cpuinit cpu_callback(struct notifier_block *nfb,
                                  unsigned long action,
                                  void *hcpu)
{
        int hotcpu = (unsigned long)hcpu;
        struct task_struct *p;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
                if (IS_ERR(p)) {
                        printk("ksoftirqd for %i failed\n", hotcpu);
                        return NOTIFY_BAD;
                }
                kthread_bind(p, hotcpu);
                per_cpu(ksoftirqd, hotcpu) = p;
                break;
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                wake_up_process(per_cpu(ksoftirqd, hotcpu));
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
                if (!per_cpu(ksoftirqd, hotcpu))
                        break;
                /* Unbind so it can run.  Fall thru. */
                kthread_bind(per_cpu(ksoftirqd, hotcpu),
                             cpumask_any(cpu_online_mask));
        case CPU_DEAD:
        case CPU_DEAD_FROZEN: {
                struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

                p = per_cpu(ksoftirqd, hotcpu);
                per_cpu(ksoftirqd, hotcpu) = NULL;
                sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
                kthread_stop(p);
                takeover_tasklets(hotcpu);
                break;
        }
#endif /* CONFIG_HOTPLUG_CPU */
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
        .notifier_call = cpu_callback
};

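/*
 * Boot-time setup: create ksoftirqd for the boot CPU by hand, then register
 * the notifier so every subsequently onlined CPU gets its own thread.
 */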
static __init int spawn_ksoftirqd(void)
{
        void *cpu = (void *)(long)smp_processor_id();
        int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

        BUG_ON(err == NOTIFY_BAD);
        cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
        register_cpu_notifier(&cpu_nfb);
        return 0;
}
early_initcall(spawn_ksoftirqd);

#ifdef CONFIG_SMP
/*
 * Call a function on all processors
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
        int ret = 0;

        preempt_disable();
        ret = smp_call_function(func, info, wait);
        local_irq_disable();
        func(info);
        local_irq_enable();
        preempt_enable();
        return ret;
}
EXPORT_SYMBOL(on_each_cpu);
#endif

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
        return 0;
}

int __init __weak arch_early_irq_init(void)
{
        return 0;
}

int __weak arch_init_chip_data(struct irq_desc *desc, int cpu)
{
        return 0;
}