/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Distribute under GPLv2.
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 *
 *	Remote softirq infrastructure is by Jens Axboe.
 */
#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/tick.h>
#include <trace/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks (see the illustrative sketch below this comment).
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether this results in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */
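
/*
 * Illustrative sketch (not part of this file) of a softirq action that
 * serializes itself with its own lock; my_action, my_lock and
 * process_shared_queue() are hypothetical names:
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *
 *	static void my_action(struct softirq_action *h)
 *	{
 *		spin_lock(&my_lock);
 *		process_shared_queue();
 *		spin_unlock(&my_lock);
 *	}
 *
 * Several cpus may run my_action() concurrently; the softirq core
 * provides no cross-cpu serialization, so the handler takes its own lock.
 */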
#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif
static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

char *softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};
/*
 * We cannot loop indefinitely here (that would starve userspace),
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __get_cpu_var(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}
/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into add_preempt_count and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	preempt_count() += SOFTIRQ_OFFSET;
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == SOFTIRQ_OFFSET)
		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip)
{
	add_preempt_count(SOFTIRQ_OFFSET);
	barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */
void local_bh_disable(void)
{
	__local_bh_disable((unsigned long)__builtin_return_address(0));
}

EXPORT_SYMBOL(local_bh_disable);
/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	sub_preempt_count(SOFTIRQ_OFFSET);
}

EXPORT_SYMBOL(_local_bh_enable);
static inline void _local_bh_enable_ip(unsigned long ip)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	sub_preempt_count(SOFTIRQ_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}

void local_bh_enable(void)
{
	_local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(local_bh_enable);

void local_bh_enable_ip(unsigned long ip)
{
	_local_bh_enable_ip(ip);
}
EXPORT_SYMBOL(local_bh_enable_ip);
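
/*
 * Example (illustrative sketch, not part of this file): using
 * local_bh_disable()/local_bh_enable() to protect per-cpu data that is
 * also updated from softirq context.  The names my_stats and my_add_stat
 * are hypothetical.
 *
 *	static DEFINE_PER_CPU(unsigned long, my_stats);
 *
 *	static void my_add_stat(unsigned long n)
 *	{
 *		local_bh_disable();
 *		__get_cpu_var(my_stats) += n;
 *		local_bh_enable();
 *	}
 *
 * Between the two calls no softirq (and hence no tasklet) can run on this
 * cpu, and local_bh_enable() will process any softirqs that became
 * pending in the meantime.
 */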
/*
 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
 * and we fall back to ksoftirqd after that.
 *
 * This number has been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_RESTART 10
DEFINE_TRACE(softirq_entry);
DEFINE_TRACE(softirq_exit);
asmlinkage void __do_softirq(void)
{
	struct softirq_action *h;
	__u32 pending;
	int max_restart = MAX_SOFTIRQ_RESTART;
	int cpu;

	pending = local_softirq_pending();
	account_system_vtime(current);

	__local_bh_disable((unsigned long)__builtin_return_address(0));
	lockdep_softirq_enter();

	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);
	local_irq_enable();

	h = softirq_vec;
	do {
		if (pending & 1) {
			int prev_count = preempt_count();

			trace_softirq_entry(h, softirq_vec);
			h->action(h);
			trace_softirq_exit(h, softirq_vec);
			if (unlikely(prev_count != preempt_count())) {
				printk(KERN_ERR "huh, entered softirq %td %s %p "
				       "with preempt_count %08x, "
				       "exited with %08x?\n", h - softirq_vec,
				       softirq_to_name[h - softirq_vec],
				       h->action, prev_count, preempt_count());
				preempt_count() = prev_count;
			}
			rcu_bh_qsctr_inc(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();
	pending = local_softirq_pending();
	if (pending && --max_restart)
		goto restart;

	if (pending)
		wakeup_softirqd();

	lockdep_softirq_exit();
	account_system_vtime(current);
	_local_bh_enable();
}
#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);
	pending = local_softirq_pending();
	if (pending)
		__do_softirq();
	local_irq_restore(flags);
}

#endif
/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	int cpu = smp_processor_id();

	if (idle_cpu(cpu) && !in_interrupt()) {
		__irq_enter();
		tick_check_idle(cpu);
	} else
		__irq_enter();
}

#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
# define invoke_softirq()	__do_softirq()
#else
# define invoke_softirq()	do_softirq()
#endif
/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
	account_system_vtime(current);
	trace_hardirq_exit();
	sub_preempt_count(IRQ_EXIT_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

#ifdef CONFIG_NO_HZ
	/* Make sure that timer wheel updates are propagated */
	if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
		tick_nohz_stop_sched_tick(0);
#endif
	preempt_enable_no_resched();
}
/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}
void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}
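
/*
 * Example (illustrative sketch, not part of this file): registering and
 * raising a softirq.  MY_SOFTIRQ, my_softirq_action and
 * process_my_percpu_queue() are hypothetical; a real softirq also needs
 * its own entry in the softirq enum in <linux/interrupt.h> and in
 * softirq_to_name[] above.
 *
 *	static void my_softirq_action(struct softirq_action *h)
 *	{
 *		process_my_percpu_queue();
 *	}
 *
 *	open_softirq(MY_SOFTIRQ, my_softirq_action);
 *
 * raise_softirq(MY_SOFTIRQ) may then be called from process context, or
 * raise_softirq_irqoff(MY_SOFTIRQ) from irq context or any other context
 * that already has interrupts disabled.
 */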
/*
 * Tasklets
 */
struct tasklet_head
{
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
void __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__get_cpu_var(tasklet_vec).tail = t;
	__get_cpu_var(tasklet_vec).tail = &(t->next);
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);
void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__get_cpu_var(tasklet_hi_vec).tail = t;
	__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);
static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_vec).head;
	__get_cpu_var(tasklet_vec).head = NULL;
	__get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__get_cpu_var(tasklet_vec).tail = t;
		__get_cpu_var(tasklet_vec).tail = &(t->next);
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}
static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_hi_vec).head;
	__get_cpu_var(tasklet_hi_vec).head = NULL;
	__get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__get_cpu_var(tasklet_hi_vec).tail = t;
		__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}
void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}

EXPORT_SYMBOL(tasklet_init);
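
/*
 * Example (illustrative sketch, not part of this file): typical tasklet
 * use from a driver.  struct my_dev, my_tasklet_func and
 * process_completed_work() are hypothetical.
 *
 *	static void my_tasklet_func(unsigned long data)
 *	{
 *		struct my_dev *dev = (struct my_dev *)data;
 *
 *		process_completed_work(dev);
 *	}
 *
 *	tasklet_init(&dev->tasklet, my_tasklet_func, (unsigned long)dev);
 *
 * The interrupt handler then calls tasklet_schedule(&dev->tasklet), and
 * teardown calls tasklet_kill(&dev->tasklet) once no new schedules can
 * arrive.
 */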
void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);
DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
EXPORT_PER_CPU_SYMBOL(softirq_work_list);

static void __local_trigger(struct call_single_data *cp, int softirq)
{
	struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);

	list_add_tail(&cp->list, head);

	/* Trigger the softirq only if the list was previously empty. */
	if (head->next == &cp->list)
		raise_softirq_irqoff(softirq);
}
#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
static void remote_softirq_receive(void *data)
{
	struct call_single_data *cp = data;
	unsigned long flags;
	int softirq;

	softirq = cp->priv;

	local_irq_save(flags);
	__local_trigger(cp, softirq);
	local_irq_restore(flags);
}

static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	if (cpu_online(cpu)) {
		cp->func = remote_softirq_receive;
		cp->info = cp;
		cp->flags = 0;
		cp->priv = softirq;

		__smp_call_function_single(cpu, cp, 0);
		return 0;
	}

	return 1;
}
#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	return 1;
}
#endif /* CONFIG_USE_GENERIC_SMP_HELPERS */
/**
 * __send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @this_cpu: the currently executing cpu
 * @softirq: the softirq for the work
 *
 * Attempt to schedule softirq work on a remote cpu. If this cannot be
 * done, the work is instead queued up on the local cpu.
 *
 * Interrupts must be disabled.
 */
void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
{
	if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
		__local_trigger(cp, softirq);
}
EXPORT_SYMBOL(__send_remote_softirq);
/**
 * send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @softirq: the softirq for the work
 *
 * Like __send_remote_softirq except that disabling interrupts and
 * computing the current cpu is done for the caller.
 */
void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	unsigned long flags;
	int this_cpu;

	local_irq_save(flags);
	this_cpu = smp_processor_id();
	__send_remote_softirq(cp, cpu, this_cpu, softirq);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(send_remote_softirq);
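
/*
 * Example (illustrative sketch, not part of this file): completing work
 * on the cpu that submitted it, in the style of the block layer's remote
 * completions.  struct my_work, my_work_done(), MY_SOFTIRQ and
 * finish_my_work() are hypothetical.
 *
 *	struct my_work {
 *		struct call_single_data csd;
 *		int submit_cpu;
 *	};
 *
 *	static void my_work_done(struct my_work *w)
 *	{
 *		send_remote_softirq(&w->csd, w->submit_cpu, MY_SOFTIRQ);
 *	}
 *
 * The MY_SOFTIRQ action on the submitting cpu then pulls entries off its
 * softirq_work_list[MY_SOFTIRQ] (with interrupts disabled), recovers each
 * struct my_work from the embedded csd.list and calls finish_my_work().
 */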
static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
					       unsigned long action, void *hcpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
	 * and trigger a run of the softirq
	 */
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		int cpu = (unsigned long) hcpu;
		int i;

		local_irq_disable();
		for (i = 0; i < NR_SOFTIRQS; i++) {
			struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
			struct list_head *local_head;

			if (list_empty(head))
				continue;

			local_head = &__get_cpu_var(softirq_work_list[i]);
			list_splice_init(head, local_head);
			raise_softirq_irqoff(i);
		}
		local_irq_enable();
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
	.notifier_call	= remote_softirq_cpu_notify,
};
void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		int i;

		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
		for (i = 0; i < NR_SOFTIRQS; i++)
			INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
	}

	register_hotcpu_notifier(&remote_softirq_cpu_notifier);

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}
static int ksoftirqd(void * __bind_cpu)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {
		preempt_disable();
		if (!local_softirq_pending()) {
			preempt_enable_no_resched();
			schedule();
			preempt_disable();
		}

		__set_current_state(TASK_RUNNING);

		while (local_softirq_pending()) {
			/* Preempt disable stops cpu going offline.
			   If already offline, we'll be on wrong CPU:
			   don't process */
			if (cpu_is_offline((long)__bind_cpu))
				goto wait_to_die;
			do_softirq();
			preempt_enable_no_resched();
			cond_resched();
			preempt_disable();
			rcu_qsctr_inc((long)__bind_cpu);
		}
		preempt_enable();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;

wait_to_die:
	preempt_enable();
	/* Wait for kthread_stop */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}
static void takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
		__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
		__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}

#endif /* CONFIG_HOTPLUG_CPU */
static int __cpuinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
		if (IS_ERR(p)) {
			printk("ksoftirqd for %i failed\n", hotcpu);
			return NOTIFY_BAD;
		}
		kthread_bind(p, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = p;
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		wake_up_process(per_cpu(ksoftirqd, hotcpu));
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		if (!per_cpu(ksoftirqd, hotcpu))
			break;
		/* Unbind so it can run. Fall thru. */
		kthread_bind(per_cpu(ksoftirqd, hotcpu),
			     cpumask_any(cpu_online_mask));
	case CPU_DEAD:
	case CPU_DEAD_FROZEN: {
		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

		p = per_cpu(ksoftirqd, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = NULL;
		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
		kthread_stop(p);
		takeover_tasklets(hotcpu);
		break;
	}
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}
static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

static __init int spawn_ksoftirqd(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

	BUG_ON(err == NOTIFY_BAD);
	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);
	return 0;
}
early_initcall(spawn_ksoftirqd);
#ifdef CONFIG_SMP
/*
 * Call a function on all processors
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_disable();
	func(info);
	local_irq_enable();
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
#endif
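
/*
 * Example (illustrative sketch, not part of this file): bumping a
 * hypothetical per-cpu generation counter on every processor.
 * my_generation and my_bump_generation() are made-up names.
 *
 *	static DEFINE_PER_CPU(unsigned long, my_generation);
 *
 *	static void my_bump_generation(void *info)
 *	{
 *		__get_cpu_var(my_generation)++;
 *	}
 *
 *	on_each_cpu(my_bump_generation, NULL, 1);
 *
 * The final argument asks on_each_cpu() to wait until the function has
 * completed on all cpus before returning.
 */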
/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return 0;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

int __weak arch_init_chip_data(struct irq_desc *desc, int node)
{
	return 0;
}