/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>

#include <asm/irq.h>

/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks (see the sketch below this comment).
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether it will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */

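/*
 * Illustrative sketch (hypothetical, not part of this file): a softirq
 * handler that serializes itself with its own spinlock, per the rules
 * above. The names my_lock and my_softirq_action are made up. Plain
 * spin_lock() suffices because the lock is only taken from softirq
 * context, and softirqs never preempt each other on one CPU:
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *
 *	static void my_softirq_action(struct softirq_action *a)
 *	{
 *		spin_lock(&my_lock);	-- serialize against other CPUs
 *		... per-event work, invisible to common code ...
 *		spin_unlock(&my_lock);
 *	}
 */
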
#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[32] __cacheline_aligned_in_smp;

static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static inline void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __get_cpu_var(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
static void __local_bh_disable(unsigned long ip)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	add_preempt_count(SOFTIRQ_OFFSET);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);
}

void local_bh_disable(void)
{
	__local_bh_disable((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(local_bh_disable);

void __local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());

	/*
	 * softirqs should never be enabled by __local_bh_enable(),
	 * it always nests inside local_bh_enable() sections:
	 */
	WARN_ON_ONCE(softirq_count() == SOFTIRQ_OFFSET);

	sub_preempt_count(SOFTIRQ_OFFSET);
}
EXPORT_SYMBOL_GPL(__local_bh_enable);

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	sub_preempt_count(SOFTIRQ_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);

void local_bh_enable(void)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());
	WARN_ON_ONCE(irqs_disabled());

	local_irq_save(flags);
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	sub_preempt_count(SOFTIRQ_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
	local_irq_restore(flags);
	preempt_check_resched();
}
EXPORT_SYMBOL(local_bh_enable);

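/*
 * Usage sketch (hypothetical, not part of this file): process-context
 * code that touches data a softirq or tasklet also touches brackets
 * the access with local_bh_disable()/local_bh_enable(), so the softirq
 * cannot run on this CPU in between. my_stats is a made-up example.
 *
 *	local_bh_disable();
 *	my_stats.rx_packets++;	-- softirqs held off on this CPU
 *	local_bh_enable();	-- may immediately run pending softirqs
 */
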
void local_bh_enable_ip(unsigned long ip)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	local_irq_save(flags);
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	sub_preempt_count(SOFTIRQ_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
	local_irq_restore(flags);
	preempt_check_resched();
}
EXPORT_SYMBOL(local_bh_enable_ip);

/*
 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
 * and we fall back to softirqd after that.
 *
 * This number has been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_RESTART 10

asmlinkage void __do_softirq(void)
{
	struct softirq_action *h;
	__u32 pending;
	int max_restart = MAX_SOFTIRQ_RESTART;
	int cpu;

	pending = local_softirq_pending();
	account_system_vtime(current);
	__local_bh_disable((unsigned long)__builtin_return_address(0));
	trace_softirq_enter();
	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);
	local_irq_enable();

	h = softirq_vec;
	do {
		if (pending & 1) {
			h->action(h);
			rcu_bh_qsctr_inc(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();
	pending = local_softirq_pending();
	if (pending && --max_restart)
		goto restart;
	if (pending)
		wakeup_softirqd();

	trace_softirq_exit();
	account_system_vtime(current);
	_local_bh_enable();
}

#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);
	pending = local_softirq_pending();
	if (pending)
		__do_softirq();
	local_irq_restore(flags);
}
EXPORT_SYMBOL(do_softirq);

#endif

#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
# define invoke_softirq()	__do_softirq()
#else
# define invoke_softirq()	do_softirq()
#endif

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
	account_system_vtime(current);
	trace_hardirq_exit();
	sub_preempt_count(IRQ_EXIT_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();
	preempt_enable_no_resched();
}

/*
 * This function must run with irqs disabled!
 */
inline fastcall void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}
EXPORT_SYMBOL(raise_softirq_irqoff);

void fastcall raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void open_softirq(int nr, void (*action)(struct softirq_action*), void *data)
{
	softirq_vec[nr].data = data;
	softirq_vec[nr].action = action;
}

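/*
 * Usage sketch (hypothetical): registering and raising a softirq. In
 * this era the softirq numbers are a fixed enum in linux/interrupt.h,
 * so a real user claims one of the existing slots; MY_SOFTIRQ and
 * my_softirq_action are made up for the example.
 *
 *	open_softirq(MY_SOFTIRQ, my_softirq_action, NULL);  -- at init
 *	...
 *	raise_softirq(MY_SOFTIRQ);	-- mark pending; runs on irq exit
 *					   or in ksoftirqd
 */
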
/* Tasklets */
struct tasklet_head
{
	struct tasklet_struct *list;
};

/* Some compilers disobey section attribute on statics when not
   initialized -- RR */
static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec) = { NULL };
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec) = { NULL };

void fastcall __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = __get_cpu_var(tasklet_vec).list;
	__get_cpu_var(tasklet_vec).list = t;
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__tasklet_schedule);

void fastcall __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = __get_cpu_var(tasklet_hi_vec).list;
	__get_cpu_var(tasklet_hi_vec).list = t;
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);

static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_vec).list;
	__get_cpu_var(tasklet_vec).list = NULL;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = __get_cpu_var(tasklet_vec).list;
		__get_cpu_var(tasklet_vec).list = t;
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}

static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_hi_vec).list;
	__get_cpu_var(tasklet_hi_vec).list = NULL;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = __get_cpu_var(tasklet_hi_vec).list;
		__get_cpu_var(tasklet_hi_vec).list = t;
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}
EXPORT_SYMBOL(tasklet_init);

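/*
 * Usage sketch (hypothetical, not part of this file): a driver-style
 * tasklet. my_tasklet_fn and my_dev are made up; DECLARE_TASKLET() is
 * the static-initialization alternative to tasklet_init().
 *
 *	static void my_tasklet_fn(unsigned long data)
 *	{
 *		struct my_dev *dev = (struct my_dev *)data;
 *		... bottom-half work, serialized wrt this tasklet ...
 *	}
 *
 *	tasklet_init(&dev->tasklet, my_tasklet_fn, (unsigned long)dev);
 *	...
 *	tasklet_schedule(&dev->tasklet);	-- e.g. from the irq handler
 */
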
void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do
			yield();
		while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}
EXPORT_SYMBOL(tasklet_kill);

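/*
 * Teardown sketch (hypothetical): stop the source of scheduling first,
 * then tasklet_kill() to wait out any scheduled or running instance,
 * and only then free the memory holding the tasklet:
 *
 *	free_irq(dev->irq, dev);	-- no new tasklet_schedule() calls
 *	tasklet_kill(&dev->tasklet);	-- waits until it is idle
 *	kfree(dev);
 */
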
void __init softirq_init(void)
{
	open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL);
}

static int ksoftirqd(void * __bind_cpu)
{
	set_user_nice(current, 19);
	current->flags |= PF_NOFREEZE;

	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {
		preempt_disable();
		if (!local_softirq_pending()) {
			preempt_enable_no_resched();
			schedule();
			preempt_disable();
		}

		__set_current_state(TASK_RUNNING);

		while (local_softirq_pending()) {
			/* Preempt disable stops cpu going offline.
			   If already offline, we'll be on wrong CPU:
			   don't process */
			if (cpu_is_offline((long)__bind_cpu))
				goto wait_to_die;
			do_softirq();
			preempt_enable_no_resched();
			cond_resched();
			preempt_disable();
		}
		preempt_enable();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;

wait_to_die:
	preempt_enable();
	/* Wait for kthread_stop */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).list; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			return;
		}
	}
	BUG();
}

static void takeover_tasklets(unsigned int cpu)
{
	struct tasklet_struct **i;

	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	for (i = &__get_cpu_var(tasklet_vec).list; *i; i = &(*i)->next);
	*i = per_cpu(tasklet_vec, cpu).list;
	per_cpu(tasklet_vec, cpu).list = NULL;
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	for (i = &__get_cpu_var(tasklet_hi_vec).list; *i; i = &(*i)->next);
	*i = per_cpu(tasklet_hi_vec, cpu).list;
	per_cpu(tasklet_hi_vec, cpu).list = NULL;
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __devinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
		BUG_ON(per_cpu(tasklet_vec, hotcpu).list);
		BUG_ON(per_cpu(tasklet_hi_vec, hotcpu).list);
		p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
		if (IS_ERR(p)) {
			printk("ksoftirqd for %i failed\n", hotcpu);
			return NOTIFY_BAD;
		}
		kthread_bind(p, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = p;
		break;
	case CPU_ONLINE:
		wake_up_process(per_cpu(ksoftirqd, hotcpu));
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
		if (!per_cpu(ksoftirqd, hotcpu))
			break;
		/* Unbind so it can run. Fall thru. */
		kthread_bind(per_cpu(ksoftirqd, hotcpu),
			     any_online_cpu(cpu_online_map));
	case CPU_DEAD:
		p = per_cpu(ksoftirqd, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = NULL;
		kthread_stop(p);
		takeover_tasklets(hotcpu);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block __devinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

__init int spawn_ksoftirqd(void)
{
	void *cpu = (void *)(long)smp_processor_id();

	cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);

	return 0;
}

#ifdef CONFIG_SMP
/*
 * Call a function on all processors
 */
int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait)
{
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, retry, wait);
	local_irq_disable();
	func(info);
	local_irq_enable();
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
#endif /* CONFIG_SMP */
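
/*
 * Usage sketch (hypothetical): run a flush on every CPU and wait for
 * completion. The callback runs with interrupts disabled (via IPI on
 * remote CPUs), so it must not sleep; my_flush_cpu_state is made up.
 *
 *	static void my_flush_cpu_state(void *info)
 *	{
 *		... per-cpu, atomic-context work only ...
 *	}
 *
 *	on_each_cpu(my_flush_cpu_state, NULL, 0, 1);	-- retry=0, wait=1
 */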