Commit | Line | Data
---|---|---
e5582ca2 RR | 1 | /* Copyright 2005 Rusty Russell rusty@rustcorp.com.au IBM Corporation.
e5582ca2 RR | 2 |  * GPL v2 and any later version.
e5582ca2 RR | 3 |  */
1da177e4 LT | 4 | #include <linux/cpu.h>
1da177e4 LT | 5 | #include <linux/err.h>
ee527cd3 PB | 6 | #include <linux/kthread.h>
ee527cd3 PB | 7 | #include <linux/module.h>
ee527cd3 PB | 8 | #include <linux/sched.h>
ee527cd3 PB | 9 | #include <linux/stop_machine.h>
1da177e4 LT | 10 | #include <linux/syscalls.h>
a12bb444 BH | 11 | #include <linux/interrupt.h>
a12bb444 BH | 12 |
1da177e4 LT | 13 | #include <asm/atomic.h>
1da177e4 LT | 14 | #include <asm/semaphore.h>
1da177e4 LT | 15 | #include <asm/uaccess.h>
1da177e4 LT | 16 |
1da177e4 LT | 17 | /* Since we effect priority and affinity (both of which are visible
1da177e4 LT | 18 |  * to, and settable by outside processes) we do indirection via a
1da177e4 LT | 19 |  * kthread. */
1da177e4 LT | 20 |
1da177e4 LT | 21 | /* Thread to stop each CPU in user context. */
1da177e4 LT | 22 | enum stopmachine_state {
1da177e4 LT | 23 |         STOPMACHINE_WAIT,
1da177e4 LT | 24 |         STOPMACHINE_PREPARE,
1da177e4 LT | 25 |         STOPMACHINE_DISABLE_IRQ,
1da177e4 LT | 26 |         STOPMACHINE_EXIT,
1da177e4 LT | 27 | };
1da177e4 LT | 28 |
1da177e4 LT | 29 | static enum stopmachine_state stopmachine_state;
1da177e4 LT | 30 | static unsigned int stopmachine_num_threads;
1da177e4 LT | 31 | static atomic_t stopmachine_thread_ack;
1da177e4 LT | 32 |
d8cb7c1d AM | 33 | static int stopmachine(void *cpu)
1da177e4 LT | 34 | {
1da177e4 LT | 35 |         int irqs_disabled = 0;
1da177e4 LT | 36 |         int prepared = 0;
1da177e4 LT | 37 |
d8cb7c1d AM | 38 |         set_cpus_allowed(current, cpumask_of_cpu((int)(long)cpu));
d8cb7c1d AM | 39 |
1da177e4 LT | 40 |         /* Ack: we are alive */
d59dd462 | 41 |         smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */
1da177e4 LT | 42 |         atomic_inc(&stopmachine_thread_ack);
1da177e4 LT | 43 |
1da177e4 LT | 44 |         /* Simple state machine */
1da177e4 LT | 45 |         while (stopmachine_state != STOPMACHINE_EXIT) {
1da177e4 LT | 46 |                 if (stopmachine_state == STOPMACHINE_DISABLE_IRQ
1da177e4 LT | 47 |                     && !irqs_disabled) {
1da177e4 LT | 48 |                         local_irq_disable();
a12bb444 BH | 49 |                         hard_irq_disable();
1da177e4 LT | 50 |                         irqs_disabled = 1;
1da177e4 LT | 51 |                         /* Ack: irqs disabled. */
d59dd462 | 52 |                         smp_mb(); /* Must read state first. */
1da177e4 LT | 53 |                         atomic_inc(&stopmachine_thread_ack);
1da177e4 LT | 54 |                 } else if (stopmachine_state == STOPMACHINE_PREPARE
1da177e4 LT | 55 |                            && !prepared) {
1da177e4 LT | 56 |                         /* Everyone is in place, hold CPU. */
1da177e4 LT | 57 |                         preempt_disable();
1da177e4 LT | 58 |                         prepared = 1;
d59dd462 | 59 |                         smp_mb(); /* Must read state first. */
1da177e4 LT | 60 |                         atomic_inc(&stopmachine_thread_ack);
1da177e4 LT | 61 |                 }
1da177e4 LT | 62 |                 /* Yield in first stage: migration threads need to
1da177e4 LT | 63 |                  * help our sisters onto their CPUs. */
1da177e4 LT | 64 |                 if (!prepared && !irqs_disabled)
1da177e4 LT | 65 |                         yield();
1da177e4 LT | 66 |                 else
1da177e4 LT | 67 |                         cpu_relax();
1da177e4 LT | 68 |         }
1da177e4 LT | 69 |
1da177e4 LT | 70 |         /* Ack: we are exiting. */
d59dd462 | 71 |         smp_mb(); /* Must read state first. */
1da177e4 LT | 72 |         atomic_inc(&stopmachine_thread_ack);
1da177e4 LT | 73 |
1da177e4 LT | 74 |         if (irqs_disabled)
1da177e4 LT | 75 |                 local_irq_enable();
1da177e4 LT | 76 |         if (prepared)
1da177e4 LT | 77 |                 preempt_enable();
1da177e4 LT | 78 |
1da177e4 LT | 79 |         return 0;
1da177e4 LT | 80 | }
1da177e4 LT | 81 |
1da177e4 LT | 82 | /* Change the thread state */
1da177e4 LT | 83 | static void stopmachine_set_state(enum stopmachine_state state)
1da177e4 LT | 84 | {
1da177e4 LT | 85 |         atomic_set(&stopmachine_thread_ack, 0);
d59dd462 | 86 |         smp_wmb();
1da177e4 LT | 87 |         stopmachine_state = state;
1da177e4 LT | 88 |         while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads)
1da177e4 LT | 89 |                 cpu_relax();
1da177e4 LT | 90 | }
1da177e4 LT | 91 |
1da177e4 LT | 92 | static int stop_machine(void)
1da177e4 LT | 93 | {
d8cb7c1d AM | 94 |         int i, ret = 0;
1da177e4 LT | 95 |
1da177e4 LT | 96 |         atomic_set(&stopmachine_thread_ack, 0);
1da177e4 LT | 97 |         stopmachine_num_threads = 0;
1da177e4 LT | 98 |         stopmachine_state = STOPMACHINE_WAIT;
1da177e4 LT | 99 |
1da177e4 LT | 100 |         for_each_online_cpu(i) {
39c715b7 | 101 |                 if (i == raw_smp_processor_id())
1da177e4 LT | 102 |                         continue;
d8cb7c1d AM | 103 |                 ret = kernel_thread(stopmachine, (void *)(long)i, CLONE_KERNEL);
d8cb7c1d AM | 104 |                 if (ret < 0)
1da177e4 LT | 105 |                         break;
1da177e4 LT | 106 |                 stopmachine_num_threads++;
1da177e4 LT | 107 |         }
1da177e4 LT | 108 |
1da177e4 LT | 109 |         /* Wait for them all to come to life. */
1da177e4 LT | 110 |         while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads)
1da177e4 LT | 111 |                 yield();
1da177e4 LT | 112 |
1da177e4 LT | 113 |         /* If some failed, kill them all. */
1da177e4 LT | 114 |         if (ret < 0) {
1da177e4 LT | 115 |                 stopmachine_set_state(STOPMACHINE_EXIT);
1da177e4 LT | 116 |                 return ret;
1da177e4 LT | 117 |         }
1da177e4 LT | 118 |
1da177e4 LT | 119 |         /* Now they are all started, make them hold the CPUs, ready. */
4557398f | 120 |         preempt_disable();
1da177e4 LT | 121 |         stopmachine_set_state(STOPMACHINE_PREPARE);
1da177e4 LT | 122 |
1da177e4 LT | 123 |         /* Make them disable irqs. */
4557398f | 124 |         local_irq_disable();
a12bb444 BH | 125 |         hard_irq_disable();
1da177e4 LT | 126 |         stopmachine_set_state(STOPMACHINE_DISABLE_IRQ);
1da177e4 LT | 127 |
1da177e4 LT | 128 |         return 0;
1da177e4 LT | 129 | }
1da177e4 LT | 130 |
1da177e4 LT | 131 | static void restart_machine(void)
1da177e4 LT | 132 | {
1da177e4 LT | 133 |         stopmachine_set_state(STOPMACHINE_EXIT);
1da177e4 LT | 134 |         local_irq_enable();
4557398f | 135 |         preempt_enable_no_resched();
1da177e4 LT | 136 | }
1da177e4 LT | 137 |
1da177e4 LT | 138 | struct stop_machine_data
1da177e4 LT | 139 | {
1da177e4 LT | 140 |         int (*fn)(void *);
1da177e4 LT | 141 |         void *data;
1da177e4 LT | 142 |         struct completion done;
1da177e4 LT | 143 | };
1da177e4 LT | 144 |
1da177e4 LT | 145 | static int do_stop(void *_smdata)
1da177e4 LT | 146 | {
1da177e4 LT | 147 |         struct stop_machine_data *smdata = _smdata;
1da177e4 LT | 148 |         int ret;
1da177e4 LT | 149 |
1da177e4 LT | 150 |         ret = stop_machine();
1da177e4 LT | 151 |         if (ret == 0) {
1da177e4 LT | 152 |                 ret = smdata->fn(smdata->data);
1da177e4 LT | 153 |                 restart_machine();
1da177e4 LT | 154 |         }
1da177e4 LT | 155 |
1da177e4 LT | 156 |         /* We're done: you can kthread_stop us now */
1da177e4 LT | 157 |         complete(&smdata->done);
1da177e4 LT | 158 |
1da177e4 LT | 159 |         /* Wait for kthread_stop */
1da177e4 LT | 160 |         set_current_state(TASK_INTERRUPTIBLE);
1da177e4 LT | 161 |         while (!kthread_should_stop()) {
1da177e4 LT | 162 |                 schedule();
1da177e4 LT | 163 |                 set_current_state(TASK_INTERRUPTIBLE);
1da177e4 LT | 164 |         }
1da177e4 LT | 165 |         __set_current_state(TASK_RUNNING);
1da177e4 LT | 166 |         return ret;
1da177e4 LT | 167 | }
1da177e4 LT | 168 |
1da177e4 LT | 169 | struct task_struct *__stop_machine_run(int (*fn)(void *), void *data,
1da177e4 LT | 170 |                                        unsigned int cpu)
1da177e4 LT | 171 | {
6c6080f7 | 172 |         static DEFINE_MUTEX(stopmachine_mutex);
1da177e4 LT | 173 |         struct stop_machine_data smdata;
1da177e4 LT | 174 |         struct task_struct *p;
1da177e4 LT | 175 |
1da177e4 LT | 176 |         smdata.fn = fn;
1da177e4 LT | 177 |         smdata.data = data;
1da177e4 LT | 178 |         init_completion(&smdata.done);
1da177e4 LT | 179 |
6c6080f7 | 180 |         mutex_lock(&stopmachine_mutex);
1da177e4 LT | 181 |
1da177e4 LT | 182 |         /* If they don't care which CPU fn runs on, bind to any online one. */
1da177e4 LT | 183 |         if (cpu == NR_CPUS)
39c715b7 | 184 |                 cpu = raw_smp_processor_id();
1da177e4 LT | 185 |
1da177e4 LT | 186 |         p = kthread_create(do_stop, &smdata, "kstopmachine");
1da177e4 LT | 187 |         if (!IS_ERR(p)) {
85653af7 ST | 188 |                 struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
85653af7 ST | 189 |
85653af7 ST | 190 |                 /* One high-prio thread per cpu. We'll do this one. */
85653af7 ST | 191 |                 sched_setscheduler(p, SCHED_FIFO, &param);
1da177e4 LT | 192 |                 kthread_bind(p, cpu);
1da177e4 LT | 193 |                 wake_up_process(p);
1da177e4 LT | 194 |                 wait_for_completion(&smdata.done);
1da177e4 LT | 195 |         }
6c6080f7 | 196 |         mutex_unlock(&stopmachine_mutex);
1da177e4 LT | 197 |         return p;
1da177e4 LT | 198 | }
1da177e4 LT | 199 |
1da177e4 LT | 200 | int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)
1da177e4 LT | 201 | {
1da177e4 LT | 202 |         struct task_struct *p;
1da177e4 LT | 203 |         int ret;
1da177e4 LT | 204 |
1da177e4 LT | 205 |         /* No CPUs can come up or down during this. */
86ef5c9a | 206 |         get_online_cpus();
1da177e4 LT | 207 |         p = __stop_machine_run(fn, data, cpu);
1da177e4 LT | 208 |         if (!IS_ERR(p))
1da177e4 LT | 209 |                 ret = kthread_stop(p);
1da177e4 LT | 210 |         else
1da177e4 LT | 211 |                 ret = PTR_ERR(p);
86ef5c9a | 212 |         put_online_cpus();
1da177e4 LT | 213 |
1da177e4 LT | 214 |         return ret;
1da177e4 LT | 215 | }
ee527cd3 PB | 216 | EXPORT_SYMBOL_GPL(stop_machine_run);
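
The flow in the listing, in short: stop_machine_run() pins CPU hotplug with get_online_cpus(), __stop_machine_run() serializes callers on stopmachine_mutex and spawns the SCHED_FIFO "kstopmachine" thread, and do_stop() calls stop_machine() to start one stopmachine() thread per other online CPU and march them through STOPMACHINE_PREPARE (preemption off) and STOPMACHINE_DISABLE_IRQ (interrupts off) before running fn; restart_machine() then moves everyone to STOPMACHINE_EXIT and re-enables interrupts and preemption.

A minimal, hypothetical caller sketch against this 2.6.2x-era API (the module and identifier names below are invented for illustration and are not part of the file above):

```c
#include <linux/init.h>
#include <linux/module.h>
#include <linux/stop_machine.h>
#include <linux/threads.h>

static int example_flag = 1;

/* Runs while every other online CPU spins in stopmachine() with interrupts
 * hard-disabled, so no other code can observe the flag mid-update.  It must
 * be short and must not sleep or take sleeping locks. */
static int example_clear_flag(void *data)
{
	*(int *)data = 0;
	return 0;
}

static int __init example_init(void)
{
	/* NR_CPUS means "don't care which CPU fn runs on" (see line 183 above). */
	return stop_machine_run(example_clear_flag, &example_flag, NR_CPUS);
}

static void __exit example_exit(void)
{
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL"); /* stop_machine_run is EXPORT_SYMBOL_GPL. */
```

stop_machine_run() returns whatever fn returned, or an error if the kstopmachine or per-CPU threads could not be created. Because fn runs with all other CPUs held and local interrupts disabled, callers keep it brief and non-blocking; in-tree users of this interface in this era include module removal and CPU hot-unplug.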