/*
 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#include "linux/sched.h"
#include "linux/signal.h"
#include "linux/kernel.h"
#include "linux/interrupt.h"
#include "linux/ptrace.h"
#include "asm/system.h"
#include "asm/pgalloc.h"
#include "asm/ptrace.h"
#include "asm/tlbflush.h"
#include "irq_user.h"
#include "signal_user.h"
#include "kern_util.h"
#include "user_util.h"
#include "os.h"
#include "kern.h"
#include "sigcontext.h"
#include "time_user.h"
#include "mem_user.h"
#include "tlb.h"
#include "mode.h"
#include "mode_kern.h"
#include "init.h"
#include "tt.h"

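/* In tt mode every UML task has its own host process, so switching tasks
 * means waking the incoming process and putting the outgoing one to sleep.
 * The handshake runs over each task's switch_pipe - writing a byte to the
 * incoming task's pipe wakes it, and the outgoing task then blocks reading
 * its own pipe until something switches back to it. */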
void switch_to_tt(void *prev, void *next)
{
        struct task_struct *from, *to, *prev_sched;
        unsigned long flags;
        int err, vtalrm, alrm, prof, cpu;
        char c;

        from = prev;
        to = next;

        cpu = from->thread_info->cpu;
        if(cpu == 0)
                forward_interrupts(to->thread.mode.tt.extern_pid);
#ifdef CONFIG_SMP
        forward_ipi(cpu_data[cpu].ipi_pipe[0], to->thread.mode.tt.extern_pid);
#endif
        local_irq_save(flags);

        vtalrm = change_sig(SIGVTALRM, 0);
        alrm = change_sig(SIGALRM, 0);
        prof = change_sig(SIGPROF, 0);

        forward_pending_sigio(to->thread.mode.tt.extern_pid);

        c = 0;

        err = os_write_file(to->thread.mode.tt.switch_pipe[1], &c, sizeof(c));
        if(err != sizeof(c))
                panic("write of switch_pipe failed, err = %d", -err);

        /* An exited task has its switch_pipe closed and marked with -1, so
         * don't wait to be scheduled again - just die. */
        if(from->thread.mode.tt.switch_pipe[0] == -1)
                os_kill_process(os_getpid(), 0);

        err = os_read_file(from->thread.mode.tt.switch_pipe[0], &c, sizeof(c));
        if(err != sizeof(c))
                panic("read of switch_pipe failed, errno = %d", -err);

        /* If the process that we have just scheduled away from has exited,
         * then it needs to be killed here.  The reason is that, even though
         * it will kill itself when it next runs, that may be too late.  Its
         * stack will be freed, possibly before then, and if that happens,
         * we have a use-after-free situation.  So, it gets killed here
         * in case it has not already killed itself.
         */
        prev_sched = current->thread.prev_sched;
        if(prev_sched->thread.mode.tt.switch_pipe[0] == -1)
                os_kill_process(prev_sched->thread.mode.tt.extern_pid, 1);

        change_sig(SIGVTALRM, vtalrm);
        change_sig(SIGALRM, alrm);
        change_sig(SIGPROF, prof);

        arch_switch();

        flush_tlb_all();
        local_irq_restore(flags);
}

void release_thread_tt(struct task_struct *task)
{
        int pid = task->thread.mode.tt.extern_pid;

        /*
         * We first have to kill the other process, before
         * closing its switch_pipe. Else it might wake up
         * and receive "EOF" before we could kill it.
         */
        if(os_getpid() != pid)
                os_kill_process(pid, 0);

        os_close_file(task->thread.mode.tt.switch_pipe[0]);
        os_close_file(task->thread.mode.tt.switch_pipe[1]);
        /* use switch_pipe as flag: thread is released */
        task->thread.mode.tt.switch_pipe[0] = -1;
}

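/* Park a newly created process until the scheduler switches to it for the
 * first time: it stops itself, then blocks reading the read end of its
 * switch_pipe (the fd passed in). */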
void suspend_new_thread(int fd)
{
        int err;
        char c;

        os_stop_process(os_getpid());
        err = os_read_file(fd, &c, sizeof(c));
        if(err != sizeof(c))
                panic("read failed in suspend_new_thread, err = %d", -err);
}

void schedule_tail(task_t *prev);

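/* SIGUSR1 handler that a new kernel thread enters once new_thread_proc has
 * signalled itself: it records the sigcontext, unblocks the timer and IO
 * signals in the saved mask, waits to be scheduled, and then runs the
 * requested thread function. */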
static void new_thread_handler(int sig)
{
        unsigned long disable;
        int (*fn)(void *);
        void *arg;

        fn = current->thread.request.u.thread.proc;
        arg = current->thread.request.u.thread.arg;

        UPT_SC(&current->thread.regs.regs) = (void *) (&sig + 1);
        disable = (1 << (SIGVTALRM - 1)) | (1 << (SIGALRM - 1)) |
                (1 << (SIGIO - 1)) | (1 << (SIGPROF - 1));
        SC_SIGMASK(UPT_SC(&current->thread.regs.regs)) &= ~disable;

        suspend_new_thread(current->thread.mode.tt.switch_pipe[0]);

        force_flush_all();
        if(current->thread.prev_sched != NULL)
                schedule_tail(current->thread.prev_sched);
        current->thread.prev_sched = NULL;

        init_new_thread_signals(1);
        enable_timer();
        free_page(current->thread.temp_stack);
        set_cmdline("(kernel thread)");

        change_sig(SIGUSR1, 1);
        change_sig(SIGVTALRM, 1);
        change_sig(SIGPROF, 1);
        local_irq_enable();
        if(!run_kernel_thread(fn, arg, &current->thread.exec_buf))
                do_exit(0);

        /* XXX No set_user_mode here because a newly execed process will
         * immediately segfault on its non-existent IP, coming straight back
         * to the signal handler, which will call set_user_mode on its way
         * out.  This should probably change since it's confusing.
         */
}

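/* Entry point of the host process clone()d for a new kernel thread: set up
 * the new stack with new_thread_handler as the SIGUSR1 handler, then raise
 * SIGUSR1 against ourselves to enter it. */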
static int new_thread_proc(void *stack)
{
        /* local_irq_disable is needed to block out signals until this thread is
         * properly scheduled.  Otherwise, the tracing thread will get mighty
         * upset about any signals that arrive before that.
         * This has the complication that it sets the saved signal mask in
         * the sigcontext to block signals.  This gets restored when this
         * thread (or a descendant, since they get a copy of this sigcontext)
         * returns to userspace.
         * So, this is compensated for elsewhere.
         * XXX There is still a small window until local_irq_disable() actually
         * finishes where signals are possible - shouldn't be a problem in
         * practice since SIGIO hasn't been forwarded here yet, and the
         * local_irq_disable should finish before a SIGVTALRM has time to be
         * delivered.
         */
        local_irq_disable();
        init_new_thread_stack(stack, new_thread_handler);
        os_usr1_process(os_getpid());
        change_sig(SIGUSR1, 1);
        return(0);
}

/* Signal masking - signals are blocked at the start of fork_tramp.  They
 * are re-enabled when finish_fork_handler is entered by fork_tramp hitting
 * itself with a SIGUSR1.  set_user_mode has to be run with SIGUSR1 off,
 * so it is blocked before it's called.  They are re-enabled on sigreturn
 * despite the fact that they were blocked when the SIGUSR1 was issued because
 * copy_thread copies the parent's sigcontext, including the signal mask
 * onto the signal frame.
 */

void finish_fork_handler(int sig)
{
        UPT_SC(&current->thread.regs.regs) = (void *) (&sig + 1);
        suspend_new_thread(current->thread.mode.tt.switch_pipe[0]);

        force_flush_all();
        if(current->thread.prev_sched != NULL)
                schedule_tail(current->thread.prev_sched);
        current->thread.prev_sched = NULL;

        enable_timer();
        change_sig(SIGVTALRM, 1);
        local_irq_enable();
        if(current->mm != current->parent->mm)
                protect_memory(uml_reserved, high_physmem - uml_reserved, 1,
                               1, 0, 1);
        task_protections((unsigned long) current_thread);

        free_page(current->thread.temp_stack);
        local_irq_disable();
        change_sig(SIGUSR1, 0);
        set_user_mode(current);
}

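/* Entry point of the host process clone()d for a forked process; same
 * pattern as new_thread_proc, but with finish_fork_handler as the SIGUSR1
 * handler. */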
int fork_tramp(void *stack)
{
        local_irq_disable();
        init_new_thread_stack(stack, finish_fork_handler);

        os_usr1_process(os_getpid());
        change_sig(SIGUSR1, 1);
        return(0);
}

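/* Create the host process that backs a new UML task: pick the trampoline
 * (fork_tramp for a fork, new_thread_proc for a kernel thread), give the
 * child its own switch_pipe and a temporary stack, clone it with
 * start_fork_tramp(), and for a fork copy the parent's sigcontext and set
 * the child's syscall return value to 0.  The new pid is then handed to
 * the tracing thread as an OP_FORK request. */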
int copy_thread_tt(int nr, unsigned long clone_flags, unsigned long sp,
                   unsigned long stack_top, struct task_struct * p,
                   struct pt_regs *regs)
{
        int (*tramp)(void *);
        int new_pid, err;
        unsigned long stack;

        if(current->thread.forking)
                tramp = fork_tramp;
        else {
                tramp = new_thread_proc;
                p->thread.request.u.thread = current->thread.request.u.thread;
        }

        err = os_pipe(p->thread.mode.tt.switch_pipe, 1, 1);
        if(err < 0){
                printk("copy_thread : pipe failed, err = %d\n", -err);
                return(err);
        }

        stack = alloc_stack(0, 0);
        if(stack == 0){
                printk(KERN_ERR "copy_thread : failed to allocate "
                       "temporary stack\n");
                return(-ENOMEM);
        }

        clone_flags &= CLONE_VM;
        p->thread.temp_stack = stack;
        new_pid = start_fork_tramp(p->thread_info, stack, clone_flags, tramp);
        if(new_pid < 0){
                printk(KERN_ERR "copy_thread : clone failed - errno = %d\n",
                       -new_pid);
                return(new_pid);
        }

        if(current->thread.forking){
                sc_to_sc(UPT_SC(&p->thread.regs.regs), UPT_SC(&regs->regs));
                SC_SET_SYSCALL_RETURN(UPT_SC(&p->thread.regs.regs), 0);
                if(sp != 0)
                        SC_SP(UPT_SC(&p->thread.regs.regs)) = sp;
        }
        p->thread.mode.tt.extern_pid = new_pid;

        current->thread.request.op = OP_FORK;
        current->thread.request.u.fork.pid = new_pid;
        os_usr1_process(os_getpid());

        /* Enable the signal and then disable it to ensure that it is handled
         * here, and nowhere else.
         */
        change_sig(SIGUSR1, 1);

        change_sig(SIGUSR1, 0);
        return(0);
}

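/* Requests to the tracing thread (reboot, halt, fork attach, callbacks) are
 * made by filling in thread.request and raising SIGUSR1 against ourselves;
 * the tracer then acts on the op in do_proc_op() below. */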
void reboot_tt(void)
{
        current->thread.request.op = OP_REBOOT;
        os_usr1_process(os_getpid());
        change_sig(SIGUSR1, 1);
}

void halt_tt(void)
{
        current->thread.request.op = OP_HALT;
        os_usr1_process(os_getpid());
        change_sig(SIGUSR1, 1);
}

void kill_off_processes_tt(void)
{
        struct task_struct *p;
        int me;

        me = os_getpid();
        for_each_process(p){
                if(p->thread.mode.tt.extern_pid != me)
                        os_kill_process(p->thread.mode.tt.extern_pid, 0);
        }
        if(init_task.thread.mode.tt.extern_pid != me)
                os_kill_process(init_task.thread.mode.tt.extern_pid, 0);
}

void initial_thread_cb_tt(void (*proc)(void *), void *arg)
{
        if(os_getpid() == tracing_pid){
                (*proc)(arg);
        }
        else {
                current->thread.request.op = OP_CB;
                current->thread.request.u.cb.proc = proc;
                current->thread.request.u.cb.arg = arg;
                os_usr1_process(os_getpid());
                change_sig(SIGUSR1, 1);

                change_sig(SIGUSR1, 0);
        }
}

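/* Run by the tracing thread to carry out whatever operation a process
 * requested before signalling itself with SIGUSR1; returns the op so the
 * caller can finish any handling of its own. */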
int do_proc_op(void *t, int proc_id)
{
        struct task_struct *task;
        struct thread_struct *thread;
        int op, pid;

        task = t;
        thread = &task->thread;
        op = thread->request.op;
        switch(op){
        case OP_NONE:
        case OP_TRACE_ON:
                break;
        case OP_EXEC:
                pid = thread->request.u.exec.pid;
                do_exec(thread->mode.tt.extern_pid, pid);
                thread->mode.tt.extern_pid = pid;
                cpu_tasks[task->thread_info->cpu].pid = pid;
                break;
        case OP_FORK:
                attach_process(thread->request.u.fork.pid);
                break;
        case OP_CB:
                (*thread->request.u.cb.proc)(thread->request.u.cb.arg);
                break;
        case OP_REBOOT:
        case OP_HALT:
                break;
        default:
                tracer_panic("Bad op in do_proc_op");
                break;
        }
        thread->request.op = OP_NONE;
        return(op);
}

void init_idle_tt(void)
{
        default_idle();
}

extern void start_kernel(void);

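/* First thread started under the tracer: it records itself as CPU 0's task
 * and then boots the kernel proper with start_kernel(). */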
static int start_kernel_proc(void *unused)
{
        int pid;

        block_signals();
        pid = os_getpid();

        cpu_tasks[0].pid = pid;
        cpu_tasks[0].task = current;
#ifdef CONFIG_SMP
        cpu_online_map = cpumask_of_cpu(0);
#endif
        if(debug) os_stop_process(pid);
        start_kernel();
        return(0);
}

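/* thread.mode.tt.tracing records whether the underlying host process is
 * currently being syscall-traced; set_user_mode asks the tracing thread,
 * via an OP_TRACE_ON request, to turn tracing back on for this process. */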
void set_tracing(void *task, int tracing)
{
        ((struct task_struct *) task)->thread.mode.tt.tracing = tracing;
}

int is_tracing(void *t)
{
        return (((struct task_struct *) t)->thread.mode.tt.tracing);
}

int set_user_mode(void *t)
{
        struct task_struct *task;

        task = t ? t : current;
        if(task->thread.mode.tt.tracing)
                return(1);
        task->thread.request.op = OP_TRACE_ON;
        os_usr1_process(os_getpid());
        return(0);
}

void set_init_pid(int pid)
{
        int err;

        init_task.thread.mode.tt.extern_pid = pid;
        err = os_pipe(init_task.thread.mode.tt.switch_pipe, 1, 1);
        if(err)
                panic("Can't create switch pipe for init_task, errno = %d",
                      -err);
}

int start_uml_tt(void)
{
        void *sp;
        int pages;

        pages = (1 << CONFIG_KERNEL_STACK_ORDER);
        sp = (void *) ((unsigned long) init_task.thread_info) +
                pages * PAGE_SIZE - sizeof(unsigned long);
        return(tracer(start_kernel_proc, sp));
}

int external_pid_tt(struct task_struct *task)
{
        return(task->thread.mode.tt.extern_pid);
}

int thread_pid_tt(struct task_struct *task)
{
        return(task->thread.mode.tt.extern_pid);
}

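/* Answers whether a host pid belongs to some UML task, by checking it
 * against every task's extern_pid. */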
int is_valid_pid(int pid)
{
        struct task_struct *task;

        read_lock(&tasklist_lock);
        for_each_process(task){
                if(task->thread.mode.tt.extern_pid == pid){
                        read_unlock(&tasklist_lock);
                        return(1);
                }
        }
        read_unlock(&tasklist_lock);
        return(0);
}