Commit 1da177e4 (LT): linux/arch/m68knommu/kernel/signal.c
/*
 * linux/arch/m68knommu/kernel/signal.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

/*
 * Linux/m68k support by Hamish Macdonald
 *
 * 68060 fixes by Jesper Skov
 *
 * 1997-12-01 Modified for POSIX.1b signals by Andreas Schwab
 *
 * mathemu support by Roman Zippel
 * (Note: fpstate in the signal context is completely ignored for the emulator
 * and the internal floating point format is put on stack)
 */

/*
 * ++roman (07/09/96): implemented signal stacks (specially for tosemu on
 * Atari :-) Current limitation: Only one sigstack can be active at one time.
 * If a second signal with SA_ONSTACK set arrives while working on a sigstack,
 * SA_ONSTACK is ignored. This behaviour avoids lots of trouble with nested
 * signal handlers!
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/syscalls.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/highuid.h>
#include <linux/tty.h>
#include <linux/personality.h>
#include <linux/binfmts.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/traps.h>
#include <asm/ucontext.h>

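/* The set of signals that may be blocked: everything except SIGKILL and SIGSTOP. */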
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))

asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs);

/*
 * Atomically swap in the new signal mask, and wait for a signal.
 */
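/*
 * regs->d0 is preset to -EINTR so that the sigcontext saved for the handler
 * records -EINTR as the syscall result; the loop below then sleeps until
 * do_signal() reports that a handler frame has been set up.
 */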
asmlinkage int do_sigsuspend(struct pt_regs *regs)
{
	old_sigset_t mask = regs->d3;
	sigset_t saveset;

	mask &= _BLOCKABLE;
	spin_lock_irq(&current->sighand->siglock);
	saveset = current->blocked;
	siginitset(&current->blocked, mask);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	regs->d0 = -EINTR;
	while (1) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
		if (do_signal(&saveset, regs))
			return -EINTR;
	}
}

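/*
 * rt_sigsuspend(): same as above, but takes a full sigset_t, passed by
 * pointer in d1 with its size in d2.
 */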
asmlinkage int
do_rt_sigsuspend(struct pt_regs *regs)
{
	sigset_t *unewset = (sigset_t *)regs->d1;
	size_t sigsetsize = (size_t)regs->d2;
	sigset_t saveset, newset;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	sigdelsetmask(&newset, ~_BLOCKABLE);

	spin_lock_irq(&current->sighand->siglock);
	saveset = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	regs->d0 = -EINTR;
	while (1) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
		if (do_signal(&saveset, regs))
			return -EINTR;
	}
}

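/*
 * Old-style sigaction(2): copy the user's old_sigaction into a k_sigaction,
 * let the generic do_sigaction() do the work, then convert the previous
 * action back for the caller.
 */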
asmlinkage int
sys_sigaction(int sig, const struct old_sigaction *act,
	      struct old_sigaction *oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
			return -EFAULT;
		__get_user(new_ka.sa.sa_flags, &act->sa_flags);
		__get_user(mask, &act->sa_mask);
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
			return -EFAULT;
		__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
	}

	return ret;
}

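/*
 * sigaltstack(2) just forwards to the generic helper; the current user
 * stack pointer lets it check whether we are already running on the
 * alternate stack.
 */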
asmlinkage int
sys_sigaltstack(const stack_t *uss, stack_t *uoss)
{
	return do_sigaltstack(uss, uoss, rdusp());
}


/*
 * Do a signal return; undo the signal stack.
 *
 * Keep the return code on the stack quadword aligned!
 * That makes the cache flush below easier.
 */

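/*
 * Layout of the frames pushed on the user stack for a handler: pretcode is
 * the address the handler returns to and points at retcode, a small
 * trampoline (written by setup_frame()/setup_rt_frame()) that traps back
 * into the kernel via sigreturn/rt_sigreturn.
 */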
struct sigframe
{
	char *pretcode;
	int sig;
	int code;
	struct sigcontext *psc;
	char retcode[8];
	unsigned long extramask[_NSIG_WORDS-1];
	struct sigcontext sc;
};

struct rt_sigframe
{
	char *pretcode;
	int sig;
	struct siginfo *pinfo;
	void *puc;
	char retcode[8];
	struct siginfo info;
	struct ucontext uc;
};

#ifdef CONFIG_FPU

static unsigned char fpu_version = 0;	/* version number of fpu, set by setup_frame */

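/*
 * Restore the FPU state saved in the sigcontext.  With the math emulator
 * the saved registers are copied straight back into the thread structure;
 * otherwise the registers are reloaded with fmovem and the saved fsave
 * frame with frestore, after checking that its version byte matches the
 * one recorded when the frame was built.
 */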
static inline int restore_fpu_state(struct sigcontext *sc)
{
	int err = 1;

	if (FPU_IS_EMU) {
		/* restore registers */
		memcpy(current->thread.fpcntl, sc->sc_fpcntl, 12);
		memcpy(current->thread.fp, sc->sc_fpregs, 24);
		return 0;
	}

	if (sc->sc_fpstate[0]) {
		/* Verify the frame format. */
		if (sc->sc_fpstate[0] != fpu_version)
			goto out;

		__asm__ volatile (".chip 68k/68881\n\t"
				  "fmovemx %0,%/fp0-%/fp1\n\t"
				  "fmoveml %1,%/fpcr/%/fpsr/%/fpiar\n\t"
				  ".chip 68k"
				  : /* no outputs */
				  : "m" (*sc->sc_fpregs), "m" (*sc->sc_fpcntl));
	}
	__asm__ volatile (".chip 68k/68881\n\t"
			  "frestore %0\n\t"
			  ".chip 68k" : : "m" (*sc->sc_fpstate));
	err = 0;

out:
	return err;
}

#define FPCONTEXT_SIZE	216
#define uc_fpstate	uc_filler[0]
#define uc_formatvec	uc_filler[FPCONTEXT_SIZE/4]
#define uc_extra	uc_filler[FPCONTEXT_SIZE/4+1]

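/*
 * RT variant: the raw fsave frame lives in uc_filler (uc_fpstate).  As the
 * code below uses it, byte 0 of that frame is the format/version byte and
 * byte 1 gives how much additional frame state has to be copied back in
 * before the frestore.
 */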
static inline int rt_restore_fpu_state(struct ucontext *uc)
{
	unsigned char fpstate[FPCONTEXT_SIZE];
	int context_size = 0;
	fpregset_t fpregs;
	int err = 1;

	if (FPU_IS_EMU) {
		/* restore fpu control register */
		if (__copy_from_user(current->thread.fpcntl,
				     &uc->uc_mcontext.fpregs.f_pcr, 12))
			goto out;
		/* restore all other fpu register */
		if (__copy_from_user(current->thread.fp,
				     uc->uc_mcontext.fpregs.f_fpregs, 96))
			goto out;
		return 0;
	}

	if (__get_user(*(long *)fpstate, (long *)&uc->uc_fpstate))
		goto out;
	if (fpstate[0]) {
		context_size = fpstate[1];

		/* Verify the frame format. */
		if (fpstate[0] != fpu_version)
			goto out;
		if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs,
				     sizeof(fpregs)))
			goto out;
		__asm__ volatile (".chip 68k/68881\n\t"
				  "fmovemx %0,%/fp0-%/fp7\n\t"
				  "fmoveml %1,%/fpcr/%/fpsr/%/fpiar\n\t"
				  ".chip 68k"
				  : /* no outputs */
				  : "m" (*fpregs.f_fpregs),
				    "m" (fpregs.f_pcr));
	}
	if (context_size &&
	    __copy_from_user(fpstate + 4, (long *)&uc->uc_fpstate + 1,
			     context_size))
		goto out;
	__asm__ volatile (".chip 68k/68881\n\t"
			  "frestore %0\n\t"
			  ".chip 68k" : : "m" (*fpstate));
	err = 0;

out:
	return err;
}

#endif

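/*
 * Restore from a plain sigcontext: only the registers a handler is allowed
 * to clobber (d0/d1/a0/a1), the user stack pointer, pc and the user byte of
 * the status register are taken from the frame; the supervisor byte of sr
 * is preserved.
 */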
static inline int
restore_sigcontext(struct pt_regs *regs, struct sigcontext *usc, void *fp,
		   int *pd0)
{
	int formatvec;
	struct sigcontext context;
	int err = 0;

	/* get previous context */
	if (copy_from_user(&context, usc, sizeof(context)))
		goto badframe;

	/* restore passed registers */
	regs->d1 = context.sc_d1;
	regs->a0 = context.sc_a0;
	regs->a1 = context.sc_a1;
	regs->sr = (regs->sr & 0xff00) | (context.sc_sr & 0xff);
	regs->pc = context.sc_pc;
	regs->orig_d0 = -1;		/* disable syscall checks */
	wrusp(context.sc_usp);
	formatvec = context.sc_formatvec;
	regs->format = formatvec >> 12;
	regs->vector = formatvec & 0xfff;

#ifdef CONFIG_FPU
	err = restore_fpu_state(&context);
#endif

	*pd0 = context.sc_d0;
	return err;

badframe:
	return 1;
}

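/*
 * The rt variant restores the full register set from gregs[0..17],
 * including the callee-saved registers kept in the switch_stack, and
 * re-installs the signal stack settings from uc_stack.
 */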
static inline int
rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
		    struct ucontext *uc, int *pd0)
{
	int temp;
	greg_t *gregs = uc->uc_mcontext.gregs;
	unsigned long usp;
	int err;

	err = __get_user(temp, &uc->uc_mcontext.version);
	if (temp != MCONTEXT_VERSION)
		goto badframe;
	/* restore passed registers */
	err |= __get_user(regs->d0, &gregs[0]);
	err |= __get_user(regs->d1, &gregs[1]);
	err |= __get_user(regs->d2, &gregs[2]);
	err |= __get_user(regs->d3, &gregs[3]);
	err |= __get_user(regs->d4, &gregs[4]);
	err |= __get_user(regs->d5, &gregs[5]);
	err |= __get_user(sw->d6, &gregs[6]);
	err |= __get_user(sw->d7, &gregs[7]);
	err |= __get_user(regs->a0, &gregs[8]);
	err |= __get_user(regs->a1, &gregs[9]);
	err |= __get_user(regs->a2, &gregs[10]);
	err |= __get_user(sw->a3, &gregs[11]);
	err |= __get_user(sw->a4, &gregs[12]);
	err |= __get_user(sw->a5, &gregs[13]);
	err |= __get_user(sw->a6, &gregs[14]);
	err |= __get_user(usp, &gregs[15]);
	wrusp(usp);
	err |= __get_user(regs->pc, &gregs[16]);
	err |= __get_user(temp, &gregs[17]);
	regs->sr = (regs->sr & 0xff00) | (temp & 0xff);
	regs->orig_d0 = -1;		/* disable syscall checks */
	regs->format = temp >> 12;
	regs->vector = temp & 0xfff;

	if (do_sigaltstack(&uc->uc_stack, NULL, usp) == -EFAULT)
		goto badframe;

	*pd0 = regs->d0;
	return err;

badframe:
	return 1;
}

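/*
 * When the handler returns it pops pretcode off the frame, so by the time
 * the retcode trampoline traps back in here the user stack pointer is
 * frame + 4; hence the "usp - 4" below to recover the frame address.
 */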
asmlinkage int do_sigreturn(unsigned long __unused)
{
	struct switch_stack *sw = (struct switch_stack *) &__unused;
	struct pt_regs *regs = (struct pt_regs *) (sw + 1);
	unsigned long usp = rdusp();
	struct sigframe *frame = (struct sigframe *)(usp - 4);
	sigset_t set;
	int d0;

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__get_user(set.sig[0], &frame->sc.sc_mask) ||
	    (_NSIG_WORDS > 1 &&
	     __copy_from_user(&set.sig[1], &frame->extramask,
			      sizeof(frame->extramask))))
		goto badframe;

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	if (restore_sigcontext(regs, &frame->sc, frame + 1, &d0))
		goto badframe;
	return d0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}

asmlinkage int do_rt_sigreturn(unsigned long __unused)
{
	struct switch_stack *sw = (struct switch_stack *) &__unused;
	struct pt_regs *regs = (struct pt_regs *) (sw + 1);
	unsigned long usp = rdusp();
	struct rt_sigframe *frame = (struct rt_sigframe *)(usp - 4);
	sigset_t set;
	int d0;

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
		goto badframe;

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	if (rt_restore_ucontext(regs, sw, &frame->uc, &d0))
		goto badframe;
	return d0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}

#ifdef CONFIG_FPU
/*
 * Set up a signal frame.
 */

static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
{
	if (FPU_IS_EMU) {
		/* save registers */
		memcpy(sc->sc_fpcntl, current->thread.fpcntl, 12);
		memcpy(sc->sc_fpregs, current->thread.fp, 24);
		return;
	}

	__asm__ volatile (".chip 68k/68881\n\t"
			  "fsave %0\n\t"
			  ".chip 68k"
			  : : "m" (*sc->sc_fpstate) : "memory");

	if (sc->sc_fpstate[0]) {
		fpu_version = sc->sc_fpstate[0];
		__asm__ volatile (".chip 68k/68881\n\t"
				  "fmovemx %/fp0-%/fp1,%0\n\t"
				  "fmoveml %/fpcr/%/fpsr/%/fpiar,%1\n\t"
				  ".chip 68k"
				  : /* no outputs */
				  : "m" (*sc->sc_fpregs),
				    "m" (*sc->sc_fpcntl)
				  : "memory");
	}
}

static inline int rt_save_fpu_state(struct ucontext *uc, struct pt_regs *regs)
{
	unsigned char fpstate[FPCONTEXT_SIZE];
	int context_size = 0;
	int err = 0;

	if (FPU_IS_EMU) {
		/* save fpu control register */
		err |= copy_to_user(&uc->uc_mcontext.fpregs.f_pcr,
				    current->thread.fpcntl, 12);
		/* save all other fpu register */
		err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpregs,
				    current->thread.fp, 96);
		return err;
	}

	__asm__ volatile (".chip 68k/68881\n\t"
			  "fsave %0\n\t"
			  ".chip 68k"
			  : : "m" (*fpstate) : "memory");

	err |= __put_user(*(long *)fpstate, (long *)&uc->uc_fpstate);
	if (fpstate[0]) {
		fpregset_t fpregs;
		context_size = fpstate[1];
		fpu_version = fpstate[0];
		__asm__ volatile (".chip 68k/68881\n\t"
				  "fmovemx %/fp0-%/fp7,%0\n\t"
				  "fmoveml %/fpcr/%/fpsr/%/fpiar,%1\n\t"
				  ".chip 68k"
				  : /* no outputs */
				  : "m" (*fpregs.f_fpregs),
				    "m" (fpregs.f_pcr)
				  : "memory");
		err |= copy_to_user(&uc->uc_mcontext.fpregs, &fpregs,
				    sizeof(fpregs));
	}
	if (context_size)
		err |= copy_to_user((long *)&uc->uc_fpstate + 1, fpstate + 4,
				    context_size);
	return err;
}

#endif

static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
			     unsigned long mask)
{
	sc->sc_mask = mask;
	sc->sc_usp = rdusp();
	sc->sc_d0 = regs->d0;
	sc->sc_d1 = regs->d1;
	sc->sc_a0 = regs->a0;
	sc->sc_a1 = regs->a1;
	sc->sc_sr = regs->sr;
	sc->sc_pc = regs->pc;
	sc->sc_formatvec = regs->format << 12 | regs->vector;
#ifdef CONFIG_FPU
	save_fpu_state(sc, regs);
#endif
}

static inline int rt_setup_ucontext(struct ucontext *uc, struct pt_regs *regs)
{
	struct switch_stack *sw = (struct switch_stack *)regs - 1;
	greg_t *gregs = uc->uc_mcontext.gregs;
	int err = 0;

	err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version);
	err |= __put_user(regs->d0, &gregs[0]);
	err |= __put_user(regs->d1, &gregs[1]);
	err |= __put_user(regs->d2, &gregs[2]);
	err |= __put_user(regs->d3, &gregs[3]);
	err |= __put_user(regs->d4, &gregs[4]);
	err |= __put_user(regs->d5, &gregs[5]);
	err |= __put_user(sw->d6, &gregs[6]);
	err |= __put_user(sw->d7, &gregs[7]);
	err |= __put_user(regs->a0, &gregs[8]);
	err |= __put_user(regs->a1, &gregs[9]);
	err |= __put_user(regs->a2, &gregs[10]);
	err |= __put_user(sw->a3, &gregs[11]);
	err |= __put_user(sw->a4, &gregs[12]);
	err |= __put_user(sw->a5, &gregs[13]);
	err |= __put_user(sw->a6, &gregs[14]);
	err |= __put_user(rdusp(), &gregs[15]);
	err |= __put_user(regs->pc, &gregs[16]);
	err |= __put_user(regs->sr, &gregs[17]);
#ifdef CONFIG_FPU
	err |= rt_save_fpu_state(uc, regs);
#endif
	return err;
}

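/*
 * This is where the classic m68k port flushes the freshly written
 * trampoline out of the caches (see the cache-flush note above); the nommu
 * port leaves it empty.
 */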
static inline void push_cache (unsigned long vaddr)
{
}

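/*
 * Pick the stack for the handler frame: the normal user stack, or the
 * alternate signal stack if SA_ONSTACK is set and we are not already on
 * it, then align the frame down to an 8-byte boundary.
 */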
static inline void *
get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
{
	unsigned long usp;

	/* Default to using normal stack. */
	usp = rdusp();

	/* This is the X/Open sanctioned signal stack switching. */
	if (ka->sa.sa_flags & SA_ONSTACK) {
		if (!on_sig_stack(usp))
			usp = current->sas_ss_sp + current->sas_ss_size;
	}
	return (void *)((usp - frame_size) & -8UL);
}

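/*
 * setup_frame() builds the stack frame for a non-RT handler.  The retcode
 * trampoline is the two 16-bit opcodes 0x7000 (moveq #0,d0) and 0x4e40
 * (trap #0) stored as one big-endian long; adding (__NR_sigreturn << 16)
 * drops the syscall number into the moveq immediate byte, so returning
 * from the handler executes "moveq #__NR_sigreturn,d0; trap #0".
 */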
static void setup_frame (int sig, struct k_sigaction *ka,
			 sigset_t *set, struct pt_regs *regs)
{
	struct sigframe *frame;
	struct sigcontext context;
	int err = 0;

	frame = get_sigframe(ka, regs, sizeof(*frame));

	err |= __put_user((current_thread_info()->exec_domain
			   && current_thread_info()->exec_domain->signal_invmap
			   && sig < 32
			   ? current_thread_info()->exec_domain->signal_invmap[sig]
			   : sig),
			  &frame->sig);

	err |= __put_user(regs->vector, &frame->code);
	err |= __put_user(&frame->sc, &frame->psc);

	if (_NSIG_WORDS > 1)
		err |= copy_to_user(frame->extramask, &set->sig[1],
				    sizeof(frame->extramask));

	setup_sigcontext(&context, regs, set->sig[0]);
	err |= copy_to_user (&frame->sc, &context, sizeof(context));

	/* Set up to return from userspace. */
	err |= __put_user(frame->retcode, &frame->pretcode);
	/* moveq #,d0; trap #0 */
	err |= __put_user(0x70004e40 + (__NR_sigreturn << 16),
			  (long *)(frame->retcode));

	if (err)
		goto give_sigsegv;

	push_cache ((unsigned long) &frame->retcode);

	/* Set up registers for signal handler */
	wrusp ((unsigned long) frame);
	regs->pc = (unsigned long) ka->sa.sa_handler;

adjust_stack:
	/* Prepare to skip over the extra stuff in the exception frame. */
	if (regs->stkadj) {
		struct pt_regs *tregs =
			(struct pt_regs *)((ulong)regs + regs->stkadj);
#if DEBUG
		printk(KERN_DEBUG "Performing stackadjust=%04x\n", regs->stkadj);
#endif
		/* This must be copied with decreasing addresses to
		   handle overlaps. */
		tregs->vector = 0;
		tregs->format = 0;
		tregs->pc = regs->pc;
		tregs->sr = regs->sr;
	}
	return;

give_sigsegv:
	force_sigsegv(sig, current);
	goto adjust_stack;
}

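/*
 * setup_rt_frame() does the same for SA_SIGINFO handlers.  Its trampoline
 * avoids relying on the syscall number fitting moveq's signed 8-bit
 * immediate: it loads the one's complement of the low byte and flips it
 * back with not.b (0x4600) before the trap #0 stored separately at
 * retcode + 4.
 */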
static void setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info,
			    sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe *frame;
	int err = 0;

	frame = get_sigframe(ka, regs, sizeof(*frame));

	err |= __put_user((current_thread_info()->exec_domain
			   && current_thread_info()->exec_domain->signal_invmap
			   && sig < 32
			   ? current_thread_info()->exec_domain->signal_invmap[sig]
			   : sig),
			  &frame->sig);
	err |= __put_user(&frame->info, &frame->pinfo);
	err |= __put_user(&frame->uc, &frame->puc);
	err |= copy_siginfo_to_user(&frame->info, info);

	/* Create the ucontext. */
	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __put_user(0, &frame->uc.uc_link);
	err |= __put_user((void *)current->sas_ss_sp,
			  &frame->uc.uc_stack.ss_sp);
	err |= __put_user(sas_ss_flags(rdusp()),
			  &frame->uc.uc_stack.ss_flags);
	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
	err |= rt_setup_ucontext(&frame->uc, regs);
	err |= copy_to_user (&frame->uc.uc_sigmask, set, sizeof(*set));

	/* Set up to return from userspace. */
	err |= __put_user(frame->retcode, &frame->pretcode);
	/* moveq #,d0; notb d0; trap #0 */
	err |= __put_user(0x70004600 + ((__NR_rt_sigreturn ^ 0xff) << 16),
			  (long *)(frame->retcode + 0));
	err |= __put_user(0x4e40, (short *)(frame->retcode + 4));

	if (err)
		goto give_sigsegv;

	push_cache ((unsigned long) &frame->retcode);

	/* Set up registers for signal handler */
	wrusp ((unsigned long) frame);
	regs->pc = (unsigned long) ka->sa.sa_handler;

adjust_stack:
	/* Prepare to skip over the extra stuff in the exception frame. */
	if (regs->stkadj) {
		struct pt_regs *tregs =
			(struct pt_regs *)((ulong)regs + regs->stkadj);
#if DEBUG
		printk(KERN_DEBUG "Performing stackadjust=%04x\n", regs->stkadj);
#endif
		/* This must be copied with decreasing addresses to
		   handle overlaps. */
		tregs->vector = 0;
		tregs->format = 0;
		tregs->pc = regs->pc;
		tregs->sr = regs->sr;
	}
	return;

give_sigsegv:
	force_sigsegv(sig, current);
	goto adjust_stack;
}

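/*
 * System-call restart policy when a handler is about to run:
 * -ERESTARTNOHAND restarts only if there is no handler, -ERESTARTSYS only
 * if the handler has SA_RESTART set, and -ERESTARTNOINTR always.
 * Restarting means putting the original d0 back and backing the pc up over
 * the 2-byte trap instruction.
 */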
static inline void
handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
{
	switch (regs->d0) {
	case -ERESTARTNOHAND:
		if (!has_handler)
			goto do_restart;
		regs->d0 = -EINTR;
		break;

	case -ERESTARTSYS:
		if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) {
			regs->d0 = -EINTR;
			break;
		}
		/* fallthrough */
	case -ERESTARTNOINTR:
	do_restart:
		regs->d0 = regs->orig_d0;
		regs->pc -= 2;
		break;
	}
}

/*
 * OK, we're invoking a handler
 */
static void
handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
	      sigset_t *oldset, struct pt_regs *regs)
{
	/* are we from a system call? */
	if (regs->orig_d0 >= 0)
		/* If so, check system call restarting.. */
		handle_restart(regs, ka, 1);

	/* set up the stack frame */
	if (ka->sa.sa_flags & SA_SIGINFO)
		setup_rt_frame(sig, ka, info, oldset, regs);
	else
		setup_frame(sig, ka, oldset, regs);

	if (ka->sa.sa_flags & SA_ONESHOT)
		ka->sa.sa_handler = SIG_DFL;

	if (!(ka->sa.sa_flags & SA_NODEFER)) {
		spin_lock_irq(&current->sighand->siglock);
		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
		sigaddset(&current->blocked,sig);
		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
	}
}

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 */
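/*
 * When no handler is delivered, restartable syscalls are resumed below by
 * rewinding the pc over the trap; -ERESTART_RESTARTBLOCK is turned into a
 * call to the restart_syscall syscall instead.
 */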
asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs)
{
	struct k_sigaction ka;
	siginfo_t info;
	int signr;

	/*
	 * We want the common case to go fast, which
	 * is why we may in certain cases get here from
	 * kernel mode. Just return without doing anything
	 * if so.
	 */
	if (!user_mode(regs))
		return 1;

	if (!oldset)
		oldset = &current->blocked;

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
	if (signr > 0) {
		/* Whee! Actually deliver the signal. */
		handle_signal(signr, &ka, &info, oldset, regs);
		return 1;
	}

	/* Did we come from a system call? */
	if (regs->orig_d0 >= 0) {
		/* Restart the system call - no handlers present */
		if (regs->d0 == -ERESTARTNOHAND
		    || regs->d0 == -ERESTARTSYS
		    || regs->d0 == -ERESTARTNOINTR) {
			regs->d0 = regs->orig_d0;
			regs->pc -= 2;
		} else if (regs->d0 == -ERESTART_RESTARTBLOCK) {
			regs->d0 = __NR_restart_syscall;
			regs->pc -= 2;
		}
	}
	return 0;
}