/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * arch/sh64/kernel/traps.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003, 2004  Paul Mundt
 * Copyright (C) 2003, 2004  Richard Curnow
 *
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S'.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/module.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/processor.h>
#include <asm/pgtable.h>

#undef DEBUG_EXCEPTION
#ifdef DEBUG_EXCEPTION
/* implemented in ../lib/dbg.c */
extern void show_excp_regs(char *fname, int trapnr, int signr,
                           struct pt_regs *regs);
#else
#define show_excp_regs(a, b, c, d)
#endif

static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
                unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk);

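/*
 * DO_ERROR() stamps out a trivial trap handler: each expansion defines
 * do_<name>(), which just forwards to do_unhandled_exception() with the
 * given trap number, signal and message.  For example,
 * DO_ERROR(13, SIGILL, "illegal slot instruction", illegal_slot_inst, current)
 * defines do_illegal_slot_inst().  Note that the tsk argument is unused;
 * the expansion always passes current.
 */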
#define DO_ERROR(trapnr, signr, str, name, tsk) \
asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \
{ \
        do_unhandled_exception(trapnr, signr, str, __stringify(name), error_code, regs, current); \
}

static DEFINE_SPINLOCK(die_lock);

void die(const char *str, struct pt_regs *regs, long err)
{
        console_verbose();
        spin_lock_irq(&die_lock);
        printk("%s: %lx\n", str, (err & 0xffffff));
        show_regs(regs);
        spin_unlock_irq(&die_lock);
        do_exit(SIGSEGV);
}

static inline void die_if_kernel(const char *str, struct pt_regs *regs, long err)
{
        if (!user_mode(regs))
                die(str, regs, err);
}

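/*
 * Like die_if_kernel(), but first give the exception tables a chance:
 * if the faulting PC has a fixup entry (e.g. a __get_user() that
 * faulted), branch to the fixup stub instead of dying.
 */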
static void die_if_no_fixup(const char *str, struct pt_regs *regs, long err)
{
        if (!user_mode(regs)) {
                const struct exception_table_entry *fixup;
                fixup = search_exception_tables(regs->pc);
                if (fixup) {
                        regs->pc = fixup->fixup;
                        return;
                }
                die(str, regs, err);
        }
}

DO_ERROR(13, SIGILL,  "illegal slot instruction", illegal_slot_inst, current)
DO_ERROR(87, SIGSEGV, "address error (exec)", address_error_exec, current)

/* Implement misaligned load/store handling for kernel (and optionally for user
   mode too).  Limitation : only SHmedia mode code is handled - there is no
   handling at all for misaligned accesses occurring in SHcompact code yet. */

static int misaligned_fixup(struct pt_regs *regs);

asmlinkage void do_address_error_load(unsigned long error_code, struct pt_regs *regs)
{
        if (misaligned_fixup(regs) < 0) {
                do_unhandled_exception(7, SIGSEGV, "address error(load)",
                                "do_address_error_load",
                                error_code, regs, current);
        }
}

asmlinkage void do_address_error_store(unsigned long error_code, struct pt_regs *regs)
{
        if (misaligned_fixup(regs) < 0) {
                do_unhandled_exception(8, SIGSEGV, "address error(store)",
                                "do_address_error_store",
                                error_code, regs, current);
        }
}

#if defined(CONFIG_SH64_ID2815_WORKAROUND)

#define OPCODE_INVALID      0
#define OPCODE_USER_VALID   1
#define OPCODE_PRIV_VALID   2

/* getcon/putcon - requires checking which control register is referenced. */
#define OPCODE_CTRL_REG     3

/* Table of valid opcodes for SHmedia mode.
   Form a 10-bit value by concatenating the major/minor opcodes i.e.
   opcode[31:26,19:16].  The 6 MSBs of this value index into the following
   array.  The 4 LSBs select the bit-pair in the entry (bits 1:0 correspond to
   LSBs==4'b0000 etc). */
static unsigned long shmedia_opcode_table[64] = {
        0x55554044,0x54445055,0x15141514,0x14541414,0x00000000,0x10001000,0x01110055,0x04050015,
        0x00000444,0xc0000000,0x44545515,0x40405555,0x55550015,0x10005555,0x55555505,0x04050000,
        0x00000555,0x00000404,0x00040445,0x15151414,0x00000000,0x00000000,0x00000000,0x00000000,
        0x00000055,0x40404444,0x00000404,0xc0009495,0x00000000,0x00000000,0x00000000,0x00000000,
        0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
        0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
        0x80005050,0x04005055,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
        0x81055554,0x00000404,0x55555555,0x55555555,0x00000000,0x00000000,0x00000000,0x00000000
};
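
/*
 * Worked example of the lookup above: GETCON has major opcode 0x9 and
 * minor opcode 0xf, so its bit-pair is (shmedia_opcode_table[0x9] >>
 * (0xf << 1)) & 0x3 = (0xc0000000 >> 30) & 0x3 = OPCODE_CTRL_REG, which
 * matches the combined value 0x9f tested in do_reserved_inst() below.
 */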

void do_reserved_inst(unsigned long error_code, struct pt_regs *regs)
{
        /* Workaround SH5-101 cut2 silicon defect #2815 :
           in some situations, inter-mode branches from SHcompact -> SHmedia
           which should take ITLBMISS or EXECPROT exceptions at the target
           falsely take RESINST at the target instead. */

        unsigned long opcode = 0x6ff4fff0; /* guaranteed reserved opcode */
        unsigned long pc, aligned_pc;
        int get_user_error;
        int trapnr = 12;
        int signr = SIGILL;
        char *exception_name = "reserved_instruction";

        pc = regs->pc;
        if ((pc & 3) == 1) {
                /* SHmedia : check for defect.  This requires executable vmas
                   to be readable too. */
                aligned_pc = pc & ~3;
                if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
                        get_user_error = -EFAULT;
                } else {
                        get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
                }
                if (get_user_error >= 0) {
                        unsigned long index, shift;
                        unsigned long major, minor, combined;
                        unsigned long reserved_field;
                        reserved_field = opcode & 0xf; /* These bits are currently reserved as zero in all valid opcodes */
                        major = (opcode >> 26) & 0x3f;
                        minor = (opcode >> 16) & 0xf;
                        combined = (major << 4) | minor;
                        index = major;
                        shift = minor << 1;
                        if (reserved_field == 0) {
                                int opcode_state = (shmedia_opcode_table[index] >> shift) & 0x3;
                                switch (opcode_state) {
                                case OPCODE_INVALID:
                                        /* Trap. */
                                        break;
                                case OPCODE_USER_VALID:
                                        /* Restart the instruction : the branch to the instruction will now be from an RTE
                                           not from SHcompact so the silicon defect won't be triggered. */
                                        return;
                                case OPCODE_PRIV_VALID:
                                        if (!user_mode(regs)) {
                                                /* Should only ever get here if a module has
                                                   SHcompact code inside it.  If so, the same fix up is needed. */
                                                return; /* same reason */
                                        }
                                        /* Otherwise, user mode trying to execute a privileged instruction -
                                           fall through to trap. */
                                        break;
                                case OPCODE_CTRL_REG:
                                        /* If in privileged mode, return as above. */
                                        if (!user_mode(regs))
                                                return;
                                        /* In user mode ... */
                                        if (combined == 0x9f) { /* GETCON */
                                                unsigned long regno = (opcode >> 20) & 0x3f;
                                                if (regno >= 62) {
                                                        return;
                                                }
                                                /* Otherwise, reserved or privileged control register, => trap */
                                        } else if (combined == 0x1bf) { /* PUTCON */
                                                unsigned long regno = (opcode >> 4) & 0x3f;
                                                if (regno >= 62) {
                                                        return;
                                                }
                                                /* Otherwise, reserved or privileged control register, => trap */
                                        } else {
                                                /* Trap */
                                        }
                                        break;
                                default:
                                        /* Fall through to trap. */
                                        break;
                                }
                        }
                        /* fall through to normal resinst processing */
                } else {
                        /* Error trying to read opcode.  This typically means a
                           real fault, not a RESINST any more.  So change the
                           codes. */
                        trapnr = 87;
                        exception_name = "address error (exec)";
                        signr = SIGSEGV;
                }
        }

        do_unhandled_exception(trapnr, signr, exception_name, "do_reserved_inst", error_code, regs, current);
}

#else /* CONFIG_SH64_ID2815_WORKAROUND */

/* If the workaround isn't needed, this is just a straightforward reserved
   instruction */
DO_ERROR(12, SIGILL,  "reserved instruction", reserved_inst, current)

#endif /* CONFIG_SH64_ID2815_WORKAROUND */

/* Called with interrupts disabled */
asmlinkage void do_exception_error(unsigned long ex, struct pt_regs *regs)
{
        show_excp_regs(__FUNCTION__, -1, -1, regs);
        die_if_kernel("exception", regs, ex);
}

int do_unknown_trapa(unsigned long scId, struct pt_regs *regs)
{
        /* Syscall debug */
        printk("System call ID error: [0x1#args:8 #syscall:16  0x%lx]\n", scId);

        die_if_kernel("unknown trapa", regs, scId);

        return -ENOSYS;
}

void show_stack(struct task_struct *tsk, unsigned long *sp)
{
#ifdef CONFIG_KALLSYMS
        extern void sh64_unwind(struct pt_regs *regs);
        struct pt_regs *regs;

        regs = tsk ? tsk->thread.kregs : NULL;

        sh64_unwind(regs);
#else
        printk(KERN_ERR "Can't backtrace on sh64 without CONFIG_KALLSYMS\n");
#endif
}

void show_task(unsigned long *sp)
{
        show_stack(NULL, sp);
}

void dump_stack(void)
{
        show_task(NULL);
}
/* Needed by any user of WARN_ON in view of the defn in include/asm-sh64/bug.h */
EXPORT_SYMBOL(dump_stack);

static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
                unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk)
{
        show_excp_regs(fn_name, trapnr, signr, regs);
        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = trapnr;

        if (user_mode(regs))
                force_sig(signr, tsk);

        die_if_no_fixup(str, regs, error_code);
}

static int read_opcode(unsigned long long pc, unsigned long *result_opcode, int from_user_mode)
{
        int get_user_error;
        unsigned long aligned_pc;
        unsigned long opcode;

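        /*
         * The low bits of the PC encode the ISA mode: SHmedia text sits at
         * word-aligned addresses with bit 0 set, SHcompact at even addresses
         * with bit 0 clear - hence the mode tests below.
         */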
        if ((pc & 3) == 1) {
                /* SHmedia */
                aligned_pc = pc & ~3;
                if (from_user_mode) {
                        if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
                                get_user_error = -EFAULT;
                        } else {
                                get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
                                *result_opcode = opcode;
                        }
                        return get_user_error;
                } else {
                        /* If the fault was in the kernel, we can either read
                         * this directly, or if not, we fault.
                         */
                        *result_opcode = *(unsigned long *)aligned_pc;
                        return 0;
                }
        } else if ((pc & 1) == 0) {
                /* SHcompact */
                /* TODO : provide handling for this.  We don't really support
                   user-mode SHcompact yet, and for a kernel fault, this would
                   have to come from a module built for SHcompact.  */
                return -EFAULT;
        } else {
                /* misaligned */
                return -EFAULT;
        }
}

static int address_is_sign_extended(__u64 a)
{
        __u64 b;
#if (NEFF == 32)
        b = (__u64)(__s64)(__s32)(a & 0xffffffffUL);
        return (b == a) ? 1 : 0;
#else
#error "Sign extend check only works for NEFF==32"
#endif
}

static int generate_and_check_address(struct pt_regs *regs,
                                      __u32 opcode,
                                      int displacement_not_indexed,
                                      int width_shift,
                                      __u64 *address)
{
        /* return -1 for fault, 0 for OK */

        __u64 base_address, addr;
        int basereg;

        basereg = (opcode >> 20) & 0x3f;
        base_address = regs->regs[basereg];
        if (displacement_not_indexed) {
                __s64 displacement;
                displacement = (opcode >> 10) & 0x3ff;
                displacement = ((displacement << 54) >> 54); /* sign extend */
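                /*
                 * The shift pair above sign-extends the 10-bit displacement
                 * field in 64 bits: e.g. a field of 0x3ff becomes -1.  The
                 * displacement is then scaled by the access width below.
                 */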
                addr = (__u64)((__s64)base_address + (displacement << width_shift));
        } else {
                __u64 offset;
                int offsetreg;
                offsetreg = (opcode >> 10) & 0x3f;
                offset = regs->regs[offsetreg];
                addr = base_address + offset;
        }

        /* Check sign extended */
        if (!address_is_sign_extended(addr)) {
                return -1;
        }

#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
        /* Check accessible.  For misaligned access in the kernel, assume the
           address is always accessible (and if not, just fault when the
           load/store gets done.) */
        if (user_mode(regs)) {
                if (addr >= TASK_SIZE) {
                        return -1;
                }
                /* Do access_ok check later - it depends on whether it's a load or a store. */
        }
#endif

        *address = addr;
        return 0;
}

/* Default value as for sh */
#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
static int user_mode_unaligned_fixup_count = 10;
static int user_mode_unaligned_fixup_enable = 1;
#endif

static int kernel_mode_unaligned_fixup_count = 32;

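/*
 * Assemble/disassemble a misaligned 16-bit word one byte at a time:
 * byte accesses can never take an alignment trap.
 */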
static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result)
{
        unsigned short x;
        unsigned char *p, *q;
        p = (unsigned char *)(int)address;
        q = (unsigned char *)&x;
        q[0] = p[0];
        q[1] = p[1];

        if (do_sign_extend) {
                *result = (__u64)(__s64) *(short *)&x;
        } else {
                *result = (__u64)x;
        }
}

static void misaligned_kernel_word_store(__u64 address, __u64 value)
{
        unsigned short x;
        unsigned char *p, *q;
        p = (unsigned char *)(int)address;
        q = (unsigned char *)&x;

        x = (__u16)value;
        p[0] = q[0];
        p[1] = q[1];
}

static int misaligned_load(struct pt_regs *regs,
                           __u32 opcode,
                           int displacement_not_indexed,
                           int width_shift,
                           int do_sign_extend)
{
        /* Return -1 for a fault, 0 for OK */
        int error;
        int destreg;
        __u64 address;

        error = generate_and_check_address(regs, opcode,
                        displacement_not_indexed, width_shift, &address);
        if (error < 0) {
                return error;
        }

        destreg = (opcode >> 4) & 0x3f;
#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
        if (user_mode(regs)) {
                __u64 buffer;

                if (!access_ok(VERIFY_READ, (unsigned long)address, 1UL << width_shift)) {
                        return -1;
                }

                if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
                        return -1; /* fault */
                }
                switch (width_shift) {
                case 1:
                        if (do_sign_extend) {
                                regs->regs[destreg] = (__u64)(__s64) *(__s16 *)&buffer;
                        } else {
                                regs->regs[destreg] = (__u64) *(__u16 *)&buffer;
                        }
                        break;
                case 2:
                        regs->regs[destreg] = (__u64)(__s64) *(__s32 *)&buffer;
                        break;
                case 3:
                        regs->regs[destreg] = buffer;
                        break;
                default:
                        printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
                                width_shift, (unsigned long)regs->pc);
                        break;
                }
        } else
#endif
        {
                /* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
                __u64 lo, hi;

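                /*
                 * ldlo/ldhi each fetch the part of the misaligned datum that
                 * lies within their respective aligned word; OR-ing the two
                 * halves reassembles the value without a misaligned access.
                 */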
                switch (width_shift) {
                case 1:
                        misaligned_kernel_word_load(address, do_sign_extend, &regs->regs[destreg]);
                        break;
                case 2:
                        asm ("ldlo.l %1, 0, %0" : "=r" (lo) : "r" (address));
                        asm ("ldhi.l %1, 3, %0" : "=r" (hi) : "r" (address));
                        regs->regs[destreg] = lo | hi;
                        break;
                case 3:
                        asm ("ldlo.q %1, 0, %0" : "=r" (lo) : "r" (address));
                        asm ("ldhi.q %1, 7, %0" : "=r" (hi) : "r" (address));
                        regs->regs[destreg] = lo | hi;
                        break;
                default:
                        printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
                                width_shift, (unsigned long)regs->pc);
                        break;
                }
        }

        return 0;
}

static int misaligned_store(struct pt_regs *regs,
                            __u32 opcode,
                            int displacement_not_indexed,
                            int width_shift)
{
        /* Return -1 for a fault, 0 for OK */
        int error;
        int srcreg;
        __u64 address;

        error = generate_and_check_address(regs, opcode,
                        displacement_not_indexed, width_shift, &address);
        if (error < 0) {
                return error;
        }

        srcreg = (opcode >> 4) & 0x3f;
#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
        if (user_mode(regs)) {
                __u64 buffer;

                if (!access_ok(VERIFY_WRITE, (unsigned long)address, 1UL << width_shift)) {
                        return -1;
                }

                switch (width_shift) {
                case 1:
                        *(__u16 *)&buffer = (__u16)regs->regs[srcreg];
                        break;
                case 2:
                        *(__u32 *)&buffer = (__u32)regs->regs[srcreg];
                        break;
                case 3:
                        buffer = regs->regs[srcreg];
                        break;
                default:
                        printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
                                width_shift, (unsigned long)regs->pc);
                        break;
                }

                if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
                        return -1; /* fault */
                }
        } else
#endif
        {
                /* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
                __u64 val = regs->regs[srcreg];

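                /*
                 * stlo/sthi mirror ldlo/ldhi: each writes only the bytes of
                 * the misaligned datum that fall within its aligned word, so
                 * the pair completes the store without a misaligned access.
                 */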
                switch (width_shift) {
                case 1:
                        misaligned_kernel_word_store(address, val);
                        break;
                case 2:
                        asm ("stlo.l %1, 0, %0" : : "r" (val), "r" (address));
                        asm ("sthi.l %1, 3, %0" : : "r" (val), "r" (address));
                        break;
                case 3:
                        asm ("stlo.q %1, 0, %0" : : "r" (val), "r" (address));
                        asm ("sthi.q %1, 7, %0" : : "r" (val), "r" (address));
                        break;
                default:
                        printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
                                width_shift, (unsigned long)regs->pc);
                        break;
                }
        }

        return 0;
}

#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
/* Never need to fix up misaligned FPU accesses within the kernel since that's a real
   error. */
static int misaligned_fpu_load(struct pt_regs *regs,
                           __u32 opcode,
                           int displacement_not_indexed,
                           int width_shift,
                           int do_paired_load)
{
        /* Return -1 for a fault, 0 for OK */
        int error;
        int destreg;
        __u64 address;

        error = generate_and_check_address(regs, opcode,
                        displacement_not_indexed, width_shift, &address);
        if (error < 0) {
                return error;
        }

        destreg = (opcode >> 4) & 0x3f;
        if (user_mode(regs)) {
                __u64 buffer;
                __u32 buflo, bufhi;

                if (!access_ok(VERIFY_READ, (unsigned long)address, 1UL << width_shift)) {
                        return -1;
                }

                if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
                        return -1; /* fault */
                }
                /* 'current' may be the current owner of the FPU state, so
                   context switch the registers into memory so they can be
                   indexed by register number. */
                if (last_task_used_math == current) {
                        grab_fpu();
                        fpsave(&current->thread.fpu.hard);
                        release_fpu();
                        last_task_used_math = NULL;
                        regs->sr |= SR_FD;
                }

                buflo = *(__u32 *)&buffer;
                bufhi = *(1 + (__u32 *)&buffer);

                switch (width_shift) {
                case 2:
                        current->thread.fpu.hard.fp_regs[destreg] = buflo;
                        break;
                case 3:
                        if (do_paired_load) {
                                current->thread.fpu.hard.fp_regs[destreg] = buflo;
                                current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
                        } else {
#if defined(CONFIG_LITTLE_ENDIAN)
                                current->thread.fpu.hard.fp_regs[destreg] = bufhi;
                                current->thread.fpu.hard.fp_regs[destreg+1] = buflo;
#else
                                current->thread.fpu.hard.fp_regs[destreg] = buflo;
                                current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
#endif
                        }
                        break;
                default:
                        printk("Unexpected width_shift %d in misaligned_fpu_load, PC=%08lx\n",
                                width_shift, (unsigned long)regs->pc);
                        break;
                }
                return 0;
        } else {
                die("Misaligned FPU load inside kernel", regs, 0);
                return -1;
        }
}


static int misaligned_fpu_store(struct pt_regs *regs,
                           __u32 opcode,
                           int displacement_not_indexed,
                           int width_shift,
                           int do_paired_load)
{
        /* Return -1 for a fault, 0 for OK */
        int error;
        int srcreg;
        __u64 address;

        error = generate_and_check_address(regs, opcode,
                        displacement_not_indexed, width_shift, &address);
        if (error < 0) {
                return error;
        }

        srcreg = (opcode >> 4) & 0x3f;
        if (user_mode(regs)) {
                __u64 buffer;
                /* Initialise these to NaNs. */
                __u32 buflo = 0xffffffffUL, bufhi = 0xffffffffUL;

                if (!access_ok(VERIFY_WRITE, (unsigned long)address, 1UL << width_shift)) {
                        return -1;
                }

                /* 'current' may be the current owner of the FPU state, so
                   context switch the registers into memory so they can be
                   indexed by register number. */
                if (last_task_used_math == current) {
                        grab_fpu();
                        fpsave(&current->thread.fpu.hard);
                        release_fpu();
                        last_task_used_math = NULL;
                        regs->sr |= SR_FD;
                }

                switch (width_shift) {
                case 2:
                        buflo = current->thread.fpu.hard.fp_regs[srcreg];
                        break;
                case 3:
                        if (do_paired_load) {
                                buflo = current->thread.fpu.hard.fp_regs[srcreg];
                                bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
                        } else {
#if defined(CONFIG_LITTLE_ENDIAN)
                                bufhi = current->thread.fpu.hard.fp_regs[srcreg];
                                buflo = current->thread.fpu.hard.fp_regs[srcreg+1];
#else
                                buflo = current->thread.fpu.hard.fp_regs[srcreg];
                                bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
#endif
                        }
                        break;
                default:
                        printk("Unexpected width_shift %d in misaligned_fpu_store, PC=%08lx\n",
                                width_shift, (unsigned long)regs->pc);
                        break;
                }

                *(__u32 *)&buffer = buflo;
                *(1 + (__u32 *)&buffer) = bufhi;
                if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
                        return -1; /* fault */
                }
                return 0;
        } else {
                die("Misaligned FPU store inside kernel", regs, 0);
                return -1;
        }
}
#endif

static int misaligned_fixup(struct pt_regs *regs)
{
        unsigned long opcode;
        int error;
        int major, minor;

#if !defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
        /* Never fixup user mode misaligned accesses without this option enabled. */
        return -1;
#else
        if (!user_mode_unaligned_fixup_enable)
                return -1;
#endif

        error = read_opcode(regs->pc, &opcode, user_mode(regs));
        if (error < 0) {
                return error;
        }
        major = (opcode >> 26) & 0x3f;
        minor = (opcode >> 16) & 0xf;

#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
        if (user_mode(regs) && (user_mode_unaligned_fixup_count > 0)) {
                --user_mode_unaligned_fixup_count;
                /* Only do 'count' worth of these reports, to remove a potential DoS against syslog */
                printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
                       current->comm, current->pid, (__u32)regs->pc, opcode);
        } else
#endif
        if (!user_mode(regs) && (kernel_mode_unaligned_fixup_count > 0)) {
                --kernel_mode_unaligned_fixup_count;
                if (in_interrupt()) {
                        printk("Fixing up unaligned kernelspace access in interrupt pc=0x%08x ins=0x%08lx\n",
                               (__u32)regs->pc, opcode);
                } else {
                        printk("Fixing up unaligned kernelspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
                               current->comm, current->pid, (__u32)regs->pc, opcode);
                }
        }

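        /*
         * The case labels below are the major opcode field, opcode[31:26],
         * written as the instruction's top encoding byte shifted right by
         * two, e.g. LD.L's 0x88 becomes (0x88>>2) == 0x22.
         */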
        switch (major) {
        case (0x84>>2): /* LD.W */
                error = misaligned_load(regs, opcode, 1, 1, 1);
                break;
        case (0xb0>>2): /* LD.UW */
                error = misaligned_load(regs, opcode, 1, 1, 0);
                break;
        case (0x88>>2): /* LD.L */
                error = misaligned_load(regs, opcode, 1, 2, 1);
                break;
        case (0x8c>>2): /* LD.Q */
                error = misaligned_load(regs, opcode, 1, 3, 0);
                break;

        case (0xa4>>2): /* ST.W */
                error = misaligned_store(regs, opcode, 1, 1);
                break;
        case (0xa8>>2): /* ST.L */
                error = misaligned_store(regs, opcode, 1, 2);
                break;
        case (0xac>>2): /* ST.Q */
                error = misaligned_store(regs, opcode, 1, 3);
                break;

        case (0x40>>2): /* indexed loads */
                switch (minor) {
                case 0x1: /* LDX.W */
                        error = misaligned_load(regs, opcode, 0, 1, 1);
                        break;
                case 0x5: /* LDX.UW */
                        error = misaligned_load(regs, opcode, 0, 1, 0);
                        break;
                case 0x2: /* LDX.L */
                        error = misaligned_load(regs, opcode, 0, 2, 1);
                        break;
                case 0x3: /* LDX.Q */
                        error = misaligned_load(regs, opcode, 0, 3, 0);
                        break;
                default:
                        error = -1;
                        break;
                }
                break;

        case (0x60>>2): /* indexed stores */
                switch (minor) {
                case 0x1: /* STX.W */
                        error = misaligned_store(regs, opcode, 0, 1);
                        break;
                case 0x2: /* STX.L */
                        error = misaligned_store(regs, opcode, 0, 2);
                        break;
                case 0x3: /* STX.Q */
                        error = misaligned_store(regs, opcode, 0, 3);
                        break;
                default:
                        error = -1;
                        break;
                }
                break;

#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
        case (0x94>>2): /* FLD.S */
                error = misaligned_fpu_load(regs, opcode, 1, 2, 0);
                break;
        case (0x98>>2): /* FLD.P */
                error = misaligned_fpu_load(regs, opcode, 1, 3, 1);
                break;
        case (0x9c>>2): /* FLD.D */
                error = misaligned_fpu_load(regs, opcode, 1, 3, 0);
                break;
        case (0x1c>>2): /* floating indexed loads */
                switch (minor) {
                case 0x8: /* FLDX.S */
                        error = misaligned_fpu_load(regs, opcode, 0, 2, 0);
                        break;
                case 0xd: /* FLDX.P */
                        error = misaligned_fpu_load(regs, opcode, 0, 3, 1);
                        break;
                case 0x9: /* FLDX.D */
                        error = misaligned_fpu_load(regs, opcode, 0, 3, 0);
                        break;
                default:
                        error = -1;
                        break;
                }
                break;
        case (0xb4>>2): /* FST.S */
                error = misaligned_fpu_store(regs, opcode, 1, 2, 0);
                break;
        case (0xb8>>2): /* FST.P */
                error = misaligned_fpu_store(regs, opcode, 1, 3, 1);
                break;
        case (0xbc>>2): /* FST.D */
                error = misaligned_fpu_store(regs, opcode, 1, 3, 0);
                break;
        case (0x3c>>2): /* floating indexed stores */
                switch (minor) {
                case 0x8: /* FSTX.S */
                        error = misaligned_fpu_store(regs, opcode, 0, 2, 0);
                        break;
                case 0xd: /* FSTX.P */
                        error = misaligned_fpu_store(regs, opcode, 0, 3, 1);
                        break;
                case 0x9: /* FSTX.D */
                        error = misaligned_fpu_store(regs, opcode, 0, 3, 0);
                        break;
                default:
                        error = -1;
                        break;
                }
                break;
#endif

        default:
                /* Fault */
                error = -1;
                break;
        }

        if (error < 0) {
                return error;
        } else {
                regs->pc += 4; /* Skip the instruction that's just been emulated */
                return 0;
        }
}

static ctl_table unaligned_table[] = {
        {
                .ctl_name       = CTL_UNNUMBERED,
                .procname       = "kernel_reports",
                .data           = &kernel_mode_unaligned_fixup_count,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec
        },
#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
        {
                .ctl_name       = CTL_UNNUMBERED,
                .procname       = "user_reports",
                .data           = &user_mode_unaligned_fixup_count,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec
        },
        {
                .ctl_name       = CTL_UNNUMBERED,
                .procname       = "user_enable",
                .data           = &user_mode_unaligned_fixup_enable,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec
        },
#endif
        {}
};

static ctl_table unaligned_root[] = {
        {
                .ctl_name       = CTL_UNNUMBERED,
                .procname       = "unaligned_fixup",
                .mode           = 0555,
                .child          = unaligned_table
        },
        {}
};

static ctl_table sh64_root[] = {
        {
                .ctl_name       = CTL_UNNUMBERED,
                .procname       = "sh64",
                .mode           = 0555,
                .child          = unaligned_root
        },
        {}
};

static struct ctl_table_header *sysctl_header;
static int __init init_sysctl(void)
{
        sysctl_header = register_sysctl_table(sh64_root);
        return 0;
}

__initcall(init_sysctl);
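
/*
 * The tables above surface as /proc/sys/sh64/unaligned_fixup/kernel_reports
 * (plus user_reports and user_enable when CONFIG_SH64_USER_MISALIGNED_FIXUP
 * is set), each a plain integer handled by proc_dointvec.
 */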

asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
{
        u64 peek_real_address_q(u64 addr);
        u64 poke_real_address_q(u64 addr, u64 val);
        unsigned long long DM_EXP_CAUSE_PHY = 0x0c100010;
        unsigned long long exp_cause;
        /* It's not worth ioremapping the debug module registers for the amount
           of access we make to them - just go direct to their physical
           addresses. */
        exp_cause = peek_real_address_q(DM_EXP_CAUSE_PHY);
        if (exp_cause & ~4) {
                printk("DM.EXP_CAUSE had unexpected bits set (=%08lx)\n",
                        (unsigned long)(exp_cause & 0xffffffff));
        }
        show_state();
        /* Clear all DEBUGINT causes */
        poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0);
}