/*
 *  linux/arch/m32r/kernel/traps.c
 *
 *  Copyright (C) 2001, 2002  Hirokazu Takata, Hiroyuki Kondo,
 *                            Hitoshi Yamamoto
 */

/*
 * 'traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S'.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kallsyms.h>
#include <linux/stddef.h>
#include <linux/ptrace.h>
#include <linux/mm.h>
#include <linux/module.h>

#include <asm/page.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/smp.h>

asmlinkage void alignment_check(void);
asmlinkage void ei_handler(void);
asmlinkage void rie_handler(void);
asmlinkage void debug_trap(void);
asmlinkage void cache_flushing_handler(void);
asmlinkage void ill_trap(void);

#ifdef CONFIG_SMP
extern void smp_reschedule_interrupt(void);
extern void smp_invalidate_interrupt(void);
extern void smp_call_function_interrupt(void);
extern void smp_ipi_timer_interrupt(void);
extern void smp_flush_cache_all_interrupt(void);

/*
 * for Boot AP function
 */
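/*
 * The stub below appears to provide the EIT vector area of a secondary
 * (application) processor: _AP_RE marks its reset entry, the 32-word fill
 * pads out to the external-interrupt entry at offset 0x80, and _AP_EI
 * branches a woken AP into startup_AP.
 */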
asm (
        "       .section .eit_vector4,\"ax\"    \n"
        "       .global _AP_RE                  \n"
        "       .global startup_AP              \n"
        "_AP_RE:                                \n"
        "       .fill 32, 4, 0                  \n"
        "_AP_EI: bra    startup_AP              \n"
        "       .previous                       \n"
);
#endif  /* CONFIG_SMP */

extern unsigned long eit_vector[];
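
/*
 * BRA_INSN() builds an M32R "bra" instruction word: 0xff in the most
 * significant byte, followed by a 24-bit PC-relative displacement counted
 * in 4-byte words from the vector entry at eit_vector + entry*4.  Storing
 * the result into eit_vector[entry] in effect installs a branch to func.
 */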
#define BRA_INSN(func, entry)   \
        ((unsigned long)func - (unsigned long)eit_vector - entry*4)/4 \
        + 0xff000000UL

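/*
 * The table is indexed in 32-bit words, so eit_vector[n] corresponds to
 * byte offset 4*n from the EIT base.  Entries 16-31 presumably correspond
 * to the TRAP#0-TRAP#15 software trap vectors (debug trap on TRAP#1,
 * system calls on TRAP#2), and entry 32 to the external interrupt (EI)
 * entry.
 */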
void set_eit_vector_entries(void)
{
        extern void default_eit_handler(void);
        extern void system_call(void);
        extern void pie_handler(void);
        extern void ace_handler(void);
        extern void tme_handler(void);
        extern void _flush_cache_copyback_all(void);

        eit_vector[0] = 0xd0c00001; /* seth r0, 0x01 */
        eit_vector[1] = BRA_INSN(default_eit_handler, 1);
        eit_vector[4] = 0xd0c00010; /* seth r0, 0x10 */
        eit_vector[5] = BRA_INSN(default_eit_handler, 5);
        eit_vector[8] = BRA_INSN(rie_handler, 8);
        eit_vector[12] = BRA_INSN(alignment_check, 12);
        eit_vector[16] = BRA_INSN(ill_trap, 16);
        eit_vector[17] = BRA_INSN(debug_trap, 17);
        eit_vector[18] = BRA_INSN(system_call, 18);
        eit_vector[19] = BRA_INSN(ill_trap, 19);
        eit_vector[20] = BRA_INSN(ill_trap, 20);
        eit_vector[21] = BRA_INSN(ill_trap, 21);
        eit_vector[22] = BRA_INSN(ill_trap, 22);
        eit_vector[23] = BRA_INSN(ill_trap, 23);
        eit_vector[24] = BRA_INSN(ill_trap, 24);
        eit_vector[25] = BRA_INSN(ill_trap, 25);
        eit_vector[26] = BRA_INSN(ill_trap, 26);
        eit_vector[27] = BRA_INSN(ill_trap, 27);
        eit_vector[28] = BRA_INSN(cache_flushing_handler, 28);
        eit_vector[29] = BRA_INSN(ill_trap, 29);
        eit_vector[30] = BRA_INSN(ill_trap, 30);
        eit_vector[31] = BRA_INSN(ill_trap, 31);
        eit_vector[32] = BRA_INSN(ei_handler, 32);
        eit_vector[64] = BRA_INSN(pie_handler, 64);
#ifdef CONFIG_MMU
        eit_vector[68] = BRA_INSN(ace_handler, 68);
        eit_vector[72] = BRA_INSN(tme_handler, 72);
#endif /* CONFIG_MMU */
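        /*
         * Unlike the entries above, the IPI slots below hold plain handler
         * addresses rather than bra instructions; they are presumably read
         * as function pointers by the low-level EI dispatch code.
         */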
#ifdef CONFIG_SMP
        eit_vector[184] = (unsigned long)smp_reschedule_interrupt;
        eit_vector[185] = (unsigned long)smp_invalidate_interrupt;
        eit_vector[186] = (unsigned long)smp_call_function_interrupt;
        eit_vector[187] = (unsigned long)smp_ipi_timer_interrupt;
        eit_vector[188] = (unsigned long)smp_flush_cache_all_interrupt;
        eit_vector[189] = 0;
        eit_vector[190] = 0;
        eit_vector[191] = 0;
#endif
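        /*
         * Write the modified vector table back from the data cache so that
         * instruction fetches from the EIT area see the new entries.
         */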
        _flush_cache_copyback_all();
}

void __init trap_init(void)
{
        set_eit_vector_entries();

        /*
         * Should be a barrier for any external CPU state.
         */
        cpu_init();
}

int kstack_depth_to_print = 24;

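/*
 * Conservative backtrace: scan the raw stack and print every word that
 * happens to be a kernel text address.  No frame-pointer unwinding is
 * attempted, so the output may include stale return addresses.
 */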
void show_trace(struct task_struct *task, unsigned long *stack)
{
        unsigned long addr;

        if (!stack)
                stack = (unsigned long *)&stack;

        printk("Call Trace: ");
        while (!kstack_end(stack)) {
                addr = *stack++;
                if (__kernel_text_address(addr)) {
                        printk("[<%08lx>] ", addr);
                        print_symbol("%s\n", addr);
                }
        }
        printk("\n");
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
        unsigned long *stack;
        int i;

        /*
         * debugging aid: "show_stack(NULL);" prints the
         * back trace for this cpu.
         */
        if (sp == NULL) {
                if (task)
                        sp = (unsigned long *)task->thread.sp;
                else
                        sp = (unsigned long *)&sp;
        }

        stack = sp;
        for (i = 0; i < kstack_depth_to_print; i++) {
                if (kstack_end(stack))
                        break;
                if (i && ((i % 4) == 0))
                        printk("\n       ");
                printk("%08lx ", *stack++);
        }
        printk("\n");
        show_trace(task, sp);
}

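/*
 * The address of the local variable "stack" approximates the current
 * stack pointer; the trace starts from there.
 */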
void dump_stack(void)
{
        unsigned long stack;

        show_trace(current, &stack);
}

EXPORT_SYMBOL(dump_stack);

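/*
 * Dump the registers, then, for kernel-mode faults, the raw stack and the
 * code bytes around the faulting PC.
 */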
static void show_registers(struct pt_regs *regs)
{
        int i = 0;
        int in_kernel = 1;
        unsigned long sp;

        printk("CPU:    %d\n", smp_processor_id());
        show_regs(regs);

        sp = (unsigned long)(1 + regs);
        if (user_mode(regs)) {
                in_kernel = 0;
                sp = regs->spu;
                printk("SPU: %08lx\n", sp);
        } else {
                printk("SPI: %08lx\n", sp);
        }
        printk("Process %s (pid: %d, process nr: %d, stackpage=%08lx)",
                current->comm, current->pid, 0xffff & i, 4096+(unsigned long)current);

        /*
         * When in-kernel, we also print out the stack and code at the
         * time of the fault..
         */
        if (in_kernel) {
                printk("\nStack: ");
                show_stack(current, (unsigned long *)sp);

                printk("\nCode: ");
                if (regs->bpc < PAGE_OFFSET)
                        goto bad;

                for (i = 0; i < 20; i++) {
                        unsigned char c;
                        if (__get_user(c, &((unsigned char *)regs->bpc)[i])) {
bad:
                                printk(" Bad PC value.");
                                break;
                        }
                        printk("%02x ", c);
                }
        }
        printk("\n");
}

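/* Serialize oops output when more than one CPU dies at once. */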
DEFINE_SPINLOCK(die_lock);

void die(const char *str, struct pt_regs *regs, long err)
{
        console_verbose();
        spin_lock_irq(&die_lock);
        bust_spinlocks(1);
        printk("%s: %04lx\n", str, err & 0xffff);
        show_registers(regs);
        bust_spinlocks(0);
        spin_unlock_irq(&die_lock);
        do_exit(SIGSEGV);
}

static __inline__ void die_if_kernel(const char *str,
        struct pt_regs *regs, long err)
{
        if (!user_mode(regs))
                die(str, regs, err);
}

static __inline__ void do_trap(int trapnr, int signr, const char *str,
        struct pt_regs *regs, long error_code, siginfo_t *info)
{
        if (user_mode(regs)) {
                /* trap_signal */
                struct task_struct *tsk = current;
                tsk->thread.error_code = error_code;
                tsk->thread.trap_no = trapnr;
                if (info)
                        force_sig_info(signr, info, tsk);
                else
                        force_sig(signr, tsk);
                return;
        } else {
                /* kernel_trap */
                if (!fixup_exception(regs))
                        die(str, regs, error_code);
                return;
        }
}

#define DO_ERROR(trapnr, signr, str, name) \
asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
{ \
        do_trap(trapnr, signr, 0, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
{ \
        siginfo_t info; \
        info.si_signo = signr; \
        info.si_errno = 0; \
        info.si_code = sicode; \
        info.si_addr = (void __user *)siaddr; \
        do_trap(trapnr, signr, str, regs, error_code, &info); \
}

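/*
 * For reference, the rie_handler line below expands to roughly:
 *
 *      asmlinkage void do_rie_handler(struct pt_regs *regs, long error_code)
 *      {
 *              siginfo_t info;
 *              info.si_signo = SIGILL;
 *              info.si_errno = 0;
 *              info.si_code = ILL_ILLOPC;
 *              info.si_addr = (void __user *)regs->bpc;
 *              do_trap(0x20, SIGILL, "reserved instruction ", regs,
 *                      error_code, &info);
 *      }
 */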
DO_ERROR( 1, SIGTRAP, "debug trap", debug_trap)
DO_ERROR_INFO(0x20, SIGILL,  "reserved instruction ", rie_handler, ILL_ILLOPC, regs->bpc)
DO_ERROR_INFO(0x100, SIGILL,  "privileged instruction", pie_handler, ILL_PRVOPC, regs->bpc)
DO_ERROR_INFO(-1, SIGILL,  "illegal trap", ill_trap, ILL_ILLTRP, regs->bpc)

extern int handle_unaligned_access(unsigned long, struct pt_regs *);

/* This code taken from arch/sh/kernel/traps.c */
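/*
 * Fetch the faulting instruction with copy_from_user() (switching the
 * address limit to match where the fault happened), let
 * handle_unaligned_access() emulate it, and kill the offender with
 * SIGSEGV if the user-space access or the emulation fails.
 */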
asmlinkage void do_alignment_check(struct pt_regs *regs, long error_code)
{
        mm_segment_t oldfs;
        unsigned long insn;
        int tmp;

        oldfs = get_fs();

        if (user_mode(regs)) {
                local_irq_enable();
                current->thread.error_code = error_code;
                current->thread.trap_no = 0x17;

                set_fs(USER_DS);
                if (copy_from_user(&insn, (void *)regs->bpc, 4)) {
                        set_fs(oldfs);
                        goto uspace_segv;
                }
                tmp = handle_unaligned_access(insn, regs);
                set_fs(oldfs);

                if (!tmp)
                        return;

        uspace_segv:
                printk(KERN_NOTICE "Killing process \"%s\" due to unaligned "
                        "access\n", current->comm);
                force_sig(SIGSEGV, current);
        } else {
                set_fs(KERNEL_DS);
                if (copy_from_user(&insn, (void *)regs->bpc, 4)) {
                        set_fs(oldfs);
                        die("insn faulting in do_address_error", regs, 0);
                }
                handle_unaligned_access(insn, regs);
                set_fs(oldfs);
        }
}