ffe127f09f3e7d7d758284446e0d066cf674688c
[linux-2.6.git] / arch / sh / kernel / traps.c
1 /*
2  * 'traps.c' handles hardware traps and faults after we have saved some
3  * state in 'entry.S'.
4  *
5  *  SuperH version: Copyright (C) 1999 Niibe Yutaka
6  *                  Copyright (C) 2000 Philipp Rumpf
7  *                  Copyright (C) 2000 David Howells
8  *                  Copyright (C) 2002 - 2006 Paul Mundt
9  *
10  * This file is subject to the terms and conditions of the GNU General Public
11  * License.  See the file "COPYING" in the main directory of this archive
12  * for more details.
13  */
14 #include <linux/sched.h>
15 #include <linux/kernel.h>
16 #include <linux/string.h>
17 #include <linux/errno.h>
18 #include <linux/ptrace.h>
19 #include <linux/timer.h>
20 #include <linux/mm.h>
21 #include <linux/smp.h>
22 #include <linux/smp_lock.h>
23 #include <linux/init.h>
24 #include <linux/delay.h>
25 #include <linux/spinlock.h>
26 #include <linux/module.h>
27 #include <linux/kallsyms.h>
28
29 #include <asm/system.h>
30 #include <asm/uaccess.h>
31 #include <asm/io.h>
32 #include <asm/atomic.h>
33 #include <asm/processor.h>
34 #include <asm/sections.h>
35
#ifdef CONFIG_SH_KGDB
#include <asm/kgdb.h>
/*
 * Hand kernel-mode faults to the remote kgdb stub when a debug hook
 * has been registered; expands to nothing when KGDB is not built in.
 */
#define CHK_REMOTE_DEBUG(regs)                  \
{                                               \
        if (kgdb_debug_hook && !user_mode(regs))\
                (*kgdb_debug_hook)(regs);       \
}
#else
#define CHK_REMOTE_DEBUG(regs)
#endif
46
/*
 * Exception vector numbers for the reserved-instruction and
 * illegal-slot-instruction traps; SH-2 uses different vector
 * assignments from the later parts.
 */
#ifdef CONFIG_CPU_SH2
#define TRAP_RESERVED_INST      4
#define TRAP_ILLEGAL_SLOT_INST  6
#else
#define TRAP_RESERVED_INST      12
#define TRAP_ILLEGAL_SLOT_INST  13
#endif
54
55 static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
56 {
57         unsigned long p;
58         int i;
59
60         printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);
61
62         for (p = bottom & ~31; p < top; ) {
63                 printk("%04lx: ", p & 0xffff);
64
65                 for (i = 0; i < 8; i++, p += 4) {
66                         unsigned int val;
67
68                         if (p < bottom || p >= top)
69                                 printk("         ");
70                         else {
71                                 if (__get_user(val, (unsigned int __user *)p)) {
72                                         printk("\n");
73                                         return;
74                                 }
75                                 printk("%08x ", val);
76                         }
77                 }
78                 printk("\n");
79         }
80 }
81
/* Serializes oops output so concurrent die()s don't interleave. */
DEFINE_SPINLOCK(die_lock);

/*
 * die - report a fatal fault and terminate the current task.
 * @str: short description of the fault
 * @regs: register state at the time of the fault
 * @err: error/trap code (low 16 bits are printed)
 *
 * Prints the oops banner, modules, registers and (for kernel-mode or
 * in-interrupt faults) a raw stack dump, then exits with SIGSEGV.
 * Does not return.
 */
void die(const char * str, struct pt_regs * regs, long err)
{
	/* Counts oopses so repeated reports can be told apart. */
	static int die_counter;

	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);

	printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);

	CHK_REMOTE_DEBUG(regs);
	print_modules();
	show_regs(regs);

	printk("Process: %s (pid: %d, stack limit = %p)\n",
	       current->comm, current->pid, task_stack_page(current) + 1);

	/* r15 is the stack pointer on SH; dump up to the top of the
	   task's stack page. */
	if (!user_mode(regs) || in_interrupt())
		dump_mem("Stack: ", regs->regs[15], THREAD_SIZE +
			 (unsigned long)task_stack_page(current));

	bust_spinlocks(0);
	spin_unlock_irq(&die_lock);
	do_exit(SIGSEGV);
}
109
/*
 * Oops via die() only for faults taken in kernel mode; user-mode
 * faults are left for the caller to handle.
 */
static inline void die_if_kernel(const char *str, struct pt_regs *regs,
				 long err)
{
	if (user_mode(regs))
		return;

	die(str, regs, err);
}
116
117 static int handle_unaligned_notify_count = 10;
118
119 /*
120  * try and fix up kernelspace address errors
121  * - userspace errors just cause EFAULT to be returned, resulting in SEGV
122  * - kernel/userspace interfaces cause a jump to an appropriate handler
123  * - other kernel errors are bad
124  * - return 0 if fixed-up, -EFAULT if non-fatal (to the kernel) fault
125  */
126 static int die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
127 {
128         if (!user_mode(regs)) {
129                 const struct exception_table_entry *fixup;
130                 fixup = search_exception_tables(regs->pc);
131                 if (fixup) {
132                         regs->pc = fixup->fixup;
133                         return 0;
134                 }
135                 die(str, regs, err);
136         }
137         return -EFAULT;
138 }
139
140 /*
141  * handle an instruction that does an unaligned memory access by emulating the
142  * desired behaviour
143  * - note that PC _may not_ point to the faulting instruction
144  *   (if that instruction is in a branch delay slot)
145  * - return 0 if emulation okay, -EFAULT on existential error
146  */
147 static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
148 {
149         int ret, index, count;
150         unsigned long *rm, *rn;
151         unsigned char *src, *dst;
152
153         index = (instruction>>8)&15;    /* 0x0F00 */
154         rn = &regs->regs[index];
155
156         index = (instruction>>4)&15;    /* 0x00F0 */
157         rm = &regs->regs[index];
158
159         count = 1<<(instruction&3);
160
161         ret = -EFAULT;
162         switch (instruction>>12) {
163         case 0: /* mov.[bwl] to/from memory via r0+rn */
164                 if (instruction & 8) {
165                         /* from memory */
166                         src = (unsigned char*) *rm;
167                         src += regs->regs[0];
168                         dst = (unsigned char*) rn;
169                         *(unsigned long*)dst = 0;
170
171 #ifdef __LITTLE_ENDIAN__
172                         if (copy_from_user(dst, src, count))
173                                 goto fetch_fault;
174
175                         if ((count == 2) && dst[1] & 0x80) {
176                                 dst[2] = 0xff;
177                                 dst[3] = 0xff;
178                         }
179 #else
180                         dst += 4-count;
181
182                         if (__copy_user(dst, src, count))
183                                 goto fetch_fault;
184
185                         if ((count == 2) && dst[2] & 0x80) {
186                                 dst[0] = 0xff;
187                                 dst[1] = 0xff;
188                         }
189 #endif
190                 } else {
191                         /* to memory */
192                         src = (unsigned char*) rm;
193 #if !defined(__LITTLE_ENDIAN__)
194                         src += 4-count;
195 #endif
196                         dst = (unsigned char*) *rn;
197                         dst += regs->regs[0];
198
199                         if (copy_to_user(dst, src, count))
200                                 goto fetch_fault;
201                 }
202                 ret = 0;
203                 break;
204
205         case 1: /* mov.l Rm,@(disp,Rn) */
206                 src = (unsigned char*) rm;
207                 dst = (unsigned char*) *rn;
208                 dst += (instruction&0x000F)<<2;
209
210                 if (copy_to_user(dst,src,4))
211                         goto fetch_fault;
212                 ret = 0;
213                 break;
214
215         case 2: /* mov.[bwl] to memory, possibly with pre-decrement */
216                 if (instruction & 4)
217                         *rn -= count;
218                 src = (unsigned char*) rm;
219                 dst = (unsigned char*) *rn;
220 #if !defined(__LITTLE_ENDIAN__)
221                 src += 4-count;
222 #endif
223                 if (copy_to_user(dst, src, count))
224                         goto fetch_fault;
225                 ret = 0;
226                 break;
227
228         case 5: /* mov.l @(disp,Rm),Rn */
229                 src = (unsigned char*) *rm;
230                 src += (instruction&0x000F)<<2;
231                 dst = (unsigned char*) rn;
232                 *(unsigned long*)dst = 0;
233
234                 if (copy_from_user(dst,src,4))
235                         goto fetch_fault;
236                 ret = 0;
237                 break;
238
239         case 6: /* mov.[bwl] from memory, possibly with post-increment */
240                 src = (unsigned char*) *rm;
241                 if (instruction & 4)
242                         *rm += count;
243                 dst = (unsigned char*) rn;
244                 *(unsigned long*)dst = 0;
245                 
246 #ifdef __LITTLE_ENDIAN__
247                 if (copy_from_user(dst, src, count))
248                         goto fetch_fault;
249
250                 if ((count == 2) && dst[1] & 0x80) {
251                         dst[2] = 0xff;
252                         dst[3] = 0xff;
253                 }
254 #else
255                 dst += 4-count;
256                 
257                 if (copy_from_user(dst, src, count))
258                         goto fetch_fault;
259
260                 if ((count == 2) && dst[2] & 0x80) {
261                         dst[0] = 0xff;
262                         dst[1] = 0xff;
263                 }
264 #endif
265                 ret = 0;
266                 break;
267
268         case 8:
269                 switch ((instruction&0xFF00)>>8) {
270                 case 0x81: /* mov.w R0,@(disp,Rn) */
271                         src = (unsigned char*) &regs->regs[0];
272 #if !defined(__LITTLE_ENDIAN__)
273                         src += 2;
274 #endif
275                         dst = (unsigned char*) *rm; /* called Rn in the spec */
276                         dst += (instruction&0x000F)<<1;
277
278                         if (copy_to_user(dst, src, 2))
279                                 goto fetch_fault;
280                         ret = 0;
281                         break;
282
283                 case 0x85: /* mov.w @(disp,Rm),R0 */
284                         src = (unsigned char*) *rm;
285                         src += (instruction&0x000F)<<1;
286                         dst = (unsigned char*) &regs->regs[0];
287                         *(unsigned long*)dst = 0;
288
289 #if !defined(__LITTLE_ENDIAN__)
290                         dst += 2;
291 #endif
292
293                         if (copy_from_user(dst, src, 2))
294                                 goto fetch_fault;
295
296 #ifdef __LITTLE_ENDIAN__
297                         if (dst[1] & 0x80) {
298                                 dst[2] = 0xff;
299                                 dst[3] = 0xff;
300                         }
301 #else
302                         if (dst[2] & 0x80) {
303                                 dst[0] = 0xff;
304                                 dst[1] = 0xff;
305                         }
306 #endif
307                         ret = 0;
308                         break;
309                 }
310                 break;
311         }
312         return ret;
313
314  fetch_fault:
315         /* Argh. Address not only misaligned but also non-existent.
316          * Raise an EFAULT and see if it's trapped
317          */
318         return die_if_no_fixup("Fault in unaligned fixup", regs, 0);
319 }
320
321 /*
322  * emulate the instruction in the delay slot
323  * - fetches the instruction from PC+2
324  */
static inline int handle_unaligned_delayslot(struct pt_regs *regs)
{
	u16 instruction;

	/* Fetch the instruction sitting in the delay slot (PC+2). */
	if (copy_from_user(&instruction, (u16 *)(regs->pc+2), 2)) {
		/* the instruction-fetch faulted */
		if (user_mode(regs))
			return -EFAULT;

		/* kernel: fatal, no fixup possible for a bad delay slot */
		die("delay-slot-insn faulting in handle_unaligned_delayslot", regs, 0);
	}

	return handle_unaligned_ins(instruction,regs);
}
340
341 /*
342  * handle an instruction that does an unaligned memory access
343  * - have to be careful of branch delay-slot instructions that fault
344  *  SH3:
345  *   - if the branch would be taken PC points to the branch
346  *   - if the branch would not be taken, PC points to delay-slot
347  *  SH4:
348  *   - PC always points to delayed branch
349  * - return 0 if handled, -EFAULT if failed (may not return if in kernel)
350  */
351
352 /* Macros to determine offset from current PC for branch instructions */
353 /* Explicit type coercion is used to force sign extension where needed */
/* Parenthesize the macro argument before shifting: without the inner
 * parens an expression argument (e.g. a|b) would bind incorrectly
 * against <<, since << has higher precedence than |. */
#define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4)
#define SH_PC_12BIT_OFFSET(instr) ((((signed short)((instr)<<4))>>3) + 4)
356
static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
{
	u_int rm;
	int ret, index;

	/* Rn of a branch-register form lives in the 0x0F00 field. */
	index = (instruction>>8)&15;	/* 0x0F00 */
	rm = regs->regs[index];

	/* shout about the first ten userspace fixups */
	if (user_mode(regs) && handle_unaligned_notify_count>0) {
		handle_unaligned_notify_count--;

		printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
		       current->comm,current->pid,(u16*)regs->pc,instruction);
	}

	/* Branch-with-delay-slot instructions fault on the slot insn:
	   emulate the slot first, then apply the branch's PC update.
	   Plain loads/stores take the "simple" path at the bottom. */
	ret = -EFAULT;
	switch (instruction&0xF000) {
	case 0x0000:
		if (instruction==0x000B) {
			/* rts */
			ret = handle_unaligned_delayslot(regs);
			if (ret==0)
				regs->pc = regs->pr;
		}
		else if ((instruction&0x00FF)==0x0023) {
			/* braf @Rm */
			ret = handle_unaligned_delayslot(regs);
			if (ret==0)
				regs->pc += rm + 4;
		}
		else if ((instruction&0x00FF)==0x0003) {
			/* bsrf @Rm */
			ret = handle_unaligned_delayslot(regs);
			if (ret==0) {
				regs->pr = regs->pc + 4;
				regs->pc += rm + 4;
			}
		}
		else {
			/* mov.[bwl] to/from memory via r0+rn */
			goto simple;
		}
		break;

	case 0x1000: /* mov.l Rm,@(disp,Rn) */
		goto simple;

	case 0x2000: /* mov.[bwl] to memory, possibly with pre-decrement */
		goto simple;

	case 0x4000:
		if ((instruction&0x00FF)==0x002B) {
			/* jmp @Rm */
			ret = handle_unaligned_delayslot(regs);
			if (ret==0)
				regs->pc = rm;
		}
		else if ((instruction&0x00FF)==0x000B) {
			/* jsr @Rm */
			ret = handle_unaligned_delayslot(regs);
			if (ret==0) {
				regs->pr = regs->pc + 4;
				regs->pc = rm;
			}
		}
		else {
			/* mov.[bwl] to/from memory via r0+rn */
			goto simple;
		}
		break;

	case 0x5000: /* mov.l @(disp,Rm),Rn */
		goto simple;

	case 0x6000: /* mov.[bwl] from memory, possibly with post-increment */
		goto simple;

	case 0x8000: /* bf lab, bf/s lab, bt lab, bt/s lab */
		switch (instruction&0x0F00) {
		case 0x0100: /* mov.w R0,@(disp,Rm) */
			goto simple;
		case 0x0500: /* mov.w @(disp,Rm),R0 */
			goto simple;
		case 0x0B00: /* bf   lab - no delayslot*/
			break;
		case 0x0F00: /* bf/s lab */
			ret = handle_unaligned_delayslot(regs);
			if (ret==0) {
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
				/* SR bit 0 is the T flag: T set means the
				   bf/s branch is NOT taken. */
				if ((regs->sr & 0x00000001) != 0)
					regs->pc += 4; /* next after slot */
				else
#endif
					regs->pc += SH_PC_8BIT_OFFSET(instruction);
			}
			break;
		case 0x0900: /* bt   lab - no delayslot */
			break;
		case 0x0D00: /* bt/s lab */
			ret = handle_unaligned_delayslot(regs);
			if (ret==0) {
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
				/* T clear means the bt/s branch is NOT taken. */
				if ((regs->sr & 0x00000001) == 0)
					regs->pc += 4; /* next after slot */
				else
#endif
					regs->pc += SH_PC_8BIT_OFFSET(instruction);
			}
			break;
		}
		break;

	case 0xA000: /* bra label */
		ret = handle_unaligned_delayslot(regs);
		if (ret==0)
			regs->pc += SH_PC_12BIT_OFFSET(instruction);
		break;

	case 0xB000: /* bsr label */
		ret = handle_unaligned_delayslot(regs);
		if (ret==0) {
			regs->pr = regs->pc + 4;
			regs->pc += SH_PC_12BIT_OFFSET(instruction);
		}
		break;
	}
	return ret;

	/* handle non-delay-slot instruction */
 simple:
	ret = handle_unaligned_ins(instruction,regs);
	if (ret==0)
		regs->pc += 2;
	return ret;
}
493
494 /*
495  * Handle various address error exceptions
496  */
asmlinkage void do_address_error(struct pt_regs *regs, 
				 unsigned long writeaccess,
				 unsigned long address)
{
	unsigned long error_code;
	mm_segment_t oldfs;
	u16 instruction;
	int tmp;

	/* The low-level entry code stashed the error code in r2_bank. */
	asm volatile("stc	r2_bank,%0": "=r" (error_code));

	oldfs = get_fs();

	if (user_mode(regs)) {
		local_irq_enable();
		current->thread.error_code = error_code;
		current->thread.trap_no = (writeaccess) ? 8 : 7;

		/* bad PC is not something we can fix */
		if (regs->pc & 1)
			goto uspace_segv;

		/* Fetch the faulting instruction from user space so the
		   unaligned-access emulator can decode it. */
		set_fs(USER_DS);
		if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
			/* Argh. Fault on the instruction itself.
			   This should never happen non-SMP
			*/
			set_fs(oldfs);
			goto uspace_segv;
		}

		tmp = handle_unaligned_access(instruction, regs);
		set_fs(oldfs);

		if (tmp==0)
			return; /* sorted */

	uspace_segv:
		printk(KERN_NOTICE "Killing process \"%s\" due to unaligned access\n", current->comm);
		force_sig(SIGSEGV, current);
	} else {
		if (regs->pc & 1)
			die("unaligned program counter", regs, error_code);

		set_fs(KERNEL_DS);
		if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
			/* Argh. Fault on the instruction itself.
			   This should never happen non-SMP
			*/
			set_fs(oldfs);
			die("insn faulting in do_address_error", regs, 0);
		}

		/* NOTE(review): return value is ignored here, so a failed
		   kernel-mode fixup is silently dropped -- confirm intended. */
		handle_unaligned_access(instruction, regs);
		set_fs(oldfs);
	}
}
554
555 #ifdef CONFIG_SH_DSP
556 /*
557  *      SH-DSP support gerg@snapgear.com.
558  */
/*
 * Decide whether the instruction at regs->pc looks like a DSP (or DSP
 * support) instruction, so the caller can enable DSP mode and retry.
 * Returns 1 for a probable DSP instruction, 0 otherwise.
 */
int is_dsp_inst(struct pt_regs *regs)
{
	unsigned short inst;

	/* 
	 * Safe guard if DSP mode is already enabled or we're lacking
	 * the DSP altogether.
	 */
	if (!(cpu_data->flags & CPU_HAS_DSP) || (regs->sr & SR_DSP))
		return 0;

	/* NOTE(review): get_user() result is ignored; a faulting fetch
	   leaves inst unset -- presumably PC is known-valid here; verify. */
	get_user(inst, ((unsigned short *) regs->pc));

	inst &= 0xf000;

	/* Check for any type of DSP or support instruction */
	if ((inst == 0xf000) || (inst == 0x4000))
		return 1;

	return 0;
}
580 #else
581 #define is_dsp_inst(regs)       (0)
582 #endif /* CONFIG_SH_DSP */
583
584 extern int do_fpu_inst(unsigned short, struct pt_regs*);
585
/*
 * Reserved-instruction trap handler.  Tries the FPU emulator and the
 * DSP-mode check first; if neither claims the instruction, records the
 * trap and raises SIGILL (or oopses for unfixable kernel faults).
 */
asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
				unsigned long r6, unsigned long r7,
				struct pt_regs regs)
{
	unsigned long error_code;
	struct task_struct *tsk = current;

#ifdef CONFIG_SH_FPU_EMU
	unsigned short inst;
	int err;

	get_user(inst, (unsigned short*)regs.pc);

	/* On successful FP emulation, step past the instruction. */
	err = do_fpu_inst(inst, &regs);
	if (!err) {
		regs.pc += 2;
		return;
	}
	/* not a FPU inst. */
#endif

#ifdef CONFIG_SH_DSP
	/* Check if it's a DSP instruction */
	if (is_dsp_inst(&regs)) {
		/* Enable DSP mode, and restart instruction. */
		regs.sr |= SR_DSP;
		return;
	}
#endif

	/* Genuinely reserved: record the trap and signal the task. */
	asm volatile("stc	r2_bank, %0": "=r" (error_code));
	local_irq_enable();
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = TRAP_RESERVED_INST;
	CHK_REMOTE_DEBUG(&regs);
	force_sig(SIGILL, tsk);
	die_if_no_fixup("reserved instruction", &regs, error_code);
}
624
625 #ifdef CONFIG_SH_FPU_EMU
/*
 * Apply the PC/PR effect of a branch instruction whose delay slot was
 * just emulated.  Returns 0 if the instruction was a recognized branch,
 * 1 otherwise.
 */
static int emulate_branch(unsigned short inst, struct pt_regs* regs)
{
	/*
	 * bfs: 8fxx: PC+=d*2+4;
	 * bts: 8dxx: PC+=d*2+4;
	 * bra: axxx: PC+=D*2+4;
	 * bsr: bxxx: PC+=D*2+4  after PR=PC+4;
	 * braf:0x23: PC+=Rn+4;  (register forms add Rn as a byte offset,
	 * bsrf:0x03: PC+=Rn+4    not Rn*2 -- see the code below)
	 *             after PR=PC+4 for bsrf;
	 * jmp: 4x2b: PC=Rn;
	 * jsr: 4x0b: PC=Rn      after PR=PC+4;
	 * rts: 000b: PC=PR;
	 */
	if ((inst & 0xfd00) == 0x8d00) {
		/* bfs/bts: conditional, 8-bit displacement */
		regs->pc += SH_PC_8BIT_OFFSET(inst);
		return 0;
	}

	if ((inst & 0xe000) == 0xa000) {
		/* bra/bsr: 12-bit displacement (PR update handled by caller
		   context for bsr per the table above) */
		regs->pc += SH_PC_12BIT_OFFSET(inst);
		return 0;
	}

	if ((inst & 0xf0df) == 0x0003) {
		/* braf/bsrf: PC-relative via register Rn */
		regs->pc += regs->regs[(inst & 0x0f00) >> 8] + 4;
		return 0;
	}

	if ((inst & 0xf0df) == 0x400b) {
		/* jmp/jsr: absolute via register Rn */
		regs->pc = regs->regs[(inst & 0x0f00) >> 8];
		return 0;
	}

	if ((inst & 0xffff) == 0x000b) {
		/* rts */
		regs->pc = regs->pr;
		return 0;
	}

	/* not a branch */
	return 1;
}
666 #endif
667
668 asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
669                                 unsigned long r6, unsigned long r7,
670                                 struct pt_regs regs)
671 {
672         unsigned long error_code;
673         struct task_struct *tsk = current;
674 #ifdef CONFIG_SH_FPU_EMU
675         unsigned short inst;
676
677         get_user(inst, (unsigned short *)regs.pc + 1);
678         if (!do_fpu_inst(inst, &regs)) {
679                 get_user(inst, (unsigned short *)regs.pc);
680                 if (!emulate_branch(inst, &regs))
681                         return;
682                 /* fault in branch.*/
683         }
684         /* not a FPU inst. */
685 #endif
686
687         asm volatile("stc       r2_bank, %0": "=r" (error_code));
688         local_irq_enable();
689         tsk->thread.error_code = error_code;
690         tsk->thread.trap_no = TRAP_RESERVED_INST;
691         CHK_REMOTE_DEBUG(&regs);
692         force_sig(SIGILL, tsk);
693         die_if_no_fixup("illegal slot instruction", &regs, error_code);
694 }
695
/*
 * Catch-all for otherwise-unhandled exceptions: fatal in kernel mode
 * (via die_if_kernel), otherwise returns without action.
 */
asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
				   unsigned long r6, unsigned long r7,
				   struct pt_regs regs)
{
	long ex;
	/* The exception event code was stashed in r2_bank by entry code. */
	asm volatile("stc	r2_bank, %0" : "=r" (ex));
	die_if_kernel("exception", &regs, ex);
}
704
#if defined(CONFIG_SH_STANDARD_BIOS)
/* Vector through which BIOS/GDB traps are delegated (old VBR + 0x100). */
void *gdb_vbr_vector;

static inline void __init gdb_vbr_init(void)
{
	register unsigned long vbr;

	/*
	 * Read the old value of the VBR register to initialise
	 * the vector through which debug and BIOS traps are
	 * delegated by the Linux trap handler.
	 */
	asm volatile("stc vbr, %0" : "=r" (vbr));

	gdb_vbr_vector = (void *)(vbr + 0x100);
	printk("Setting GDB trap vector to 0x%08lx\n",
	       (unsigned long)gdb_vbr_vector);
}
#endif
724
/* Point this CPU's VBR at the kernel's exception vector table. */
void __init per_cpu_trap_init(void)
{
	extern void *vbr_base;

#ifdef CONFIG_SH_STANDARD_BIOS
	gdb_vbr_init();
#endif

	/* NOTE: The VBR value should be at P1
	   (or P2, virtual "fixed" address space).
	   It definitely should not be a physical address.  */

	asm volatile("ldc	%0, vbr"
		     : /* no output */
		     : "r" (&vbr_base)
		     : "memory");
}
742
/* Install the trap handlers this file provides and set up the VBR. */
void __init trap_init(void)
{
	extern void *exception_handling_table[];

	exception_handling_table[TRAP_RESERVED_INST]
		= (void *)do_reserved_inst;
	exception_handling_table[TRAP_ILLEGAL_SLOT_INST]
		= (void *)do_illegal_slot_inst;

#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_FPU) || \
    defined(CONFIG_SH_FPU_EMU)
	/*
	 * For SH-4 lacking an FPU, treat floating point instructions as
	 * reserved. They'll be handled in the math-emu case, or faulted on
	 * otherwise.
	 */
	/* entry 64 corresponds to EXPEVT=0x800 */
	exception_handling_table[64] = (void *)do_reserved_inst;
	exception_handling_table[65] = (void *)do_illegal_slot_inst;
#endif

	/* Setup VBR for boot cpu */
	per_cpu_trap_init();
}
767
/*
 * Print a (heuristic) call trace by scanning the raw stack words and
 * symbolizing anything that points into kernel text.  User-mode
 * register sets are not unwound.
 */
void show_trace(struct task_struct *tsk, unsigned long *sp,
		struct pt_regs *regs)
{
	unsigned long addr;

	if (regs && user_mode(regs))
		return;

	printk("\nCall trace: ");
#ifdef CONFIG_KALLSYMS
	printk("\n");
#endif

	for (; !kstack_end(sp); sp++) {
		addr = *sp;
		if (!kernel_text_address(addr))
			continue;
		print_ip_sym(addr);
	}

	printk("\n");
}
789
790 void show_stack(struct task_struct *tsk, unsigned long *sp)
791 {
792         unsigned long stack;
793
794         if (!tsk)
795                 tsk = current;
796         if (tsk == current)
797                 sp = (unsigned long *)current_stack_pointer;
798         else
799                 sp = (unsigned long *)tsk->thread.sp;
800
801         stack = (unsigned long)sp;
802         dump_mem("Stack: ", stack, THREAD_SIZE +
803                  (unsigned long)task_stack_page(tsk));
804         show_trace(tsk, sp, NULL);
805 }
806
/* Backtrace the current context; exported for use by modules. */
void dump_stack(void)
{
	show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);