powerpc: Fix page fault with lockdep regression
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 094eea6fbd69bab60f4c21660fc8090e155aaf8e..ba3aeb4bc06a81453105c6294bd74b81b340ed77 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -19,7 +19,6 @@
  *
  */
 
-#include <linux/config.h>
 #include <linux/errno.h>
 #include <linux/sys.h>
 #include <linux/threads.h>
@@ -31,6 +30,8 @@
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
 #include <asm/unistd.h>
+#include <asm/ftrace.h>
+#include <asm/ptrace.h>
 
 #undef SHOW_SYSCALLS
 #undef SHOW_SYSCALLS_TASK
 #endif
 
 #ifdef CONFIG_BOOKE
-#include "head_booke.h"
-#define TRANSFER_TO_HANDLER_EXC_LEVEL(exc_level)       \
-       mtspr   exc_level##_SPRG,r8;                    \
-       BOOKE_LOAD_EXC_LEVEL_STACK(exc_level);          \
-       lwz     r0,GPR10-INT_FRAME_SIZE(r8);            \
-       stw     r0,GPR10(r11);                          \
-       lwz     r0,GPR11-INT_FRAME_SIZE(r8);            \
-       stw     r0,GPR11(r11);                          \
-       mfspr   r8,exc_level##_SPRG
-
        .globl  mcheck_transfer_to_handler
 mcheck_transfer_to_handler:
-       TRANSFER_TO_HANDLER_EXC_LEVEL(MCHECK)
-       b       transfer_to_handler_full
+       mfspr   r0,SPRN_DSRR0
+       stw     r0,_DSRR0(r11)
+       mfspr   r0,SPRN_DSRR1
+       stw     r0,_DSRR1(r11)
+       /* fall through */
 
        .globl  debug_transfer_to_handler
 debug_transfer_to_handler:
-       TRANSFER_TO_HANDLER_EXC_LEVEL(DEBUG)
-       b       transfer_to_handler_full
+       mfspr   r0,SPRN_CSRR0
+       stw     r0,_CSRR0(r11)
+       mfspr   r0,SPRN_CSRR1
+       stw     r0,_CSRR1(r11)
+       /* fall through */
 
        .globl  crit_transfer_to_handler
 crit_transfer_to_handler:
-       TRANSFER_TO_HANDLER_EXC_LEVEL(CRIT)
+#ifdef CONFIG_PPC_BOOK3E_MMU
+       mfspr   r0,SPRN_MAS0
+       stw     r0,MAS0(r11)
+       mfspr   r0,SPRN_MAS1
+       stw     r0,MAS1(r11)
+       mfspr   r0,SPRN_MAS2
+       stw     r0,MAS2(r11)
+       mfspr   r0,SPRN_MAS3
+       stw     r0,MAS3(r11)
+       mfspr   r0,SPRN_MAS6
+       stw     r0,MAS6(r11)
+#ifdef CONFIG_PHYS_64BIT
+       mfspr   r0,SPRN_MAS7
+       stw     r0,MAS7(r11)
+#endif /* CONFIG_PHYS_64BIT */
+#endif /* CONFIG_PPC_BOOK3E_MMU */
+#ifdef CONFIG_44x
+       mfspr   r0,SPRN_MMUCR
+       stw     r0,MMUCR(r11)
+#endif
+       mfspr   r0,SPRN_SRR0
+       stw     r0,_SRR0(r11)
+       mfspr   r0,SPRN_SRR1
+       stw     r0,_SRR1(r11)
+
+       mfspr   r8,SPRN_SPRG_THREAD
+       lwz     r0,KSP_LIMIT(r8)
+       stw     r0,SAVED_KSP_LIMIT(r11)
+       rlwimi  r0,r1,0,0,(31-THREAD_SHIFT)
+       stw     r0,KSP_LIMIT(r8)
        /* fall through */
 #endif
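
Note: the symbolic frame offsets this hunk leans on (KSP_LIMIT, SAVED_KSP_LIMIT, _SRR0, _DSRR0, MAS0, ...) are not hand-maintained constants; they are generated at build time by arch/powerpc/kernel/asm-offsets.c. A minimal sketch of the mechanism, assuming the era's thread_struct carries a ksp_limit field (illustrative, not the literal asm-offsets.c):

	#include <linux/kbuild.h>	/* DEFINE() emits "->SYM value" markers */
	#include <linux/stddef.h>	/* offsetof() */
	#include <asm/processor.h>	/* struct thread_struct */

	int main(void)
	{
		/* becomes "#define KSP_LIMIT <offset>" in the generated
		 * asm-offsets.h, pulled in here via <asm/asm-offsets.h> */
		DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit));
		return 0;
	}
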
 
@@ -78,6 +104,16 @@ crit_transfer_to_handler:
        stw     r0,GPR10(r11)
        lwz     r0,crit_r11@l(0)
        stw     r0,GPR11(r11)
+       mfspr   r0,SPRN_SRR0
+       stw     r0,crit_srr0@l(0)
+       mfspr   r0,SPRN_SRR1
+       stw     r0,crit_srr1@l(0)
+
+       mfspr   r8,SPRN_SPRG_THREAD
+       lwz     r0,KSP_LIMIT(r8)
+       stw     r0,saved_ksp_limit@l(0)
+       rlwimi  r0,r1,0,0,(31-THREAD_SHIFT)
+       stw     r0,KSP_LIMIT(r8)
        /* fall through */
 #endif
 
@@ -103,7 +139,7 @@ transfer_to_handler:
        mfspr   r2,SPRN_XER
        stw     r12,_CTR(r11)
        stw     r2,_XER(r11)
-       mfspr   r12,SPRN_SPRG3
+       mfspr   r12,SPRN_SPRG_THREAD
        addi    r2,r12,-THREAD
        tovirt(r2,r2)                   /* set r2 to current */
        beq     2f                      /* if from user, fix up THREAD.regs */
@@ -111,9 +147,9 @@ transfer_to_handler:
        stw     r11,PT_REGS(r12)
 #if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
        /* Check to see if the dbcr0 register is set up to debug.  Use the
-          single-step bit to do this. */
+          internal debug mode bit to do this. */
        lwz     r12,THREAD_DBCR0(r12)
-       andis.  r12,r12,DBCR0_IC@h
+       andis.  r12,r12,DBCR0_IDM@h
        beq+    3f
        /* From user and task is ptraced - load up global dbcr0 */
        li      r12,-1                  /* clear all pending debug events */
@@ -121,6 +157,12 @@ transfer_to_handler:
        lis     r11,global_dbcr0@ha
        tophys(r11,r11)
        addi    r11,r11,global_dbcr0@l
+#ifdef CONFIG_SMP
+       rlwinm  r9,r1,0,0,(31-THREAD_SHIFT)
+       lwz     r9,TI_CPU(r9)
+       slwi    r9,r9,3
+       add     r11,r11,r9
+#endif
        lwz     r12,0(r11)
        mtspr   SPRN_DBCR0,r12
        lwz     r12,4(r11)
@@ -128,34 +170,102 @@ transfer_to_handler:
        stw     r12,4(r11)
 #endif
        b       3f
+
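
Note: `rlwinm r9,r1,0,0,(31-THREAD_SHIFT)` in the SMP block above is the usual ppc32 idiom for finding thread_info from the stack pointer, and `slwi r9,r9,3` scales the CPU number by 8 because each CPU owns one 8-byte, two-word slot in global_dbcr0 (see the `.space 8*NR_CPUS` later in this patch). A sketch of the mask in C:

	#include <asm/thread_info.h>	/* THREAD_SIZE, struct thread_info */

	/* thread_info sits at the base of the kernel stack, so clearing
	 * the low THREAD_SHIFT bits of the stack pointer finds it */
	static inline struct thread_info *ti_from_sp(unsigned long sp)
	{
		return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
	}
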
 2:     /* if from kernel, check interrupted DOZE/NAP mode and
          * check for stack overflow
          */
-#ifdef CONFIG_6xx
-       mfspr   r11,SPRN_HID0
-       mtcr    r11
-BEGIN_FTR_SECTION
-       bt-     8,power_save_6xx_restore        /* Check DOZE */
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
-BEGIN_FTR_SECTION
-       bt-     9,power_save_6xx_restore        /* Check NAP */
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
-#endif /* CONFIG_6xx */
+       lwz     r9,KSP_LIMIT(r12)
+       cmplw   r1,r9                   /* if r1 <= ksp_limit */
+       ble-    stack_ovf               /* then the kernel stack overflowed */
+5:
+#if defined(CONFIG_6xx) || defined(CONFIG_E500)
+       rlwinm  r9,r1,0,0,31-THREAD_SHIFT
+       tophys(r9,r9)                   /* check local flags */
+       lwz     r12,TI_LOCAL_FLAGS(r9)
+       mtcrf   0x01,r12
+       bt-     31-TLF_NAPPING,4f
+       bt-     31-TLF_SLEEPING,7f
+#endif /* CONFIG_6xx || CONFIG_E500 */
        .globl transfer_to_handler_cont
 transfer_to_handler_cont:
-       lwz     r11,THREAD_INFO-THREAD(r12)
-       cmplw   r1,r11                  /* if r1 <= current->thread_info */
-       ble-    stack_ovf               /* then the kernel stack overflowed */
 3:
        mflr    r9
        lwz     r11,0(r9)               /* virtual address of handler */
        lwz     r9,4(r9)                /* where to go when done */
-       FIX_SRR1(r10,r12)
+#ifdef CONFIG_TRACE_IRQFLAGS
+       lis     r12,reenable_mmu@h
+       ori     r12,r12,reenable_mmu@l
+       mtspr   SPRN_SRR0,r12
+       mtspr   SPRN_SRR1,r10
+       SYNC
+       RFI
+reenable_mmu:                          /* re-enable mmu so we can */
+       mfmsr   r10
+       lwz     r12,_MSR(r1)
+       xor     r10,r10,r12
+       andi.   r10,r10,MSR_EE          /* Did EE change? */
+       beq     1f
+
+       /*
+        * trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1. If we
+        * came from user mode there is only one stack frame on the stack,
+        * and accessing CALLER_ADDR1 will cause an oops. So we need to
+        * create a dummy stack frame to make trace_hardirqs_off happy.
+        *
+        * This is handy because we also need to save a bunch of GPRs,
+        * r3 can be different from GPR3(r1) at this point, r9 and r11
+        * contain the old MSR and handler address respectively,
+        * r4 & r5 can contain page fault arguments that need to be passed
+        * along as well. r12, CCR, CTR, XER etc... are left clobbered as
+        * they aren't useful past this point (aren't syscall arguments),
+        * the rest is restored from the exception frame.
+        */
+       stwu    r1,-32(r1)
+       stw     r9,8(r1)
+       stw     r11,12(r1)
+       stw     r3,16(r1)
+       stw     r4,20(r1)
+       stw     r5,24(r1)
+       andi.   r12,r12,MSR_PR
+       b       11f
+       bl      trace_hardirqs_off
+       b       12f
+11:
+       bl      trace_hardirqs_off
+12:
+       lwz     r5,24(r1)
+       lwz     r4,20(r1)
+       lwz     r3,16(r1)
+       lwz     r11,12(r1)
+       lwz     r9,8(r1)
+       addi    r1,r1,32
+       lwz     r0,GPR0(r1)
+       lwz     r6,GPR6(r1)
+       lwz     r7,GPR7(r1)
+       lwz     r8,GPR8(r1)
+1:     mtctr   r11
+       mtlr    r9
+       bctr                            /* jump to handler */
+#else /* CONFIG_TRACE_IRQFLAGS */
        mtspr   SPRN_SRR0,r11
        mtspr   SPRN_SRR1,r10
        mtlr    r9
        SYNC
        RFI                             /* jump to handler, enable MMU */
+#endif /* CONFIG_TRACE_IRQFLAGS */
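
Note: the dummy-frame dance above exists because lockdep's trace_hardirqs_off() records its call sites through ftrace's CALLER_ADDR macros, which (roughly, per the era's <linux/ftrace.h>) are:

	#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
	#define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))

__builtin_return_address(1) walks the stack back-chain one frame up; if the exception came from user space there is no second kernel frame to walk, hence the fabricated 32-byte frame. The same trick reappears in the `restore` path further down.
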
+
+#if defined (CONFIG_6xx) || defined(CONFIG_E500)
+4:     rlwinm  r12,r12,0,~_TLF_NAPPING
+       stw     r12,TI_LOCAL_FLAGS(r9)
+       b       power_save_ppc32_restore
+
+7:     rlwinm  r12,r12,0,~_TLF_SLEEPING
+       stw     r12,TI_LOCAL_FLAGS(r9)
+       lwz     r9,_MSR(r11)            /* if sleeping, clear MSR.EE */
+       rlwinm  r9,r9,0,~MSR_EE
+       lwz     r12,_LINK(r11)          /* and return to address in LR */
+       b       fast_exception_return
+#endif
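
Note: `mtcrf 0x01,r12` earlier copies the low CR field from the thread's local flags, and `bt- 31-TLF_NAPPING,4f` branches on the matching bit, landing in the handlers just above. The local-flag bits involved (values as in the era's asm/thread_info.h, quoted for reference):

	#define TLF_NAPPING	0	/* idle loop enabled NAP mode */
	#define TLF_SLEEPING	1	/* suspend code enabled SLEEP mode */
	#define _TLF_NAPPING	(1 << TLF_NAPPING)
	#define _TLF_SLEEPING	(1 << TLF_SLEEPING)
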
 
 /*
  * On kernel stack overflow, load up an initial stack pointer
@@ -163,10 +273,10 @@ transfer_to_handler_cont:
  */
 stack_ovf:
        /* sometimes we use a statically-allocated stack, which is OK. */
-       lis     r11,_end@h
-       ori     r11,r11,_end@l
-       cmplw   r1,r11
-       ble     3b                      /* r1 <= &_end is OK */
+       lis     r12,_end@h
+       ori     r12,r12,_end@l
+       cmplw   r1,r12
+       ble     5b                      /* r1 <= &_end is OK */
        SAVE_NVGPRS(r11)
        addi    r3,r1,STACK_FRAME_OVERHEAD
        lis     r1,init_thread_union@ha
@@ -189,7 +299,6 @@ stack_ovf:
 0:
 
 _GLOBAL(DoSyscall)
-       stw     r0,THREAD+LAST_SYSCALL(r2)
        stw     r3,ORIG_GPR3(r1)
        li      r12,0
        stw     r12,RESULT(r1)
@@ -199,10 +308,32 @@ _GLOBAL(DoSyscall)
 #ifdef SHOW_SYSCALLS
        bl      do_show_syscall
 #endif /* SHOW_SYSCALLS */
-       rlwinm  r10,r1,0,0,18   /* current_thread_info() */
-       lwz     r11,TI_LOCAL_FLAGS(r10)
-       rlwinm  r11,r11,0,~_TIFL_FORCE_NOERROR
-       stw     r11,TI_LOCAL_FLAGS(r10)
+#ifdef CONFIG_TRACE_IRQFLAGS
+       /* Return from syscalls can (and generally will) hard enable
+        * interrupts. You aren't supposed to call a syscall with
+        * interrupts disabled in the first place. However, to ensure
+        * that we get it right vs. lockdep if it happens, we force
+        * that hard enable here with appropriate tracing if we see
+        * that we have been called with interrupts off.
+        */
+       mfmsr   r11
+       andi.   r12,r11,MSR_EE
+       bne+    1f
+       /* We came in with interrupts disabled, we enable them now */
+       bl      trace_hardirqs_on
+       mfmsr   r11
+       lwz     r0,GPR0(r1)
+       lwz     r3,GPR3(r1)
+       lwz     r4,GPR4(r1)
+       ori     r11,r11,MSR_EE
+       lwz     r5,GPR5(r1)
+       lwz     r6,GPR6(r1)
+       lwz     r7,GPR7(r1)
+       lwz     r8,GPR8(r1)
+       mtmsr   r11
+1:
+#endif /* CONFIG_TRACE_IRQFLAGS */
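
Note: in C, the intent of the TRACE_IRQFLAGS block above is roughly the sketch below (not the kernel's literal code; the function name is made up, mfmsr()/mtmsr() are the 32-bit helpers from asm/reg.h). The GPR reloads in the assembly exist because trace_hardirqs_on() is an ordinary C call and clobbers the volatile registers still holding syscall arguments:

	static void syscall_entry_irq_fixup(void)	/* illustrative name */
	{
		unsigned long msr = mfmsr();

		if (!(msr & MSR_EE)) {
			trace_hardirqs_on();	/* tell lockdep first...      */
			mtmsr(msr | MSR_EE);	/* ...then really hard-enable */
		}
	}
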
+       rlwinm  r10,r1,0,0,(31-THREAD_SHIFT)    /* current_thread_info() */
        lwz     r11,TI_FLAGS(r10)
        andi.   r11,r11,_TIF_SYSCALL_T_OR_A
        bne-    syscall_dotrace
@@ -223,40 +354,61 @@ ret_from_syscall:
        bl      do_show_syscall_exit
 #endif
        mr      r6,r3
-       li      r11,-_LAST_ERRNO
-       cmplw   0,r3,r11
-       rlwinm  r12,r1,0,0,18   /* current_thread_info() */
-       blt+    30f
-       lwz     r11,TI_LOCAL_FLAGS(r12)
-       andi.   r11,r11,_TIFL_FORCE_NOERROR
-       bne     30f
-       neg     r3,r3
-       lwz     r10,_CCR(r1)    /* Set SO bit in CR */
-       oris    r10,r10,0x1000
-       stw     r10,_CCR(r1)
-
+       rlwinm  r12,r1,0,0,(31-THREAD_SHIFT)    /* current_thread_info() */
        /* disable interrupts so current_thread_info()->flags can't change */
-30:    LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
+       LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
+       /* Note: We don't bother telling lockdep about it */
        SYNC
        MTMSRD(r10)
        lwz     r9,TI_FLAGS(r12)
-       andi.   r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED)
+       li      r8,-_LAST_ERRNO
+       andi.   r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
        bne-    syscall_exit_work
+       cmplw   0,r3,r8
+       blt+    syscall_exit_cont
+       lwz     r11,_CCR(r1)                    /* Load CR */
+       neg     r3,r3
+       oris    r11,r11,0x1000  /* Set SO bit in CR */
+       stw     r11,_CCR(r1)
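
Note: the cmplw/neg/oris sequence above implements the 32-bit syscall error convention: a raw return value in [-_LAST_ERRNO, -1] becomes a positive errno with the summary-overflow bit of CR0 set (oris with 0x1000 into the upper halfword sets 0x10000000, i.e. CR0.SO, which the C library tests). As a C sketch:

	static void fixup_syscall_retval(struct pt_regs *regs)	/* sketch */
	{
		long ret = regs->gpr[3];

		if (ret < 0 && ret >= -_LAST_ERRNO) {
			regs->gpr[3] = -ret;		/* positive errno  */
			regs->ccr |= 0x10000000;	/* CR0.SO: "error" */
		}
	}
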
 syscall_exit_cont:
+       lwz     r8,_MSR(r1)
+#ifdef CONFIG_TRACE_IRQFLAGS
+       /* If we are going to return from the syscall with interrupts
+        * off, we trace that here. It shouldn't happen, but we want
+        * to catch the bugger if it does, right?
+        */
+       andi.   r10,r8,MSR_EE
+       bne+    1f
+       stw     r3,GPR3(r1)
+       bl      trace_hardirqs_off
+       lwz     r3,GPR3(r1)
+1:
+#endif /* CONFIG_TRACE_IRQFLAGS */
 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
-       /* If the process has its own DBCR0 value, load it up.  The single
-          step bit tells us that dbcr0 should be loaded. */
+       /* If the process has its own DBCR0 value, load it up.  The internal
+          debug mode bit tells us that dbcr0 should be loaded. */
        lwz     r0,THREAD+THREAD_DBCR0(r2)
-       andis.  r10,r0,DBCR0_IC@h
+       andis.  r10,r0,DBCR0_IDM@h
        bnel-   load_dbcr0
 #endif
+#ifdef CONFIG_44x
+BEGIN_MMU_FTR_SECTION
+       lis     r4,icache_44x_need_flush@ha
+       lwz     r5,icache_44x_need_flush@l(r4)
+       cmplwi  cr0,r5,0
+       bne-    2f
+1:
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
+#endif /* CONFIG_44x */
+BEGIN_FTR_SECTION
+       lwarx   r7,0,r1
+END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
        stwcx.  r0,0,r1                 /* to clear the reservation */
        lwz     r4,_LINK(r1)
        lwz     r5,_CCR(r1)
        mtlr    r4
        mtcr    r5
        lwz     r7,_NIP(r1)
-       lwz     r8,_MSR(r1)
        FIX_SRR1(r8, r0)
        lwz     r2,GPR2(r1)
        lwz     r1,GPR1(r1)
@@ -264,6 +416,12 @@ syscall_exit_cont:
        mtspr   SPRN_SRR1,r8
        SYNC
        RFI
+#ifdef CONFIG_44x
+2:     li      r7,0
+       iccci   r0,r0
+       stw     r7,icache_44x_need_flush@l(r4)
+       b       1b
+#endif  /* CONFIG_44x */
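
Note: the 2:/1b slow path above is the 44x deferred icache flush: the TLB miss code raises icache_44x_need_flush when it maps new code, and the exception exit invalidates the whole instruction cache (iccci) and clears the flag before returning to user space. Roughly, in C (the variable is real, the helper name is not):

	extern unsigned long icache_44x_need_flush;

	static inline void flush_icache_if_deferred(void)
	{
		if (icache_44x_need_flush) {
			asm volatile("iccci 0,0" ::: "memory"); /* whole I-cache */
			icache_44x_need_flush = 0;
		}
	}
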
 
 66:    li      r3,-ENOSYS
        b       ret_from_syscall
@@ -279,10 +437,15 @@ ret_from_fork:
 syscall_dotrace:
        SAVE_NVGPRS(r1)
        li      r0,0xc00
-       stw     r0,TRAP(r1)
+       stw     r0,_TRAP(r1)
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      do_syscall_trace_enter
-       lwz     r0,GPR0(r1)     /* Restore original registers */
+       /*
+        * Restore the argument registers, which the tracer may have
+        * just changed. We use the return value of do_syscall_trace_enter
+        * as the call number to look up in the table (r0).
+        */
+       mr      r0,r3
        lwz     r3,GPR3(r1)
        lwz     r4,GPR4(r1)
        lwz     r5,GPR5(r1)
@@ -293,45 +456,59 @@ syscall_dotrace:
        b       syscall_dotrace_cont
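
Note: a sketch of the C contract this now relies on; the real do_syscall_trace_enter in arch/powerpc/kernel/ptrace.c does more (auditing and so on), so treat the body below as illustrating the return-value convention only. The register reloads above exist because the tracer may have edited the saved pt_regs:

	#include <linux/tracehook.h>

	long do_syscall_trace_enter(struct pt_regs *regs)
	{
		if (test_thread_flag(TIF_SYSCALL_TRACE) &&
		    tracehook_report_syscall_entry(regs))
			regs->gpr[0] = -1;	/* tracer abort: illustrative */
		return regs->gpr[0];	/* call number, possibly rewritten */
	}
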
 
 syscall_exit_work:
-       stw     r6,RESULT(r1)   /* Save result */
+       andi.   r0,r9,_TIF_RESTOREALL
+       beq+    0f
+       REST_NVGPRS(r1)
+       b       2f
+0:     cmplw   0,r3,r8
+       blt+    1f
+       andi.   r0,r9,_TIF_NOERROR
+       bne-    1f
+       lwz     r11,_CCR(r1)                    /* Load CR */
+       neg     r3,r3
+       oris    r11,r11,0x1000  /* Set SO bit in CR */
+       stw     r11,_CCR(r1)
+
+1:     stw     r6,RESULT(r1)   /* Save result */
        stw     r3,GPR3(r1)     /* Update return value */
-       andi.   r0,r9,_TIF_SYSCALL_T_OR_A
-       beq     5f
+2:     andi.   r0,r9,(_TIF_PERSYSCALL_MASK)
+       beq     4f
+
+       /* Clear per-syscall TIF flags if any are set.  */
+
+       li      r11,_TIF_PERSYSCALL_MASK
+       addi    r12,r12,TI_FLAGS
+3:     lwarx   r8,0,r12
+       andc    r8,r8,r11
+#ifdef CONFIG_IBM405_ERR77
+       dcbt    0,r12
+#endif
+       stwcx.  r8,0,r12
+       bne-    3b
+       subi    r12,r12,TI_FLAGS
+
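
Note: the lwarx/stwcx. loop above is an atomic read-modify-write, so a flag set concurrently on another CPU is never lost (the dcbt is the IBM 405 erratum workaround pulled in by CONFIG_IBM405_ERR77). Its C shape, as a sketch:

	static void clear_persyscall_flags(struct thread_info *ti)	/* sketch */
	{
		unsigned long old, new;

		do {
			old = ti->flags;			/* lwarx        */
			new = old & ~_TIF_PERSYSCALL_MASK;	/* andc         */
		} while (cmpxchg(&ti->flags, old, new) != old);	/* stwcx., bne- */
	}
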
+4:     /* Anything which requires enabling interrupts? */
+       andi.   r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
+       beq     ret_from_except
+
+       /* Re-enable interrupts. There is no need to trace that with
+        * lockdep as we are supposed to have IRQs on at this point
+        */
        ori     r10,r10,MSR_EE
        SYNC
-       MTMSRD(r10)             /* re-enable interrupts */
-       lwz     r4,TRAP(r1)
+       MTMSRD(r10)
+
+       /* Save NVGPRS if they're not saved already */
+       lwz     r4,_TRAP(r1)
        andi.   r4,r4,1
-       beq     4f
+       beq     5f
        SAVE_NVGPRS(r1)
        li      r4,0xc00
-       stw     r4,TRAP(r1)
-4:
+       stw     r4,_TRAP(r1)
+5:
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      do_syscall_trace_leave
-       REST_NVGPRS(r1)
-2:
-       lwz     r3,GPR3(r1)
-       LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
-       SYNC
-       MTMSRD(r10)             /* disable interrupts again */
-       rlwinm  r12,r1,0,0,18   /* current_thread_info() */
-       lwz     r9,TI_FLAGS(r12)
-5:
-       andi.   r0,r9,_TIF_NEED_RESCHED
-       bne     1f
-       lwz     r5,_MSR(r1)
-       andi.   r5,r5,MSR_PR
-       beq     syscall_exit_cont
-       andi.   r0,r9,_TIF_SIGPENDING
-       beq     syscall_exit_cont
-       b       do_user_signal
-1:
-       ori     r10,r10,MSR_EE
-       SYNC
-       MTMSRD(r10)             /* re-enable interrupts */
-       bl      schedule
-       b       2b
+       b       ret_from_except_full
 
 #ifdef SHOW_SYSCALLS
 do_show_syscall:
@@ -402,58 +579,40 @@ show_syscalls_task:
 #endif /* SHOW_SYSCALLS */
 
 /*
- * The sigsuspend and rt_sigsuspend system calls can call do_signal
- * and thus put the process into the stopped state where we might
- * want to examine its user state with ptrace.  Therefore we need
- * to save all the nonvolatile registers (r13 - r31) before calling
- * the C code.
+ * The fork/clone functions need to copy the full register set into
+ * the child process. Therefore we need to save all the nonvolatile
+ * registers (r13 - r31) before calling the C code.
  */
-       .globl  ppc_sigsuspend
-ppc_sigsuspend:
-       SAVE_NVGPRS(r1)
-       lwz     r0,TRAP(r1)
-       rlwinm  r0,r0,0,0,30            /* clear LSB to indicate full */
-       stw     r0,TRAP(r1)             /* register set saved */
-       b       sys_sigsuspend
-
-       .globl  ppc_rt_sigsuspend
-ppc_rt_sigsuspend:
-       SAVE_NVGPRS(r1)
-       lwz     r0,TRAP(r1)
-       rlwinm  r0,r0,0,0,30
-       stw     r0,TRAP(r1)
-       b       sys_rt_sigsuspend
-
        .globl  ppc_fork
 ppc_fork:
        SAVE_NVGPRS(r1)
-       lwz     r0,TRAP(r1)
+       lwz     r0,_TRAP(r1)
        rlwinm  r0,r0,0,0,30            /* clear LSB to indicate full */
-       stw     r0,TRAP(r1)             /* register set saved */
+       stw     r0,_TRAP(r1)            /* register set saved */
        b       sys_fork
 
        .globl  ppc_vfork
 ppc_vfork:
        SAVE_NVGPRS(r1)
-       lwz     r0,TRAP(r1)
+       lwz     r0,_TRAP(r1)
        rlwinm  r0,r0,0,0,30            /* clear LSB to indicate full */
-       stw     r0,TRAP(r1)             /* register set saved */
+       stw     r0,_TRAP(r1)            /* register set saved */
        b       sys_vfork
 
        .globl  ppc_clone
 ppc_clone:
        SAVE_NVGPRS(r1)
-       lwz     r0,TRAP(r1)
+       lwz     r0,_TRAP(r1)
        rlwinm  r0,r0,0,0,30            /* clear LSB to indicate full */
-       stw     r0,TRAP(r1)             /* register set saved */
+       stw     r0,_TRAP(r1)            /* register set saved */
        b       sys_clone
 
        .globl  ppc_swapcontext
 ppc_swapcontext:
        SAVE_NVGPRS(r1)
-       lwz     r0,TRAP(r1)
+       lwz     r0,_TRAP(r1)
        rlwinm  r0,r0,0,0,30            /* clear LSB to indicate full */
-       stw     r0,TRAP(r1)             /* register set saved */
+       stw     r0,_TRAP(r1)            /* register set saved */
        b       sys_swapcontext
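
Note: all four stubs above share one convention: the low bit of the saved trap word says whether r13-r31 in the frame are valid, and `rlwinm r0,r0,0,0,30` clears it to mark the register set as full. That is exactly the bit asm/ptrace.h tests:

	/* from asm/ptrace.h (32-bit): the frame holds the non-volatile
	 * GPRs iff the low bit of the trap word is clear */
	#define FULL_REGS(regs)	(((regs)->trap & 1) == 0)
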
 
 /*
@@ -470,9 +629,9 @@ handle_page_fault:
        cmpwi   r3,0
        beq+    ret_from_except
        SAVE_NVGPRS(r1)
-       lwz     r0,TRAP(r1)
+       lwz     r0,_TRAP(r1)
        clrrwi  r0,r0,1
-       stw     r0,TRAP(r1)
+       stw     r0,_TRAP(r1)
        mr      r5,r3
        addi    r3,r1,STACK_FRAME_OVERHEAD
        lwz     r4,_DAR(r1)
@@ -516,9 +675,11 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_SPE
+BEGIN_FTR_SECTION
        oris    r0,r0,MSR_SPE@h  /* Disable SPE */
        mfspr   r12,SPRN_SPEFSCR /* save spefscr register value */
        stw     r12,THREAD+THREAD_SPEFSCR(r2)
+END_FTR_SECTION_IFSET(CPU_FTR_SPE)
 #endif /* CONFIG_SPE */
        and.    r0,r0,r11       /* FP or altivec or SPE enabled? */
        beq+    1f
@@ -540,7 +701,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 
        tophys(r0,r4)
        CLR_TOP32(r0)
-       mtspr   SPRN_SPRG3,r0   /* Update current THREAD phys addr */
+       mtspr   SPRN_SPRG_THREAD,r0     /* Update current THREAD phys addr */
        lwz     r1,KSP(r4)      /* Load new stack pointer */
 
        /* save the old current 'last' for return value */
@@ -554,8 +715,10 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_SPE
+BEGIN_FTR_SECTION
        lwz     r0,THREAD+THREAD_SPEFSCR(r2)
        mtspr   SPRN_SPEFSCR,r0         /* restore SPEFSCR reg */
+END_FTR_SECTION_IFSET(CPU_FTR_SPE)
 #endif /* CONFIG_SPE */
 
        lwz     r0,_CCR(r1)
@@ -608,7 +771,11 @@ fast_exception_return:
        mr      r12,r4          /* restart at exc_exit_restart */
        b       2b
 
-       .comm   fee_restarts,4
+       .section .bss
+       .align  2
+fee_restarts:
+       .space  4
+       .previous
 
 /* aargh, a nonrecoverable interrupt, panic */
 /* aargh, we don't know which trap this is */
@@ -618,7 +785,7 @@ BEGIN_FTR_SECTION
        b       2b
 END_FTR_SECTION_IFSET(CPU_FTR_601)
        li      r10,-1
-       stw     r10,TRAP(r11)
+       stw     r10,_TRAP(r11)
        addi    r3,r1,STACK_FRAME_OVERHEAD
        lis     r10,MSR_KERNEL@h
        ori     r10,r10,MSR_KERNEL@l
@@ -627,15 +794,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_601)
        .long   ret_from_except
 #endif
 
-       .globl  sigreturn_exit
-sigreturn_exit:
-       subi    r1,r3,STACK_FRAME_OVERHEAD
-       rlwinm  r12,r1,0,0,18   /* current_thread_info() */
-       lwz     r9,TI_FLAGS(r12)
-       andi.   r0,r9,_TIF_SYSCALL_T_OR_A
-       bnel-   do_syscall_trace_leave
-       /* fall through */
-
        .globl  ret_from_except_full
 ret_from_except_full:
        REST_NVGPRS(r1)
@@ -646,6 +804,7 @@ ret_from_except:
        /* Hard-disable interrupts so that current_thread_info()->flags
         * can't change between when we test it and when we return
         * from the interrupt. */
+       /* Note: We don't bother telling lockdep about it */
        LOAD_MSR_KERNEL(r10,MSR_KERNEL)
        SYNC                    /* Some chip revs have problems here... */
        MTMSRD(r10)             /* disable interrupts */
@@ -656,17 +815,17 @@ ret_from_except:
 
 user_exc_return:               /* r10 contains MSR_KERNEL here */
        /* Check current_thread_info()->flags */
-       rlwinm  r9,r1,0,0,18
+       rlwinm  r9,r1,0,0,(31-THREAD_SHIFT)
        lwz     r9,TI_FLAGS(r9)
-       andi.   r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED)
+       andi.   r0,r9,_TIF_USER_WORK_MASK
        bne     do_work
 
 restore_user:
 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
-       /* Check whether this process has its own DBCR0 value.  The single
-          step bit tells us that dbcr0 should be loaded. */
+       /* Check whether this process has its own DBCR0 value.  The internal
+          debug mode bit tells us that dbcr0 should be loaded. */
        lwz     r0,THREAD+THREAD_DBCR0(r2)
-       andis.  r10,r0,DBCR0_IC@h
+       andis.  r10,r0,DBCR0_IDM@h
        bnel-   load_dbcr0
 #endif
 
@@ -676,7 +835,7 @@ restore_user:
 /* N.B. the only way to get here is from the beq following ret_from_except. */
 resume_kernel:
        /* check current_thread_info->preempt_count */
-       rlwinm  r9,r1,0,0,18
+       rlwinm  r9,r1,0,0,(31-THREAD_SHIFT)
        lwz     r0,TI_PREEMPT(r9)
        cmpwi   0,r0,0          /* if non-zero, just restore regs and return */
        bne     restore
@@ -685,17 +844,76 @@ resume_kernel:
        beq+    restore
        andi.   r0,r3,MSR_EE    /* interrupts off? */
        beq     restore         /* don't schedule if so */
+#ifdef CONFIG_TRACE_IRQFLAGS
+       /* Lockdep thinks irqs are enabled, we need to call
+        * preempt_schedule_irq with IRQs off, so we inform lockdep
+        * now that we -did- turn them off already
+        */
+       bl      trace_hardirqs_off
+#endif
 1:     bl      preempt_schedule_irq
-       rlwinm  r9,r1,0,0,18
+       rlwinm  r9,r1,0,0,(31-THREAD_SHIFT)
        lwz     r3,TI_FLAGS(r9)
        andi.   r0,r3,_TIF_NEED_RESCHED
        bne-    1b
+#ifdef CONFIG_TRACE_IRQFLAGS
+       /* And now, to properly rebalance the above, we tell lockdep they
+        * are being turned back on, which will happen when we return
+        */
+       bl      trace_hardirqs_on
+#endif
 #else
 resume_kernel:
 #endif /* CONFIG_PREEMPT */
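
Note: the trace_hardirqs_off/on pair added around the preemption loop keeps lockdep's bookkeeping balanced: preempt_schedule_irq() must be entered with interrupts really off and with lockdep believing they are off, and the final RFI will turn EE back on. In outline (a sketch, not kernel code):

	if (!preempt_count() && need_resched() && (regs->msr & MSR_EE)) {
		trace_hardirqs_off();		/* we really disabled above   */
		do {
			preempt_schedule_irq();
		} while (need_resched());
		trace_hardirqs_on();		/* rebalance: RFI restores EE */
	}
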
 
        /* interrupts are hard-disabled at this point */
 restore:
+#ifdef CONFIG_44x
+BEGIN_MMU_FTR_SECTION
+       b       1f
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
+       lis     r4,icache_44x_need_flush@ha
+       lwz     r5,icache_44x_need_flush@l(r4)
+       cmplwi  cr0,r5,0
+       beq+    1f
+       li      r6,0
+       iccci   r0,r0
+       stw     r6,icache_44x_need_flush@l(r4)
+1:
+#endif  /* CONFIG_44x */
+
+       lwz     r9,_MSR(r1)
+#ifdef CONFIG_TRACE_IRQFLAGS
+       /* Lockdep doesn't know about the fact that IRQs are temporarily turned
+        * off in this assembly code while peeking at TI_FLAGS() and such. However
+        * we need to inform it if the exception turned interrupts off, and we
+        * are about to turn them back on.
+        *
+        * The problem here, sadly, is that we don't know whether the exception was
+        * one that turned interrupts off or not. So we always tell lockdep about
+        * turning them on here when we go back to wherever we came from with EE
+        * on, even if that may mean some redundant calls being tracked. Maybe later
+        * we could encode what the exception did somewhere, or test the exception
+        * type in the pt_regs, but that sounds like overkill.
+        */
+       andi.   r10,r9,MSR_EE
+       beq     1f
+       /*
+        * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
+        * which is the stack frame here, we need to force a stack frame
+        * in case we came from user space.
+        */
+       stwu    r1,-32(r1)
+       mflr    r0
+       stw     r0,4(r1)
+       stwu    r1,-32(r1)
+       bl      trace_hardirqs_on
+       lwz     r1,0(r1)
+       lwz     r1,0(r1)
+       lwz     r9,_MSR(r1)
+1:
+#endif /* CONFIG_TRACE_IRQFLAGS */
+
        lwz     r0,GPR0(r1)
        lwz     r2,GPR2(r1)
        REST_4GPRS(3, r1)
@@ -707,10 +925,12 @@ restore:
        mtctr   r11
 
        PPC405_ERR77(0,r1)
+BEGIN_FTR_SECTION
+       lwarx   r11,0,r1
+END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
        stwcx.  r0,0,r1                 /* to clear the reservation */
 
 #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
-       lwz     r9,_MSR(r1)
        andi.   r10,r9,MSR_RI           /* check if this exception occurred */
        beql    nonrecoverable          /* at a bad place (MSR:RI = 0) */
 
@@ -733,7 +953,6 @@ restore:
        MTMSRD(r10)             /* clear the RI bit */
        .globl exc_exit_restart
 exc_exit_restart:
-       lwz     r9,_MSR(r1)
        lwz     r12,_NIP(r1)
        FIX_SRR1(r9,r10)
        mtspr   SPRN_SRR0,r12
@@ -836,18 +1055,91 @@ exc_exit_restart_end:
        exc_lvl_rfi;                                                    \
        b       .;              /* prevent prefetch past exc_lvl_rfi */
 
+#define        RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)                        \
+       lwz     r9,_##exc_lvl_srr0(r1);                                 \
+       lwz     r10,_##exc_lvl_srr1(r1);                                \
+       mtspr   SPRN_##exc_lvl_srr0,r9;                                 \
+       mtspr   SPRN_##exc_lvl_srr1,r10;
+
+#if defined(CONFIG_PPC_BOOK3E_MMU)
+#ifdef CONFIG_PHYS_64BIT
+#define        RESTORE_MAS7                                                    \
+       lwz     r11,MAS7(r1);                                           \
+       mtspr   SPRN_MAS7,r11;
+#else
+#define        RESTORE_MAS7
+#endif /* CONFIG_PHYS_64BIT */
+#define RESTORE_MMU_REGS                                               \
+       lwz     r9,MAS0(r1);                                            \
+       lwz     r10,MAS1(r1);                                           \
+       lwz     r11,MAS2(r1);                                           \
+       mtspr   SPRN_MAS0,r9;                                           \
+       lwz     r9,MAS3(r1);                                            \
+       mtspr   SPRN_MAS1,r10;                                          \
+       lwz     r10,MAS6(r1);                                           \
+       mtspr   SPRN_MAS2,r11;                                          \
+       mtspr   SPRN_MAS3,r9;                                           \
+       mtspr   SPRN_MAS6,r10;                                          \
+       RESTORE_MAS7;
+#elif defined(CONFIG_44x)
+#define RESTORE_MMU_REGS                                               \
+       lwz     r9,MMUCR(r1);                                           \
+       mtspr   SPRN_MMUCR,r9;
+#else
+#define RESTORE_MMU_REGS
+#endif
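
Note: RESTORE_xSRR relies on cpp token pasting: `_##` picks the stack slot and `SPRN_##` the special register. For example, RESTORE_xSRR(CSRR0,CSRR1) as used below expands to:

	lwz	r9,_CSRR0(r1)
	lwz	r10,_CSRR1(r1)
	mtspr	SPRN_CSRR0,r9
	mtspr	SPRN_CSRR1,r10
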
+
+#ifdef CONFIG_40x
        .globl  ret_from_crit_exc
 ret_from_crit_exc:
-       RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)
+       mfspr   r9,SPRN_SPRG_THREAD
+       lis     r10,saved_ksp_limit@ha;
+       lwz     r10,saved_ksp_limit@l(r10);
+       tovirt(r9,r9);
+       stw     r10,KSP_LIMIT(r9)
+       lis     r9,crit_srr0@ha;
+       lwz     r9,crit_srr0@l(r9);
+       lis     r10,crit_srr1@ha;
+       lwz     r10,crit_srr1@l(r10);
+       mtspr   SPRN_SRR0,r9;
+       mtspr   SPRN_SRR1,r10;
+       RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
+#endif /* CONFIG_40x */
 
 #ifdef CONFIG_BOOKE
+       .globl  ret_from_crit_exc
+ret_from_crit_exc:
+       mfspr   r9,SPRN_SPRG_THREAD
+       lwz     r10,SAVED_KSP_LIMIT(r1)
+       stw     r10,KSP_LIMIT(r9)
+       RESTORE_xSRR(SRR0,SRR1);
+       RESTORE_MMU_REGS;
+       RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
+
        .globl  ret_from_debug_exc
 ret_from_debug_exc:
-       RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, RFDI)
+       mfspr   r9,SPRN_SPRG_THREAD
+       lwz     r10,SAVED_KSP_LIMIT(r1)
+       stw     r10,KSP_LIMIT(r9)
+       lwz     r9,THREAD_INFO-THREAD(r9)
+       rlwinm  r10,r1,0,0,(31-THREAD_SHIFT)
+       lwz     r10,TI_PREEMPT(r10)
+       stw     r10,TI_PREEMPT(r9)
+       RESTORE_xSRR(SRR0,SRR1);
+       RESTORE_xSRR(CSRR0,CSRR1);
+       RESTORE_MMU_REGS;
+       RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
 
        .globl  ret_from_mcheck_exc
 ret_from_mcheck_exc:
-       RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, RFMCI)
+       mfspr   r9,SPRN_SPRG_THREAD
+       lwz     r10,SAVED_KSP_LIMIT(r1)
+       stw     r10,KSP_LIMIT(r9)
+       RESTORE_xSRR(SRR0,SRR1);
+       RESTORE_xSRR(CSRR0,CSRR1);
+       RESTORE_xSRR(DSRR0,DSRR1);
+       RESTORE_MMU_REGS;
+       RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
 #endif /* CONFIG_BOOKE */
 
 /*
@@ -863,6 +1155,12 @@ load_dbcr0:
        mfspr   r10,SPRN_DBCR0
        lis     r11,global_dbcr0@ha
        addi    r11,r11,global_dbcr0@l
+#ifdef CONFIG_SMP
+       rlwinm  r9,r1,0,0,(31-THREAD_SHIFT)
+       lwz     r9,TI_CPU(r9)
+       slwi    r9,r9,3
+       add     r11,r11,r9
+#endif
        stw     r10,0(r11)
        mtspr   SPRN_DBCR0,r0
        lwz     r10,4(r11)
@@ -872,7 +1170,11 @@ load_dbcr0:
        mtspr   SPRN_DBSR,r11   /* clear all pending debug events */
        blr
 
-       .comm   global_dbcr0,8
+       .section .bss
+       .align  4
+global_dbcr0:
+       .space  8*NR_CPUS
+       .previous
 #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
 
 do_work:                       /* r10 contains MSR_KERNEL here */
@@ -880,34 +1182,41 @@ do_work:                 /* r10 contains MSR_KERNEL here */
        beq     do_user_signal
 
 do_resched:                    /* r10 contains MSR_KERNEL here */
+       /* Note: We don't need to inform lockdep that we are enabling
+        * interrupts here. As far as it knows, they are already enabled
+        */
        ori     r10,r10,MSR_EE
        SYNC
        MTMSRD(r10)             /* hard-enable interrupts */
        bl      schedule
 recheck:
+       /* Note: And we don't tell it we are disabling them again
+        * either. Those disable/enable cycles used to peek at
+        * TI_FLAGS aren't advertised.
+        */
        LOAD_MSR_KERNEL(r10,MSR_KERNEL)
        SYNC
        MTMSRD(r10)             /* disable interrupts */
-       rlwinm  r9,r1,0,0,18
+       rlwinm  r9,r1,0,0,(31-THREAD_SHIFT)
        lwz     r9,TI_FLAGS(r9)
        andi.   r0,r9,_TIF_NEED_RESCHED
        bne-    do_resched
-       andi.   r0,r9,_TIF_SIGPENDING
+       andi.   r0,r9,_TIF_USER_WORK_MASK
        beq     restore_user
 do_user_signal:                        /* r10 contains MSR_KERNEL here */
        ori     r10,r10,MSR_EE
        SYNC
        MTMSRD(r10)             /* hard-enable interrupts */
        /* save r13-r31 in the exception frame, if not already done */
-       lwz     r3,TRAP(r1)
+       lwz     r3,_TRAP(r1)
        andi.   r0,r3,1
        beq     2f
        SAVE_NVGPRS(r1)
        rlwinm  r3,r3,0,0,30
-       stw     r3,TRAP(r1)
-2:     li      r3,0
-       addi    r4,r1,STACK_FRAME_OVERHEAD
-       bl      do_signal
+       stw     r3,_TRAP(r1)
+2:     addi    r3,r1,STACK_FRAME_OVERHEAD
+       mr      r4,r9
+       bl      do_notify_resume
        REST_NVGPRS(r1)
        b       recheck
 
@@ -936,25 +1245,29 @@ nonrecoverable:
 BEGIN_FTR_SECTION
        blr
 END_FTR_SECTION_IFSET(CPU_FTR_601)
-       lwz     r3,TRAP(r1)
+       lwz     r3,_TRAP(r1)
        andi.   r0,r3,1
        beq     4f
        SAVE_NVGPRS(r1)
        rlwinm  r3,r3,0,0,30
-       stw     r3,TRAP(r1)
+       stw     r3,_TRAP(r1)
 4:     addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      nonrecoverable_exception
        /* shouldn't return */
        b       4b
 
-       .comm   ee_restarts,4
+       .section .bss
+       .align  2
+ee_restarts:
+       .space  4
+       .previous
 
 /*
  * PROM code for specific machines follows.  Put it
  * here so it's easy to add arch-specific sections later.
  * -- Cort
  */
-#ifdef CONFIG_PPC_OF
+#ifdef CONFIG_PPC_RTAS
 /*
  * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
  * called with the MMU off.
@@ -963,14 +1276,13 @@ _GLOBAL(enter_rtas)
        stwu    r1,-INT_FRAME_SIZE(r1)
        mflr    r0
        stw     r0,INT_FRAME_SIZE+4(r1)
-       lis     r4,rtas_data@ha
-       lwz     r4,rtas_data@l(r4)
+       LOAD_REG_ADDR(r4, rtas)
        lis     r6,1f@ha        /* physical return address for rtas */
        addi    r6,r6,1f@l
        tophys(r6,r6)
        tophys(r7,r1)
-       lis     r8,rtas_entry@ha
-       lwz     r8,rtas_entry@l(r8)
+       lwz     r8,RTASENTRY(r4)
+       lwz     r4,RTASBASE(r4)
        mfmsr   r9
        stw     r9,8(r1)
        LOAD_MSR_KERNEL(r0,MSR_KERNEL)
@@ -978,8 +1290,7 @@ _GLOBAL(enter_rtas)
        MTMSRD(r0)              /* don't get trashed */
        li      r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
        mtlr    r6
-       CLR_TOP32(r7)
-       mtspr   SPRN_SPRG2,r7
+       mtspr   SPRN_SPRG_RTAS,r7
        mtspr   SPRN_SRR0,r8
        mtspr   SPRN_SRR1,r9
        RFI
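
Note: enter_rtas now reads the entry point and base out of the global `rtas` descriptor through asm-offsets constants (RTASENTRY/RTASBASE) instead of the old rtas_entry/rtas_data globals. The relevant shape of that descriptor, trimmed from asm/rtas.h (treat the exact layout as illustrative):

	struct rtas_t {
		unsigned long entry;	/* physical address of RTAS entry  */
		unsigned long base;	/* physical base of the RTAS image */
		unsigned long size;
		/* ... lock, args, device_node, ... */
	};
	extern struct rtas_t rtas;
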
@@ -989,7 +1300,7 @@ _GLOBAL(enter_rtas)
        FIX_SRR1(r9,r0)
        addi    r1,r1,INT_FRAME_SIZE
        li      r0,0
-       mtspr   SPRN_SPRG2,r0
+       mtspr   SPRN_SPRG_RTAS,r0
        mtspr   SPRN_SRR0,r8
        mtspr   SPRN_SRR1,r9
        RFI                     /* return to caller */
@@ -999,4 +1310,103 @@ machine_check_in_rtas:
        twi     31,0,0
        /* XXX load up BATs and panic */
 
-#endif /* CONFIG_PPC_OF */
+#endif /* CONFIG_PPC_RTAS */
+
+#ifdef CONFIG_FUNCTION_TRACER
+#ifdef CONFIG_DYNAMIC_FTRACE
+_GLOBAL(mcount)
+_GLOBAL(_mcount)
+       /*
+        * _mcount on PPC32 must preserve the link register. But we
+        * have r0 to play with: we use it to move the return address
+        * of mcount's caller into the ctr register, restore the link
+        * register, and then jump back through ctr.
+        */
+       mflr    r0
+       mtctr   r0
+       lwz     r0, 4(r1)
+       mtlr    r0
+       bctr
+
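
Note: this stub works because of what `gcc -pg` plants at every function entry on ppc32, before the function builds its own frame; _mcount is therefore entered with LR pointing back into the caller and the caller's own return address already spilled to 4(r1). Roughly (compiler output varies):

	func:
		mflr	r0
		stw	r0,4(r1)	/* func's return address, LR save slot */
		bl	_mcount
		...			/* normal prologue continues */
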
+_GLOBAL(ftrace_caller)
+       MCOUNT_SAVE_FRAME
+       /* r3 ends up with link register */
+       subi    r3, r3, MCOUNT_INSN_SIZE
+.globl ftrace_call
+ftrace_call:
+       bl      ftrace_stub
+       nop
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+.globl ftrace_graph_call
+ftrace_graph_call:
+       b       ftrace_graph_stub
+_GLOBAL(ftrace_graph_stub)
+#endif
+       MCOUNT_RESTORE_FRAME
+       /* old link register ends up in ctr reg */
+       bctr
+#else
+_GLOBAL(mcount)
+_GLOBAL(_mcount)
+
+       MCOUNT_SAVE_FRAME
+
+       subi    r3, r3, MCOUNT_INSN_SIZE
+       LOAD_REG_ADDR(r5, ftrace_trace_function)
+       lwz     r5,0(r5)
+
+       mtctr   r5
+       bctrl
+       nop
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       b       ftrace_graph_caller
+#endif
+       MCOUNT_RESTORE_FRAME
+       bctr
+#endif
+
+_GLOBAL(ftrace_stub)
+       blr
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+_GLOBAL(ftrace_graph_caller)
+       /* load r4 with local address */
+       lwz     r4, 44(r1)
+       subi    r4, r4, MCOUNT_INSN_SIZE
+
+       /* get the parent address */
+       addi    r3, r1, 52
+
+       bl      prepare_ftrace_return
+       nop
+
+       MCOUNT_RESTORE_FRAME
+       /* old link register ends up in ctr reg */
+       bctr
+
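
Note: ftrace_graph_caller above hands the C hook the address of the saved-LR slot in r3 and the function's own address in r4, which is what the signature below assumes. A sketch of the hook's job, with push_real_return() standing in (hypothetical helper) for the graph tracer's per-task return stack:

	extern void return_to_handler(void);

	void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
	{
		unsigned long real = *parent;	/* where func would return */

		/* divert the return into the tracer's trampoline */
		*parent = (unsigned long)return_to_handler;

		/* remember the real address for return_to_handler;
		 * push_real_return() is a hypothetical stand-in */
		push_real_return(real, self_addr);
	}
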
+_GLOBAL(return_to_handler)
+       /* need to save return values */
+       stwu    r1, -32(r1)
+       stw     r3, 20(r1)
+       stw     r4, 16(r1)
+       stw     r31, 12(r1)
+       mr      r31, r1
+
+       bl      ftrace_return_to_handler
+       nop
+
+       /* return value has real return address */
+       mtlr    r3
+
+       lwz     r3, 20(r1)
+       lwz     r4, 16(r1)
+       lwz     r31,12(r1)
+       lwz     r1, 0(r1)
+
+       /* Jump back to real return address */
+       blr
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+#endif /* CONFIG_FUNCTION_TRACER */