/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/export.h>

/*
 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
 */
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x)	li r,(x)
#endif
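
/*
 * Worked example (illustrative; the constants are made up, not the real
 * MSR_KERNEL): with a Book-E style value such as 0x00029000, which does
 * not fit in a 16-bit immediate,
 *	LOAD_MSR_KERNEL(r10, 0x00029000)
 * expands to
 *	lis	r10,0x0002		# high halfword
 *	ori	r10,r10,0x9000		# low halfword
 * while a classic value such as 0x9032 expands to a single
 *	li	r10,0x9032
 */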

#ifdef CONFIG_BOOKE
	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:
	mfspr	r0,SPRN_DSRR0
	stw	r0,_DSRR0(r11)
	mfspr	r0,SPRN_DSRR1
	stw	r0,_DSRR1(r11)
	/* fall through */

	.globl	debug_transfer_to_handler
debug_transfer_to_handler:
	mfspr	r0,SPRN_CSRR0
	stw	r0,_CSRR0(r11)
	mfspr	r0,SPRN_CSRR1
	stw	r0,_CSRR1(r11)
	/* fall through */

	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
#ifdef CONFIG_PPC_BOOK3E_MMU
	mfspr	r0,SPRN_MAS0
	stw	r0,MAS0(r11)
	mfspr	r0,SPRN_MAS1
	stw	r0,MAS1(r11)
	mfspr	r0,SPRN_MAS2
	stw	r0,MAS2(r11)
	mfspr	r0,SPRN_MAS3
	stw	r0,MAS3(r11)
	mfspr	r0,SPRN_MAS6
	stw	r0,MAS6(r11)
#ifdef CONFIG_PHYS_64BIT
	mfspr	r0,SPRN_MAS7
	stw	r0,MAS7(r11)
#endif /* CONFIG_PHYS_64BIT */
#endif /* CONFIG_PPC_BOOK3E_MMU */
#ifdef CONFIG_44x
	mfspr	r0,SPRN_MMUCR
	stw	r0,MMUCR(r11)
#endif
	mfspr	r0,SPRN_SRR0
	stw	r0,_SRR0(r11)
	mfspr	r0,SPRN_SRR1
	stw	r0,_SRR1(r11)

	/* set the stack limit to the current stack while keeping
	 * the part of the limit that protects the thread_info
	 * struct
	 */
	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,SAVED_KSP_LIMIT(r11)
	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
#endif

#ifdef CONFIG_40x
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	lwz	r0,crit_r10@l(0)
	stw	r0,GPR10(r11)
	lwz	r0,crit_r11@l(0)
	stw	r0,GPR11(r11)
	mfspr	r0,SPRN_SRR0
	stw	r0,crit_srr0@l(0)
	mfspr	r0,SPRN_SRR1
	stw	r0,crit_srr1@l(0)

	/* set the stack limit to the current stack while keeping
	 * the part of the limit that protects the thread_info
	 * struct
	 */
	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,saved_ksp_limit@l(0)
	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
#endif
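
/*
 * Rough C equivalent of the KSP_LIMIT update in the two handlers above
 * (illustrative only; assumes THREAD_SIZE == 1 << THREAD_SHIFT):
 *
 *	new_limit = (r1 & ~(THREAD_SIZE - 1))		// this stack
 *		  | (old_limit & (THREAD_SIZE - 1));	// keep offset
 *
 * The limit is moved onto the interrupted stack while preserving the
 * low-order offset that keeps the thread_info protected.
 */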

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
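/*
 * (Illustrative note: a typical exception prolog establishes this with
 * something like "andi. r11,r9,MSR_PR" on the saved MSR, so cr0.eq
 * means PR was clear, i.e. we came from kernel mode; the exact prologs
 * live in the head_*.S files.)
 */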
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	SAVE_NVGPRS(r11)
	/* fall through */

	.globl	transfer_to_handler
transfer_to_handler:
	stw	r2,GPR2(r11)
	stw	r12,_NIP(r11)
	stw	r9,_MSR(r11)
	andi.	r2,r9,MSR_PR
	mfctr	r12
	mfspr	r2,SPRN_XER
	stw	r12,_CTR(r11)
	stw	r2,_XER(r11)
	mfspr	r12,SPRN_SPRG_THREAD
	addi	r2,r12,-THREAD
	tovirt(r2,r2)			/* set r2 to current */
	beq	2f			/* if from user, fix up THREAD.regs */
	addi	r11,r1,STACK_FRAME_OVERHEAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   internal debug mode bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IDM@h
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_CPU(r9)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif
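
/*
 * Layout note, as used both here and in load_dbcr0 below: global_dbcr0
 * is 8 bytes per CPU (hence the "slwi r9,r9,3" indexing): word 0 holds
 * the saved global DBCR0 value and word 4 a usage count.
 */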
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	CURRENT_THREAD_INFO(r9, r1)
	tophys(r9, r9)
	ACCOUNT_CPU_USER_ENTRY(r9, r11, r12)
#endif

	b	3f

2:	/* if from kernel, check interrupted DOZE/NAP mode and
	 * check for stack overflow
	 */
	lwz	r9,KSP_LIMIT(r12)
	cmplw	r1,r9			/* if r1 <= ksp_limit */
	ble-	stack_ovf		/* then the kernel stack overflowed */
5:
#if defined(CONFIG_6xx) || defined(CONFIG_E500)
	CURRENT_THREAD_INFO(r9, r1)
	tophys(r9,r9)			/* check local flags */
	lwz	r12,TI_LOCAL_FLAGS(r9)
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f
	bt-	31-TLF_SLEEPING,7f
#endif /* CONFIG_6xx || CONFIG_E500 */
	.globl transfer_to_handler_cont
transfer_to_handler_cont:
3:
	mflr	r9
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
#ifdef CONFIG_PPC_8xx_PERF_EVENT
	mtspr	SPRN_NRI, r0
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	lis	r12,reenable_mmu@h
	ori	r12,r12,reenable_mmu@l
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI
reenable_mmu:				/* re-enable mmu so we can */
	mfmsr	r10
	lwz	r12,_MSR(r1)
	xor	r10,r10,r12
	andi.	r10,r10,MSR_EE		/* Did EE change? */
	beq	1f

	/*
	 * The trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1.
	 * If from user mode there is only one stack frame on the stack, and
	 * accessing CALLER_ADDR1 will cause an oops.  So we need to create a
	 * dummy stack frame to make trace_hardirqs_off happy.
	 *
	 * This is handy because we also need to save a bunch of GPRs:
	 * r3 can be different from GPR3(r1) at this point, r9 and r11
	 * contain the old MSR and handler address respectively, and
	 * r4 & r5 can contain page fault arguments that need to be passed
	 * along as well.  r12, CCR, CTR, XER etc... are left clobbered as
	 * they aren't useful past this point (aren't syscall arguments),
	 * the rest is restored from the exception frame.
	 */
	stwu	r1,-32(r1)
	stw	r9,8(r1)
	stw	r11,12(r1)
	stw	r3,16(r1)
	stw	r4,20(r1)
	stw	r5,24(r1)
	bl	trace_hardirqs_off
	lwz	r5,24(r1)
	lwz	r4,20(r1)
	lwz	r3,16(r1)
	lwz	r11,12(r1)
	lwz	r9,8(r1)
	addi	r1,r1,32
	lwz	r0,GPR0(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
1:	mtctr	r11
	mtlr	r9
	bctr				/* jump to handler */
#else /* CONFIG_TRACE_IRQFLAGS */
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r10
	mtlr	r9
	SYNC
	RFI				/* jump to handler, enable MMU */
#endif /* CONFIG_TRACE_IRQFLAGS */

#if defined (CONFIG_6xx) || defined(CONFIG_E500)
4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	b	power_save_ppc32_restore

7:	rlwinm	r12,r12,0,~_TLF_SLEEPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
	rlwinm	r9,r9,0,~MSR_EE
	lwz	r12,_LINK(r11)		/* and return to address in LR */
	b	fast_exception_return
#endif

/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	lis	r12,_end@h
	ori	r12,r12,_end@l
	cmplw	r1,r12
	ble	5b			/* r1 <= &_end is OK */
	SAVE_NVGPRS(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
#ifdef CONFIG_PPC_8xx_PERF_EVENT
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR0,r9
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI

/*
 * Handle a system call.
 */
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"entry_32.S",N_SO,0,0,0f
0:

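/*
 * Rough C-level sketch of the dispatch below (illustrative only; the
 * local names are made up):
 *
 *	// r0 = syscall number, r3..r8 = arguments
 *	if (nr >= NR_syscalls)
 *		return -ENOSYS;			// label 66 below
 *	handler = sys_call_table[nr];		// lwzx, nr scaled by 4
 *	return handler(a0, a1, a2, a3, a4, a5);	// blrl
 */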
_GLOBAL(DoSyscall)
	stw	r3,ORIG_GPR3(r1)
	li	r12,0
	stw	r12,RESULT(r1)
	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
	rlwinm	r11,r11,0,4,2
	stw	r11,_CCR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Return from syscalls can (and generally will) hard enable
	 * interrupts. You aren't supposed to call a syscall with
	 * interrupts disabled in the first place. However, to ensure
	 * that we get it right vs. lockdep if it happens, we force
	 * that hard enable here with appropriate tracing if we see
	 * that we have been called with interrupts off.
	 */
	mfmsr	r11
	andi.	r12,r11,MSR_EE
	bne+	1f
	/* We came in with interrupts disabled, we enable them now */
	bl	trace_hardirqs_on
	mfmsr	r11
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	ori	r11,r11,MSR_EE
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtmsr	r11
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
	CURRENT_THREAD_INFO(r10, r1)
	lwz	r11,TI_FLAGS(r10)
	andi.	r11,r11,_TIF_SYSCALL_DOTRACE
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmplwi	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2
	bge-	66f
	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	PPC440EP_ERR42
	blrl			/* Call handler */
	.globl	ret_from_syscall
ret_from_syscall:
	mr	r6,r3
	CURRENT_THREAD_INFO(r12, r1)
	/* disable interrupts so current_thread_info()->flags can't change */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	/* Note: We don't bother telling lockdep about it */
	SYNC
	MTMSRD(r10)
	lwz	r9,TI_FLAGS(r12)
	li	r8,-MAX_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	cmplw	0,r3,r8
	blt+	syscall_exit_cont
	lwz	r11,_CCR(r1)			/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)
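
/*
 * Error-return convention used above (illustrative note): handlers
 * return -errno in r3.  A result in [-MAX_ERRNO, -1] is negated to a
 * positive errno and CR0.SO (0x10000000 in the saved CCR) is set; the
 * C library tests SO to tell failure apart from a legitimate large
 * unsigned return value.
 */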
syscall_exit_cont:
	lwz	r8,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* If we are going to return from the syscall with interrupts
	 * off, we trace that here.  It shouldn't happen, but we want
	 * to catch the bugger if it does.
	 */
	andi.	r10,r8,MSR_EE
	bne+	1f
	stw	r3,GPR3(r1)
	bl	trace_hardirqs_off
	lwz	r3,GPR3(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* If the process has its own DBCR0 value, load it up.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	bne-	2f
1:
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
#endif /* CONFIG_44x */
BEGIN_FTR_SECTION
	lwarx	r7,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	andi.	r4,r8,MSR_PR
	beq	3f
	CURRENT_THREAD_INFO(r4, r1)
	ACCOUNT_CPU_USER_EXIT(r4, r5, r7)
3:
#endif
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	mtcr	r5
	lwz	r7,_NIP(r1)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
#ifdef CONFIG_PPC_8xx_PERF_EVENT
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	SYNC
	RFI
#ifdef CONFIG_44x
2:	li	r7,0
	iccci	r0,r0
	stw	r7,icache_44x_need_flush@l(r4)
	b	1b
#endif  /* CONFIG_44x */

66:	li	r3,-ENOSYS
	b	ret_from_syscall

	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0
	b	ret_from_syscall

	.globl	ret_from_kernel_thread
ret_from_kernel_thread:
	REST_NVGPRS(r1)
	bl	schedule_tail
	mtlr	r14
	mr	r3,r15
	PPC440EP_ERR42
	blrl
	li	r3,0
	b	ret_from_syscall

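/*
 * (Illustrative note: for kernel threads, copy_thread() arranges for
 * r14 to hold the thread function and r15 its argument in the child's
 * saved nonvolatile registers; the mtlr r14 / mr r3,r15 above depends
 * on that convention.)
 */
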
/* Traced system call support */
syscall_dotrace:
	SAVE_NVGPRS(r1)
	li	r0,0xc00
	stw	r0,_TRAP(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter
	/*
	 * Restore argument registers possibly just changed.
	 * We use the return value of do_syscall_trace_enter
	 * for call number to look up in the table (r0).
	 */
	mr	r0,r3
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	REST_NVGPRS(r1)

	cmplwi	r0,NR_syscalls
	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
	bge-	ret_from_syscall
	b	syscall_dotrace_cont

syscall_exit_work:
	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmplw	0,r3,r8
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	lwz	r11,_CCR(r1)			/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)

1:	stw	r6,RESULT(r1)	/* Save result */
	stw	r3,GPR3(r1)	/* Update return value */
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	lwarx	r8,0,r12
	andc	r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
	dcbt	0,r12
#endif
	stwcx.	r8,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything which requires enabling interrupts? */
	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
	beq	ret_from_except

	/* Re-enable interrupts. There is no need to trace that with
	 * lockdep as we are supposed to have IRQs on at this point
	 */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)

	/* Save NVGPRS if they're not saved already */
	lwz	r4,_TRAP(r1)
	andi.	r4,r4,1
	beq	5f
	SAVE_NVGPRS(r1)
	li	r4,0xc00
	stw	r4,_TRAP(r1)
5:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except_full

/*
 * The fork/clone functions need to copy the full register set into
 * the child process. Therefore we need to save all the nonvolatile
 * registers (r13 - r31) before calling the C code.
 */
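/*
 * (Descriptive note on the _TRAP trick used below: bit 0 of the saved
 * trap number in _TRAP(r1) doubles as an "only volatile regs saved"
 * flag; "rlwinm r0,r0,0,0,30" clears that low bit to record that the
 * full register set, including the NVGPRS, is now in the frame.)
 */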
	.globl	ppc_fork
ppc_fork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_fork

	.globl	ppc_vfork
ppc_vfork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_vfork

	.globl	ppc_clone
ppc_clone:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_clone

	.globl	ppc_swapcontext
ppc_swapcontext:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_swapcontext

/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
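/*
 * Rough C equivalent of the flow below (illustrative only):
 *
 *	sig = do_page_fault(regs, dar, dsisr);
 *	if (sig) {				// bad kernel fault
 *		save_nvgprs(regs);
 *		bad_page_fault(regs, dar, sig);
 *	}
 */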
	.globl	handle_page_fault
handle_page_fault:
	stw	r4,_DAR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	andis.	r0,r5,DSISR_DABRMATCH@h
#ifdef CONFIG_6xx
	bne-	handle_dabr_fault
#endif
	bl	do_page_fault
	cmpwi	r3,0
	beq+	ret_from_except
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	clrrwi	r0,r0,1
	stw	r0,_TRAP(r1)
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except_full

#ifdef CONFIG_6xx
	/* We have a data breakpoint exception - handle it */
handle_dabr_fault:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	clrrwi	r0,r0,1
	stw	r0,_TRAP(r1)
	bl	do_break
	b	ret_from_except_full
#endif

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
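/*
 * Conceptually (illustrative sketch; roughly the C-side view):
 *
 *	struct task_struct *_switch(struct thread_struct *prev,
 *				    struct thread_struct *next);
 *
 * saves the nonvolatile state of 'prev' in a frame on its kernel stack
 * and records r1 in prev->ksp, then loads next->ksp and returns, in
 * r3, the task that was current before the switch ('last').
 */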
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfmsr	r11
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_SPE@h	/* Disable SPE */
	mfspr	r12,SPRN_SPEFSCR	/* save spefscr register value */
	stw	r12,THREAD+THREAD_SPEFSCR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */
	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
	beq+	1f
	andc	r11,r11,r0
	MTMSRD(r11)
	isync
1:	stw	r11,_MSR(r1)
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr

	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	1f			/* if not, we've got problems */
#endif

2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	REST_GPR(10, r11)
#ifdef CONFIG_PPC_8xx_PERF_EVENT
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	SYNC
	RFI

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1:	lis	r3,exc_exit_restart_end@ha
	addi	r3,r3,exc_exit_restart_end@l
	cmplw	r12,r3
	bge	3f
	lis	r4,exc_exit_restart@ha
	addi	r4,r4,exc_exit_restart@l
	cmplw	r12,r4
	blt	3f
	lis	r3,fee_restarts@ha
	tophys(r3,r3)
	lwz	r5,fee_restarts@l(r3)
	addi	r5,r5,1
	stw	r5,fee_restarts@l(r3)
	mr	r12,r4		/* restart at exc_exit_restart */
	b	2b

	.section .bss
	.align	2
fee_restarts:
	.space	4
	.previous

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3:
BEGIN_FTR_SECTION
	b	2b
END_FTR_SECTION_IFSET(CPU_FTR_601)
	li	r10,-1
	stw	r10,_TRAP(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r10,MSR_KERNEL@h
	ori	r10,r10,MSR_KERNEL@l
	bl	transfer_to_handler_full
	.long	nonrecoverable_exception
	.long	ret_from_except
#endif

	.globl	ret_from_except_full
ret_from_except_full:
	REST_NVGPRS(r1)
	/* fall through */

	.globl	ret_from_except
ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
	/* Note: We don't bother telling lockdep about it */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC			/* Some chip revs have problems here... */
	MTMSRD(r10)		/* disable interrupts */

	lwz	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r0,r3,MSR_PR
	beq	resume_kernel

user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_USER_WORK_MASK
	bne	do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* Check whether this process has its own DBCR0 value.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	CURRENT_THREAD_INFO(r9, r1)
	ACCOUNT_CPU_USER_EXIT(r9, r10, r11)
#endif

	b	restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r8,TI_FLAGS(r9)
	andis.	r0,r8,_TIF_EMULATE_STACK_STORE@h
	beq+	1f

	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */

	lwz	r3,GPR1(r1)
	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
	mr	r4,r1			/* src:  current exception frame */
	mr	r1,r3			/* Reroute the trampoline frame to r1 */

	/* Copy from the original to the trampoline. */
	li	r5,INT_FRAME_SIZE/4	/* size: INT_FRAME_SIZE */
	li	r6,0			/* start offset: 0 */
	mtctr	r5
2:	lwzx	r0,r6,r4
	stwx	r0,r6,r3
	addi	r6,r6,4
	bdnz	2b

	/* Do real store operation to complete stwu */
	lwz	r5,GPR1(r1)
	stw	r8,0(r5)

	/* Clear _TIF_EMULATE_STACK_STORE flag */
	lis	r11,_TIF_EMULATE_STACK_STORE@h
	addi	r5,r9,TI_FLAGS
0:	lwarx	r8,0,r5
	andc	r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
	dcbt	0,r5
#endif
	stwcx.	r8,0,r5
	bne-	0b
1:

#ifdef CONFIG_PREEMPT
	/* check current_thread_info->preempt_count */
	lwz	r0,TI_PREEMPT(r9)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	bne	restore
	andi.	r8,r8,_TIF_NEED_RESCHED
	beq+	restore
	lwz	r3,_MSR(r1)
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore		/* don't schedule if so */
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep thinks irqs are enabled, we need to call
	 * preempt_schedule_irq with IRQs off, so we inform lockdep
	 * now that we -did- turn them off already
	 */
	bl	trace_hardirqs_off
#endif
1:	bl	preempt_schedule_irq
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r3,TI_FLAGS(r9)
	andi.	r0,r3,_TIF_NEED_RESCHED
	bne-	1b
#ifdef CONFIG_TRACE_IRQFLAGS
	/* And now, to properly rebalance the above, we tell lockdep they
	 * are being turned back on, which will happen when we return
	 */
	bl	trace_hardirqs_on
#endif
#endif /* CONFIG_PREEMPT */

	/* interrupts are hard-disabled at this point */
restore:
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
	b	1f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	beq+	1f
	li	r6,0
	iccci	r0,r0
	stw	r6,icache_44x_need_flush@l(r4)
1:
#endif  /* CONFIG_44x */

	lwz	r9,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep doesn't know about the fact that IRQs are temporarily turned
	 * off in this assembly code while peeking at TI_FLAGS() and such. However
	 * we need to inform it if the exception turned interrupts off, and we
	 * are about to turn them back on.
	 *
	 * The problem here sadly is that we don't know whether the exception was
	 * one that turned interrupts off or not. So we always tell lockdep about
	 * turning them on here when we go back to wherever we came from with EE
	 * on, even if that may mean some redundant calls being tracked. Maybe later
	 * we could encode what the exception did somewhere or test the exception
	 * type in the pt_regs but that sounds like overkill.
	 */
	andi.	r10,r9,MSR_EE
	beq	1f
	/*
	 * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
	 * which is the stack frame here, we need to force a stack frame
	 * in case we came from user space.
	 */
	stwu	r1,-32(r1)
	mflr	r0
	stw	r0,4(r1)
	stwu	r1,-32(r1)
	bl	trace_hardirqs_on
	lwz	r1,0(r1)
	lwz	r1,0(r1)
	lwz	r9,_MSR(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */

	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

	PPC405_ERR77(0,r1)
BEGIN_FTR_SECTION
	lwarx	r11,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */

	lwz	r10,_CCR(r1)
	lwz	r11,_LINK(r1)
	mtcrf	0xFF,r10
	mtlr	r11

	/*
	 * Once we put values in SRR0 and SRR1, we are in a state
	 * where exceptions are not recoverable, since taking an
	 * exception will trash SRR0 and SRR1.  Therefore we clear the
	 * MSR:RI bit to indicate this.  If we do take an exception,
	 * we can't return to the point of the exception but we
	 * can restart the exception exit path at the label
	 * exc_exit_restart below.  -- paulus
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
	SYNC
	MTMSRD(r10)		/* clear the RI bit */
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r12,_NIP(r1)
#ifdef CONFIG_PPC_8xx_PERF_EVENT
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r9
	REST_4GPRS(9, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	SYNC
	RFI

#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
	/*
	 * This is a bit different on 4xx/Book-E because it doesn't have
	 * the RI bit in the MSR.
	 * The TLB miss handler checks if we have interrupted
	 * the exception exit path and restarts it if so
	 * (well maybe one day it will... :).
	 */
	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
	REST_2GPRS(9, r1)
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
exc_exit_start:
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	REST_2GPRS(11, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	PPC405_ERR77_SYNC
	rfi
	b	.			/* prevent prefetch past rfi */

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						    \
	/* avoid any possible TLB misses here by turning off MSR.DR, we    \
	 * assume the instructions here are mapped by a pinned TLB entry */ \
	li	r10,MSR_IR;						    \
	mtmsr	r10;							    \
	isync;								    \
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

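/*
 * RET_FROM_EXC_LEVEL is the common tail for returning from a
 * critical/debug/machine check level exception: it restores the full
 * register image from the exception frame and leaves through the
 * level-specific save/restore SRRs with the matching return
 * instruction (PPC_RFCI, PPC_RFDI or PPC_RFMCI below).
 */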
#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
	bne	user_exc_return;					\
	lwz	r0,GPR0(r1);						\
	lwz	r2,GPR2(r1);						\
	REST_4GPRS(3, r1);						\
	REST_2GPRS(7, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	PPC405_ERR77(0,r1);						\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	lwz	r9,GPR9(r1);						\
	lwz	r12,GPR12(r1);						\
	lwz	r10,GPR10(r1);						\
	lwz	r11,GPR11(r1);						\
	lwz	r1,GPR1(r1);						\
	PPC405_ERR77_SYNC;						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
	lwz	r9,_##exc_lvl_srr0(r1);					\
	lwz	r10,_##exc_lvl_srr1(r1);				\
	mtspr	SPRN_##exc_lvl_srr0,r9;					\
	mtspr	SPRN_##exc_lvl_srr1,r10;

#if defined(CONFIG_PPC_BOOK3E_MMU)
#ifdef CONFIG_PHYS_64BIT
#define	RESTORE_MAS7							\
	lwz	r11,MAS7(r1);						\
	mtspr	SPRN_MAS7,r11;
#else
#define	RESTORE_MAS7
#endif /* CONFIG_PHYS_64BIT */
#define RESTORE_MMU_REGS						\
	lwz	r9,MAS0(r1);						\
	lwz	r10,MAS1(r1);						\
	lwz	r11,MAS2(r1);						\
	mtspr	SPRN_MAS0,r9;						\
	lwz	r9,MAS3(r1);						\
	mtspr	SPRN_MAS1,r10;						\
	lwz	r10,MAS6(r1);						\
	mtspr	SPRN_MAS2,r11;						\
	mtspr	SPRN_MAS3,r9;						\
	mtspr	SPRN_MAS6,r10;						\
	RESTORE_MAS7;
#elif defined(CONFIG_44x)
#define RESTORE_MMU_REGS						\
	lwz	r9,MMUCR(r1);						\
	mtspr	SPRN_MMUCR,r9;
#else
#define RESTORE_MMU_REGS
#endif

#ifdef CONFIG_40x
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lis	r10,saved_ksp_limit@ha;
	lwz	r10,saved_ksp_limit@l(r10);
	tovirt(r9,r9);
	stw	r10,KSP_LIMIT(r9)
	lis	r9,crit_srr0@ha;
	lwz	r9,crit_srr0@l(r9);
	lis	r10,crit_srr1@ha;
	lwz	r10,crit_srr1@l(r10);
	mtspr	SPRN_SRR0,r9;
	mtspr	SPRN_SRR1,r10;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
#endif /* CONFIG_40x */

#ifdef CONFIG_BOOKE
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)

	.globl	ret_from_debug_exc
ret_from_debug_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	lwz	r9,THREAD_INFO-THREAD(r9)
	CURRENT_THREAD_INFO(r10, r1)
	lwz	r10,TI_PREEMPT(r10)
	stw	r10,TI_PREEMPT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_xSRR(DSRR0,DSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
#endif /* CONFIG_BOOKE */

/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0.  Note that r0
 * has the dbcr0 value to set upon entry to this.
 */
load_dbcr0:
	mfmsr	r10		/* first disable debug exceptions */
	rlwinm	r10,r10,0,~MSR_DE
	mtmsr	r10
	isync
	mfspr	r10,SPRN_DBCR0
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_CPU(r9)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	stw	r10,0(r11)
	mtspr	SPRN_DBCR0,r0
	lwz	r10,4(r11)
	addi	r10,r10,1
	stw	r10,4(r11)
	li	r11,-1
	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
	blr

	.section .bss
	.align	4
global_dbcr0:
	.space	8*NR_CPUS
	.previous
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal

do_resched:			/* r10 contains MSR_KERNEL here */
	/* Note: We don't need to inform lockdep that we are enabling
	 * interrupts here. As far as it knows, they are already enabled
	 */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	bl	schedule
recheck:
	/* Note: and we don't tell lockdep we are disabling them again
	 * either.  These disable/enable cycles used to peek at
	 * TI_FLAGS aren't advertised.
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC
	MTMSRD(r10)		/* disable interrupts */
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched
	andi.	r0,r9,_TIF_USER_WORK_MASK
	beq	restore_user
do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	2f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
2:	addi	r3,r1,STACK_FRAME_OVERHEAD
	mr	r4,r9
	bl	do_notify_resume
	REST_NVGPRS(r1)
	b	recheck

/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
nonrecoverable:
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	cmplw	r12,r10
	bge	3f
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	cmplw	r12,r11
	blt	3f
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)
	addi	r12,r12,1
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */
	blr
3:	/* OK, we can't recover, kill this process */
	/* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_601)
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	4f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	nonrecoverable_exception
	/* shouldn't return */
	b	4b

	.section .bss
	.align	2
ee_restarts:
	.space	4
	.previous

/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
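/*
 * (Illustrative note: enter_rtas is reached from the C glue in rtas.c
 * with r3 holding the physical address of the rtas_args block; the
 * code saves the MSR, drops translation (IR/DR) and interrupts, calls
 * the RTAS entry point, and restores everything at label 1 on return.)
 */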
_GLOBAL(enter_rtas)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	LOAD_REG_ADDR(r4, rtas)
	lis	r6,1f@ha		/* physical return address for rtas */
	addi	r6,r6,1f@l
	tophys(r6,r6)
	tophys(r7,r1)
	lwz	r8,RTASENTRY(r4)
	lwz	r4,RTASBASE(r4)
	mfmsr	r9
	stw	r9,8(r1)
	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
	SYNC			/* disable interrupts so SRR0/1 */
	MTMSRD(r0)		/* don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtlr	r6
	mtspr	SPRN_SPRG_RTAS,r7
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI
1:	tophys(r9,r1)
	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	addi	r1,r1,INT_FRAME_SIZE
	li	r0,0
	mtspr	SPRN_SPRG_RTAS,r0
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI			/* return to caller */

	.globl	machine_check_in_rtas
machine_check_in_rtas:
	twi	31,0,0
	/* XXX load up BATs and panic */

#endif /* CONFIG_PPC_RTAS */