1 /* $Id: trampoline.S,v 1.26 2002/02/09 19:49:30 davem Exp $
2 * trampoline.S: Jump start slave processors on sparc64.
4 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
12 #include <asm/pstate.h>
14 #include <asm/pgtable.h>
15 #include <asm/spitfire.h>
16 #include <asm/processor.h>
17 #include <asm/thread_info.h>
19 #include <asm/hypervisor.h>
20 #include <asm/cpudata.h>
/* NOTE(review): this view of the file is sampled/incomplete -- the embedded
 * original line numbers jump, so delay-slot instructions, several labels
 * (itlb_load:, dtlb_load:, spitfire_startup:, niagara_startup:, do_unlock:,
 * after_lock_tlb:, startup_continue:) and the actual PROM/hypervisor call
 * instructions between visible lines are NOT shown here.  Comments below
 * describe only what the visible lines establish; anything beyond that is
 * marked as an assumption to confirm against the full file.
 */

/* OBP client-interface method-name strings passed to the PROM
 * "call-method" service below to lock translations into the
 * instruction and data TLBs.  The labels naming these strings
 * (presumably itlb_load:/dtlb_load:, referenced below) are on
 * lines not visible in this view.
 */
28 .asciz "SUNW,itlb-load"
31 .asciz "SUNW,dtlb-load"

/* Entry point executed by slave (secondary) CPUs at SMP bring-up;
 * sparc64_cpu_startup_end bounds the trampoline region.
 */
35 .globl sparc64_cpu_startup, sparc64_cpu_startup_end

/* Dispatch on CPU type: sun4v (Niagara), Cheetah, Cheetah+/follow-on;
 * otherwise fall through to the Spitfire path.  (Delay-slot
 * instructions after these branches are not visible here.)
 */
39 BRANCH_IF_SUN4V(g1, niagara_startup)
40 BRANCH_IF_CHEETAH_BASE(g1, g5, cheetah_startup)
41 BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1, g5, cheetah_plus_startup)
43 ba,pt %xcc, spitfire_startup

/* Cheetah+ path: keep whatever DCU/DCR settings OBP chose and jump
 * straight to the generic Cheetah setup.
 */
47 /* Preserve OBP chosen DCU and DCR register settings. */
48 ba,pt %xcc, cheetah_generic_startup

/* Base Cheetah path: build a DCR value (branch/return prediction,
 * speculative/instruction-prefetch, multi-scalar enables) in %g1,
 * then a DCU value enabling the MMUs and caches in %g5 and store it
 * to the DCU control register via its ASI.
 */
52 mov DCR_BPE | DCR_RPE | DCR_SI | DCR_IFPOE | DCR_MS, %g1
55 sethi %uhi(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g5
56 or %g5, %ulo(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g5
58 or %g5, DCU_DM | DCU_IM | DCU_DC | DCU_IC, %g5
59 stxa %g5, [%g0] ASI_DCU_CONTROL_REG

/* Common Cheetah setup: zero the TSB extension registers for the
 * primary, secondary and nucleus contexts in the D-MMU (and I-MMU
 * where the register exists there too).
 */
62 cheetah_generic_startup:
63 mov TSB_EXTENSION_P, %g3
64 stxa %g0, [%g3] ASI_DMMU
65 stxa %g0, [%g3] ASI_IMMU
68 mov TSB_EXTENSION_S, %g3
69 stxa %g0, [%g3] ASI_DMMU
72 mov TSB_EXTENSION_N, %g3
73 stxa %g0, [%g3] ASI_DMMU
74 stxa %g0, [%g3] ASI_IMMU

/* Mask STICK compare interrupts (bit 63 disable -- the %hi(0x80000000)
 * constant is presumably shifted up on a line not visible here; confirm
 * against the full file), then join the common path.
 */
79 /* Disable STICK_INT interrupts. */
80 sethi %hi(0x80000000), %g5
84 ba,pt %xcc, startup_continue

/* Spitfire path (label not visible in this view): enable I/D caches
 * and I/D MMUs through the LSU control register.
 */
88 mov (LSU_CONTROL_IC | LSU_CONTROL_DC | LSU_CONTROL_IM | LSU_CONTROL_DM), %g1
89 stxa %g1, [%g0] ASI_LSU_CONTROL

/* Presumably the TICK-interrupt disable constant, as for STICK above
 * -- surrounding lines are missing; confirm.
 */
95 sethi %hi(0x80000000), %g2

/* sun4v CPUs map the kernel via the hypervisor instead of OBP. */
99 BRANCH_IF_SUN4V(g1, niagara_lock_tlb)

101 /* Call OBP by hand to lock KERNBASE into i/d tlbs.
102 * We lock 2 consecutive entries if we are 'bigkernel'.

/* Acquire prom_entry_lock with a ldstub spinlock (the branch that
 * retries on a non-zero %g1 is not visible here); the membar orders
 * the lock acquisition before the PROM argument stores below.
 */
106 sethi %hi(prom_entry_lock), %g2
107 1: ldstub [%g2 + %lo(prom_entry_lock)], %g1
108 membar #StoreLoad | #StoreStore

/* Switch to the PROM's stack: p1275buf+0x10 appears to hold a saved
 * stack pointer (TODO confirm field layout), biased down to make a
 * minimal frame.
 */
112 sethi %hi(p1275buf), %g2
113 or %g2, %lo(p1275buf), %g2
114 ldx [%g2 + 0x10], %l2
116 add %l2, -(192 + 128), %sp

/* --- OBP call #1: lock KERNBASE into the I-TLB. ---
 * Build the IEEE-1275 argument array on the (biased) stack:
 *   +0x00 service name ("call-method"), +0x08/+0x10 presumably
 *   #args/#rets (stores of intervening %g2 values not visible),
 *   +0x18 method ("SUNW,itlb-load"), +0x20 MMU ihandle,
 *   +0x28 virtual address (KERNBASE), +0x30 TTE data,
 *   +0x38 TLB entry index (cheetah uses a different index -- the
 *   non-cheetah value set between the BRANCH and the stx is missing).
 */
119 sethi %hi(call_method), %g2
120 or %g2, %lo(call_method), %g2
121 stx %g2, [%sp + 2047 + 128 + 0x00]
123 stx %g2, [%sp + 2047 + 128 + 0x08]
125 stx %g2, [%sp + 2047 + 128 + 0x10]
126 sethi %hi(itlb_load), %g2
127 or %g2, %lo(itlb_load), %g2
128 stx %g2, [%sp + 2047 + 128 + 0x18]
129 sethi %hi(prom_mmu_ihandle_cache), %g2
130 lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2
131 stx %g2, [%sp + 2047 + 128 + 0x20]
132 sethi %hi(KERNBASE), %g2
133 stx %g2, [%sp + 2047 + 128 + 0x28]
134 sethi %hi(kern_locked_tte_data), %g2
135 ldx [%g2 + %lo(kern_locked_tte_data)], %g2
136 stx %g2, [%sp + 2047 + 128 + 0x30]
139 BRANCH_IF_ANY_CHEETAH(g1,g5,1f)
143 stx %g2, [%sp + 2047 + 128 + 0x38]

/* Invoke the PROM: %o1 = cif handler from p1275buf+0x08 (presumably),
 * %o0 = unbiased pointer to the argument array.  The actual call
 * instruction is on a line not shown here.
 */
144 sethi %hi(p1275buf), %g2
145 or %g2, %lo(p1275buf), %g2
146 ldx [%g2 + 0x08], %o1
148 add %sp, (2047 + 128), %o0

/* If 'bigkernel' is clear, skip the second 4MB I-TLB entry (the
 * branch consuming %g2 is on a missing line).
 */
150 sethi %hi(bigkernel), %g2
151 lduw [%g2 + %lo(bigkernel)], %g2

/* --- OBP call #2 (bigkernel only): lock KERNBASE+4MB into the I-TLB.
 * Same argument layout; TTE data is offset by 0x400000 (the add of
 * %g1 into %g2 falls on a missing line -- confirm).
 */
155 sethi %hi(call_method), %g2
156 or %g2, %lo(call_method), %g2
157 stx %g2, [%sp + 2047 + 128 + 0x00]
159 stx %g2, [%sp + 2047 + 128 + 0x08]
161 stx %g2, [%sp + 2047 + 128 + 0x10]
162 sethi %hi(itlb_load), %g2
163 or %g2, %lo(itlb_load), %g2
164 stx %g2, [%sp + 2047 + 128 + 0x18]
165 sethi %hi(prom_mmu_ihandle_cache), %g2
166 lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2
167 stx %g2, [%sp + 2047 + 128 + 0x20]
168 sethi %hi(KERNBASE + 0x400000), %g2
169 stx %g2, [%sp + 2047 + 128 + 0x28]
170 sethi %hi(kern_locked_tte_data), %g2
171 ldx [%g2 + %lo(kern_locked_tte_data)], %g2
172 sethi %hi(0x400000), %g1
174 stx %g2, [%sp + 2047 + 128 + 0x30]
177 BRANCH_IF_ANY_CHEETAH(g1,g5,1f)
181 stx %g2, [%sp + 2047 + 128 + 0x38]
182 sethi %hi(p1275buf), %g2
183 or %g2, %lo(p1275buf), %g2
184 ldx [%g2 + 0x08], %o1
186 add %sp, (2047 + 128), %o0

/* --- OBP call #3: lock KERNBASE into the D-TLB (mirror of call #1
 * with the "SUNW,dtlb-load" method).
 */
189 sethi %hi(call_method), %g2
190 or %g2, %lo(call_method), %g2
191 stx %g2, [%sp + 2047 + 128 + 0x00]
193 stx %g2, [%sp + 2047 + 128 + 0x08]
195 stx %g2, [%sp + 2047 + 128 + 0x10]
196 sethi %hi(dtlb_load), %g2
197 or %g2, %lo(dtlb_load), %g2
198 stx %g2, [%sp + 2047 + 128 + 0x18]
199 sethi %hi(prom_mmu_ihandle_cache), %g2
200 lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2
201 stx %g2, [%sp + 2047 + 128 + 0x20]
202 sethi %hi(KERNBASE), %g2
203 stx %g2, [%sp + 2047 + 128 + 0x28]
204 sethi %hi(kern_locked_tte_data), %g2
205 ldx [%g2 + %lo(kern_locked_tte_data)], %g2
206 stx %g2, [%sp + 2047 + 128 + 0x30]
209 BRANCH_IF_ANY_CHEETAH(g1,g5,1f)
214 stx %g2, [%sp + 2047 + 128 + 0x38]
215 sethi %hi(p1275buf), %g2
216 or %g2, %lo(p1275buf), %g2
217 ldx [%g2 + 0x08], %o1
219 add %sp, (2047 + 128), %o0

/* --- OBP call #4 (bigkernel only): lock KERNBASE+4MB into the D-TLB;
 * otherwise branch ahead to release the PROM lock (do_unlock label is
 * not visible in this view).
 */
221 sethi %hi(bigkernel), %g2
222 lduw [%g2 + %lo(bigkernel)], %g2
223 brz,pt %g2, do_unlock
226 sethi %hi(call_method), %g2
227 or %g2, %lo(call_method), %g2
228 stx %g2, [%sp + 2047 + 128 + 0x00]
230 stx %g2, [%sp + 2047 + 128 + 0x08]
232 stx %g2, [%sp + 2047 + 128 + 0x10]
233 sethi %hi(dtlb_load), %g2
234 or %g2, %lo(dtlb_load), %g2
235 stx %g2, [%sp + 2047 + 128 + 0x18]
236 sethi %hi(prom_mmu_ihandle_cache), %g2
237 lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2
238 stx %g2, [%sp + 2047 + 128 + 0x20]
239 sethi %hi(KERNBASE + 0x400000), %g2
240 stx %g2, [%sp + 2047 + 128 + 0x28]
241 sethi %hi(kern_locked_tte_data), %g2
242 ldx [%g2 + %lo(kern_locked_tte_data)], %g2
243 sethi %hi(0x400000), %g1
245 stx %g2, [%sp + 2047 + 128 + 0x30]
248 BRANCH_IF_ANY_CHEETAH(g1,g5,1f)
253 stx %g2, [%sp + 2047 + 128 + 0x38]
254 sethi %hi(p1275buf), %g2
255 or %g2, %lo(p1275buf), %g2
256 ldx [%g2 + 0x08], %o1
258 add %sp, (2047 + 128), %o0

/* Release prom_entry_lock: zero-byte store, ordered by membar so
 * the PROM argument stores are globally visible before the unlock.
 */
261 sethi %hi(prom_entry_lock), %g2
262 stb %g0, [%g2 + %lo(prom_entry_lock)]
263 membar #StoreStore | #StoreLoad

265 ba,pt %xcc, after_lock_tlb

/* sun4v path (niagara_lock_tlb label not visible): ask the hypervisor
 * to install permanent mappings via HV_FAST_MMU_MAP_PERM_ADDR.
 * Each call below sets %o5 = fast-trap function number, %o0 = vaddr,
 * %o2 = TTE data; the remaining arg setup (context/flags presumably
 * selecting I-TLB vs D-TLB) and the 'ta' trap instructions fall on
 * missing lines -- confirm against the full file.  First pair: map
 * KERNBASE for both TLBs.
 */
269 mov HV_FAST_MMU_MAP_PERM_ADDR, %o5
270 sethi %hi(KERNBASE), %o0
272 sethi %hi(kern_locked_tte_data), %o2
273 ldx [%o2 + %lo(kern_locked_tte_data)], %o2
277 mov HV_FAST_MMU_MAP_PERM_ADDR, %o5
278 sethi %hi(KERNBASE), %o0
280 sethi %hi(kern_locked_tte_data), %o2
281 ldx [%o2 + %lo(kern_locked_tte_data)], %o2

/* Second pair (bigkernel only): map KERNBASE+4MB with TTE data
 * offset by 0x400000 (%o3); skip if bigkernel is zero.
 */
285 sethi %hi(bigkernel), %g2
286 lduw [%g2 + %lo(bigkernel)], %g2
287 brz,pt %g2, after_lock_tlb
290 mov HV_FAST_MMU_MAP_PERM_ADDR, %o5
291 sethi %hi(KERNBASE + 0x400000), %o0
293 sethi %hi(kern_locked_tte_data), %o2
294 ldx [%o2 + %lo(kern_locked_tte_data)], %o2
295 sethi %hi(0x400000), %o3
300 mov HV_FAST_MMU_MAP_PERM_ADDR, %o5
301 sethi %hi(KERNBASE + 0x400000), %o0
303 sethi %hi(kern_locked_tte_data), %o2
304 ldx [%o2 + %lo(kern_locked_tte_data)], %o2
305 sethi %hi(0x400000), %o3

/* Post-lock bring-up (after_lock_tlb label not visible): run
 * privileged with FPU enabled, interrupts still disabled (no
 * PSTATE_IE yet).
 */
316 wrpr %g0, (PSTATE_PRIV | PSTATE_PEF), %pstate

319 /* XXX Buggy PROM... */

/* Clear the primary and secondary context registers.  The 661:/
 * .sun4v_1insn_patch pairs let boot code patch the ASI_DMMU store
 * into an ASI_MMU store when running on sun4v (the .previous and
 * the patch-table word entries are on missing lines).
 */
325 mov PRIMARY_CONTEXT, %g7
327 661: stxa %g0, [%g7] ASI_DMMU
328 .section .sun4v_1insn_patch, "ax"
330 stxa %g0, [%g7] ASI_MMU
334 mov SECONDARY_CONTEXT, %g7
336 661: stxa %g0, [%g7] ASI_DMMU
337 .section .sun4v_1insn_patch, "ax"
339 stxa %g0, [%g7] ASI_MMU

/* Build the initial kernel stack pointer from the thread area:
 * scale by THREAD_SHIFT, then bias down by an initial frame plus
 * the sparc64 stack bias (how %g5 was seeded is not visible here).
 */
345 sllx %g5, THREAD_SHIFT, %g5
346 sub %g5, (STACKFRAME_SZ + STACK_BIAS), %g5

353 /* Load TBA, then we can resurface. */
354 sethi %hi(sparc64_ttable_tl0), %g5

/* Load 'current' task pointer into %g4 from thread_info (%g6). */
357 ldx [%g6 + TI_TASK], %g4

/* Per-cpu C initialization: irqwork, then (sun4v only, per the
 * tlb_type check whose compare/branch lines are missing) the mondo
 * queues, then the per-cpu trap block.
 */
361 call init_irqwork_curcpu
364 sethi %hi(tlb_type), %g3
365 lduw [%g3 + %lo(tlb_type)], %g2
370 call sun4v_init_mondo_queues
373 1: call init_cur_cpu_trap

376 /* Start using proper page size encodings in ctx register. */
377 sethi %hi(sparc64_kern_pri_context), %g3
378 ldx [%g3 + %lo(sparc64_kern_pri_context)], %g2
379 mov PRIMARY_CONTEXT, %g1
381 661: stxa %g2, [%g1] ASI_DMMU
382 .section .sun4v_1insn_patch, "ax"
384 stxa %g2, [%g1] ASI_MMU

/* Re-enable interrupts by setting PSTATE_IE in a previously-read
 * %pstate value (the rdpr/wrpr lines are not visible here).
 */
390 or %o1, PSTATE_IE, %o1

/* Branch on is_sun4v (the consuming brz/brnz is missing) to pick
 * which PROM trap-table registration path to take.
 */
393 sethi %hi(is_sun4v), %o0
394 lduw [%o0 + %lo(is_sun4v)], %o0

/* sun4v: publish this cpu's MMU fault-status area physical address
 * in the scratchpad register before installing the trap table.
 */
398 TRAP_LOAD_TRAP_BLOCK(%g2, %g3)
399 add %g2, TRAP_PER_CPU_FAULT_INFO, %g2
400 stxa %g2, [%g0] ASI_SCRATCHPAD

402 /* Compute physical address:
404 * paddr = kern_base + (mmfsa_vaddr - KERNBASE)
406 sethi %hi(KERNBASE), %g3
408 sethi %hi(kern_base), %g3
409 ldx [%g3 + %lo(kern_base)], %g3

/* Register sparc64_ttable_tl0 as the trap table with the PROM --
 * sun4v variant first, non-sun4v variant at local label 1:.  The
 * sethi in each delay slot sets up %o0 (the %or/low-bits line, if
 * any, is not visible).
 */
412 call prom_set_trap_table_sun4v
413 sethi %hi(sparc64_ttable_tl0), %o0
418 1: call prom_set_trap_table
419 sethi %hi(sparc64_ttable_tl0), %o0

430 sparc64_cpu_startup_end: