Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6
[linux-2.6.git] / arch / sparc / kernel / rtrap_64.S
1 /*
2  * rtrap.S: Preparing for return from trap on Sparc V9.
3  *
4  * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
5  * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
6  */
7
8
9 #include <asm/asi.h>
10 #include <asm/pstate.h>
11 #include <asm/ptrace.h>
12 #include <asm/spitfire.h>
13 #include <asm/head.h>
14 #include <asm/visasm.h>
15 #include <asm/processor.h>
16
/* PSTATE images written via wrpr on the return-from-trap paths:
 *   RTRAP_PSTATE           - TSO memory model, FPU enabled (PEF),
 *                            privileged, interrupts enabled (IE).
 *   RTRAP_PSTATE_IRQOFF    - same, but with PSTATE_IE clear (IRQs off).
 *   RTRAP_PSTATE_AG_IRQOFF - IRQs off with the alternate-global register
 *                            set selected (pre-sun4v trap globals; sun4v
 *                            patches this usage to SET_GL, see below).
 */
17 #define         RTRAP_PSTATE            (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
18 #define         RTRAP_PSTATE_IRQOFF     (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV)
19 #define         RTRAP_PSTATE_AG_IRQOFF  (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
20
/* Out-of-line slow paths for the return-to-user checks below.  Each one
 * does its work and then branches back to the matching *_continue label
 * on the main rtrap path.
 */
21                 .text
22                 .align                  32
                   /* Pending softirqs were seen by the rtrap entry check:
                    * run them, then rejoin the main path.
                    */
23 __handle_softirq:
24                 call                    do_softirq
25                  nop
26                 ba,a,pt                 %xcc, __handle_softirq_continue
27                  nop
                   /* TIF_NEED_RESCHED was set: call schedule() with IRQs
                    * re-enabled (the delay-slot wrpr sets PSTATE_IE just
                    * before the call), then disable IRQs again and redo
                    * the pending-work checks from the top.
                    */
28 __handle_preemption:
29                 call                    schedule
30                  wrpr                   %g0, RTRAP_PSTATE, %pstate
31                 ba,pt                   %xcc, __handle_preemption_continue
32                  wrpr                   %g0, RTRAP_PSTATE_IRQOFF, %pstate
/* User register windows are pending (TI_WSAVED != 0).  Fault them into
 * the user stack with IRQs enabled, then redo the resched/signal checks
 * with IRQs off, since new work may have arrived while we were unblocked.
 */
34 __handle_user_windows:
35                 call                    fault_in_user_windows
36                  wrpr                   %g0, RTRAP_PSTATE, %pstate
37                 wrpr                    %g0, RTRAP_PSTATE_IRQOFF, %pstate
38                 /* Redo sched+sig checks */
39                 ldx                     [%g6 + TI_FLAGS], %l0
40                 andcc                   %l0, _TIF_NEED_RESCHED, %g0
41
42                 be,pt                   %xcc, 1f
43                  nop
44                 call                    schedule
45                  wrpr                   %g0, RTRAP_PSTATE, %pstate
46                 wrpr                    %g0, RTRAP_PSTATE_IRQOFF, %pstate
47                 ldx                     [%g6 + TI_FLAGS], %l0
48
49 1:              andcc                   %l0, _TIF_DO_NOTIFY_RESUME_MASK, %g0
50                 be,pt                   %xcc, __handle_user_windows_continue
51                  nop
                   /* do_notify_resume(regs = %sp + PTREGS_OFF, %l5, %l0),
                    * called with IRQs enabled via the delay-slot wrpr,
                    * disabled again immediately on return.
                    */
52                 mov                     %l5, %o1
53                 add                     %sp, PTREGS_OFF, %o0
54                 mov                     %l0, %o2
55
56                 call                    do_notify_resume
57                  wrpr                   %g0, RTRAP_PSTATE, %pstate
58                 wrpr                    %g0, RTRAP_PSTATE_IRQOFF, %pstate
59                 /* Signal delivery can modify pt_regs tstate, so we must
60                  * reload it.
61                  */
62                 ldx                     [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
                   /* Re-split tstate: %l4 = PIL field (bits 23:20),
                    * %l1 = tstate with the PIL field cleared.
                    */
63                 sethi                   %hi(0xf << 20), %l4
64                 and                     %l1, %l4, %l4
65                 ba,pt                   %xcc, __handle_user_windows_continue
66
67                  andn                   %l1, %l4, %l1
/* Saved tstate says the FPU is enabled (TSTATE_PEF).  If %fprs shows the
 * FPU is not actually enabled (FPRS_FEF clear), drop TSTATE_PEF from the
 * tstate image in %l1 — the annulled delay slot (be,a) performs the andn
 * only when that branch is taken.
 */
68 __handle_userfpu:
69                 rd                      %fprs, %l5
70                 andcc                   %l5, FPRS_FEF, %g0
71                 sethi                   %hi(TSTATE_PEF), %o0
72                 be,a,pn                 %icc, __handle_userfpu_continue
73                  andn                   %l1, %o0, %l1
74                 ba,a,pt                 %xcc, __handle_userfpu_continue
75
/* Signal / notify-resume work is pending.  Call
 * do_notify_resume(regs = %sp + PTREGS_OFF, %l5, %l0) with IRQs enabled
 * (delay-slot wrpr), re-disable IRQs, then reload the possibly-rewritten
 * tstate and split off its PIL field again (%l4 = PIL bits, %l1 = rest).
 */
76 __handle_signal:
77                 mov                     %l5, %o1
78                 add                     %sp, PTREGS_OFF, %o0
79                 mov                     %l0, %o2
80                 call                    do_notify_resume
81                  wrpr                   %g0, RTRAP_PSTATE, %pstate
82                 wrpr                    %g0, RTRAP_PSTATE_IRQOFF, %pstate
83
84                 /* Signal delivery can modify pt_regs tstate, so we must
85                  * reload it.
86                  */
87                 ldx                     [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
88                 sethi                   %hi(0xf << 20), %l4
89                 and                     %l1, %l4, %l4
90                 ba,pt                   %xcc, __handle_signal_continue
91                  andn                   %l1, %l4, %l1
92
93                 /* When returning from a NMI (%pil==15) interrupt we want to
94                  * avoid running softirqs, doing IRQ tracing, preempting, etc.
95                  */
96                 .globl                  rtrap_nmi
                   /* Set up %l1 = saved tstate with the PIL field masked out
                    * and %l4 = saved PIL value (TSTATE bits 23:20 shifted
                    * down), restore %pil in the delay slot, and join the
                    * main path past the softirq/irq-trace/irq-enable steps.
                    */
97 rtrap_nmi:      ldx                     [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
98                 sethi                   %hi(0xf << 20), %l4
99                 and                     %l1, %l4, %l4
100                 andn                    %l1, %l4, %l1
101                 srl                     %l4, 20, %l4
102                 ba,pt                   %xcc, rtrap_no_irq_enable
103                  wrpr                   %l4, %pil
/* Main return-from-trap entry point.  First check for pending softirqs:
 * a non-zero word loaded from __cpu_data (on SMP, indexed by %g5 —
 * NOTE(review): presumably this cpu's per-cpu offset / softirq-pending
 * word; confirm against this kernel's cpudata layout) diverts to
 * __handle_softirq.  Then split the saved tstate into %l1 (tstate with
 * PIL cleared) and %l4 (PIL value).
 */
105                 .align                  64
106                 .globl                  rtrap_irq, rtrap, irqsz_patchme, rtrap_xcall
107 rtrap_irq:
108 rtrap:
109 #ifndef CONFIG_SMP
110                 sethi                   %hi(__cpu_data), %l0
111                 lduw                    [%l0 + %lo(__cpu_data)], %l1
112 #else
113                 sethi                   %hi(__cpu_data), %l0
114                 or                      %l0, %lo(__cpu_data), %l0
115                 lduw                    [%l0 + %g5], %l1
116 #endif
117                 cmp                     %l1, 0
118
119                 /* mm/ultra.S:xcall_report_regs KNOWS about this load. */
120                 bne,pn                  %icc, __handle_softirq
121                  ldx                    [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
122 __handle_softirq_continue:
123 rtrap_xcall:
                   /* %l4 = TSTATE.PIL (bits 23:20) shifted to its value,
                    * %l1 = tstate with the PIL field cleared.
                    */
124                 sethi                   %hi(0xf << 20), %l4
125                 and                     %l1, %l4, %l4
126                 andn                    %l1, %l4, %l1
127                 srl                     %l4, 20, %l4
128 #ifdef CONFIG_TRACE_IRQFLAGS
                   /* Only trace irqs-on when returning to PIL 0. */
129                 brnz,pn                 %l4, rtrap_no_irq_enable
130                  nop
131                 call                    trace_hardirqs_on
132                  nop
133                 wrpr                    %l4, %pil
134 #endif
135 rtrap_no_irq_enable:
                   /* %l3 = TSTATE_PRIV bit of the saved tstate: non-zero
                    * means we are returning to kernel mode, zero to user.
                    * %l3 stays live all the way to rt_continue/kern_rtt.
                    */
136                 andcc                   %l1, TSTATE_PRIV, %l3
137                 bne,pn                  %icc, to_kernel
138                  nop
139
140                 /* We must hold IRQs off and atomically test schedule+signal
141                  * state, then hold them off all the way back to userspace.
142                  * If we are returning to kernel, none of this matters.  Note
143                  * that we are disabling interrupts via PSTATE_IE, not using
144                  * %pil.
145                  *
146                  * If we do not do this, there is a window where we would do
147                  * the tests, later the signal/resched event arrives but we do
148                  * not process it since we are still in kernel mode.  It would
149                  * take until the next local IRQ before the signal/resched
150                  * event would be handled.
151                  *
152                  * This also means that if we have to deal with user
153                  * windows, we have to redo all of these sched+signal checks
154                  * with IRQs disabled.
155                  */
156 to_user:        wrpr                    %g0, RTRAP_PSTATE_IRQOFF, %pstate
157                 wrpr                    0, %pil
158 __handle_preemption_continue:
                   /* %l0 = thread flags.  Any bit in _TIF_USER_WORK_MASK
                    * sends us to one of the slow paths above; the delay-slot
                    * andcc pre-computes the TSTATE_PEF test that user_nowork
                    * branches on.
                    */
159                 ldx                     [%g6 + TI_FLAGS], %l0
160                 sethi                   %hi(_TIF_USER_WORK_MASK), %o0
161                 or                      %o0, %lo(_TIF_USER_WORK_MASK), %o0
162                 andcc                   %l0, %o0, %g0
163                 sethi                   %hi(TSTATE_PEF), %o0
164                 be,pt                   %xcc, user_nowork
165                  andcc                  %l1, %o0, %g0
166                 andcc                   %l0, _TIF_NEED_RESCHED, %g0
167                 bne,pn                  %xcc, __handle_preemption
168                  andcc                  %l0, _TIF_DO_NOTIFY_RESUME_MASK, %g0
169                 bne,pn                  %xcc, __handle_signal
170 __handle_signal_continue:
171                  ldub                   [%g6 + TI_WSAVED], %o2
172                 brnz,pn                 %o2, __handle_user_windows
173                  nop
174 __handle_user_windows_continue:
                   /* Redo the TSTATE_PEF test (the slow paths may have
                    * clobbered the condition codes / %o0).
                    */
175                 sethi                   %hi(TSTATE_PEF), %o0
176                 andcc                   %l1, %o0, %g0
177
178                 /* This fpdepth clear is necessary for non-syscall rtraps only */
179 user_nowork:
180                 bne,pn                  %xcc, __handle_userfpu
181                  stb                    %g0, [%g6 + TI_FPDEPTH]
182 __handle_userfpu_continue:
/* Common final restore path for both kernel and user returns.
 * On entry: %l1 = tstate image to restore (PIL field cleared),
 * %l4 = PIL to restore, %l3 != 0 iff returning to a privileged context.
 * Restores globals, switches to trap globals, restores the in/locals
 * from pt_regs, then rebuilds %y/%pil/%tl/%tstate/%tpc/%tnpc.
 */
184 rt_continue:    ldx                     [%sp + PTREGS_OFF + PT_V9_G1], %g1
185                 ldx                     [%sp + PTREGS_OFF + PT_V9_G2], %g2
186
187                 ldx                     [%sp + PTREGS_OFF + PT_V9_G3], %g3
188                 ldx                     [%sp + PTREGS_OFF + PT_V9_G4], %g4
189                 ldx                     [%sp + PTREGS_OFF + PT_V9_G5], %g5
                   /* %l2 preserves the thread pointer (%g6) across the
                    * global-register restore; skipped for user returns
                    * (%l3 == 0) which take the LOAD_PER_CPU_BASE path.
                    */
190                 brz,pt                  %l3, 1f
191                 mov                     %g6, %l2
192
193                 /* Must do this before thread reg is clobbered below.  */
194                 LOAD_PER_CPU_BASE(%g5, %g6, %i0, %i1, %i2)
195 1:
196                 ldx                     [%sp + PTREGS_OFF + PT_V9_G6], %g6
197                 ldx                     [%sp + PTREGS_OFF + PT_V9_G7], %g7
198
199                 /* Normal globals are restored, go to trap globals.  */
                   /* sun4v has no alternate globals: the patch section
                    * replaces the PSTATE_AG write with SET_GL(1).
                    */
200 661:            wrpr                    %g0, RTRAP_PSTATE_AG_IRQOFF, %pstate
201                 nop
202                 .section                .sun4v_2insn_patch, "ax"
203                 .word                   661b
204                 wrpr                    %g0, RTRAP_PSTATE_IRQOFF, %pstate
205                 SET_GL(1)
206                 .previous
207
208                 mov                     %l2, %g6
209
210                 ldx                     [%sp + PTREGS_OFF + PT_V9_I0], %i0
211                 ldx                     [%sp + PTREGS_OFF + PT_V9_I1], %i1
212
213                 ldx                     [%sp + PTREGS_OFF + PT_V9_I2], %i2
214                 ldx                     [%sp + PTREGS_OFF + PT_V9_I3], %i3
215                 ldx                     [%sp + PTREGS_OFF + PT_V9_I4], %i4
216                 ldx                     [%sp + PTREGS_OFF + PT_V9_I5], %i5
217                 ldx                     [%sp + PTREGS_OFF + PT_V9_I6], %i6
218                 ldx                     [%sp + PTREGS_OFF + PT_V9_I7], %i7
219                 ldx                     [%sp + PTREGS_OFF + PT_V9_TPC], %l2
220                 ldx                     [%sp + PTREGS_OFF + PT_V9_TNPC], %o2
221
222                 ld                      [%sp + PTREGS_OFF + PT_V9_Y], %o3
223                 wr                      %o3, %g0, %y
224                 wrpr                    %l4, 0x0, %pil
225                 wrpr                    %g0, 0x1, %tl
                   /* TSTATE_SYSCALL must not leak back into the restored
                    * trap state.
                    */
226                 andn                    %l1, TSTATE_SYSCALL, %l1
227                 wrpr                    %l1, %g0, %tstate
228                 wrpr                    %l2, %g0, %tpc
229                 wrpr                    %o2, %g0, %tnpc
230
231                 brnz,pn                 %l3, kern_rtt
232                  mov                    PRIMARY_CONTEXT, %l7
233
/* Returning to user mode.  Read PRIMARY_CONTEXT, OR in
 * sparc64_kern_pri_nuc_bits, write it back and flush — the ldxa/stxa
 * pairs are patched from ASI_DMMU to ASI_MMU on sun4v.  Then set up the
 * register windows: %canrestore = %otherwin, %wstate >>= 3, %otherwin = 0.
 * If there were otherwin windows, restore directly; otherwise back off
 * one window (%cwp - 1) and fill it from the user stack via ASI_AIUP,
 * using the 32- or 64-bit fill depending on TIF_32BIT.
 */
234 661:            ldxa                    [%l7 + %l7] ASI_DMMU, %l0
235                 .section                .sun4v_1insn_patch, "ax"
236                 .word                   661b
237                 ldxa                    [%l7 + %l7] ASI_MMU, %l0
238                 .previous
239
240                 sethi                   %hi(sparc64_kern_pri_nuc_bits), %l1
241                 ldx                     [%l1 + %lo(sparc64_kern_pri_nuc_bits)], %l1
242                 or                      %l0, %l1, %l0
243
244 661:            stxa                    %l0, [%l7] ASI_DMMU
245                 .section                .sun4v_1insn_patch, "ax"
246                 .word                   661b
247                 stxa                    %l0, [%l7] ASI_MMU
248                 .previous
249
                   /* flush of a kernel text address makes the MMU write
                    * take effect before we continue.
                    */
250                 sethi                   %hi(KERNBASE), %l7
251                 flush                   %l7
252                 rdpr                    %wstate, %l1
253                 rdpr                    %otherwin, %l2
254                 srl                     %l1, 3, %l1
255
256                 wrpr                    %l2, %g0, %canrestore
257                 wrpr                    %l1, %g0, %wstate
258                 brnz,pt                 %l2, user_rtt_restore
259                  wrpr                   %g0, %g0, %otherwin
260
                   /* No window to restore: fill one from userspace.
                    * %asi = ASI_AIUP so the fill loads use user addressing.
                    */
261                 ldx                     [%g6 + TI_FLAGS], %g3
262                 wr                      %g0, ASI_AIUP, %asi
263                 rdpr                    %cwp, %g1
264                 andcc                   %g3, _TIF_32BIT, %g0
265                 sub                     %g1, 1, %g1
266                 bne,pt                  %xcc, user_rtt_fill_32bit
267                  wrpr                   %g1, %cwp
268                 ba,a,pt                 %xcc, user_rtt_fill_64bit
269
/* The user window fill above faulted.  Undo the window rewind
 * (%cwp + 1, %wstate << 3), switch the primary context back to the
 * kernel context, record a WINFIXUP fault (fault code in %g4, fault
 * address in %g5) in thread_info, restore normal globals and the
 * thread/task registers, and call do_sparc64_fault() before retrying
 * the whole rtrap sequence.
 */
270 user_rtt_fill_fixup:
271                 rdpr    %cwp, %g1
272                 add     %g1, 1, %g1
273                 wrpr    %g1, 0x0, %cwp
274
275                 rdpr    %wstate, %g2
276                 sll     %g2, 3, %g2
277                 wrpr    %g2, 0x0, %wstate
278
279                 /* We know %canrestore and %otherwin are both zero.  */
280
281                 sethi   %hi(sparc64_kern_pri_context), %g2
282                 ldx     [%g2 + %lo(sparc64_kern_pri_context)], %g2
283                 mov     PRIMARY_CONTEXT, %g1
284
285 661:            stxa    %g2, [%g1] ASI_DMMU
286                 .section .sun4v_1insn_patch, "ax"
287                 .word   661b
288                 stxa    %g2, [%g1] ASI_MMU
289                 .previous
290
291                 sethi   %hi(KERNBASE), %g1
292                 flush   %g1
293
294                 or      %g4, FAULT_CODE_WINFIXUP, %g4
295                 stb     %g4, [%g6 + TI_FAULT_CODE]
296                 stx     %g5, [%g6 + TI_FAULT_ADDR]
297
                   /* Drop back to %tl 0; %l1 carries the thread pointer
                    * across the globals switch (SET_GL(0) on sun4v, nop
                    * on sun4u where we are already on normal globals).
                    */
298                 mov     %g6, %l1
299                 wrpr    %g0, 0x0, %tl
300
301 661:            nop
302                 .section                .sun4v_1insn_patch, "ax"
303                 .word                   661b
304                 SET_GL(0)
305                 .previous
306
307                 wrpr    %g0, RTRAP_PSTATE, %pstate
308
309                 mov     %l1, %g6
310                 ldx     [%g6 + TI_TASK], %g4
311                 LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
312                 call    do_sparc64_fault
313                  add    %sp, PTREGS_OFF, %o0
314                 ba,pt   %xcc, rtrap
315                  nop
316
316
/* user_rtt_pre_restore: re-enter the window we backed out of (%cwp + 1)
 * before restoring.  user_rtt_restore: pop one window, mark all
 * restorable windows clean, and return to the trapped context via retry.
 */
317 user_rtt_pre_restore:
318                 add                     %g1, 1, %g1
319                 wrpr                    %g1, 0x0, %cwp

321 user_rtt_restore:
322                 restore
323                 rdpr                    %canrestore, %g1
324                 wrpr                    %g1, 0x0, %cleanwin
325                 retry
326                 nop
327
/* Kernel-mode return: if no window is restorable, fill one first
 * (kern_rtt_fill, defined elsewhere).  Clear the pt_regs magic word
 * before the final restore + retry.
 */
328 kern_rtt:       rdpr                    %canrestore, %g1
329                 brz,pn                  %g1, kern_rtt_fill
330                  nop
331 kern_rtt_restore:
332                 stw                     %g0, [%sp + PTREGS_OFF + PT_V9_MAGIC]
333                 restore
334                 retry
336 to_kernel:
337 #ifdef CONFIG_PREEMPT
                   /* Kernel preemption: only preempt when preempt_count
                    * (TI_PRE_COUNT) is zero, TIF_NEED_RESCHED is set, and
                    * we are returning to PIL 0 (%l4 holds the PIL from the
                    * saved tstate).  PREEMPT_ACTIVE is set in TI_PRE_COUNT
                    * around the schedule() call and cleared (delay slot)
                    * on the way back to rtrap.
                    */
338                 ldsw                    [%g6 + TI_PRE_COUNT], %l5
339                 brnz                    %l5, kern_fpucheck
340                  ldx                    [%g6 + TI_FLAGS], %l5
341                 andcc                   %l5, _TIF_NEED_RESCHED, %g0
342                 be,pt                   %xcc, kern_fpucheck
343                  nop
344                 cmp                     %l4, 0
345                 bne,pn                  %xcc, kern_fpucheck
346                  sethi                  %hi(PREEMPT_ACTIVE), %l6
347                 stw                     %l6, [%g6 + TI_PRE_COUNT]
348                 call                    schedule
349                  nop
350                 ba,pt                   %xcc, rtrap
351                  stw                    %g0, [%g6 + TI_PRE_COUNT]
352 #endif
/* Reload lazily-saved FPU state before a kernel return.  TI_FPDEPTH is
 * a nesting depth (stepped by 2); %o0 = depth >> 1 indexes the saved
 * per-level FPRS byte in TI_FPSAVED.  The FPRS_DL/FPRS_DU bits of that
 * byte select which halves of the FP register file (%f0-%f31 and
 * %f32-%f63) need reloading from TI_FPREGS, along with GSR and XFSR.
 * If neither FEF nor DU is set, only the new depth is stored back (2f).
 */
353 kern_fpucheck:  ldub                    [%g6 + TI_FPDEPTH], %l5
354                 brz,pt                  %l5, rt_continue
355                  srl                    %l5, 1, %o0
356                 add                     %g6, TI_FPSAVED, %l6
357                 ldub                    [%l6 + %o0], %l2
358                 sub                     %l5, 2, %l5
359
360                 add                     %g6, TI_GSR, %o1
361                 andcc                   %l2, (FPRS_FEF|FPRS_DU), %g0
362                 be,pt                   %icc, 2f
363                  and                    %l2, FPRS_DL, %l6
364                 andcc                   %l2, FPRS_FEF, %g0
365                 be,pn                   %icc, 5f
366                  sll                    %o0, 3, %o5
367                 rd                      %fprs, %g1
368
                   /* FEF set: enable the FPU and fetch this level's saved
                    * GSR (%g1 = [TI_GSR + depth*8]); %o2 = depth*256 is the
                    * byte offset of this level's FP register save area.
                    */
369                 wr                      %g1, FPRS_FEF, %fprs
370                 ldx                     [%o1 + %o5], %g1
371                 add                     %g6, TI_XFSR, %o1
372                 sll                     %o0, 8, %o2
373                 add                     %g6, TI_FPREGS, %o3
374                 brz,pn                  %l6, 1f
375                  add                    %g6, TI_FPREGS+0x40, %o4
376
                   /* FPRS_DL set: reload the lower half (%f0-%f31). */
377                 membar                  #Sync
378                 ldda                    [%o3 + %o2] ASI_BLK_P, %f0
379                 ldda                    [%o4 + %o2] ASI_BLK_P, %f16
380                 membar                  #Sync
381 1:              andcc                   %l2, FPRS_DU, %g0
382                 be,pn                   %icc, 1f
383                  wr                     %g1, 0, %gsr
                   /* FPRS_DU set: reload the upper half (%f32-%f63). */
384                 add                     %o2, 0x80, %o2
385                 membar                  #Sync
386                 ldda                    [%o3 + %o2] ASI_BLK_P, %f32
387                 ldda                    [%o4 + %o2] ASI_BLK_P, %f48
388 1:              membar                  #Sync
389                 ldx                     [%o1 + %o5], %fsr
390 2:              stb                     %l5, [%g6 + TI_FPDEPTH]
391                 ba,pt                   %xcc, rt_continue
392                  nop
                   /* FEF clear but DU set: only the upper half was saved;
                    * reload %f32-%f63 and leave %fprs = FPRS_DU.
                    */
393 5:              wr                      %g0, FPRS_FEF, %fprs
394                 sll                     %o0, 8, %o2
395
396                 add                     %g6, TI_FPREGS+0x80, %o3
397                 add                     %g6, TI_FPREGS+0xc0, %o4
398                 membar                  #Sync
399                 ldda                    [%o3 + %o2] ASI_BLK_P, %f32
400                 ldda                    [%o4 + %o2] ASI_BLK_P, %f48
401                 membar                  #Sync
402                 wr                      %g0, FPRS_DU, %fprs
403                 ba,pt                   %xcc, rt_continue
404                  stb                    %l5, [%g6 + TI_FPDEPTH]