blob: 1a969925bf800e9adce6a2c098322f77ffa3e6e4 [file] [log] [blame]
Stephen Rothwell81e70092005-10-18 11:17:58 +10001/*
2 * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 * Copyright (C) 2001 IBM
7 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
8 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
9 *
10 * Derived from "arch/i386/kernel/signal.c"
11 * Copyright (C) 1991, 1992 Linus Torvalds
12 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version
17 * 2 of the License, or (at your option) any later version.
18 */
19
Stephen Rothwell81e70092005-10-18 11:17:58 +100020#include <linux/sched.h>
21#include <linux/mm.h>
22#include <linux/smp.h>
Stephen Rothwell81e70092005-10-18 11:17:58 +100023#include <linux/kernel.h>
24#include <linux/signal.h>
25#include <linux/errno.h>
26#include <linux/elf.h>
Lucas Woods05ead012007-12-13 15:56:06 -080027#include <linux/ptrace.h>
Christian Dietrich76462232011-06-04 05:36:54 +000028#include <linux/ratelimit.h>
Stephen Rothwell81e70092005-10-18 11:17:58 +100029#ifdef CONFIG_PPC64
30#include <linux/syscalls.h>
31#include <linux/compat.h>
Stephen Rothwell81e70092005-10-18 11:17:58 +100032#else
33#include <linux/wait.h>
Stephen Rothwell81e70092005-10-18 11:17:58 +100034#include <linux/unistd.h>
35#include <linux/stddef.h>
36#include <linux/tty.h>
37#include <linux/binfmts.h>
Stephen Rothwell81e70092005-10-18 11:17:58 +100038#endif
39
Linus Torvalds7c0f6ba2016-12-24 11:46:01 -080040#include <linux/uaccess.h>
Stephen Rothwell81e70092005-10-18 11:17:58 +100041#include <asm/cacheflush.h>
Arnd Bergmanna7f31842006-03-23 00:00:08 +010042#include <asm/syscalls.h>
David Gibsonc5ff7002005-11-09 11:21:07 +110043#include <asm/sigcontext.h>
Benjamin Herrenschmidta7f290d2005-11-11 21:15:21 +110044#include <asm/vdso.h>
David Howellsae3a1972012-03-28 18:30:02 +010045#include <asm/switch_to.h>
Michael Neuling2b0a5762013-02-13 16:21:41 +000046#include <asm/tm.h>
Daniel Axtens0545d542016-09-06 15:32:43 +100047#include <asm/asm-prototypes.h>
Stephen Rothwell81e70092005-10-18 11:17:58 +100048#ifdef CONFIG_PPC64
Stephen Rothwell879168e2005-11-03 15:32:07 +110049#include "ppc32.h"
Stephen Rothwell81e70092005-10-18 11:17:58 +100050#include <asm/unistd.h>
Stephen Rothwell81e70092005-10-18 11:17:58 +100051#else
52#include <asm/ucontext.h>
53#include <asm/pgtable.h>
54#endif
55
Benjamin Herrenschmidt22e38f22007-06-04 15:15:49 +100056#include "signal.h"
57
Stephen Rothwell81e70092005-10-18 11:17:58 +100058
Stephen Rothwell81e70092005-10-18 11:17:58 +100059#ifdef CONFIG_PPC64
Stephen Rothwellb09a4912005-10-18 14:51:57 +100060#define sys_rt_sigreturn compat_sys_rt_sigreturn
Stephen Rothwellb09a4912005-10-18 14:51:57 +100061#define sys_swapcontext compat_sys_swapcontext
62#define sys_sigreturn compat_sys_sigreturn
Stephen Rothwell81e70092005-10-18 11:17:58 +100063
64#define old_sigaction old_sigaction32
65#define sigcontext sigcontext32
66#define mcontext mcontext32
67#define ucontext ucontext32
68
Al Viro7cce2462012-12-23 03:26:46 -050069#define __save_altstack __compat_save_altstack
70
Stephen Rothwell81e70092005-10-18 11:17:58 +100071/*
Michael Neulingc1cb2992008-07-08 18:43:41 +100072 * Userspace code may pass a ucontext which doesn't include VSX added
73 * at the end. We need to check for this case.
74 */
75#define UCONTEXTSIZEWITHOUTVSX \
76 (sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))
77
78/*
Stephen Rothwell81e70092005-10-18 11:17:58 +100079 * Returning 0 means we return to userspace via
80 * ret_from_except and thus restore all user
81 * registers from *regs. This is what we need
82 * to do when a signal has been delivered.
83 */
Stephen Rothwell81e70092005-10-18 11:17:58 +100084
85#define GP_REGS_SIZE min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
86#undef __SIGNAL_FRAMESIZE
87#define __SIGNAL_FRAMESIZE __SIGNAL_FRAMESIZE32
88#undef ELF_NVRREG
89#define ELF_NVRREG ELF_NVRREG32
90
91/*
92 * Functions for flipping sigsets (thanks to brain dead generic
93 * implementation that makes things simple for little endian only)
94 */
95static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
96{
Al Viroa5ae7542017-09-04 12:17:38 -040097 return put_compat_sigset(uset, set, sizeof(*uset));
Stephen Rothwell81e70092005-10-18 11:17:58 +100098}
99
Paul Mackerras9b7cf8b2005-10-19 23:13:04 +1000100static inline int get_sigset_t(sigset_t *set,
101 const compat_sigset_t __user *uset)
Stephen Rothwell81e70092005-10-18 11:17:58 +1000102{
Al Viroa5ae7542017-09-04 12:17:38 -0400103 return get_compat_sigset(set, uset);
Stephen Rothwell81e70092005-10-18 11:17:58 +1000104}
105
Al Viro29e646d2006-02-01 05:28:09 -0500106#define to_user_ptr(p) ptr_to_compat(p)
Stephen Rothwell81e70092005-10-18 11:17:58 +1000107#define from_user_ptr(p) compat_ptr(p)
108
/*
 * Copy the general registers out to a compat (32-bit) mcontext,
 * narrowing each 64-bit pt_regs slot to 32 bits on the way.
 * Returns 0 on success, -EFAULT on a failed user write.
 */
static inline int save_general_regs(struct pt_regs *regs,
		struct mcontext __user *frame)
{
	/* View pt_regs as a flat array of 64-bit register slots. */
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int i;

	WARN_ON(!FULL_REGS(regs));

	for (i = 0; i <= PT_RESULT; i ++) {
		/* Skip the non-volatile register range when it was never
		 * saved; the WARN_ON above should make this branch
		 * effectively unreachable in practice. */
		if (i == 14 && !FULL_REGS(regs))
			i = 32;
		/* Store only the low 32 bits of each register. */
		if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i]))
			return -EFAULT;
	}
	return 0;
}
125
126static inline int restore_general_regs(struct pt_regs *regs,
127 struct mcontext __user *sr)
128{
129 elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
130 int i;
131
132 for (i = 0; i <= PT_RESULT; i++) {
133 if ((i == PT_MSR) || (i == PT_SOFTE))
134 continue;
135 if (__get_user(gregs[i], &sr->mc_gregs[i]))
136 return -EFAULT;
137 }
138 return 0;
139}
140
141#else /* CONFIG_PPC64 */
142
Stephen Rothwell81e70092005-10-18 11:17:58 +1000143#define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
144
145static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set)
146{
147 return copy_to_user(uset, set, sizeof(*uset));
148}
149
Paul Mackerras9b7cf8b2005-10-19 23:13:04 +1000150static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
Stephen Rothwell81e70092005-10-18 11:17:58 +1000151{
152 return copy_from_user(set, uset, sizeof(*uset));
153}
154
Al Viro29e646d2006-02-01 05:28:09 -0500155#define to_user_ptr(p) ((unsigned long)(p))
156#define from_user_ptr(p) ((void __user *)(p))
Stephen Rothwell81e70092005-10-18 11:17:58 +1000157
158static inline int save_general_regs(struct pt_regs *regs,
159 struct mcontext __user *frame)
160{
Paul Mackerras1bd79332006-03-08 13:24:22 +1100161 WARN_ON(!FULL_REGS(regs));
Stephen Rothwell81e70092005-10-18 11:17:58 +1000162 return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
163}
164
165static inline int restore_general_regs(struct pt_regs *regs,
166 struct mcontext __user *sr)
167{
168 /* copy up to but not including MSR */
169 if (__copy_from_user(regs, &sr->mc_gregs,
170 PT_MSR * sizeof(elf_greg_t)))
171 return -EFAULT;
172 /* copy from orig_r3 (the word after the MSR) up to the end */
173 if (__copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
174 GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
175 return -EFAULT;
176 return 0;
177}
Stephen Rothwell81e70092005-10-18 11:17:58 +1000178#endif
179
Stephen Rothwell81e70092005-10-18 11:17:58 +1000180/*
181 * When we have signals to deliver, we set up on the
182 * user stack, going down from the original stack pointer:
Benjamin Herrenschmidta3f61dc2007-06-04 17:22:48 +1000183 * an ABI gap of 56 words
184 * an mcontext struct
Stephen Rothwell81e70092005-10-18 11:17:58 +1000185 * a sigcontext struct
186 * a gap of __SIGNAL_FRAMESIZE bytes
187 *
Benjamin Herrenschmidta3f61dc2007-06-04 17:22:48 +1000188 * Each of these things must be a multiple of 16 bytes in size. The following
189 * structure represent all of this except the __SIGNAL_FRAMESIZE gap
Stephen Rothwell81e70092005-10-18 11:17:58 +1000190 *
191 */
struct sigframe {
	struct sigcontext sctx;		/* the sigcontext */
	struct mcontext	mctx;		/* all the register values */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct sigcontext sctx_transact;	/* second context for the */
	struct mcontext	mctx_transact;		/* transactional TM state */
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};
205
206/* We use the mc_pad field for the signal return trampoline. */
207#define tramp mc_pad
208
209/*
210 * When we have rt signals to deliver, we set up on the
211 * user stack, going down from the original stack pointer:
212 * one rt_sigframe struct (siginfo + ucontext + ABI gap)
213 * a gap of __SIGNAL_FRAMESIZE+16 bytes
214 * (the +16 is to get the siginfo and ucontext in the same
215 * positions as in older kernels).
216 *
217 * Each of these things must be a multiple of 16 bytes in size.
218 *
219 */
struct rt_sigframe {
#ifdef CONFIG_PPC64
	compat_siginfo_t info;	/* 32-bit siginfo layout for compat tasks */
#else
	struct siginfo info;
#endif
	struct ucontext	uc;	/* checkpointed (pre-transaction) context */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext	uc_transact;	/* transactional register context */
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};
236
Michael Neuling6a274c02008-07-02 14:06:37 +1000237#ifdef CONFIG_VSX
238unsigned long copy_fpr_to_user(void __user *to,
239 struct task_struct *task)
240{
Paul Mackerrasde79f7b2013-09-10 20:20:42 +1000241 u64 buf[ELF_NFPREG];
Michael Neuling6a274c02008-07-02 14:06:37 +1000242 int i;
243
244 /* save FPR copy to local buffer then write to the thread_struct */
245 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
246 buf[i] = task->thread.TS_FPR(i);
Paul Mackerrasde79f7b2013-09-10 20:20:42 +1000247 buf[i] = task->thread.fp_state.fpscr;
Michael Neuling6a274c02008-07-02 14:06:37 +1000248 return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
249}
250
251unsigned long copy_fpr_from_user(struct task_struct *task,
252 void __user *from)
253{
Paul Mackerrasde79f7b2013-09-10 20:20:42 +1000254 u64 buf[ELF_NFPREG];
Michael Neuling6a274c02008-07-02 14:06:37 +1000255 int i;
256
257 if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
258 return 1;
259 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
260 task->thread.TS_FPR(i) = buf[i];
Paul Mackerrasde79f7b2013-09-10 20:20:42 +1000261 task->thread.fp_state.fpscr = buf[i];
Michael Neuling6a274c02008-07-02 14:06:37 +1000262
263 return 0;
264}
265
266unsigned long copy_vsx_to_user(void __user *to,
267 struct task_struct *task)
268{
Paul Mackerrasde79f7b2013-09-10 20:20:42 +1000269 u64 buf[ELF_NVSRHALFREG];
Michael Neuling6a274c02008-07-02 14:06:37 +1000270 int i;
271
272 /* save FPR copy to local buffer then write to the thread_struct */
273 for (i = 0; i < ELF_NVSRHALFREG; i++)
Paul Mackerrasde79f7b2013-09-10 20:20:42 +1000274 buf[i] = task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
Michael Neuling6a274c02008-07-02 14:06:37 +1000275 return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
276}
277
278unsigned long copy_vsx_from_user(struct task_struct *task,
279 void __user *from)
280{
Paul Mackerrasde79f7b2013-09-10 20:20:42 +1000281 u64 buf[ELF_NVSRHALFREG];
Michael Neuling6a274c02008-07-02 14:06:37 +1000282 int i;
283
284 if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
285 return 1;
286 for (i = 0; i < ELF_NVSRHALFREG ; i++)
Paul Mackerrasde79f7b2013-09-10 20:20:42 +1000287 task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
Michael Neuling6a274c02008-07-02 14:06:37 +1000288 return 0;
289}
Michael Neuling2b0a5762013-02-13 16:21:41 +0000290
291#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
Cyril Bur000ec282016-09-23 16:18:25 +1000292unsigned long copy_ckfpr_to_user(void __user *to,
Michael Neuling2b0a5762013-02-13 16:21:41 +0000293 struct task_struct *task)
294{
Paul Mackerrasde79f7b2013-09-10 20:20:42 +1000295 u64 buf[ELF_NFPREG];
Michael Neuling2b0a5762013-02-13 16:21:41 +0000296 int i;
297
298 /* save FPR copy to local buffer then write to the thread_struct */
299 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
Cyril Bur000ec282016-09-23 16:18:25 +1000300 buf[i] = task->thread.TS_CKFPR(i);
301 buf[i] = task->thread.ckfp_state.fpscr;
Michael Neuling2b0a5762013-02-13 16:21:41 +0000302 return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
303}
304
Cyril Bur000ec282016-09-23 16:18:25 +1000305unsigned long copy_ckfpr_from_user(struct task_struct *task,
Michael Neuling2b0a5762013-02-13 16:21:41 +0000306 void __user *from)
307{
Paul Mackerrasde79f7b2013-09-10 20:20:42 +1000308 u64 buf[ELF_NFPREG];
Michael Neuling2b0a5762013-02-13 16:21:41 +0000309 int i;
310
311 if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
312 return 1;
313 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
Cyril Bur000ec282016-09-23 16:18:25 +1000314 task->thread.TS_CKFPR(i) = buf[i];
315 task->thread.ckfp_state.fpscr = buf[i];
Michael Neuling2b0a5762013-02-13 16:21:41 +0000316
317 return 0;
318}
319
Cyril Bur000ec282016-09-23 16:18:25 +1000320unsigned long copy_ckvsx_to_user(void __user *to,
Michael Neuling2b0a5762013-02-13 16:21:41 +0000321 struct task_struct *task)
322{
Paul Mackerrasde79f7b2013-09-10 20:20:42 +1000323 u64 buf[ELF_NVSRHALFREG];
Michael Neuling2b0a5762013-02-13 16:21:41 +0000324 int i;
325
326 /* save FPR copy to local buffer then write to the thread_struct */
327 for (i = 0; i < ELF_NVSRHALFREG; i++)
Cyril Bur000ec282016-09-23 16:18:25 +1000328 buf[i] = task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
Michael Neuling2b0a5762013-02-13 16:21:41 +0000329 return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
330}
331
Cyril Bur000ec282016-09-23 16:18:25 +1000332unsigned long copy_ckvsx_from_user(struct task_struct *task,
Michael Neuling2b0a5762013-02-13 16:21:41 +0000333 void __user *from)
334{
Paul Mackerrasde79f7b2013-09-10 20:20:42 +1000335 u64 buf[ELF_NVSRHALFREG];
Michael Neuling2b0a5762013-02-13 16:21:41 +0000336 int i;
337
338 if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
339 return 1;
340 for (i = 0; i < ELF_NVSRHALFREG ; i++)
Cyril Bur000ec282016-09-23 16:18:25 +1000341 task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
Michael Neuling2b0a5762013-02-13 16:21:41 +0000342 return 0;
343}
344#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
Michael Neuling6a274c02008-07-02 14:06:37 +1000345#else
346inline unsigned long copy_fpr_to_user(void __user *to,
347 struct task_struct *task)
348{
Paul Mackerrasde79f7b2013-09-10 20:20:42 +1000349 return __copy_to_user(to, task->thread.fp_state.fpr,
Michael Neuling6a274c02008-07-02 14:06:37 +1000350 ELF_NFPREG * sizeof(double));
351}
352
353inline unsigned long copy_fpr_from_user(struct task_struct *task,
354 void __user *from)
355{
Paul Mackerrasde79f7b2013-09-10 20:20:42 +1000356 return __copy_from_user(task->thread.fp_state.fpr, from,
Michael Neuling6a274c02008-07-02 14:06:37 +1000357 ELF_NFPREG * sizeof(double));
358}
Michael Neuling2b0a5762013-02-13 16:21:41 +0000359
360#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
Cyril Bur000ec282016-09-23 16:18:25 +1000361inline unsigned long copy_ckfpr_to_user(void __user *to,
Michael Neuling2b0a5762013-02-13 16:21:41 +0000362 struct task_struct *task)
363{
Cyril Bur000ec282016-09-23 16:18:25 +1000364 return __copy_to_user(to, task->thread.ckfp_state.fpr,
Michael Neuling2b0a5762013-02-13 16:21:41 +0000365 ELF_NFPREG * sizeof(double));
366}
367
Cyril Bur000ec282016-09-23 16:18:25 +1000368inline unsigned long copy_ckfpr_from_user(struct task_struct *task,
Michael Neuling2b0a5762013-02-13 16:21:41 +0000369 void __user *from)
370{
Cyril Bur000ec282016-09-23 16:18:25 +1000371 return __copy_from_user(task->thread.ckfp_state.fpr, from,
Michael Neuling2b0a5762013-02-13 16:21:41 +0000372 ELF_NFPREG * sizeof(double));
373}
374#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
Michael Neuling6a274c02008-07-02 14:06:37 +1000375#endif
376
Stephen Rothwell81e70092005-10-18 11:17:58 +1000377/*
378 * Save the current user registers on the user stack.
379 * We only save the altivec/spe registers if the process has used
380 * altivec/spe instructions at some point.
381 */
static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
			  struct mcontext __user *tm_frame, int sigret,
			  int ctx_has_vsx_region)
{
	/* Local copy of the MSR: feature bits (VEC/VSX/SPE) are OR'd in
	 * below to tell sigreturn which parts of the frame hold live state,
	 * then the whole value is written to the frame at the end. */
	unsigned long msr = regs->msr;

	/* Make sure floating point registers are stored in regs */
	flush_fp_to_thread(current);

	/* save general registers */
	if (save_general_regs(regs, frame))
		return 1;

#ifdef CONFIG_ALTIVEC
	/* save altivec registers */
	if (current->thread.used_vr) {
		flush_altivec_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
				   ELF_NVRREG * sizeof(vector128)))
			return 1;
		/* set MSR_VEC in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		msr |= MSR_VEC;
	}
	/* else assert((regs->msr & MSR_VEC) == 0) */

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec. Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 * Note that the current VRSAVE value is in the SPR at this point.
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.vrsave = mfspr(SPRN_VRSAVE);
	if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
		return 1;
#endif /* CONFIG_ALTIVEC */
	if (copy_fpr_to_user(&frame->mc_fregs, current))
		return 1;

	/*
	 * Clear the MSR VSX bit to indicate there is no valid state attached
	 * to this context, except in the specific case below where we set it.
	 */
	msr &= ~MSR_VSX;
#ifdef CONFIG_VSX
	/*
	 * Copy VSR 0-31 upper half from thread_struct to local
	 * buffer, then write that to userspace. Also set MSR_VSX in
	 * the saved MSR value to indicate that frame->mc_vregs
	 * contains valid data
	 */
	if (current->thread.used_vsr && ctx_has_vsx_region) {
		flush_vsx_to_thread(current);
		if (copy_vsx_to_user(&frame->mc_vsregs, current))
			return 1;
		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* save spe registers */
	if (current->thread.used_spe) {
		flush_spe_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
				   ELF_NEVRREG * sizeof(u32)))
			return 1;
		/* set MSR_SPE in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		msr |= MSR_SPE;
	}
	/* else assert((regs->msr & MSR_SPE) == 0) */

	/* We always copy to/from spefscr */
	if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	/* Commit the (possibly feature-augmented) MSR to the frame. */
	if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
		return 1;
	/* We need to write 0 the MSR top 32 bits in the tm frame so that we
	 * can check it on the restore to see if TM is active
	 */
	if (tm_frame && __put_user(0, &tm_frame->mc_gregs[PT_MSR]))
		return 1;

	if (sigret) {
		/* Set up the sigreturn trampoline: li r0,sigret; sc */
		if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
		    || __put_user(0x44000002UL, &frame->tramp[1]))
			return 1;
		/* The trampoline is executed as code; keep I-cache coherent. */
		flush_icache_range((unsigned long) &frame->tramp[0],
				   (unsigned long) &frame->tramp[2]);
	}

	return 0;
}
478
Michael Neuling2b0a5762013-02-13 16:21:41 +0000479#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
480/*
481 * Save the current user registers on the user stack.
482 * We only save the altivec/spe registers if the process has used
483 * altivec/spe instructions at some point.
484 * We also save the transactional registers to a second ucontext in the
485 * frame.
486 *
487 * See save_user_regs() and signal_64.c:setup_tm_sigcontexts().
488 */
static int save_tm_user_regs(struct pt_regs *regs,
			     struct mcontext __user *frame,
			     struct mcontext __user *tm_frame, int sigret)
{
	/* Snapshot MSR before any modification; feature bits are OR'd in
	 * below and the result is committed to the frame at the end. */
	unsigned long msr = regs->msr;

	/* Remove TM bits from thread's MSR. The MSR in the sigcontext
	 * just indicates to userland that we were doing a transaction, but we
	 * don't want to return in transactional state. This also ensures
	 * that flush_fp_to_thread won't set TIF_RESTORE_TM again.
	 */
	regs->msr &= ~MSR_TS_MASK;

	/* Save both sets of general registers: the checkpointed set goes in
	 * the normal frame, the live transactional set in the tm frame. */
	if (save_general_regs(&current->thread.ckpt_regs, frame)
	    || save_general_regs(regs, tm_frame))
		return 1;

	/* Stash the top half of the 64bit MSR into the 32bit MSR word
	 * of the transactional mcontext. This way we have a backward-compatible
	 * MSR in the 'normal' (checkpointed) mcontext and additionally one can
	 * also look at what type of transaction (T or S) was active at the
	 * time of the signal.
	 */
	if (__put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR]))
		return 1;

#ifdef CONFIG_ALTIVEC
	/* save altivec registers: checkpointed state in the normal frame;
	 * the tm frame gets the live state only if VEC was active. */
	if (current->thread.used_vr) {
		if (__copy_to_user(&frame->mc_vregs, &current->thread.ckvr_state,
				   ELF_NVRREG * sizeof(vector128)))
			return 1;
		if (msr & MSR_VEC) {
			if (__copy_to_user(&tm_frame->mc_vregs,
					   &current->thread.vr_state,
					   ELF_NVRREG * sizeof(vector128)))
				return 1;
		} else {
			/* VEC inactive in the transaction: both frames see
			 * the checkpointed state. */
			if (__copy_to_user(&tm_frame->mc_vregs,
					   &current->thread.ckvr_state,
					   ELF_NVRREG * sizeof(vector128)))
				return 1;
		}

		/* set MSR_VEC in the saved MSR value to indicate that
		 * frame->mc_vregs contains valid data
		 */
		msr |= MSR_VEC;
	}

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec. Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.ckvrsave = mfspr(SPRN_VRSAVE);
	if (__put_user(current->thread.ckvrsave,
		       (u32 __user *)&frame->mc_vregs[32]))
		return 1;
	if (msr & MSR_VEC) {
		if (__put_user(current->thread.vrsave,
			       (u32 __user *)&tm_frame->mc_vregs[32]))
			return 1;
	} else {
		if (__put_user(current->thread.ckvrsave,
			       (u32 __user *)&tm_frame->mc_vregs[32]))
			return 1;
	}
#endif /* CONFIG_ALTIVEC */

	/* FP follows the same checkpointed/live split as Altivec above. */
	if (copy_ckfpr_to_user(&frame->mc_fregs, current))
		return 1;
	if (msr & MSR_FP) {
		if (copy_fpr_to_user(&tm_frame->mc_fregs, current))
			return 1;
	} else {
		if (copy_ckfpr_to_user(&tm_frame->mc_fregs, current))
			return 1;
	}

#ifdef CONFIG_VSX
	/*
	 * Copy VSR 0-31 upper half from thread_struct to local
	 * buffer, then write that to userspace. Also set MSR_VSX in
	 * the saved MSR value to indicate that frame->mc_vregs
	 * contains valid data
	 */
	if (current->thread.used_vsr) {
		if (copy_ckvsx_to_user(&frame->mc_vsregs, current))
			return 1;
		if (msr & MSR_VSX) {
			if (copy_vsx_to_user(&tm_frame->mc_vsregs,
					     current))
				return 1;
		} else {
			if (copy_ckvsx_to_user(&tm_frame->mc_vsregs, current))
				return 1;
		}

		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* SPE regs are not checkpointed with TM, so this section is
	 * simply the same as in save_user_regs().
	 */
	if (current->thread.used_spe) {
		flush_spe_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
				   ELF_NEVRREG * sizeof(u32)))
			return 1;
		/* set MSR_SPE in the saved MSR value to indicate that
		 * frame->mc_vregs contains valid data */
		msr |= MSR_SPE;
	}

	/* We always copy to/from spefscr */
	if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	/* Commit the feature-augmented MSR to the checkpointed frame. */
	if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
		return 1;
	if (sigret) {
		/* Set up the sigreturn trampoline: li r0,sigret; sc */
		if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
		    || __put_user(0x44000002UL, &frame->tramp[1]))
			return 1;
		/* Trampoline is executed as code; keep I-cache coherent. */
		flush_icache_range((unsigned long) &frame->tramp[0],
				   (unsigned long) &frame->tramp[2]);
	}

	return 0;
}
625#endif
626
Stephen Rothwell81e70092005-10-18 11:17:58 +1000627/*
628 * Restore the current user register values from the user stack,
629 * (except for MSR).
630 */
static long restore_user_regs(struct pt_regs *regs,
			      struct mcontext __user *sr, int sig)
{
	long err;
	unsigned int save_r2 = 0;
	/* MSR value saved in the frame; its feature bits tell us which
	 * register sets in the frame contain valid data. */
	unsigned long msr;
#ifdef CONFIG_VSX
	int i;
#endif

	/*
	 * restore general registers but not including MSR or SOFTE. Also
	 * take care of keeping r2 (TLS) intact if not a signal
	 */
	if (!sig)
		save_r2 = (unsigned int)regs->gpr[2];
	err = restore_general_regs(regs, sr);
	regs->trap = 0;
	err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
	if (!sig)
		regs->gpr[2] = (unsigned long) save_r2;
	if (err)
		return 1;

	/* if doing signal return, restore the previous little-endian mode */
	if (sig)
		regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);

#ifdef CONFIG_ALTIVEC
	/*
	 * Force the process to reload the altivec registers from
	 * current->thread when it next does altivec instructions
	 */
	regs->msr &= ~MSR_VEC;
	if (msr & MSR_VEC) {
		/* restore altivec registers from the stack */
		if (__copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
				     sizeof(sr->mc_vregs)))
			return 1;
		current->thread.used_vr = true;
	} else if (current->thread.used_vr)
		/* Frame has no VEC state: wipe any stale thread copy. */
		memset(&current->thread.vr_state, 0,
		       ELF_NVRREG * sizeof(vector128));

	/* Always get VRSAVE back */
	if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
		return 1;
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, current->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
	if (copy_fpr_from_user(current, &sr->mc_fregs))
		return 1;

#ifdef CONFIG_VSX
	/*
	 * Force the process to reload the VSX registers from
	 * current->thread when it next does VSX instruction.
	 */
	regs->msr &= ~MSR_VSX;
	if (msr & MSR_VSX) {
		/*
		 * Restore altivec registers from the stack to a local
		 * buffer, then write this out to the thread_struct
		 */
		if (copy_vsx_from_user(current, &sr->mc_vsregs))
			return 1;
		current->thread.used_vsr = true;
	} else if (current->thread.used_vsr)
		/* Frame has no VSX state: clear the VSR upper halves. */
		for (i = 0; i < 32 ; i++)
			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
#endif /* CONFIG_VSX */
	/*
	 * force the process to reload the FP registers from
	 * current->thread when it next does FP instructions
	 */
	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);

#ifdef CONFIG_SPE
	/* force the process to reload the spe registers from
	   current->thread when it next does spe instructions */
	regs->msr &= ~MSR_SPE;
	if (msr & MSR_SPE) {
		/* restore spe registers from the stack */
		if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
				     ELF_NEVRREG * sizeof(u32)))
			return 1;
		current->thread.used_spe = true;
	} else if (current->thread.used_spe)
		memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));

	/* Always get SPEFSCR back */
	if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	return 0;
}
728
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Restore the current user register values from the user stack, except for
 * MSR, and recheckpoint the original checkpointed register state for processes
 * in transactions.
 *
 * @regs:  live register image of the task (receives the transactional state)
 * @sr:    user mcontext holding the checkpointed (pre-transaction) state
 * @tm_sr: user mcontext holding the transactional (speculative) state
 *
 * Returns 0 on success, 1 on any fault or invalid context (callers map
 * this to -EFAULT).
 */
static long restore_tm_user_regs(struct pt_regs *regs,
				 struct mcontext __user *sr,
				 struct mcontext __user *tm_sr)
{
	long err;
	unsigned long msr, msr_hi;
#ifdef CONFIG_VSX
	int i;
#endif

	/*
	 * restore general registers but not including MSR or SOFTE. Also
	 * take care of keeping r2 (TLS) intact if not a signal.
	 * See comment in signal_64.c:restore_tm_sigcontexts();
	 * TFHAR is restored from the checkpointed NIP; TEXASR and TFIAR
	 * were set by the signal delivery.
	 */
	err = restore_general_regs(regs, tm_sr);
	err |= restore_general_regs(&current->thread.ckpt_regs, sr);

	err |= __get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP]);

	/* msr (low half) comes from the checkpointed context */
	err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
	if (err)
		return 1;

	/* Restore the previous little-endian mode */
	regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);

#ifdef CONFIG_ALTIVEC
	/* Force a VEC reload from thread_struct on next use */
	regs->msr &= ~MSR_VEC;
	if (msr & MSR_VEC) {
		/*
		 * restore altivec registers from the stack: checkpointed
		 * VMX state from sr, transactional VMX state from tm_sr
		 */
		if (__copy_from_user(&current->thread.ckvr_state, &sr->mc_vregs,
				     sizeof(sr->mc_vregs)) ||
		    __copy_from_user(&current->thread.vr_state,
				     &tm_sr->mc_vregs,
				     sizeof(sr->mc_vregs)))
			return 1;
		current->thread.used_vr = true;
	} else if (current->thread.used_vr) {
		/* Context carried no VMX state: clear both copies */
		memset(&current->thread.vr_state, 0,
		       ELF_NVRREG * sizeof(vector128));
		memset(&current->thread.ckvr_state, 0,
		       ELF_NVRREG * sizeof(vector128));
	}

	/* Always get VRSAVE back (stored as the 33rd vreg slot) */
	if (__get_user(current->thread.ckvrsave,
		       (u32 __user *)&sr->mc_vregs[32]) ||
	    __get_user(current->thread.vrsave,
		       (u32 __user *)&tm_sr->mc_vregs[32]))
		return 1;
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, current->thread.ckvrsave);
#endif /* CONFIG_ALTIVEC */

	/* Force an FP reload from thread_struct on next FP use */
	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);

	if (copy_fpr_from_user(current, &sr->mc_fregs) ||
	    copy_ckfpr_from_user(current, &tm_sr->mc_fregs))
		return 1;

#ifdef CONFIG_VSX
	regs->msr &= ~MSR_VSX;
	if (msr & MSR_VSX) {
		/*
		 * Restore altivec registers from the stack to a local
		 * buffer, then write this out to the thread_struct
		 */
		if (copy_vsx_from_user(current, &tm_sr->mc_vsregs) ||
		    copy_ckvsx_from_user(current, &sr->mc_vsregs))
			return 1;
		current->thread.used_vsr = true;
	} else if (current->thread.used_vsr)
		/* Clear the VSR low halves in both live and ckpt images */
		for (i = 0; i < 32 ; i++) {
			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
			current->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
		}
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE
	/* SPE regs are not checkpointed with TM, so this section is
	 * simply the same as in restore_user_regs().
	 */
	regs->msr &= ~MSR_SPE;
	if (msr & MSR_SPE) {
		if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
				     ELF_NEVRREG * sizeof(u32)))
			return 1;
		current->thread.used_spe = true;
	} else if (current->thread.used_spe)
		memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));

	/* Always get SPEFSCR back */
	if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs
		       + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	/* Get the top half of the MSR from the user context */
	if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
		return 1;
	msr_hi <<= 32;
	/* If TM bits are set to the reserved value, it's an invalid context */
	if (MSR_TM_RESV(msr_hi))
		return 1;
	/* Pull in the MSR TM bits from the user context */
	regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
	/* Now, recheckpoint.  This loads up all of the checkpointed (older)
	 * registers, including FP and V[S]Rs.  After recheckpointing, the
	 * transactional versions should be loaded.
	 */
	tm_enable();
	/* Make sure the transaction is marked as failed */
	current->thread.tm_texasr |= TEXASR_FS;
	/* This loads the checkpointed FP/VEC state, if used */
	tm_recheckpoint(&current->thread, msr);

	/* This loads the speculative FP/VEC state, if used */
	msr_check_and_set(msr & (MSR_FP | MSR_VEC));
	if (msr & MSR_FP) {
		load_fp_state(&current->thread.fp_state);
		regs->msr |= (MSR_FP | current->thread.fpexc_mode);
	}
#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		load_vr_state(&current->thread.vr_state);
		regs->msr |= MSR_VEC;
	}
#endif

	return 0;
}
869#endif
870
Stephen Rothwell81e70092005-10-18 11:17:58 +1000871#ifdef CONFIG_PPC64
/*
 * Translate a kernel siginfo_t into the 32-bit compat layout and copy it
 * out to user space.  Only the three generic ints (signo, errno, code)
 * plus the union member selected by siginfo_layout() are written, so no
 * kernel padding is leaked to user space.
 *
 * Returns 0 on success, -EFAULT on any user-access failure.
 */
int copy_siginfo_to_user32(struct compat_siginfo __user *d, const siginfo_t *s)
{
	int err;

	if (!access_ok (VERIFY_WRITE, d, sizeof(*d)))
		return -EFAULT;

	/* If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 * This routine must convert siginfo from 64bit to 32bit as well
	 * at the same time.
	 */
	err = __put_user(s->si_signo, &d->si_signo);
	err |= __put_user(s->si_errno, &d->si_errno);
	err |= __put_user(s->si_code, &d->si_code);
	if (s->si_code < 0)
		/* Negative si_code: opaque payload, copy the raw pad area */
		err |= __copy_to_user(&d->_sifields._pad, &s->_sifields._pad,
				      SI_PAD_SIZE32);
	else switch(siginfo_layout(s->si_signo, s->si_code)) {
	case SIL_CHLD:
		err |= __put_user(s->si_pid, &d->si_pid);
		err |= __put_user(s->si_uid, &d->si_uid);
		err |= __put_user(s->si_utime, &d->si_utime);
		err |= __put_user(s->si_stime, &d->si_stime);
		err |= __put_user(s->si_status, &d->si_status);
		break;
	case SIL_FAULT:
		/* Fault address is truncated to 32 bits for the compat task */
		err |= __put_user((unsigned int)(unsigned long)s->si_addr,
				  &d->si_addr);
		break;
	case SIL_POLL:
		err |= __put_user(s->si_band, &d->si_band);
		err |= __put_user(s->si_fd, &d->si_fd);
		break;
	case SIL_TIMER:
		err |= __put_user(s->si_tid, &d->si_tid);
		err |= __put_user(s->si_overrun, &d->si_overrun);
		err |= __put_user(s->si_int, &d->si_int);
		break;
	case SIL_SYS:
		err |= __put_user(ptr_to_compat(s->si_call_addr), &d->si_call_addr);
		err |= __put_user(s->si_syscall, &d->si_syscall);
		err |= __put_user(s->si_arch, &d->si_arch);
		break;
	case SIL_RT:
		err |= __put_user(s->si_int, &d->si_int);
		/* fallthrough: SIL_RT also carries pid/uid */
	case SIL_KILL:
		err |= __put_user(s->si_pid, &d->si_pid);
		err |= __put_user(s->si_uid, &d->si_uid);
		break;
	}
	return err;
}
929
930#define copy_siginfo_to_user copy_siginfo_to_user32
931
Roland McGrath9c0c44d2008-04-20 08:19:24 +1000932int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
933{
Roland McGrath9c0c44d2008-04-20 08:19:24 +1000934 if (copy_from_user(to, from, 3*sizeof(int)) ||
935 copy_from_user(to->_sifields._pad,
936 from->_sifields._pad, SI_PAD_SIZE32))
937 return -EFAULT;
938
939 return 0;
940}
Stephen Rothwell81e70092005-10-18 11:17:58 +1000941#endif /* CONFIG_PPC64 */
942
Stephen Rothwell81e70092005-10-18 11:17:58 +1000943/*
944 * Set up a signal frame for a "real-time" signal handler
945 * (one which gets siginfo).
946 */
/*
 * Set up a "real-time" signal frame (one which gets siginfo) on the user
 * stack and point the task's registers at the handler.
 *
 * @ksig:   signal descriptor (number, siginfo, sigaction)
 * @oldset: signal mask to be restored on sigreturn
 * @tsk:    target task; must be current (BUG_ON otherwise)
 *
 * Returns 0 on success, 1 if the user stack frame could not be written
 * (the caller then forces a SIGSEGV).
 */
int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
		       struct task_struct *tsk)
{
	struct rt_sigframe __user *rt_sf;
	struct mcontext __user *frame;
	struct mcontext __user *tm_frame = NULL;
	void __user *addr;	/* last user address touched, for the error report */
	unsigned long newsp = 0;
	int sigret;		/* syscall number for the stack trampoline, 0 if VDSO */
	unsigned long tramp;
	struct pt_regs *regs = tsk->thread.regs;

	BUG_ON(tsk != current);

	/* Set up Signal Frame */
	/* Put a Real Time Context onto stack */
	rt_sf = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*rt_sf), 1);
	addr = rt_sf;
	if (unlikely(rt_sf == NULL))
		goto badframe;

	/* Put the siginfo & fill in most of the ucontext */
	if (copy_siginfo_to_user(&rt_sf->info, &ksig->info)
	    || __put_user(0, &rt_sf->uc.uc_flags)
	    || __save_altstack(&rt_sf->uc.uc_stack, regs->gpr[1])
	    || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext),
		    &rt_sf->uc.uc_regs)
	    || put_sigset_t(&rt_sf->uc.uc_sigmask, oldset))
		goto badframe;

	/* Save user registers on the stack */
	frame = &rt_sf->uc.uc_mcontext;
	addr = frame;
	/* Prefer the VDSO trampoline; fall back to one on the stack frame */
	if (vdso32_rt_sigtramp && tsk->mm->context.vdso_base) {
		sigret = 0;
		tramp = tsk->mm->context.vdso_base + vdso32_rt_sigtramp;
	} else {
		sigret = __NR_rt_sigreturn;
		tramp = (unsigned long) frame->tramp;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_frame = &rt_sf->uc_transact.uc_mcontext;
	if (MSR_TM_ACTIVE(regs->msr)) {
		/* Link the transactional ucontext off the main one */
		if (__put_user((unsigned long)&rt_sf->uc_transact,
			       &rt_sf->uc.uc_link) ||
		    __put_user((unsigned long)tm_frame,
			       &rt_sf->uc_transact.uc_regs))
			goto badframe;
		if (save_tm_user_regs(regs, frame, tm_frame, sigret))
			goto badframe;
	}
	else
#endif
	{
		if (__put_user(0, &rt_sf->uc.uc_link))
			goto badframe;
		if (save_user_regs(regs, frame, tm_frame, sigret, 1))
			goto badframe;
	}
	regs->link = tramp;

	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16);
	addr = (void __user *)regs->gpr[1];
	/* back-chain the new frame to the old stack pointer */
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	/* Fill registers for signal handler */
	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->gpr[4] = (unsigned long) &rt_sf->info;
	regs->gpr[5] = (unsigned long) &rt_sf->uc;
	regs->gpr[6] = (unsigned long) rt_sf;
	regs->nip = (unsigned long) ksig->ka.sa.sa_handler;
	/* enter the signal handler in native-endian mode */
	regs->msr &= ~MSR_LE;
	regs->msr |= (MSR_KERNEL & MSR_LE);
	return 0;

badframe:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in handle_rt_signal32: "
				   "%p nip %08lx lr %08lx\n",
				   tsk->comm, tsk->pid,
				   addr, regs->nip, regs->link);

	return 1;
}
1039
/*
 * Install a user ucontext: restore the signal mask and the register
 * state from the mcontext it points to.
 *
 * @sig: non-zero when called from a signal-return path (forwarded to
 *       restore_user_regs, which treats signal and non-signal restores
 *       differently for r2/TLS).
 *
 * Returns 0 on success, -EFAULT on any user-access failure.
 */
static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
{
	sigset_t set;
	struct mcontext __user *mcp;

	if (get_sigset_t(&set, &ucp->uc_sigmask))
		return -EFAULT;
#ifdef CONFIG_PPC64
	{
		/* uc_regs is a 32-bit user pointer on the compat path */
		u32 cmcp;

		if (__get_user(cmcp, &ucp->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* no need to check access_ok(mcp), since mcp < 4GB */
	}
#else
	if (__get_user(mcp, &ucp->uc_regs))
		return -EFAULT;
	if (!access_ok(VERIFY_READ, mcp, sizeof(*mcp)))
		return -EFAULT;
#endif
	/* Mask is installed before registers, matching signal delivery order */
	set_current_blocked(&set);
	if (restore_user_regs(regs, mcp, sig))
		return -EFAULT;

	return 0;
}
1068
Michael Neuling2b0a5762013-02-13 16:21:41 +00001069#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1070static int do_setcontext_tm(struct ucontext __user *ucp,
1071 struct ucontext __user *tm_ucp,
1072 struct pt_regs *regs)
1073{
1074 sigset_t set;
1075 struct mcontext __user *mcp;
1076 struct mcontext __user *tm_mcp;
1077 u32 cmcp;
1078 u32 tm_cmcp;
1079
1080 if (get_sigset_t(&set, &ucp->uc_sigmask))
1081 return -EFAULT;
1082
1083 if (__get_user(cmcp, &ucp->uc_regs) ||
1084 __get_user(tm_cmcp, &tm_ucp->uc_regs))
1085 return -EFAULT;
1086 mcp = (struct mcontext __user *)(u64)cmcp;
1087 tm_mcp = (struct mcontext __user *)(u64)tm_cmcp;
1088 /* no need to check access_ok(mcp), since mcp < 4GB */
1089
1090 set_current_blocked(&set);
1091 if (restore_tm_user_regs(regs, mcp, tm_mcp))
1092 return -EFAULT;
1093
1094 return 0;
1095}
1096#endif
1097
/*
 * swapcontext syscall: optionally save the current context into
 * @old_ctx, then optionally switch to @new_ctx.  @ctx_size lets newer
 * userspace pass a larger ucontext that carries VSX state; on PPC64 the
 * size is validated against the MSR bits the new context requests.
 *
 * Returns 0 on success, -EINVAL for a bad size, -EFAULT for user-access
 * failures.  A fault while installing @new_ctx after validation kills
 * the task with SIGSEGV (see comment below).
 */
long sys_swapcontext(struct ucontext __user *old_ctx,
		     struct ucontext __user *new_ctx,
		     int ctx_size, int r6, int r7, int r8, struct pt_regs *regs)
{
	unsigned char tmp;	/* scratch for the first/last-byte probes */
	int ctx_has_vsx_region = 0;

#ifdef CONFIG_PPC64
	unsigned long new_msr = 0;

	if (new_ctx) {
		struct mcontext __user *mcp;
		u32 cmcp;

		/*
		 * Get pointer to the real mcontext.  No need for
		 * access_ok since we are dealing with compat
		 * pointers.
		 */
		if (__get_user(cmcp, &new_ctx->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR]))
			return -EFAULT;
	}
	/*
	 * Check that the context is not smaller than the original
	 * size (with VMX but without VSX)
	 */
	if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
		return -EINVAL;
	/*
	 * If the new context state sets the MSR VSX bits but
	 * it doesn't provide VSX state.
	 */
	if ((ctx_size < sizeof(struct ucontext)) &&
	    (new_msr & MSR_VSX))
		return -EINVAL;
	/* Does the context have enough room to store VSX data? */
	if (ctx_size >= sizeof(struct ucontext))
		ctx_has_vsx_region = 1;
#else
	/* Context size is for future use. Right now, we only make sure
	 * we are passed something we understand
	 */
	if (ctx_size < sizeof(struct ucontext))
		return -EINVAL;
#endif
	if (old_ctx != NULL) {
		struct mcontext __user *mctx;

		/*
		 * old_ctx might not be 16-byte aligned, in which
		 * case old_ctx->uc_mcontext won't be either.
		 * Because we have the old_ctx->uc_pad2 field
		 * before old_ctx->uc_mcontext, we need to round down
		 * from &old_ctx->uc_mcontext to a 16-byte boundary.
		 */
		mctx = (struct mcontext __user *)
			((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
		if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size)
		    || save_user_regs(regs, mctx, NULL, 0, ctx_has_vsx_region)
		    || put_sigset_t(&old_ctx->uc_sigmask, &current->blocked)
		    || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs))
			return -EFAULT;
	}
	if (new_ctx == NULL)
		return 0;
	/* Probe the first and last bytes so later faults are "impossible" */
	if (!access_ok(VERIFY_READ, new_ctx, ctx_size)
	    || __get_user(tmp, (u8 __user *) new_ctx)
	    || __get_user(tmp, (u8 __user *) new_ctx + ctx_size - 1))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(new_ctx, regs, 0))
		do_exit(SIGSEGV);

	/* Full register image was replaced; restore all on syscall exit */
	set_thread_flag(TIF_RESTOREALL);
	return 0;
}
1188
/*
 * rt_sigreturn syscall: tear down the rt signal frame that
 * handle_rt_signal32() built at sp + __SIGNAL_FRAMESIZE + 16 and restore
 * the interrupted register state, signal mask and altstack.  On TM
 * kernels, a non-NULL uc_link selects the transactional restore path.
 *
 * Returns 0; a bad frame raises SIGSEGV on the task instead.
 */
long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
		      struct pt_regs *regs)
{
	struct rt_sigframe __user *rt_sf;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext __user *uc_transact;
	unsigned long msr_hi;
	unsigned long tmp;
	int tm_restore = 0;
#endif
	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/* Locate the frame laid down by handle_rt_signal32() */
	rt_sf = (struct rt_sigframe __user *)
		(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
	if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf)))
		goto bad;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * If there is a transactional state then throw it away.
	 * The purpose of a sigreturn is to destroy all traces of the
	 * signal frame, this includes any transactional state created
	 * within in. We only check for suspended as we can never be
	 * active in the kernel, we are active, there is nothing better to
	 * do than go ahead and Bad Thing later.
	 * The cause is not important as there will never be a
	 * recheckpoint so it's not user visible.
	 */
	if (MSR_TM_SUSPENDED(mfmsr()))
		tm_reclaim_current(0);

	if (__get_user(tmp, &rt_sf->uc.uc_link))
		goto bad;
	uc_transact = (struct ucontext __user *)(uintptr_t)tmp;
	if (uc_transact) {
		u32 cmcp;
		struct mcontext __user *mcp;

		if (__get_user(cmcp, &uc_transact->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* The top 32 bits of the MSR are stashed in the transactional
		 * ucontext. */
		if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
			goto bad;

		if (MSR_TM_ACTIVE(msr_hi<<32)) {
			/* We only recheckpoint on return if we're
			 * transaction.
			 */
			tm_restore = 1;
			if (do_setcontext_tm(&rt_sf->uc, uc_transact, regs))
				goto bad;
		}
	}
	if (!tm_restore)
		/* Fall through, for non-TM restore */
#endif
	if (do_setcontext(&rt_sf->uc, regs, 1))
		goto bad;

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return.  But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it.  -- paulus
	 */
#ifdef CONFIG_PPC64
	if (compat_restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#else
	if (restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#endif
	/* Full register image was replaced; restore all on syscall exit */
	set_thread_flag(TIF_RESTOREALL);
	return 0;

 bad:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in sys_rt_sigreturn: "
				   "%p nip %08lx lr %08lx\n",
				   current->comm, current->pid,
				   rt_sf, regs->nip, regs->link);

	force_sig(SIGSEGV, current);
	return 0;
}
1279
1280#ifdef CONFIG_PPC32
/*
 * debug_setcontext syscall (PPC32 only): apply @ndbg debug-state ops
 * (single-stepping / branch tracing) and then install the ucontext @ctx.
 *
 * The ops are validated into local copies of MSR (and DBCR0 on
 * ADV_DEBUG_REGS parts) first, and committed only after the whole list
 * parses, so a bad op leaves the registers untouched.
 *
 * Returns 0 on success, -EFAULT/-EINVAL on bad input; a fault while
 * installing @ctx after validation raises SIGSEGV (see comment below).
 */
int sys_debug_setcontext(struct ucontext __user *ctx,
			 int ndbg, struct sig_dbg_op __user *dbg,
			 int r6, int r7, int r8,
			 struct pt_regs *regs)
{
	struct sig_dbg_op op;
	int i;
	unsigned char tmp;	/* scratch for the first/last-byte probes */
	unsigned long new_msr = regs->msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long new_dbcr0 = current->thread.debug.dbcr0;
#endif

	for (i=0; i<ndbg; i++) {
		if (copy_from_user(&op, dbg + i, sizeof(op)))
			return -EFAULT;
		switch (op.dbg_type) {
		case SIG_DBG_SINGLE_STEPPING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			if (op.dbg_value) {
				new_msr |= MSR_DE;
				new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
			} else {
				new_dbcr0 &= ~DBCR0_IC;
				/* Only drop MSR_DE if no debug events remain */
				if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
						current->thread.debug.dbcr1)) {
					new_msr &= ~MSR_DE;
					new_dbcr0 &= ~DBCR0_IDM;
				}
			}
#else
			if (op.dbg_value)
				new_msr |= MSR_SE;
			else
				new_msr &= ~MSR_SE;
#endif
			break;
		case SIG_DBG_BRANCH_TRACING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			/* Not supported with the advanced debug registers */
			return -EINVAL;
#else
			if (op.dbg_value)
				new_msr |= MSR_BE;
			else
				new_msr &= ~MSR_BE;
#endif
			break;

		default:
			return -EINVAL;
		}
	}

	/* We wait until here to actually install the values in the
	   registers so if we fail in the above loop, it will not
	   affect the contents of these registers.  After this point,
	   failure is a problem, anyway, and it's very unlikely unless
	   the user is really doing something wrong. */
	regs->msr = new_msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	current->thread.debug.dbcr0 = new_dbcr0;
#endif

	/* Probe the first and last bytes so later faults are "impossible" */
	if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx))
	    || __get_user(tmp, (u8 __user *) ctx)
	    || __get_user(tmp, (u8 __user *) (ctx + 1) - 1))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(ctx, regs, 1)) {
		if (show_unhandled_signals)
			printk_ratelimited(KERN_INFO "%s[%d]: bad frame in "
					   "sys_debug_setcontext: %p nip %08lx "
					   "lr %08lx\n",
					   current->comm, current->pid,
					   ctx, regs->nip, regs->link);

		force_sig(SIGSEGV, current);
		goto out;
	}

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return.  But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it.  -- paulus
	 */
	restore_altstack(&ctx->uc_stack);

	/* Full register image was replaced; restore all on syscall exit */
	set_thread_flag(TIF_RESTOREALL);
 out:
	return 0;
}
1385#endif
1386
1387/*
1388 * OK, we're invoking a handler
1389 */
Cyril Burd1199432016-09-23 16:18:12 +10001390int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
1391 struct task_struct *tsk)
Stephen Rothwell81e70092005-10-18 11:17:58 +10001392{
1393 struct sigcontext __user *sc;
Benjamin Herrenschmidta3f61dc2007-06-04 17:22:48 +10001394 struct sigframe __user *frame;
Michael Neuling1d25f112013-06-09 21:23:15 +10001395 struct mcontext __user *tm_mctx = NULL;
Benjamin Herrenschmidta3f61dc2007-06-04 17:22:48 +10001396 unsigned long newsp = 0;
Michael Neuling2b0a5762013-02-13 16:21:41 +00001397 int sigret;
1398 unsigned long tramp;
Cyril Burd1199432016-09-23 16:18:12 +10001399 struct pt_regs *regs = tsk->thread.regs;
1400
1401 BUG_ON(tsk != current);
Stephen Rothwell81e70092005-10-18 11:17:58 +10001402
1403 /* Set up Signal Frame */
Cyril Burd1199432016-09-23 16:18:12 +10001404 frame = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*frame), 1);
Benjamin Herrenschmidta3f61dc2007-06-04 17:22:48 +10001405 if (unlikely(frame == NULL))
Stephen Rothwell81e70092005-10-18 11:17:58 +10001406 goto badframe;
Benjamin Herrenschmidta3f61dc2007-06-04 17:22:48 +10001407 sc = (struct sigcontext __user *) &frame->sctx;
Stephen Rothwell81e70092005-10-18 11:17:58 +10001408
1409#if _NSIG != 64
1410#error "Please adjust handle_signal()"
1411#endif
Richard Weinberger129b69d2014-03-02 14:46:11 +01001412 if (__put_user(to_user_ptr(ksig->ka.sa.sa_handler), &sc->handler)
Stephen Rothwell81e70092005-10-18 11:17:58 +10001413 || __put_user(oldset->sig[0], &sc->oldmask)
1414#ifdef CONFIG_PPC64
1415 || __put_user((oldset->sig[0] >> 32), &sc->_unused[3])
1416#else
1417 || __put_user(oldset->sig[1], &sc->_unused[3])
1418#endif
Benjamin Herrenschmidta3f61dc2007-06-04 17:22:48 +10001419 || __put_user(to_user_ptr(&frame->mctx), &sc->regs)
Richard Weinberger129b69d2014-03-02 14:46:11 +01001420 || __put_user(ksig->sig, &sc->signal))
Stephen Rothwell81e70092005-10-18 11:17:58 +10001421 goto badframe;
1422
Cyril Burd1199432016-09-23 16:18:12 +10001423 if (vdso32_sigtramp && tsk->mm->context.vdso_base) {
Michael Neuling2b0a5762013-02-13 16:21:41 +00001424 sigret = 0;
Cyril Burd1199432016-09-23 16:18:12 +10001425 tramp = tsk->mm->context.vdso_base + vdso32_sigtramp;
Benjamin Herrenschmidta7f290d2005-11-11 21:15:21 +11001426 } else {
Michael Neuling2b0a5762013-02-13 16:21:41 +00001427 sigret = __NR_sigreturn;
1428 tramp = (unsigned long) frame->mctx.tramp;
Stephen Rothwell81e70092005-10-18 11:17:58 +10001429 }
1430
Michael Neuling2b0a5762013-02-13 16:21:41 +00001431#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
Michael Neuling1d25f112013-06-09 21:23:15 +10001432 tm_mctx = &frame->mctx_transact;
Michael Neuling2b0a5762013-02-13 16:21:41 +00001433 if (MSR_TM_ACTIVE(regs->msr)) {
1434 if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
1435 sigret))
1436 goto badframe;
1437 }
1438 else
1439#endif
Michael Neuling1d25f112013-06-09 21:23:15 +10001440 {
1441 if (save_user_regs(regs, &frame->mctx, tm_mctx, sigret, 1))
Michael Neuling2b0a5762013-02-13 16:21:41 +00001442 goto badframe;
Michael Neuling1d25f112013-06-09 21:23:15 +10001443 }
Michael Neuling2b0a5762013-02-13 16:21:41 +00001444
1445 regs->link = tramp;
1446
Cyril Burd1199432016-09-23 16:18:12 +10001447 tsk->thread.fp_state.fpscr = 0; /* turn off all fp exceptions */
Paul Mackerrascc657f52005-11-14 21:55:15 +11001448
Benjamin Herrenschmidta3f61dc2007-06-04 17:22:48 +10001449 /* create a stack frame for the caller of the handler */
1450 newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
Stephen Rothwell81e70092005-10-18 11:17:58 +10001451 if (put_user(regs->gpr[1], (u32 __user *)newsp))
1452 goto badframe;
Benjamin Herrenschmidta3f61dc2007-06-04 17:22:48 +10001453
Stephen Rothwell81e70092005-10-18 11:17:58 +10001454 regs->gpr[1] = newsp;
Richard Weinberger129b69d2014-03-02 14:46:11 +01001455 regs->gpr[3] = ksig->sig;
Stephen Rothwell81e70092005-10-18 11:17:58 +10001456 regs->gpr[4] = (unsigned long) sc;
Richard Weinberger129b69d2014-03-02 14:46:11 +01001457 regs->nip = (unsigned long) (unsigned long)ksig->ka.sa.sa_handler;
Paul Mackerrasfab5db92006-06-07 16:14:40 +10001458 /* enter the signal handler in big-endian mode */
1459 regs->msr &= ~MSR_LE;
Richard Weinberger129b69d2014-03-02 14:46:11 +01001460 return 0;
Stephen Rothwell81e70092005-10-18 11:17:58 +10001461
1462badframe:
Christian Dietrich76462232011-06-04 05:36:54 +00001463 if (show_unhandled_signals)
1464 printk_ratelimited(KERN_INFO
1465 "%s[%d]: bad frame in handle_signal32: "
1466 "%p nip %08lx lr %08lx\n",
Cyril Burd1199432016-09-23 16:18:12 +10001467 tsk->comm, tsk->pid,
Christian Dietrich76462232011-06-04 05:36:54 +00001468 frame, regs->nip, regs->link);
Olof Johanssond0c3d532007-10-12 10:20:07 +10001469
Richard Weinberger129b69d2014-03-02 14:46:11 +01001470 return 1;
Stephen Rothwell81e70092005-10-18 11:17:58 +10001471}
1472
1473/*
1474 * Do a signal return; undo the signal stack.
1475 */
1476long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
1477 struct pt_regs *regs)
1478{
Michael Neulingfee554502013-06-09 21:23:16 +10001479 struct sigframe __user *sf;
Stephen Rothwell81e70092005-10-18 11:17:58 +10001480 struct sigcontext __user *sc;
1481 struct sigcontext sigctx;
1482 struct mcontext __user *sr;
Olof Johanssond0c3d532007-10-12 10:20:07 +10001483 void __user *addr;
Stephen Rothwell81e70092005-10-18 11:17:58 +10001484 sigset_t set;
Michael Neulingfee554502013-06-09 21:23:16 +10001485#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1486 struct mcontext __user *mcp, *tm_mcp;
1487 unsigned long msr_hi;
1488#endif
Stephen Rothwell81e70092005-10-18 11:17:58 +10001489
1490 /* Always make any pending restarted system calls return -EINTR */
Andy Lutomirskif56141e2015-02-12 15:01:14 -08001491 current->restart_block.fn = do_no_restart_syscall;
Stephen Rothwell81e70092005-10-18 11:17:58 +10001492
Michael Neulingfee554502013-06-09 21:23:16 +10001493 sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
1494 sc = &sf->sctx;
Olof Johanssond0c3d532007-10-12 10:20:07 +10001495 addr = sc;
Stephen Rothwell81e70092005-10-18 11:17:58 +10001496 if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
1497 goto badframe;
1498
1499#ifdef CONFIG_PPC64
1500 /*
1501 * Note that PPC32 puts the upper 32 bits of the sigmask in the
1502 * unused part of the signal stackframe
1503 */
1504 set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
1505#else
1506 set.sig[0] = sigctx.oldmask;
1507 set.sig[1] = sigctx._unused[3];
1508#endif
Al Viro17440f12012-04-27 14:09:19 -04001509 set_current_blocked(&set);
Stephen Rothwell81e70092005-10-18 11:17:58 +10001510
Michael Neulingfee554502013-06-09 21:23:16 +10001511#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1512 mcp = (struct mcontext __user *)&sf->mctx;
1513 tm_mcp = (struct mcontext __user *)&sf->mctx_transact;
1514 if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
Stephen Rothwell81e70092005-10-18 11:17:58 +10001515 goto badframe;
Michael Neulingfee554502013-06-09 21:23:16 +10001516 if (MSR_TM_ACTIVE(msr_hi<<32)) {
1517 if (!cpu_has_feature(CPU_FTR_TM))
1518 goto badframe;
1519 if (restore_tm_user_regs(regs, mcp, tm_mcp))
1520 goto badframe;
1521 } else
1522#endif
1523 {
1524 sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
1525 addr = sr;
1526 if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
1527 || restore_user_regs(regs, sr, 1))
1528 goto badframe;
1529 }
Stephen Rothwell81e70092005-10-18 11:17:58 +10001530
David Woodhouse401d1f02005-11-15 18:52:18 +00001531 set_thread_flag(TIF_RESTOREALL);
Stephen Rothwell81e70092005-10-18 11:17:58 +10001532 return 0;
Stephen Rothwell81e70092005-10-18 11:17:58 +10001533
1534badframe:
Christian Dietrich76462232011-06-04 05:36:54 +00001535 if (show_unhandled_signals)
1536 printk_ratelimited(KERN_INFO
1537 "%s[%d]: bad frame in sys_sigreturn: "
1538 "%p nip %08lx lr %08lx\n",
1539 current->comm, current->pid,
1540 addr, regs->nip, regs->link);
Olof Johanssond0c3d532007-10-12 10:20:07 +10001541
Stephen Rothwell81e70092005-10-18 11:17:58 +10001542 force_sig(SIGSEGV, current);
1543 return 0;
1544}