blob: 9cf8a03d3bc75865841b7ad787db0bf8085585b7 [file] [log] [blame]
/*
 * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Copyright (C) 2001 IBM
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 *
 *  Derived from "arch/i386/kernel/signal.c"
 *    Copyright (C) 1991, 1992 Linus Torvalds
 *    1997-11-28  Modified for POSIX.1b signals by Richard Henderson
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
19
Stephen Rothwell81e70092005-10-18 11:17:58 +100020#include <linux/sched.h>
21#include <linux/mm.h>
22#include <linux/smp.h>
Stephen Rothwell81e70092005-10-18 11:17:58 +100023#include <linux/kernel.h>
24#include <linux/signal.h>
25#include <linux/errno.h>
26#include <linux/elf.h>
Lucas Woods05ead012007-12-13 15:56:06 -080027#include <linux/ptrace.h>
Christian Dietrich76462232011-06-04 05:36:54 +000028#include <linux/ratelimit.h>
Stephen Rothwell81e70092005-10-18 11:17:58 +100029#include <linux/syscalls.h>
Al Virof3675642018-05-02 23:20:47 +100030#ifdef CONFIG_PPC64
Stephen Rothwell81e70092005-10-18 11:17:58 +100031#include <linux/compat.h>
Stephen Rothwell81e70092005-10-18 11:17:58 +100032#else
33#include <linux/wait.h>
Stephen Rothwell81e70092005-10-18 11:17:58 +100034#include <linux/unistd.h>
35#include <linux/stddef.h>
36#include <linux/tty.h>
37#include <linux/binfmts.h>
Stephen Rothwell81e70092005-10-18 11:17:58 +100038#endif
39
Linus Torvalds7c0f6ba2016-12-24 11:46:01 -080040#include <linux/uaccess.h>
Stephen Rothwell81e70092005-10-18 11:17:58 +100041#include <asm/cacheflush.h>
Arnd Bergmanna7f31842006-03-23 00:00:08 +010042#include <asm/syscalls.h>
David Gibsonc5ff7002005-11-09 11:21:07 +110043#include <asm/sigcontext.h>
Benjamin Herrenschmidta7f290d2005-11-11 21:15:21 +110044#include <asm/vdso.h>
David Howellsae3a1972012-03-28 18:30:02 +010045#include <asm/switch_to.h>
Michael Neuling2b0a5762013-02-13 16:21:41 +000046#include <asm/tm.h>
Daniel Axtens0545d542016-09-06 15:32:43 +100047#include <asm/asm-prototypes.h>
Stephen Rothwell81e70092005-10-18 11:17:58 +100048#ifdef CONFIG_PPC64
Stephen Rothwell879168e2005-11-03 15:32:07 +110049#include "ppc32.h"
Stephen Rothwell81e70092005-10-18 11:17:58 +100050#include <asm/unistd.h>
Stephen Rothwell81e70092005-10-18 11:17:58 +100051#else
52#include <asm/ucontext.h>
53#include <asm/pgtable.h>
54#endif
55
Benjamin Herrenschmidt22e38f22007-06-04 15:15:49 +100056#include "signal.h"
57
Stephen Rothwell81e70092005-10-18 11:17:58 +100058
Stephen Rothwell81e70092005-10-18 11:17:58 +100059#ifdef CONFIG_PPC64
Stephen Rothwell81e70092005-10-18 11:17:58 +100060#define old_sigaction old_sigaction32
61#define sigcontext sigcontext32
62#define mcontext mcontext32
63#define ucontext ucontext32
64
Al Viro7cce2462012-12-23 03:26:46 -050065#define __save_altstack __compat_save_altstack
66
Stephen Rothwell81e70092005-10-18 11:17:58 +100067/*
Michael Neulingc1cb2992008-07-08 18:43:41 +100068 * Userspace code may pass a ucontext which doesn't include VSX added
69 * at the end. We need to check for this case.
70 */
71#define UCONTEXTSIZEWITHOUTVSX \
72 (sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))
73
74/*
Stephen Rothwell81e70092005-10-18 11:17:58 +100075 * Returning 0 means we return to userspace via
76 * ret_from_except and thus restore all user
77 * registers from *regs. This is what we need
78 * to do when a signal has been delivered.
79 */
Stephen Rothwell81e70092005-10-18 11:17:58 +100080
81#define GP_REGS_SIZE min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
82#undef __SIGNAL_FRAMESIZE
83#define __SIGNAL_FRAMESIZE __SIGNAL_FRAMESIZE32
84#undef ELF_NVRREG
85#define ELF_NVRREG ELF_NVRREG32
86
87/*
88 * Functions for flipping sigsets (thanks to brain dead generic
89 * implementation that makes things simple for little endian only)
90 */
91static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
92{
Al Viroa5ae7542017-09-04 12:17:38 -040093 return put_compat_sigset(uset, set, sizeof(*uset));
Stephen Rothwell81e70092005-10-18 11:17:58 +100094}
95
Paul Mackerras9b7cf8b2005-10-19 23:13:04 +100096static inline int get_sigset_t(sigset_t *set,
97 const compat_sigset_t __user *uset)
Stephen Rothwell81e70092005-10-18 11:17:58 +100098{
Al Viroa5ae7542017-09-04 12:17:38 -040099 return get_compat_sigset(set, uset);
Stephen Rothwell81e70092005-10-18 11:17:58 +1000100}
101
Al Viro29e646d2006-02-01 05:28:09 -0500102#define to_user_ptr(p) ptr_to_compat(p)
Stephen Rothwell81e70092005-10-18 11:17:58 +1000103#define from_user_ptr(p) compat_ptr(p)
104
105static inline int save_general_regs(struct pt_regs *regs,
106 struct mcontext __user *frame)
107{
108 elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
109 int i;
Madhavan Srinivasana8a4b032017-08-20 23:28:24 +0530110 /* Force usr to alway see softe as 1 (interrupts enabled) */
111 elf_greg_t64 softe = 0x1;
Stephen Rothwell81e70092005-10-18 11:17:58 +1000112
Paul Mackerras1bd79332006-03-08 13:24:22 +1100113 WARN_ON(!FULL_REGS(regs));
David Woodhouse401d1f02005-11-15 18:52:18 +0000114
115 for (i = 0; i <= PT_RESULT; i ++) {
116 if (i == 14 && !FULL_REGS(regs))
117 i = 32;
Madhavan Srinivasana8a4b032017-08-20 23:28:24 +0530118 if ( i == PT_SOFTE) {
119 if(__put_user((unsigned int)softe, &frame->mc_gregs[i]))
120 return -EFAULT;
121 else
122 continue;
123 }
Stephen Rothwell81e70092005-10-18 11:17:58 +1000124 if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i]))
125 return -EFAULT;
David Woodhouse401d1f02005-11-15 18:52:18 +0000126 }
Stephen Rothwell81e70092005-10-18 11:17:58 +1000127 return 0;
128}
129
130static inline int restore_general_regs(struct pt_regs *regs,
131 struct mcontext __user *sr)
132{
133 elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
134 int i;
135
136 for (i = 0; i <= PT_RESULT; i++) {
137 if ((i == PT_MSR) || (i == PT_SOFTE))
138 continue;
139 if (__get_user(gregs[i], &sr->mc_gregs[i]))
140 return -EFAULT;
141 }
142 return 0;
143}
144
145#else /* CONFIG_PPC64 */
146
Stephen Rothwell81e70092005-10-18 11:17:58 +1000147#define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
148
149static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set)
150{
151 return copy_to_user(uset, set, sizeof(*uset));
152}
153
Paul Mackerras9b7cf8b2005-10-19 23:13:04 +1000154static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
Stephen Rothwell81e70092005-10-18 11:17:58 +1000155{
156 return copy_from_user(set, uset, sizeof(*uset));
157}
158
Al Viro29e646d2006-02-01 05:28:09 -0500159#define to_user_ptr(p) ((unsigned long)(p))
160#define from_user_ptr(p) ((void __user *)(p))
Stephen Rothwell81e70092005-10-18 11:17:58 +1000161
162static inline int save_general_regs(struct pt_regs *regs,
163 struct mcontext __user *frame)
164{
Paul Mackerras1bd79332006-03-08 13:24:22 +1100165 WARN_ON(!FULL_REGS(regs));
Stephen Rothwell81e70092005-10-18 11:17:58 +1000166 return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
167}
168
169static inline int restore_general_regs(struct pt_regs *regs,
170 struct mcontext __user *sr)
171{
172 /* copy up to but not including MSR */
173 if (__copy_from_user(regs, &sr->mc_gregs,
174 PT_MSR * sizeof(elf_greg_t)))
175 return -EFAULT;
176 /* copy from orig_r3 (the word after the MSR) up to the end */
177 if (__copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
178 GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
179 return -EFAULT;
180 return 0;
181}
Stephen Rothwell81e70092005-10-18 11:17:58 +1000182#endif
183
/*
 * When we have signals to deliver, we set up on the
 * user stack, going down from the original stack pointer:
 *	an ABI gap of 56 words
 *	an mcontext struct
 *	a sigcontext struct
 *	a gap of __SIGNAL_FRAMESIZE bytes
 *
 * Each of these things must be a multiple of 16 bytes in size. The following
 * structure represents all of this except the __SIGNAL_FRAMESIZE gap.
 *
 */
/* Non-RT signal frame placed on the user stack (layout is ABI). */
struct sigframe {
	struct sigcontext sctx;		/* the sigcontext */
	struct mcontext	mctx;		/* all the register values */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* second context pair holding the transactional register state */
	struct sigcontext sctx_transact;
	struct mcontext	mctx_transact;
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};
209
210/* We use the mc_pad field for the signal return trampoline. */
211#define tramp mc_pad
212
/*
 * When we have rt signals to deliver, we set up on the
 * user stack, going down from the original stack pointer:
 *	one rt_sigframe struct (siginfo + ucontext + ABI gap)
 *	a gap of __SIGNAL_FRAMESIZE+16 bytes
 * (the +16 is to get the siginfo and ucontext in the same
 * positions as in older kernels).
 *
 * Each of these things must be a multiple of 16 bytes in size.
 *
 */
/* RT signal frame placed on the user stack (layout is ABI). */
struct rt_sigframe {
#ifdef CONFIG_PPC64
	compat_siginfo_t info;		/* 32-bit siginfo on a 64-bit kernel */
#else
	struct siginfo info;
#endif
	struct ucontext	uc;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* second ucontext holding the transactional register state */
	struct ucontext	uc_transact;
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};
240
Michael Neuling6a274c02008-07-02 14:06:37 +1000241#ifdef CONFIG_VSX
242unsigned long copy_fpr_to_user(void __user *to,
243 struct task_struct *task)
244{
Paul Mackerrasde79f7b2013-09-10 20:20:42 +1000245 u64 buf[ELF_NFPREG];
Michael Neuling6a274c02008-07-02 14:06:37 +1000246 int i;
247
248 /* save FPR copy to local buffer then write to the thread_struct */
249 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
250 buf[i] = task->thread.TS_FPR(i);
Paul Mackerrasde79f7b2013-09-10 20:20:42 +1000251 buf[i] = task->thread.fp_state.fpscr;
Michael Neuling6a274c02008-07-02 14:06:37 +1000252 return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
253}
254
255unsigned long copy_fpr_from_user(struct task_struct *task,
256 void __user *from)
257{
Paul Mackerrasde79f7b2013-09-10 20:20:42 +1000258 u64 buf[ELF_NFPREG];
Michael Neuling6a274c02008-07-02 14:06:37 +1000259 int i;
260
261 if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
262 return 1;
263 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
264 task->thread.TS_FPR(i) = buf[i];
Paul Mackerrasde79f7b2013-09-10 20:20:42 +1000265 task->thread.fp_state.fpscr = buf[i];
Michael Neuling6a274c02008-07-02 14:06:37 +1000266
267 return 0;
268}
269
270unsigned long copy_vsx_to_user(void __user *to,
271 struct task_struct *task)
272{
Paul Mackerrasde79f7b2013-09-10 20:20:42 +1000273 u64 buf[ELF_NVSRHALFREG];
Michael Neuling6a274c02008-07-02 14:06:37 +1000274 int i;
275
276 /* save FPR copy to local buffer then write to the thread_struct */
277 for (i = 0; i < ELF_NVSRHALFREG; i++)
Paul Mackerrasde79f7b2013-09-10 20:20:42 +1000278 buf[i] = task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
Michael Neuling6a274c02008-07-02 14:06:37 +1000279 return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
280}
281
282unsigned long copy_vsx_from_user(struct task_struct *task,
283 void __user *from)
284{
Paul Mackerrasde79f7b2013-09-10 20:20:42 +1000285 u64 buf[ELF_NVSRHALFREG];
Michael Neuling6a274c02008-07-02 14:06:37 +1000286 int i;
287
288 if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
289 return 1;
290 for (i = 0; i < ELF_NVSRHALFREG ; i++)
Paul Mackerrasde79f7b2013-09-10 20:20:42 +1000291 task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
Michael Neuling6a274c02008-07-02 14:06:37 +1000292 return 0;
293}
Michael Neuling2b0a5762013-02-13 16:21:41 +0000294
295#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
Cyril Bur000ec282016-09-23 16:18:25 +1000296unsigned long copy_ckfpr_to_user(void __user *to,
Michael Neuling2b0a5762013-02-13 16:21:41 +0000297 struct task_struct *task)
298{
Paul Mackerrasde79f7b2013-09-10 20:20:42 +1000299 u64 buf[ELF_NFPREG];
Michael Neuling2b0a5762013-02-13 16:21:41 +0000300 int i;
301
302 /* save FPR copy to local buffer then write to the thread_struct */
303 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
Cyril Bur000ec282016-09-23 16:18:25 +1000304 buf[i] = task->thread.TS_CKFPR(i);
305 buf[i] = task->thread.ckfp_state.fpscr;
Michael Neuling2b0a5762013-02-13 16:21:41 +0000306 return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
307}
308
Cyril Bur000ec282016-09-23 16:18:25 +1000309unsigned long copy_ckfpr_from_user(struct task_struct *task,
Michael Neuling2b0a5762013-02-13 16:21:41 +0000310 void __user *from)
311{
Paul Mackerrasde79f7b2013-09-10 20:20:42 +1000312 u64 buf[ELF_NFPREG];
Michael Neuling2b0a5762013-02-13 16:21:41 +0000313 int i;
314
315 if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
316 return 1;
317 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
Cyril Bur000ec282016-09-23 16:18:25 +1000318 task->thread.TS_CKFPR(i) = buf[i];
319 task->thread.ckfp_state.fpscr = buf[i];
Michael Neuling2b0a5762013-02-13 16:21:41 +0000320
321 return 0;
322}
323
Cyril Bur000ec282016-09-23 16:18:25 +1000324unsigned long copy_ckvsx_to_user(void __user *to,
Michael Neuling2b0a5762013-02-13 16:21:41 +0000325 struct task_struct *task)
326{
Paul Mackerrasde79f7b2013-09-10 20:20:42 +1000327 u64 buf[ELF_NVSRHALFREG];
Michael Neuling2b0a5762013-02-13 16:21:41 +0000328 int i;
329
330 /* save FPR copy to local buffer then write to the thread_struct */
331 for (i = 0; i < ELF_NVSRHALFREG; i++)
Cyril Bur000ec282016-09-23 16:18:25 +1000332 buf[i] = task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
Michael Neuling2b0a5762013-02-13 16:21:41 +0000333 return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
334}
335
Cyril Bur000ec282016-09-23 16:18:25 +1000336unsigned long copy_ckvsx_from_user(struct task_struct *task,
Michael Neuling2b0a5762013-02-13 16:21:41 +0000337 void __user *from)
338{
Paul Mackerrasde79f7b2013-09-10 20:20:42 +1000339 u64 buf[ELF_NVSRHALFREG];
Michael Neuling2b0a5762013-02-13 16:21:41 +0000340 int i;
341
342 if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
343 return 1;
344 for (i = 0; i < ELF_NVSRHALFREG ; i++)
Cyril Bur000ec282016-09-23 16:18:25 +1000345 task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
Michael Neuling2b0a5762013-02-13 16:21:41 +0000346 return 0;
347}
348#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
Michael Neuling6a274c02008-07-02 14:06:37 +1000349#else
350inline unsigned long copy_fpr_to_user(void __user *to,
351 struct task_struct *task)
352{
Paul Mackerrasde79f7b2013-09-10 20:20:42 +1000353 return __copy_to_user(to, task->thread.fp_state.fpr,
Michael Neuling6a274c02008-07-02 14:06:37 +1000354 ELF_NFPREG * sizeof(double));
355}
356
357inline unsigned long copy_fpr_from_user(struct task_struct *task,
358 void __user *from)
359{
Paul Mackerrasde79f7b2013-09-10 20:20:42 +1000360 return __copy_from_user(task->thread.fp_state.fpr, from,
Michael Neuling6a274c02008-07-02 14:06:37 +1000361 ELF_NFPREG * sizeof(double));
362}
Michael Neuling2b0a5762013-02-13 16:21:41 +0000363
364#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
Cyril Bur000ec282016-09-23 16:18:25 +1000365inline unsigned long copy_ckfpr_to_user(void __user *to,
Michael Neuling2b0a5762013-02-13 16:21:41 +0000366 struct task_struct *task)
367{
Cyril Bur000ec282016-09-23 16:18:25 +1000368 return __copy_to_user(to, task->thread.ckfp_state.fpr,
Michael Neuling2b0a5762013-02-13 16:21:41 +0000369 ELF_NFPREG * sizeof(double));
370}
371
Cyril Bur000ec282016-09-23 16:18:25 +1000372inline unsigned long copy_ckfpr_from_user(struct task_struct *task,
Michael Neuling2b0a5762013-02-13 16:21:41 +0000373 void __user *from)
374{
Cyril Bur000ec282016-09-23 16:18:25 +1000375 return __copy_from_user(task->thread.ckfp_state.fpr, from,
Michael Neuling2b0a5762013-02-13 16:21:41 +0000376 ELF_NFPREG * sizeof(double));
377}
378#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
Michael Neuling6a274c02008-07-02 14:06:37 +1000379#endif
380
Stephen Rothwell81e70092005-10-18 11:17:58 +1000381/*
382 * Save the current user registers on the user stack.
383 * We only save the altivec/spe registers if the process has used
384 * altivec/spe instructions at some point.
385 */
/*
 * Returns 0 on success, 1 on any userspace access fault.  The saved MSR
 * advertises which optional register sets (VEC/VSX/SPE) the frame holds.
 */
static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
			  struct mcontext __user *tm_frame, int sigret,
			  int ctx_has_vsx_region)
{
	/* Accumulate feature bits here; written to the frame's MSR slot last */
	unsigned long msr = regs->msr;

	/* Make sure floating point registers are stored in regs */
	flush_fp_to_thread(current);

	/* save general registers */
	if (save_general_regs(regs, frame))
		return 1;

#ifdef CONFIG_ALTIVEC
	/* save altivec registers */
	if (current->thread.used_vr) {
		flush_altivec_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
				   ELF_NVRREG * sizeof(vector128)))
			return 1;
		/* set MSR_VEC in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		msr |= MSR_VEC;
	}
	/* else assert((regs->msr & MSR_VEC) == 0) */

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec. Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 * Note that the current VRSAVE value is in the SPR at this point.
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.vrsave = mfspr(SPRN_VRSAVE);
	if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
		return 1;
#endif /* CONFIG_ALTIVEC */
	if (copy_fpr_to_user(&frame->mc_fregs, current))
		return 1;

	/*
	 * Clear the MSR VSX bit to indicate there is no valid state attached
	 * to this context, except in the specific case below where we set it.
	 */
	msr &= ~MSR_VSX;
#ifdef CONFIG_VSX
	/*
	 * Copy VSR 0-31 upper half from thread_struct to local
	 * buffer, then write that to userspace.  Also set MSR_VSX in
	 * the saved MSR value to indicate that frame->mc_vregs
	 * contains valid data
	 */
	if (current->thread.used_vsr && ctx_has_vsx_region) {
		flush_vsx_to_thread(current);
		if (copy_vsx_to_user(&frame->mc_vsregs, current))
			return 1;
		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* save spe registers */
	if (current->thread.used_spe) {
		flush_spe_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
				   ELF_NEVRREG * sizeof(u32)))
			return 1;
		/* set MSR_SPE in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		msr |= MSR_SPE;
	}
	/* else assert((regs->msr & MSR_SPE) == 0) */

	/* We always copy to/from spefscr */
	if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
		return 1;
	/* We need to write 0 the MSR top 32 bits in the tm frame so that we
	 * can check it on the restore to see if TM is active
	 */
	if (tm_frame && __put_user(0, &tm_frame->mc_gregs[PT_MSR]))
		return 1;

	if (sigret) {
		/* Set up the sigreturn trampoline: li r0,sigret; sc */
		if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
		    || __put_user(0x44000002UL, &frame->tramp[1]))
			return 1;
		/* make the freshly written trampoline visible to ifetch */
		flush_icache_range((unsigned long) &frame->tramp[0],
				   (unsigned long) &frame->tramp[2]);
	}

	return 0;
}
482
Michael Neuling2b0a5762013-02-13 16:21:41 +0000483#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
484/*
485 * Save the current user registers on the user stack.
486 * We only save the altivec/spe registers if the process has used
487 * altivec/spe instructions at some point.
488 * We also save the transactional registers to a second ucontext in the
489 * frame.
490 *
491 * See save_user_regs() and signal_64.c:setup_tm_sigcontexts().
492 */
/*
 * Returns 0 on success, 1 on any userspace access fault.  'frame' gets
 * the checkpointed register state, 'tm_frame' the transactional state.
 */
static int save_tm_user_regs(struct pt_regs *regs,
			     struct mcontext __user *frame,
			     struct mcontext __user *tm_frame, int sigret)
{
	unsigned long msr = regs->msr;

	WARN_ON(tm_suspend_disabled);

	/* Remove TM bits from thread's MSR.  The MSR in the sigcontext
	 * just indicates to userland that we were doing a transaction, but we
	 * don't want to return in transactional state.  This also ensures
	 * that flush_fp_to_thread won't set TIF_RESTORE_TM again.
	 */
	regs->msr &= ~MSR_TS_MASK;

	/* Save both sets of general registers */
	if (save_general_regs(&current->thread.ckpt_regs, frame)
	    || save_general_regs(regs, tm_frame))
		return 1;

	/* Stash the top half of the 64bit MSR into the 32bit MSR word
	 * of the transactional mcontext.  This way we have a backward-compatible
	 * MSR in the 'normal' (checkpointed) mcontext and additionally one can
	 * also look at what type of transaction (T or S) was active at the
	 * time of the signal.
	 */
	if (__put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR]))
		return 1;

#ifdef CONFIG_ALTIVEC
	/* save altivec registers */
	if (current->thread.used_vr) {
		/* checkpointed VMX state always goes in the normal frame */
		if (__copy_to_user(&frame->mc_vregs, &current->thread.ckvr_state,
				   ELF_NVRREG * sizeof(vector128)))
			return 1;
		/* tm frame gets the live state only if VEC was in use */
		if (msr & MSR_VEC) {
			if (__copy_to_user(&tm_frame->mc_vregs,
					   &current->thread.vr_state,
					   ELF_NVRREG * sizeof(vector128)))
				return 1;
		} else {
			if (__copy_to_user(&tm_frame->mc_vregs,
					   &current->thread.ckvr_state,
					   ELF_NVRREG * sizeof(vector128)))
				return 1;
		}

		/* set MSR_VEC in the saved MSR value to indicate that
		 * frame->mc_vregs contains valid data
		 */
		msr |= MSR_VEC;
	}

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec.  Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.ckvrsave = mfspr(SPRN_VRSAVE);
	if (__put_user(current->thread.ckvrsave,
		       (u32 __user *)&frame->mc_vregs[32]))
		return 1;
	if (msr & MSR_VEC) {
		if (__put_user(current->thread.vrsave,
			       (u32 __user *)&tm_frame->mc_vregs[32]))
			return 1;
	} else {
		if (__put_user(current->thread.ckvrsave,
			       (u32 __user *)&tm_frame->mc_vregs[32]))
			return 1;
	}
#endif /* CONFIG_ALTIVEC */

	/* Same pattern for FP: checkpointed to frame, live (or checkpointed
	 * again, if FP wasn't in use) to tm_frame. */
	if (copy_ckfpr_to_user(&frame->mc_fregs, current))
		return 1;
	if (msr & MSR_FP) {
		if (copy_fpr_to_user(&tm_frame->mc_fregs, current))
			return 1;
	} else {
		if (copy_ckfpr_to_user(&tm_frame->mc_fregs, current))
			return 1;
	}

#ifdef CONFIG_VSX
	/*
	 * Copy VSR 0-31 upper half from thread_struct to local
	 * buffer, then write that to userspace.  Also set MSR_VSX in
	 * the saved MSR value to indicate that frame->mc_vregs
	 * contains valid data
	 */
	if (current->thread.used_vsr) {
		if (copy_ckvsx_to_user(&frame->mc_vsregs, current))
			return 1;
		if (msr & MSR_VSX) {
			if (copy_vsx_to_user(&tm_frame->mc_vsregs,
						      current))
				return 1;
		} else {
			if (copy_ckvsx_to_user(&tm_frame->mc_vsregs, current))
				return 1;
		}

		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* SPE regs are not checkpointed with TM, so this section is
	 * simply the same as in save_user_regs().
	 */
	if (current->thread.used_spe) {
		flush_spe_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
				   ELF_NEVRREG * sizeof(u32)))
			return 1;
		/* set MSR_SPE in the saved MSR value to indicate that
		 * frame->mc_vregs contains valid data */
		msr |= MSR_SPE;
	}

	/* We always copy to/from spefscr */
	if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
		return 1;

#endif /* CONFIG_SPE */

	if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
		return 1;
	if (sigret) {
		/* Set up the sigreturn trampoline: li r0,sigret; sc */
		if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
		    || __put_user(0x44000002UL, &frame->tramp[1]))
			return 1;
		/* make the freshly written trampoline visible to ifetch */
		flush_icache_range((unsigned long) &frame->tramp[0],
				   (unsigned long) &frame->tramp[2]);
	}

	return 0;
}
631#endif
632
Stephen Rothwell81e70092005-10-18 11:17:58 +1000633/*
634 * Restore the current user register values from the user stack,
635 * (except for MSR).
636 */
/*
 * Returns 0 on success, 1 on any userspace access fault.  'sig' non-zero
 * means a signal return (restore LE bit, clobber r2); zero means a
 * swapcontext-style restore (preserve r2/TLS).
 */
static long restore_user_regs(struct pt_regs *regs,
			      struct mcontext __user *sr, int sig)
{
	long err;
	unsigned int save_r2 = 0;
	unsigned long msr;
#ifdef CONFIG_VSX
	int i;
#endif

	/*
	 * restore general registers but not including MSR or SOFTE. Also
	 * take care of keeping r2 (TLS) intact if not a signal
	 */
	if (!sig)
		save_r2 = (unsigned int)regs->gpr[2];
	err = restore_general_regs(regs, sr);
	/* NOTE(review): trap cleared so the regs no longer look like an
	 * exception entry — confirm against callers */
	regs->trap = 0;
	err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
	if (!sig)
		regs->gpr[2] = (unsigned long) save_r2;
	if (err)
		return 1;

	/* if doing signal return, restore the previous little-endian mode */
	if (sig)
		regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);

#ifdef CONFIG_ALTIVEC
	/*
	 * Force the process to reload the altivec registers from
	 * current->thread when it next does altivec instructions
	 */
	regs->msr &= ~MSR_VEC;
	if (msr & MSR_VEC) {
		/* restore altivec registers from the stack */
		if (__copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
				     sizeof(sr->mc_vregs)))
			return 1;
		current->thread.used_vr = true;
	} else if (current->thread.used_vr)
		/* frame had no VMX state: zero any stale thread copy */
		memset(&current->thread.vr_state, 0,
		       ELF_NVRREG * sizeof(vector128));

	/* Always get VRSAVE back */
	if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
		return 1;
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, current->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
	if (copy_fpr_from_user(current, &sr->mc_fregs))
		return 1;

#ifdef CONFIG_VSX
	/*
	 * Force the process to reload the VSX registers from
	 * current->thread when it next does VSX instruction.
	 */
	regs->msr &= ~MSR_VSX;
	if (msr & MSR_VSX) {
		/*
		 * Restore VSX registers from the stack to a local
		 * buffer, then write this out to the thread_struct
		 */
		if (copy_vsx_from_user(current, &sr->mc_vsregs))
			return 1;
		current->thread.used_vsr = true;
	} else if (current->thread.used_vsr)
		/* frame had no VSX state: zero the stale upper halves */
		for (i = 0; i < 32 ; i++)
			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
#endif /* CONFIG_VSX */
	/*
	 * force the process to reload the FP registers from
	 * current->thread when it next does FP instructions
	 */
	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);

#ifdef CONFIG_SPE
	/* force the process to reload the spe registers from
	   current->thread when it next does spe instructions */
	regs->msr &= ~MSR_SPE;
	if (msr & MSR_SPE) {
		/* restore spe registers from the stack */
		if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
				     ELF_NEVRREG * sizeof(u32)))
			return 1;
		current->thread.used_spe = true;
	} else if (current->thread.used_spe)
		memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));

	/* Always get SPEFSCR back */
	if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	return 0;
}
734
Michael Neuling2b0a5762013-02-13 16:21:41 +0000735#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
736/*
737 * Restore the current user register values from the user stack, except for
738 * MSR, and recheckpoint the original checkpointed register state for processes
739 * in transactions.
740 */
/*
 * Restore both the checkpointed and the transactional register state from
 * the two user-stack mcontexts (@sr = checkpointed frame, @tm_sr =
 * transactional frame), then recheckpoint so the thread resumes with a
 * valid suspended transaction.
 *
 * Returns 0 on success, 1 on any fault or invalid context (caller turns
 * this into -EFAULT / SIGSEGV).
 */
static long restore_tm_user_regs(struct pt_regs *regs,
				 struct mcontext __user *sr,
				 struct mcontext __user *tm_sr)
{
	long err;
	unsigned long msr, msr_hi;
#ifdef CONFIG_VSX
	int i;
#endif

	/* Cannot recheckpoint when TM suspend mode is disabled on this CPU */
	if (tm_suspend_disabled)
		return 1;
	/*
	 * restore general registers but not including MSR or SOFTE. Also
	 * take care of keeping r2 (TLS) intact if not a signal.
	 * See comment in signal_64.c:restore_tm_sigcontexts();
	 * TFHAR is restored from the checkpointed NIP; TEXASR and TFIAR
	 * were set by the signal delivery.
	 */
	err = restore_general_regs(regs, tm_sr);
	err |= restore_general_regs(&current->thread.ckpt_regs, sr);

	err |= __get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP]);

	/* msr is the *checkpointed* MSR; it drives which FP/VEC/VSX/SPE
	 * state we pull back below. */
	err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
	if (err)
		return 1;

	/* Restore the previous little-endian mode */
	regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);

#ifdef CONFIG_ALTIVEC
	/* Clear MSR_VEC so a lazy reload happens if we don't restore here */
	regs->msr &= ~MSR_VEC;
	if (msr & MSR_VEC) {
		/* restore altivec registers from the stack: checkpointed
		 * regs from @sr, transactional (speculative) regs from
		 * @tm_sr */
		if (__copy_from_user(&current->thread.ckvr_state, &sr->mc_vregs,
				     sizeof(sr->mc_vregs)) ||
		    __copy_from_user(&current->thread.vr_state,
				     &tm_sr->mc_vregs,
				     sizeof(sr->mc_vregs)))
			return 1;
		current->thread.used_vr = true;
	} else if (current->thread.used_vr) {
		/* Signal frame carried no VMX state: zero both copies so no
		 * stale register contents leak through */
		memset(&current->thread.vr_state, 0,
		       ELF_NVRREG * sizeof(vector128));
		memset(&current->thread.ckvr_state, 0,
		       ELF_NVRREG * sizeof(vector128));
	}

	/* Always get VRSAVE back */
	if (__get_user(current->thread.ckvrsave,
		       (u32 __user *)&sr->mc_vregs[32]) ||
	    __get_user(current->thread.vrsave,
		       (u32 __user *)&tm_sr->mc_vregs[32]))
		return 1;
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, current->thread.ckvrsave);
#endif /* CONFIG_ALTIVEC */

	/* Force lazy FP reload; FE0/FE1 re-derived from fpexc_mode below */
	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);

	if (copy_fpr_from_user(current, &sr->mc_fregs) ||
	    copy_ckfpr_from_user(current, &tm_sr->mc_fregs))
		return 1;

#ifdef CONFIG_VSX
	regs->msr &= ~MSR_VSX;
	if (msr & MSR_VSX) {
		/*
		 * Restore altivec registers from the stack to a local
		 * buffer, then write this out to the thread_struct
		 */
		if (copy_vsx_from_user(current, &tm_sr->mc_vsregs) ||
		    copy_ckvsx_from_user(current, &sr->mc_vsregs))
			return 1;
		current->thread.used_vsr = true;
	} else if (current->thread.used_vsr)
		/* Zero the doubleword-1 halves of the VSRs in both copies */
		for (i = 0; i < 32 ; i++) {
			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
			current->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
		}
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE
	/* SPE regs are not checkpointed with TM, so this section is
	 * simply the same as in restore_user_regs().
	 */
	regs->msr &= ~MSR_SPE;
	if (msr & MSR_SPE) {
		if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
				     ELF_NEVRREG * sizeof(u32)))
			return 1;
		current->thread.used_spe = true;
	} else if (current->thread.used_spe)
		memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));

	/* Always get SPEFSCR back */
	if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs
		       + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	/* Get the top half of the MSR from the user context */
	if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
		return 1;
	msr_hi <<= 32;
	/* If TM bits are set to the reserved value, it's an invalid context */
	if (MSR_TM_RESV(msr_hi))
		return 1;
	/* Pull in the MSR TM bits from the user context */
	regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
	/* Now, recheckpoint. This loads up all of the checkpointed (older)
	 * registers, including FP and V[S]Rs. After recheckpointing, the
	 * transactional versions should be loaded.
	 */
	tm_enable();
	/* Make sure the transaction is marked as failed */
	current->thread.tm_texasr |= TEXASR_FS;
	/* This loads the checkpointed FP/VEC state, if used */
	tm_recheckpoint(&current->thread);

	/* This loads the speculative FP/VEC state, if used */
	msr_check_and_set(msr & (MSR_FP | MSR_VEC));
	if (msr & MSR_FP) {
		load_fp_state(&current->thread.fp_state);
		regs->msr |= (MSR_FP | current->thread.fpexc_mode);
	}
#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		load_vr_state(&current->thread.vr_state);
		regs->msr |= MSR_VEC;
	}
#endif

	return 0;
}
877#endif
878
Stephen Rothwell81e70092005-10-18 11:17:58 +1000879#ifdef CONFIG_PPC64
Stephen Rothwell81e70092005-10-18 11:17:58 +1000880
881#define copy_siginfo_to_user copy_siginfo_to_user32
882
Stephen Rothwell81e70092005-10-18 11:17:58 +1000883#endif /* CONFIG_PPC64 */
884
Stephen Rothwell81e70092005-10-18 11:17:58 +1000885/*
886 * Set up a signal frame for a "real-time" signal handler
887 * (one which gets siginfo).
888 */
Richard Weinberger129b69d2014-03-02 14:46:11 +0100889int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
Cyril Burd1199432016-09-23 16:18:12 +1000890 struct task_struct *tsk)
Stephen Rothwell81e70092005-10-18 11:17:58 +1000891{
892 struct rt_sigframe __user *rt_sf;
893 struct mcontext __user *frame;
Michael Neuling1d25f112013-06-09 21:23:15 +1000894 struct mcontext __user *tm_frame = NULL;
Olof Johanssond0c3d532007-10-12 10:20:07 +1000895 void __user *addr;
Benjamin Herrenschmidta3f61dc2007-06-04 17:22:48 +1000896 unsigned long newsp = 0;
Michael Neuling2b0a5762013-02-13 16:21:41 +0000897 int sigret;
898 unsigned long tramp;
Cyril Burd1199432016-09-23 16:18:12 +1000899 struct pt_regs *regs = tsk->thread.regs;
900
901 BUG_ON(tsk != current);
Stephen Rothwell81e70092005-10-18 11:17:58 +1000902
903 /* Set up Signal Frame */
904 /* Put a Real Time Context onto stack */
Cyril Burd1199432016-09-23 16:18:12 +1000905 rt_sf = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*rt_sf), 1);
Olof Johanssond0c3d532007-10-12 10:20:07 +1000906 addr = rt_sf;
Benjamin Herrenschmidta3f61dc2007-06-04 17:22:48 +1000907 if (unlikely(rt_sf == NULL))
Stephen Rothwell81e70092005-10-18 11:17:58 +1000908 goto badframe;
909
910 /* Put the siginfo & fill in most of the ucontext */
Richard Weinberger129b69d2014-03-02 14:46:11 +0100911 if (copy_siginfo_to_user(&rt_sf->info, &ksig->info)
Stephen Rothwell81e70092005-10-18 11:17:58 +1000912 || __put_user(0, &rt_sf->uc.uc_flags)
Al Viro7cce2462012-12-23 03:26:46 -0500913 || __save_altstack(&rt_sf->uc.uc_stack, regs->gpr[1])
Stephen Rothwell81e70092005-10-18 11:17:58 +1000914 || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext),
915 &rt_sf->uc.uc_regs)
916 || put_sigset_t(&rt_sf->uc.uc_sigmask, oldset))
917 goto badframe;
918
919 /* Save user registers on the stack */
920 frame = &rt_sf->uc.uc_mcontext;
Olof Johanssond0c3d532007-10-12 10:20:07 +1000921 addr = frame;
Cyril Burd1199432016-09-23 16:18:12 +1000922 if (vdso32_rt_sigtramp && tsk->mm->context.vdso_base) {
Michael Neuling2b0a5762013-02-13 16:21:41 +0000923 sigret = 0;
Cyril Burd1199432016-09-23 16:18:12 +1000924 tramp = tsk->mm->context.vdso_base + vdso32_rt_sigtramp;
Benjamin Herrenschmidta7f290d2005-11-11 21:15:21 +1100925 } else {
Michael Neuling2b0a5762013-02-13 16:21:41 +0000926 sigret = __NR_rt_sigreturn;
927 tramp = (unsigned long) frame->tramp;
Stephen Rothwell81e70092005-10-18 11:17:58 +1000928 }
Paul Mackerrascc657f52005-11-14 21:55:15 +1100929
Michael Neuling2b0a5762013-02-13 16:21:41 +0000930#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
Michael Neuling1d25f112013-06-09 21:23:15 +1000931 tm_frame = &rt_sf->uc_transact.uc_mcontext;
Michael Neuling2b0a5762013-02-13 16:21:41 +0000932 if (MSR_TM_ACTIVE(regs->msr)) {
Paul Mackerrasd765ff22014-01-29 16:33:56 +1100933 if (__put_user((unsigned long)&rt_sf->uc_transact,
934 &rt_sf->uc.uc_link) ||
935 __put_user((unsigned long)tm_frame,
936 &rt_sf->uc_transact.uc_regs))
937 goto badframe;
Michael Neuling1d25f112013-06-09 21:23:15 +1000938 if (save_tm_user_regs(regs, frame, tm_frame, sigret))
Michael Neuling2b0a5762013-02-13 16:21:41 +0000939 goto badframe;
940 }
941 else
942#endif
Michael Neuling1d25f112013-06-09 21:23:15 +1000943 {
Paul Mackerrasd765ff22014-01-29 16:33:56 +1100944 if (__put_user(0, &rt_sf->uc.uc_link))
945 goto badframe;
Michael Neuling1d25f112013-06-09 21:23:15 +1000946 if (save_user_regs(regs, frame, tm_frame, sigret, 1))
Michael Neuling2b0a5762013-02-13 16:21:41 +0000947 goto badframe;
Michael Neuling1d25f112013-06-09 21:23:15 +1000948 }
Michael Neuling2b0a5762013-02-13 16:21:41 +0000949 regs->link = tramp;
950
Cyril Burd1199432016-09-23 16:18:12 +1000951 tsk->thread.fp_state.fpscr = 0; /* turn off all fp exceptions */
Paul Mackerrascc657f52005-11-14 21:55:15 +1100952
Benjamin Herrenschmidta3f61dc2007-06-04 17:22:48 +1000953 /* create a stack frame for the caller of the handler */
954 newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16);
Olof Johanssond0c3d532007-10-12 10:20:07 +1000955 addr = (void __user *)regs->gpr[1];
Paul Mackerrase2b55302005-10-22 14:46:33 +1000956 if (put_user(regs->gpr[1], (u32 __user *)newsp))
Stephen Rothwell81e70092005-10-18 11:17:58 +1000957 goto badframe;
Benjamin Herrenschmidta3f61dc2007-06-04 17:22:48 +1000958
959 /* Fill registers for signal handler */
Stephen Rothwell81e70092005-10-18 11:17:58 +1000960 regs->gpr[1] = newsp;
Richard Weinberger129b69d2014-03-02 14:46:11 +0100961 regs->gpr[3] = ksig->sig;
Stephen Rothwell81e70092005-10-18 11:17:58 +1000962 regs->gpr[4] = (unsigned long) &rt_sf->info;
963 regs->gpr[5] = (unsigned long) &rt_sf->uc;
964 regs->gpr[6] = (unsigned long) rt_sf;
Richard Weinberger129b69d2014-03-02 14:46:11 +0100965 regs->nip = (unsigned long) ksig->ka.sa.sa_handler;
Anton Blancharde871c6b2013-09-23 12:04:43 +1000966 /* enter the signal handler in native-endian mode */
Paul Mackerrasfab5db92006-06-07 16:14:40 +1000967 regs->msr &= ~MSR_LE;
Anton Blancharde871c6b2013-09-23 12:04:43 +1000968 regs->msr |= (MSR_KERNEL & MSR_LE);
Richard Weinberger129b69d2014-03-02 14:46:11 +0100969 return 0;
Stephen Rothwell81e70092005-10-18 11:17:58 +1000970
971badframe:
Christian Dietrich76462232011-06-04 05:36:54 +0000972 if (show_unhandled_signals)
973 printk_ratelimited(KERN_INFO
974 "%s[%d]: bad frame in handle_rt_signal32: "
975 "%p nip %08lx lr %08lx\n",
Cyril Burd1199432016-09-23 16:18:12 +1000976 tsk->comm, tsk->pid,
Christian Dietrich76462232011-06-04 05:36:54 +0000977 addr, regs->nip, regs->link);
Olof Johanssond0c3d532007-10-12 10:20:07 +1000978
Richard Weinberger129b69d2014-03-02 14:46:11 +0100979 return 1;
Stephen Rothwell81e70092005-10-18 11:17:58 +1000980}
981
/*
 * Install the signal mask and register state from the user ucontext @ucp
 * into @regs.  @sig selects the restore flavour passed through to
 * restore_user_regs() (non-zero for a signal return).
 *
 * Returns 0 on success, -EFAULT on any fault reading the context.
 */
static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
{
	sigset_t set;
	struct mcontext __user *mcp;

	if (get_sigset_t(&set, &ucp->uc_sigmask))
		return -EFAULT;
#ifdef CONFIG_PPC64
	{
		/* Compat task: uc_regs is a 32-bit user pointer */
		u32 cmcp;

		if (__get_user(cmcp, &ucp->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* no need to check access_ok(mcp), since mcp < 4GB */
	}
#else
	if (__get_user(mcp, &ucp->uc_regs))
		return -EFAULT;
	if (!access_ok(VERIFY_READ, mcp, sizeof(*mcp)))
		return -EFAULT;
#endif
	/* Set the new blocked mask before touching the register state */
	set_current_blocked(&set);
	if (restore_user_regs(regs, mcp, sig))
		return -EFAULT;

	return 0;
}
1010
Michael Neuling2b0a5762013-02-13 16:21:41 +00001011#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * TM variant of do_setcontext(): install the signal mask from @ucp and
 * restore both the checkpointed (@ucp) and transactional (@tm_ucp)
 * register state via restore_tm_user_regs().
 *
 * Only reached from the PPC64 compat path, so both uc_regs fields are
 * 32-bit user pointers.  Returns 0 on success, -EFAULT on fault.
 */
static int do_setcontext_tm(struct ucontext __user *ucp,
			    struct ucontext __user *tm_ucp,
			    struct pt_regs *regs)
{
	sigset_t set;
	struct mcontext __user *mcp;
	struct mcontext __user *tm_mcp;
	u32 cmcp;
	u32 tm_cmcp;

	if (get_sigset_t(&set, &ucp->uc_sigmask))
		return -EFAULT;

	if (__get_user(cmcp, &ucp->uc_regs) ||
	    __get_user(tm_cmcp, &tm_ucp->uc_regs))
		return -EFAULT;
	mcp = (struct mcontext __user *)(u64)cmcp;
	tm_mcp = (struct mcontext __user *)(u64)tm_cmcp;
	/* no need to check access_ok(mcp), since mcp < 4GB */

	/* Mask first, then registers — mirrors do_setcontext() */
	set_current_blocked(&set);
	if (restore_tm_user_regs(regs, mcp, tm_mcp))
		return -EFAULT;

	return 0;
}
1038#endif
1039
Al Virof3675642018-05-02 23:20:47 +10001040#ifdef CONFIG_PPC64
1041COMPAT_SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
1042 struct ucontext __user *, new_ctx, int, ctx_size)
1043#else
1044SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
1045 struct ucontext __user *, new_ctx, long, ctx_size)
1046#endif
Stephen Rothwell81e70092005-10-18 11:17:58 +10001047{
Al Virof3675642018-05-02 23:20:47 +10001048 struct pt_regs *regs = current_pt_regs();
Mathieu Malaterre67b464a2018-02-25 18:22:19 +01001049 unsigned char tmp __maybe_unused;
Michael Neuling16c29d12008-10-23 00:42:36 +00001050 int ctx_has_vsx_region = 0;
Stephen Rothwell81e70092005-10-18 11:17:58 +10001051
Michael Neulingc1cb2992008-07-08 18:43:41 +10001052#ifdef CONFIG_PPC64
1053 unsigned long new_msr = 0;
1054
Andreas Schwab77eb50a2008-11-06 00:49:00 +00001055 if (new_ctx) {
1056 struct mcontext __user *mcp;
1057 u32 cmcp;
1058
1059 /*
1060 * Get pointer to the real mcontext. No need for
1061 * access_ok since we are dealing with compat
1062 * pointers.
1063 */
1064 if (__get_user(cmcp, &new_ctx->uc_regs))
1065 return -EFAULT;
1066 mcp = (struct mcontext __user *)(u64)cmcp;
1067 if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR]))
1068 return -EFAULT;
1069 }
Michael Neulingc1cb2992008-07-08 18:43:41 +10001070 /*
1071 * Check that the context is not smaller than the original
1072 * size (with VMX but without VSX)
1073 */
1074 if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
1075 return -EINVAL;
1076 /*
1077 * If the new context state sets the MSR VSX bits but
1078 * it doesn't provide VSX state.
1079 */
1080 if ((ctx_size < sizeof(struct ucontext)) &&
1081 (new_msr & MSR_VSX))
1082 return -EINVAL;
Michael Neuling16c29d12008-10-23 00:42:36 +00001083 /* Does the context have enough room to store VSX data? */
1084 if (ctx_size >= sizeof(struct ucontext))
1085 ctx_has_vsx_region = 1;
Michael Neulingc1cb2992008-07-08 18:43:41 +10001086#else
Stephen Rothwell81e70092005-10-18 11:17:58 +10001087 /* Context size is for future use. Right now, we only make sure
1088 * we are passed something we understand
1089 */
1090 if (ctx_size < sizeof(struct ucontext))
1091 return -EINVAL;
Michael Neulingc1cb2992008-07-08 18:43:41 +10001092#endif
Stephen Rothwell81e70092005-10-18 11:17:58 +10001093 if (old_ctx != NULL) {
Paul Mackerras1c9bb1a2006-12-20 13:57:06 +11001094 struct mcontext __user *mctx;
1095
1096 /*
1097 * old_ctx might not be 16-byte aligned, in which
1098 * case old_ctx->uc_mcontext won't be either.
1099 * Because we have the old_ctx->uc_pad2 field
1100 * before old_ctx->uc_mcontext, we need to round down
1101 * from &old_ctx->uc_mcontext to a 16-byte boundary.
1102 */
1103 mctx = (struct mcontext __user *)
1104 ((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
Michael Neuling16c29d12008-10-23 00:42:36 +00001105 if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size)
Michael Neuling1d25f112013-06-09 21:23:15 +10001106 || save_user_regs(regs, mctx, NULL, 0, ctx_has_vsx_region)
Stephen Rothwell81e70092005-10-18 11:17:58 +10001107 || put_sigset_t(&old_ctx->uc_sigmask, &current->blocked)
Paul Mackerras1c9bb1a2006-12-20 13:57:06 +11001108 || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs))
Stephen Rothwell81e70092005-10-18 11:17:58 +10001109 return -EFAULT;
1110 }
1111 if (new_ctx == NULL)
1112 return 0;
Michael Neuling16c29d12008-10-23 00:42:36 +00001113 if (!access_ok(VERIFY_READ, new_ctx, ctx_size)
Stephen Rothwell81e70092005-10-18 11:17:58 +10001114 || __get_user(tmp, (u8 __user *) new_ctx)
Michael Neuling16c29d12008-10-23 00:42:36 +00001115 || __get_user(tmp, (u8 __user *) new_ctx + ctx_size - 1))
Stephen Rothwell81e70092005-10-18 11:17:58 +10001116 return -EFAULT;
1117
1118 /*
1119 * If we get a fault copying the context into the kernel's
1120 * image of the user's registers, we can't just return -EFAULT
1121 * because the user's registers will be corrupted. For instance
1122 * the NIP value may have been updated but not some of the
1123 * other registers. Given that we have done the access_ok
1124 * and successfully read the first and last bytes of the region
1125 * above, this should only happen in an out-of-memory situation
1126 * or if another thread unmaps the region containing the context.
1127 * We kill the task with a SIGSEGV in this situation.
1128 */
1129 if (do_setcontext(new_ctx, regs, 0))
1130 do_exit(SIGSEGV);
David Woodhouse401d1f02005-11-15 18:52:18 +00001131
1132 set_thread_flag(TIF_RESTOREALL);
Stephen Rothwell81e70092005-10-18 11:17:58 +10001133 return 0;
1134}
1135
Al Virof3675642018-05-02 23:20:47 +10001136#ifdef CONFIG_PPC64
1137COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
1138#else
1139SYSCALL_DEFINE0(rt_sigreturn)
1140#endif
Stephen Rothwell81e70092005-10-18 11:17:58 +10001141{
1142 struct rt_sigframe __user *rt_sf;
Al Virof3675642018-05-02 23:20:47 +10001143 struct pt_regs *regs = current_pt_regs();
Michael Neuling2b0a5762013-02-13 16:21:41 +00001144#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1145 struct ucontext __user *uc_transact;
1146 unsigned long msr_hi;
1147 unsigned long tmp;
1148 int tm_restore = 0;
1149#endif
Stephen Rothwell81e70092005-10-18 11:17:58 +10001150 /* Always make any pending restarted system calls return -EINTR */
Andy Lutomirskif56141e2015-02-12 15:01:14 -08001151 current->restart_block.fn = do_no_restart_syscall;
Stephen Rothwell81e70092005-10-18 11:17:58 +10001152
1153 rt_sf = (struct rt_sigframe __user *)
1154 (regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
1155 if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf)))
1156 goto bad;
Cyril Bur78a3e882016-08-23 10:46:17 +10001157
Michael Neuling2b0a5762013-02-13 16:21:41 +00001158#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
Cyril Bur78a3e882016-08-23 10:46:17 +10001159 /*
1160 * If there is a transactional state then throw it away.
1161 * The purpose of a sigreturn is to destroy all traces of the
1162 * signal frame, this includes any transactional state created
1163 * within in. We only check for suspended as we can never be
1164 * active in the kernel, we are active, there is nothing better to
1165 * do than go ahead and Bad Thing later.
1166 * The cause is not important as there will never be a
1167 * recheckpoint so it's not user visible.
1168 */
1169 if (MSR_TM_SUSPENDED(mfmsr()))
1170 tm_reclaim_current(0);
1171
Michael Neuling2b0a5762013-02-13 16:21:41 +00001172 if (__get_user(tmp, &rt_sf->uc.uc_link))
1173 goto bad;
1174 uc_transact = (struct ucontext __user *)(uintptr_t)tmp;
1175 if (uc_transact) {
1176 u32 cmcp;
1177 struct mcontext __user *mcp;
1178
1179 if (__get_user(cmcp, &uc_transact->uc_regs))
1180 return -EFAULT;
1181 mcp = (struct mcontext __user *)(u64)cmcp;
1182 /* The top 32 bits of the MSR are stashed in the transactional
1183 * ucontext. */
1184 if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
1185 goto bad;
1186
Michael Neuling55e43412013-06-09 21:23:18 +10001187 if (MSR_TM_ACTIVE(msr_hi<<32)) {
Michael Neuling2b0a5762013-02-13 16:21:41 +00001188 /* We only recheckpoint on return if we're
1189 * transaction.
1190 */
1191 tm_restore = 1;
1192 if (do_setcontext_tm(&rt_sf->uc, uc_transact, regs))
1193 goto bad;
1194 }
1195 }
1196 if (!tm_restore)
1197 /* Fall through, for non-TM restore */
1198#endif
Stephen Rothwell81e70092005-10-18 11:17:58 +10001199 if (do_setcontext(&rt_sf->uc, regs, 1))
1200 goto bad;
1201
1202 /*
1203 * It's not clear whether or why it is desirable to save the
1204 * sigaltstack setting on signal delivery and restore it on
1205 * signal return. But other architectures do this and we have
1206 * always done it up until now so it is probably better not to
1207 * change it. -- paulus
1208 */
1209#ifdef CONFIG_PPC64
Al Viro7cce2462012-12-23 03:26:46 -05001210 if (compat_restore_altstack(&rt_sf->uc.uc_stack))
1211 goto bad;
Stephen Rothwell81e70092005-10-18 11:17:58 +10001212#else
Al Viro7cce2462012-12-23 03:26:46 -05001213 if (restore_altstack(&rt_sf->uc.uc_stack))
1214 goto bad;
Stephen Rothwell81e70092005-10-18 11:17:58 +10001215#endif
David Woodhouse401d1f02005-11-15 18:52:18 +00001216 set_thread_flag(TIF_RESTOREALL);
1217 return 0;
Stephen Rothwell81e70092005-10-18 11:17:58 +10001218
1219 bad:
Christian Dietrich76462232011-06-04 05:36:54 +00001220 if (show_unhandled_signals)
1221 printk_ratelimited(KERN_INFO
1222 "%s[%d]: bad frame in sys_rt_sigreturn: "
1223 "%p nip %08lx lr %08lx\n",
1224 current->comm, current->pid,
1225 rt_sf, regs->nip, regs->link);
Olof Johanssond0c3d532007-10-12 10:20:07 +10001226
Stephen Rothwell81e70092005-10-18 11:17:58 +10001227 force_sig(SIGSEGV, current);
1228 return 0;
1229}
1230
1231#ifdef CONFIG_PPC32
Al Virof3675642018-05-02 23:20:47 +10001232SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
1233 int, ndbg, struct sig_dbg_op __user *, dbg)
Stephen Rothwell81e70092005-10-18 11:17:58 +10001234{
Al Virof3675642018-05-02 23:20:47 +10001235 struct pt_regs *regs = current_pt_regs();
Stephen Rothwell81e70092005-10-18 11:17:58 +10001236 struct sig_dbg_op op;
1237 int i;
Mathieu Malaterre67b464a2018-02-25 18:22:19 +01001238 unsigned char tmp __maybe_unused;
Stephen Rothwell81e70092005-10-18 11:17:58 +10001239 unsigned long new_msr = regs->msr;
Dave Kleikamp172ae2e2010-02-08 11:50:57 +00001240#ifdef CONFIG_PPC_ADV_DEBUG_REGS
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05301241 unsigned long new_dbcr0 = current->thread.debug.dbcr0;
Stephen Rothwell81e70092005-10-18 11:17:58 +10001242#endif
1243
1244 for (i=0; i<ndbg; i++) {
Paul Mackerras7c85d1f2006-06-09 13:02:59 +10001245 if (copy_from_user(&op, dbg + i, sizeof(op)))
Stephen Rothwell81e70092005-10-18 11:17:58 +10001246 return -EFAULT;
1247 switch (op.dbg_type) {
1248 case SIG_DBG_SINGLE_STEPPING:
Dave Kleikamp172ae2e2010-02-08 11:50:57 +00001249#ifdef CONFIG_PPC_ADV_DEBUG_REGS
Stephen Rothwell81e70092005-10-18 11:17:58 +10001250 if (op.dbg_value) {
1251 new_msr |= MSR_DE;
1252 new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
1253 } else {
Dave Kleikamp3bffb652010-02-08 11:51:18 +00001254 new_dbcr0 &= ~DBCR0_IC;
1255 if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05301256 current->thread.debug.dbcr1)) {
Dave Kleikamp3bffb652010-02-08 11:51:18 +00001257 new_msr &= ~MSR_DE;
1258 new_dbcr0 &= ~DBCR0_IDM;
1259 }
Stephen Rothwell81e70092005-10-18 11:17:58 +10001260 }
1261#else
1262 if (op.dbg_value)
1263 new_msr |= MSR_SE;
1264 else
1265 new_msr &= ~MSR_SE;
1266#endif
1267 break;
1268 case SIG_DBG_BRANCH_TRACING:
Dave Kleikamp172ae2e2010-02-08 11:50:57 +00001269#ifdef CONFIG_PPC_ADV_DEBUG_REGS
Stephen Rothwell81e70092005-10-18 11:17:58 +10001270 return -EINVAL;
1271#else
1272 if (op.dbg_value)
1273 new_msr |= MSR_BE;
1274 else
1275 new_msr &= ~MSR_BE;
1276#endif
1277 break;
1278
1279 default:
1280 return -EINVAL;
1281 }
1282 }
1283
1284 /* We wait until here to actually install the values in the
1285 registers so if we fail in the above loop, it will not
1286 affect the contents of these registers. After this point,
1287 failure is a problem, anyway, and it's very unlikely unless
1288 the user is really doing something wrong. */
1289 regs->msr = new_msr;
Dave Kleikamp172ae2e2010-02-08 11:50:57 +00001290#ifdef CONFIG_PPC_ADV_DEBUG_REGS
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05301291 current->thread.debug.dbcr0 = new_dbcr0;
Stephen Rothwell81e70092005-10-18 11:17:58 +10001292#endif
1293
Paul Mackerras7c85d1f2006-06-09 13:02:59 +10001294 if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx))
1295 || __get_user(tmp, (u8 __user *) ctx)
1296 || __get_user(tmp, (u8 __user *) (ctx + 1) - 1))
1297 return -EFAULT;
1298
Stephen Rothwell81e70092005-10-18 11:17:58 +10001299 /*
1300 * If we get a fault copying the context into the kernel's
1301 * image of the user's registers, we can't just return -EFAULT
1302 * because the user's registers will be corrupted. For instance
1303 * the NIP value may have been updated but not some of the
1304 * other registers. Given that we have done the access_ok
1305 * and successfully read the first and last bytes of the region
1306 * above, this should only happen in an out-of-memory situation
1307 * or if another thread unmaps the region containing the context.
1308 * We kill the task with a SIGSEGV in this situation.
1309 */
1310 if (do_setcontext(ctx, regs, 1)) {
Christian Dietrich76462232011-06-04 05:36:54 +00001311 if (show_unhandled_signals)
1312 printk_ratelimited(KERN_INFO "%s[%d]: bad frame in "
1313 "sys_debug_setcontext: %p nip %08lx "
1314 "lr %08lx\n",
1315 current->comm, current->pid,
1316 ctx, regs->nip, regs->link);
Olof Johanssond0c3d532007-10-12 10:20:07 +10001317
Stephen Rothwell81e70092005-10-18 11:17:58 +10001318 force_sig(SIGSEGV, current);
1319 goto out;
1320 }
1321
1322 /*
1323 * It's not clear whether or why it is desirable to save the
1324 * sigaltstack setting on signal delivery and restore it on
1325 * signal return. But other architectures do this and we have
1326 * always done it up until now so it is probably better not to
1327 * change it. -- paulus
1328 */
Al Viro7cce2462012-12-23 03:26:46 -05001329 restore_altstack(&ctx->uc_stack);
Stephen Rothwell81e70092005-10-18 11:17:58 +10001330
David Woodhouse401d1f02005-11-15 18:52:18 +00001331 set_thread_flag(TIF_RESTOREALL);
Stephen Rothwell81e70092005-10-18 11:17:58 +10001332 out:
1333 return 0;
1334}
1335#endif
1336
1337/*
1338 * OK, we're invoking a handler
1339 */
Cyril Burd1199432016-09-23 16:18:12 +10001340int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
1341 struct task_struct *tsk)
Stephen Rothwell81e70092005-10-18 11:17:58 +10001342{
1343 struct sigcontext __user *sc;
Benjamin Herrenschmidta3f61dc2007-06-04 17:22:48 +10001344 struct sigframe __user *frame;
Michael Neuling1d25f112013-06-09 21:23:15 +10001345 struct mcontext __user *tm_mctx = NULL;
Benjamin Herrenschmidta3f61dc2007-06-04 17:22:48 +10001346 unsigned long newsp = 0;
Michael Neuling2b0a5762013-02-13 16:21:41 +00001347 int sigret;
1348 unsigned long tramp;
Cyril Burd1199432016-09-23 16:18:12 +10001349 struct pt_regs *regs = tsk->thread.regs;
1350
1351 BUG_ON(tsk != current);
Stephen Rothwell81e70092005-10-18 11:17:58 +10001352
1353 /* Set up Signal Frame */
Cyril Burd1199432016-09-23 16:18:12 +10001354 frame = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*frame), 1);
Benjamin Herrenschmidta3f61dc2007-06-04 17:22:48 +10001355 if (unlikely(frame == NULL))
Stephen Rothwell81e70092005-10-18 11:17:58 +10001356 goto badframe;
Benjamin Herrenschmidta3f61dc2007-06-04 17:22:48 +10001357 sc = (struct sigcontext __user *) &frame->sctx;
Stephen Rothwell81e70092005-10-18 11:17:58 +10001358
1359#if _NSIG != 64
1360#error "Please adjust handle_signal()"
1361#endif
Richard Weinberger129b69d2014-03-02 14:46:11 +01001362 if (__put_user(to_user_ptr(ksig->ka.sa.sa_handler), &sc->handler)
Stephen Rothwell81e70092005-10-18 11:17:58 +10001363 || __put_user(oldset->sig[0], &sc->oldmask)
1364#ifdef CONFIG_PPC64
1365 || __put_user((oldset->sig[0] >> 32), &sc->_unused[3])
1366#else
1367 || __put_user(oldset->sig[1], &sc->_unused[3])
1368#endif
Benjamin Herrenschmidta3f61dc2007-06-04 17:22:48 +10001369 || __put_user(to_user_ptr(&frame->mctx), &sc->regs)
Richard Weinberger129b69d2014-03-02 14:46:11 +01001370 || __put_user(ksig->sig, &sc->signal))
Stephen Rothwell81e70092005-10-18 11:17:58 +10001371 goto badframe;
1372
Cyril Burd1199432016-09-23 16:18:12 +10001373 if (vdso32_sigtramp && tsk->mm->context.vdso_base) {
Michael Neuling2b0a5762013-02-13 16:21:41 +00001374 sigret = 0;
Cyril Burd1199432016-09-23 16:18:12 +10001375 tramp = tsk->mm->context.vdso_base + vdso32_sigtramp;
Benjamin Herrenschmidta7f290d2005-11-11 21:15:21 +11001376 } else {
Michael Neuling2b0a5762013-02-13 16:21:41 +00001377 sigret = __NR_sigreturn;
1378 tramp = (unsigned long) frame->mctx.tramp;
Stephen Rothwell81e70092005-10-18 11:17:58 +10001379 }
1380
Michael Neuling2b0a5762013-02-13 16:21:41 +00001381#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
Michael Neuling1d25f112013-06-09 21:23:15 +10001382 tm_mctx = &frame->mctx_transact;
Michael Neuling2b0a5762013-02-13 16:21:41 +00001383 if (MSR_TM_ACTIVE(regs->msr)) {
1384 if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
1385 sigret))
1386 goto badframe;
1387 }
1388 else
1389#endif
Michael Neuling1d25f112013-06-09 21:23:15 +10001390 {
1391 if (save_user_regs(regs, &frame->mctx, tm_mctx, sigret, 1))
Michael Neuling2b0a5762013-02-13 16:21:41 +00001392 goto badframe;
Michael Neuling1d25f112013-06-09 21:23:15 +10001393 }
Michael Neuling2b0a5762013-02-13 16:21:41 +00001394
1395 regs->link = tramp;
1396
Cyril Burd1199432016-09-23 16:18:12 +10001397 tsk->thread.fp_state.fpscr = 0; /* turn off all fp exceptions */
Paul Mackerrascc657f52005-11-14 21:55:15 +11001398
Benjamin Herrenschmidta3f61dc2007-06-04 17:22:48 +10001399 /* create a stack frame for the caller of the handler */
1400 newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
Stephen Rothwell81e70092005-10-18 11:17:58 +10001401 if (put_user(regs->gpr[1], (u32 __user *)newsp))
1402 goto badframe;
Benjamin Herrenschmidta3f61dc2007-06-04 17:22:48 +10001403
Stephen Rothwell81e70092005-10-18 11:17:58 +10001404 regs->gpr[1] = newsp;
Richard Weinberger129b69d2014-03-02 14:46:11 +01001405 regs->gpr[3] = ksig->sig;
Stephen Rothwell81e70092005-10-18 11:17:58 +10001406 regs->gpr[4] = (unsigned long) sc;
Richard Weinberger129b69d2014-03-02 14:46:11 +01001407 regs->nip = (unsigned long) (unsigned long)ksig->ka.sa.sa_handler;
Paul Mackerrasfab5db92006-06-07 16:14:40 +10001408 /* enter the signal handler in big-endian mode */
1409 regs->msr &= ~MSR_LE;
Richard Weinberger129b69d2014-03-02 14:46:11 +01001410 return 0;
Stephen Rothwell81e70092005-10-18 11:17:58 +10001411
1412badframe:
Christian Dietrich76462232011-06-04 05:36:54 +00001413 if (show_unhandled_signals)
1414 printk_ratelimited(KERN_INFO
1415 "%s[%d]: bad frame in handle_signal32: "
1416 "%p nip %08lx lr %08lx\n",
Cyril Burd1199432016-09-23 16:18:12 +10001417 tsk->comm, tsk->pid,
Christian Dietrich76462232011-06-04 05:36:54 +00001418 frame, regs->nip, regs->link);
Olof Johanssond0c3d532007-10-12 10:20:07 +10001419
Richard Weinberger129b69d2014-03-02 14:46:11 +01001420 return 1;
Stephen Rothwell81e70092005-10-18 11:17:58 +10001421}
1422
/*
 * Do a signal return; undo the signal stack.
 *
 * Invoked (normally via the signal trampoline) after a handler set up by
 * the non-RT sigframe path returns.  Restores the blocked-signal mask and
 * the saved user register state from the frame on the user stack, then
 * returns to the interrupted context.  On 64-bit kernels this is the
 * compat entry point for 32-bit tasks.
 */
#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE0(sigreturn)
#else
SYSCALL_DEFINE0(sigreturn)
#endif
{
	struct pt_regs *regs = current_pt_regs();
	struct sigframe __user *sf;
	struct sigcontext __user *sc;
	struct sigcontext sigctx;	/* kernel copy of the user sigcontext */
	struct mcontext __user *sr;
	void __user *addr;		/* last user address touched, for the badframe report */
	sigset_t set;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct mcontext __user *mcp, *tm_mcp;
	unsigned long msr_hi;
#endif

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/*
	 * The signal frame was pushed immediately above the caller's
	 * __SIGNAL_FRAMESIZE back-chain area; r1 is the user stack pointer.
	 */
	sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
	sc = &sf->sctx;
	addr = sc;
	if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
		goto badframe;

#ifdef CONFIG_PPC64
	/*
	 * Note that PPC32 puts the upper 32 bits of the sigmask in the
	 * unused part of the signal stackframe
	 */
	set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
#else
	set.sig[0] = sigctx.oldmask;
	set.sig[1] = sigctx._unused[3];
#endif
	set_current_blocked(&set);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * Peek at the MSR image saved in the transactional context.  The
	 * frame stores the upper 32 MSR bits there; shifting them back up
	 * lets MSR_TM_ACTIVE() test the transaction-state bits.  If a
	 * transaction was active at frame setup, restore both contexts via
	 * the TM path; a TM-marked frame on non-TM hardware is treated as
	 * a bad frame.
	 */
	mcp = (struct mcontext __user *)&sf->mctx;
	tm_mcp = (struct mcontext __user *)&sf->mctx_transact;
	if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
		goto badframe;
	if (MSR_TM_ACTIVE(msr_hi<<32)) {
		if (!cpu_has_feature(CPU_FTR_TM))
			goto badframe;
		if (restore_tm_user_regs(regs, mcp, tm_mcp))
			goto badframe;
	} else
#endif
	{
		/*
		 * Normal (non-transactional) path: the register context
		 * lives wherever the frame's sigcontext.regs pointer says,
		 * so validate that user pointer before restoring from it.
		 */
		sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
		addr = sr;
		if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
		    || restore_user_regs(regs, sr, 1))
			goto badframe;
	}

	/* Tell the syscall exit path to restore all registers, not just the
	 * usual return-value subset, since we rewrote the whole pt_regs. */
	set_thread_flag(TIF_RESTOREALL);
	return 0;

badframe:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in sys_sigreturn: "
				   "%p nip %08lx lr %08lx\n",
				   current->comm, current->pid,
				   addr, regs->nip, regs->link);

	/* Corrupt/inaccessible frame: kill the task.  The syscall return
	 * value is irrelevant once the fatal SIGSEGV is queued. */
	force_sig(SIGSEGV, current);
	return 0;
}