/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Derived from "arch/m68k/kernel/ptrace.c"
 * Copyright (C) 1994 by Hamish Macdonald
 * Taken from linux/kernel/ptrace.c and modified for M680x0.
 * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
 *
 * Modified by Cort Dougan (cort@hq.fsmlabs.com)
 * and Paul Mackerras (paulus@samba.org).
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file README.legal in the main directory of
 * this archive for more details.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/elf.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/seccomp.h>
#include <linux/audit.h>
#include <trace/syscall.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/context_tracking.h>
#include <linux/nospec.h>

#include <linux/uaccess.h>
#include <linux/pkeys.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/asm-prototypes.h>
#include <asm/debug.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/*
 * The parameter save area on the stack is used to store arguments being
 * passed to the callee function and is located at a fixed offset from the
 * stack pointer.
 */
#ifdef CONFIG_PPC32
#define PARAMETER_SAVE_AREA_OFFSET	24  /* bytes */
#else /* CONFIG_PPC32 */
#define PARAMETER_SAVE_AREA_OFFSET	48  /* bytes */
#endif

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define STR(s)	#s			/* convert to string */
#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define GPR_OFFSET_NAME(num)	\
	{.name = STR(r##num), .offset = offsetof(struct pt_regs, gpr[num])}, \
	{.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
#define REG_OFFSET_END {.name = NULL, .offset = 0}

#define TVSO(f)	(offsetof(struct thread_vr_state, f))
#define TFSO(f)	(offsetof(struct thread_fp_state, f))
#define TSO(f)	(offsetof(struct thread_struct, f))

static const struct pt_regs_offset regoffset_table[] = {
	GPR_OFFSET_NAME(0),
	GPR_OFFSET_NAME(1),
	GPR_OFFSET_NAME(2),
	GPR_OFFSET_NAME(3),
	GPR_OFFSET_NAME(4),
	GPR_OFFSET_NAME(5),
	GPR_OFFSET_NAME(6),
	GPR_OFFSET_NAME(7),
	GPR_OFFSET_NAME(8),
	GPR_OFFSET_NAME(9),
	GPR_OFFSET_NAME(10),
	GPR_OFFSET_NAME(11),
	GPR_OFFSET_NAME(12),
	GPR_OFFSET_NAME(13),
	GPR_OFFSET_NAME(14),
	GPR_OFFSET_NAME(15),
	GPR_OFFSET_NAME(16),
	GPR_OFFSET_NAME(17),
	GPR_OFFSET_NAME(18),
	GPR_OFFSET_NAME(19),
	GPR_OFFSET_NAME(20),
	GPR_OFFSET_NAME(21),
	GPR_OFFSET_NAME(22),
	GPR_OFFSET_NAME(23),
	GPR_OFFSET_NAME(24),
	GPR_OFFSET_NAME(25),
	GPR_OFFSET_NAME(26),
	GPR_OFFSET_NAME(27),
	GPR_OFFSET_NAME(28),
	GPR_OFFSET_NAME(29),
	GPR_OFFSET_NAME(30),
	GPR_OFFSET_NAME(31),
	REG_OFFSET_NAME(nip),
	REG_OFFSET_NAME(msr),
	REG_OFFSET_NAME(ctr),
	REG_OFFSET_NAME(link),
	REG_OFFSET_NAME(xer),
	REG_OFFSET_NAME(ccr),
#ifdef CONFIG_PPC64
	REG_OFFSET_NAME(softe),
#else
	REG_OFFSET_NAME(mq),
#endif
	REG_OFFSET_NAME(trap),
	REG_OFFSET_NAME(dar),
	REG_OFFSET_NAME(dsisr),
	REG_OFFSET_END,
};

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static void flush_tmregs_to_thread(struct task_struct *tsk)
{
	/*
	 * If task is not current, it will have been flushed already to
	 * its thread_struct during __switch_to().
	 *
	 * A reclaim flushes ALL the state; otherwise, if not in a
	 * transaction, just save the TM SPRs from the live registers
	 * into the appropriate thread structures.
	 */

	if ((!cpu_has_feature(CPU_FTR_TM)) || (tsk != current))
		return;

	if (MSR_TM_SUSPENDED(mfmsr())) {
		tm_reclaim_current(TM_CAUSE_SIGNAL);
	} else {
		tm_enable();
		tm_save_sprs(&(tsk->thread));
	}
}
#else
static inline void flush_tmregs_to_thread(struct task_struct *tsk) { }
#endif

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name.  If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}

/**
 * regs_query_register_name() - query register name from its offset
 * @offset:	the offset of a register in struct pt_regs.
 *
 * regs_query_register_name() returns the name of a register from its
 * offset in struct pt_regs.  If the @offset is invalid, this returns NULL.
 */
const char *regs_query_register_name(unsigned int offset)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (roff->offset == offset)
			return roff->name;
	return NULL;
}

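/*
 * Illustrative sketch only: one way an in-kernel user such as a kprobe
 * pre-handler could combine the two lookup helpers above.  The probe
 * itself, the "r3" register name and the handler below are assumptions
 * made for the example (it would also need <linux/kprobes.h>).
 *
 *	static int example_pre_handler(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		int off = regs_query_register_offset("r3");
 *
 *		if (off >= 0)
 *			pr_info("%s = 0x%lx\n", regs_query_register_name(off),
 *				regs_get_register(regs, off));
 *		return 0;
 *	}
 */
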
/*
 * This does not yet catch signals sent when the child dies;
 * that is handled in exit.c or in signal.c.
 */

/*
 * Set of msr bits that gdb can change on behalf of a process.
 */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
#define MSR_DEBUGCHANGE	0
#else
#define MSR_DEBUGCHANGE	(MSR_SE | MSR_BE)
#endif

/*
 * Max register writeable via ptrace_put_reg().
 */
#ifdef CONFIG_PPC32
#define PT_MAX_PUT_REG	PT_MQ
#else
#define PT_MAX_PUT_REG	PT_CCR
#endif

static unsigned long get_user_msr(struct task_struct *task)
{
	return task->thread.regs->msr | task->thread.fpexc_mode;
}

static int set_user_msr(struct task_struct *task, unsigned long msr)
{
	task->thread.regs->msr &= ~MSR_DEBUGCHANGE;
	task->thread.regs->msr |= msr & MSR_DEBUGCHANGE;
	return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static unsigned long get_user_ckpt_msr(struct task_struct *task)
{
	return task->thread.ckpt_regs.msr | task->thread.fpexc_mode;
}

static int set_user_ckpt_msr(struct task_struct *task, unsigned long msr)
{
	task->thread.ckpt_regs.msr &= ~MSR_DEBUGCHANGE;
	task->thread.ckpt_regs.msr |= msr & MSR_DEBUGCHANGE;
	return 0;
}

static int set_user_ckpt_trap(struct task_struct *task, unsigned long trap)
{
	task->thread.ckpt_regs.trap = trap & 0xfff0;
	return 0;
}
#endif

#ifdef CONFIG_PPC64
static int get_user_dscr(struct task_struct *task, unsigned long *data)
{
	*data = task->thread.dscr;
	return 0;
}

static int set_user_dscr(struct task_struct *task, unsigned long dscr)
{
	task->thread.dscr = dscr;
	task->thread.dscr_inherit = 1;
	return 0;
}
#else
static int get_user_dscr(struct task_struct *task, unsigned long *data)
{
	return -EIO;
}

static int set_user_dscr(struct task_struct *task, unsigned long dscr)
{
	return -EIO;
}
#endif

/*
 * We prevent mucking around with the reserved bits of trap,
 * which are used internally by the kernel.
 */
static int set_user_trap(struct task_struct *task, unsigned long trap)
{
	task->thread.regs->trap = trap & 0xfff0;
	return 0;
}

/*
 * Get contents of register REGNO in task TASK.
 */
int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data)
{
	unsigned int regs_max;

	if ((task->thread.regs == NULL) || !data)
		return -EIO;

	if (regno == PT_MSR) {
		*data = get_user_msr(task);
		return 0;
	}

	if (regno == PT_DSCR)
		return get_user_dscr(task, data);

#ifdef CONFIG_PPC64
	/*
	 * softe copies the paca->irq_soft_mask variable state.  Since
	 * irq_soft_mask is no longer used as a flag, force the user to
	 * always see the softe value as 1, which means interrupts are
	 * not soft disabled.
	 */
	if (regno == PT_SOFTE) {
		*data = 1;
		return 0;
	}
#endif

	regs_max = sizeof(struct user_pt_regs) / sizeof(unsigned long);
	if (regno < regs_max) {
		regno = array_index_nospec(regno, regs_max);
		*data = ((unsigned long *)task->thread.regs)[regno];
		return 0;
	}

	return -EIO;
}

/*
 * Write contents of register REGNO in task TASK.
 */
int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
{
	if (task->thread.regs == NULL)
		return -EIO;

	if (regno == PT_MSR)
		return set_user_msr(task, data);
	if (regno == PT_TRAP)
		return set_user_trap(task, data);
	if (regno == PT_DSCR)
		return set_user_dscr(task, data);

	if (regno <= PT_MAX_PUT_REG) {
		regno = array_index_nospec(regno, PT_MAX_PUT_REG + 1);
		((unsigned long *)task->thread.regs)[regno] = data;
		return 0;
	}
	return -EIO;
}

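/*
 * Illustrative sketch only, from the tracer's (userspace) point of view:
 * PTRACE_PEEKUSER/PTRACE_POKEUSER requests with a PT_* index end up in
 * ptrace_get_reg()/ptrace_put_reg() above.  The pid variable and the
 * chosen registers are assumptions made for the example.
 *
 *	#include <sys/ptrace.h>
 *	#include <asm/ptrace.h>		// PT_MSR, PT_R3, ...
 *	#include <errno.h>
 *
 *	errno = 0;
 *	long msr = ptrace(PTRACE_PEEKUSER, pid,
 *			  (void *)(PT_MSR * sizeof(long)), NULL);
 *	if (msr == -1 && errno != 0)
 *		perror("PTRACE_PEEKUSER");
 *
 *	// Writes go through ptrace_put_reg(); only registers up to
 *	// PT_MAX_PUT_REG (plus MSR/TRAP/DSCR) are accepted.
 *	ptrace(PTRACE_POKEUSER, pid,
 *	       (void *)(PT_R3 * sizeof(long)), (void *)1UL);
 */
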
static int gpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int i, ret;

	if (target->thread.regs == NULL)
		return -EIO;

	if (!FULL_REGS(target->thread.regs)) {
		/* We have a partial register set.  Fill 14-31 with bogus values */
		for (i = 14; i < 32; i++)
			target->thread.regs->gpr[i] = NV_REG_POISON;
	}

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  target->thread.regs,
				  0, offsetof(struct pt_regs, msr));
	if (!ret) {
		unsigned long msr = get_user_msr(target);
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
					  offsetof(struct pt_regs, msr),
					  offsetof(struct pt_regs, msr) +
					  sizeof(msr));
	}

	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
		     offsetof(struct pt_regs, msr) + sizeof(long));

	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &target->thread.regs->orig_gpr3,
					  offsetof(struct pt_regs, orig_gpr3),
					  sizeof(struct user_pt_regs));
	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       sizeof(struct user_pt_regs), -1);

	return ret;
}

static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	unsigned long reg;
	int ret;

	if (target->thread.regs == NULL)
		return -EIO;

	CHECK_FULL_REGS(target->thread.regs);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.regs,
				 0, PT_MSR * sizeof(reg));

	if (!ret && count > 0) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
					 PT_MSR * sizeof(reg),
					 (PT_MSR + 1) * sizeof(reg));
		if (!ret)
			ret = set_user_msr(target, reg);
	}

	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
		     offsetof(struct pt_regs, msr) + sizeof(long));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.regs->orig_gpr3,
					 PT_ORIG_R3 * sizeof(reg),
					 (PT_MAX_PUT_REG + 1) * sizeof(reg));

	if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
		ret = user_regset_copyin_ignore(
			&pos, &count, &kbuf, &ubuf,
			(PT_MAX_PUT_REG + 1) * sizeof(reg),
			PT_TRAP * sizeof(reg));

	if (!ret && count > 0) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
					 PT_TRAP * sizeof(reg),
					 (PT_TRAP + 1) * sizeof(reg));
		if (!ret)
			ret = set_user_trap(target, reg);
	}

	if (!ret)
		ret = user_regset_copyin_ignore(
			&pos, &count, &kbuf, &ubuf,
			(PT_TRAP + 1) * sizeof(reg), -1);

	return ret;
}

/*
 * Regardless of transactions, 'fp_state' holds the current running
 * value of all FPR registers and 'ckfp_state' holds the last checkpointed
 * value of all FPR registers for the current transaction.
 *
 * Userspace interface buffer layout:
 *
 * struct data {
 *	u64	fpr[32];
 *	u64	fpscr;
 * };
 */
static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
#ifdef CONFIG_VSX
	u64 buf[33];
	int i;

	flush_fp_to_thread(target);

	/* copy to local buffer then write that out */
	for (i = 0; i < 32 ; i++)
		buf[i] = target->thread.TS_FPR(i);
	buf[32] = target->thread.fp_state.fpscr;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
#else
	BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
		     offsetof(struct thread_fp_state, fpr[32]));

	flush_fp_to_thread(target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.fp_state, 0, -1);
#endif
}

/*
 * Regardless of transactions, 'fp_state' holds the current running
 * value of all FPR registers and 'ckfp_state' holds the last checkpointed
 * value of all FPR registers for the current transaction.
 *
 * Userspace interface buffer layout:
 *
 * struct data {
 *	u64	fpr[32];
 *	u64	fpscr;
 * };
 *
 */
static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
#ifdef CONFIG_VSX
	u64 buf[33];
	int i;

	flush_fp_to_thread(target);

	for (i = 0; i < 32 ; i++)
		buf[i] = target->thread.TS_FPR(i);
	buf[32] = target->thread.fp_state.fpscr;

	/* copy to local buffer then write that out */
	i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
	if (i)
		return i;

	for (i = 0; i < 32 ; i++)
		target->thread.TS_FPR(i) = buf[i];
	target->thread.fp_state.fpscr = buf[32];
	return 0;
#else
	BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
		     offsetof(struct thread_fp_state, fpr[32]));

	flush_fp_to_thread(target);

	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fp_state, 0, -1);
#endif
}

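/*
 * Illustrative sketch only, showing the tracer's (userspace) view of the
 * FPR layout documented above, read via PTRACE_GETREGSET with NT_PRFPREG.
 * The pid variable and the local fpregs struct are assumptions made for
 * the example.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/uio.h>
 *	#include <elf.h>		// NT_PRFPREG
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	struct { uint64_t fpr[32]; uint64_t fpscr; } fpregs;
 *	struct iovec iov = { .iov_base = &fpregs, .iov_len = sizeof(fpregs) };
 *
 *	if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRFPREG, &iov) == 0)
 *		printf("fpscr = 0x%llx\n", (unsigned long long)fpregs.fpscr);
 */
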
#ifdef CONFIG_ALTIVEC
/*
 * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
 * The transfer totals 34 quadwords.  Quadwords 0-31 contain the
 * corresponding vector registers.  Quadword 32 contains the vscr as the
 * last word (offset 12) within that quadword.  Quadword 33 contains the
 * vrsave as the first word (offset 0) within the quadword.
 *
 * This definition of the VMX state is compatible with the current PPC32
 * ptrace interface.  This allows signal handling and ptrace to use the
 * same structures.  This also simplifies the implementation of a bi-arch
 * (combined 32- and 64-bit) gdb.
 */

static int vr_active(struct task_struct *target,
		     const struct user_regset *regset)
{
	flush_altivec_to_thread(target);
	return target->thread.used_vr ? regset->n : 0;
}

/*
 * Regardless of transactions, 'vr_state' holds the current running
 * value of all the VMX registers and 'ckvr_state' holds the last
 * checkpointed value of all the VMX registers for the current
 * transaction to fall back on in case it aborts.
 *
 * Userspace interface buffer layout:
 *
 * struct data {
 *	vector128	vr[32];
 *	vector128	vscr;
 *	vector128	vrsave;
 * };
 */
static int vr_get(struct task_struct *target, const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  void *kbuf, void __user *ubuf)
{
	int ret;

	flush_altivec_to_thread(target);

	BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
		     offsetof(struct thread_vr_state, vr[32]));

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.vr_state, 0,
				  33 * sizeof(vector128));
	if (!ret) {
		/*
		 * Copy out only the low-order word of vrsave.
		 */
		union {
			elf_vrreg_t reg;
			u32 word;
		} vrsave;
		memset(&vrsave, 0, sizeof(vrsave));

		vrsave.word = target->thread.vrsave;

		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
					  33 * sizeof(vector128), -1);
	}

	return ret;
}

/*
 * Regardless of transactions, 'vr_state' holds the current running
 * value of all the VMX registers and 'ckvr_state' holds the last
 * checkpointed value of all the VMX registers for the current
 * transaction to fall back on in case it aborts.
 *
 * Userspace interface buffer layout:
 *
 * struct data {
 *	vector128	vr[32];
 *	vector128	vscr;
 *	vector128	vrsave;
 * };
 */
static int vr_set(struct task_struct *target, const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  const void *kbuf, const void __user *ubuf)
{
	int ret;

	flush_altivec_to_thread(target);

	BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
		     offsetof(struct thread_vr_state, vr[32]));

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.vr_state, 0,
				 33 * sizeof(vector128));
	if (!ret && count > 0) {
		/*
		 * We use only the first word of vrsave.
		 */
		union {
			elf_vrreg_t reg;
			u32 word;
		} vrsave;
		memset(&vrsave, 0, sizeof(vrsave));

		vrsave.word = target->thread.vrsave;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
					 33 * sizeof(vector128), -1);
		if (!ret)
			target->thread.vrsave = vrsave.word;
	}

	return ret;
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
/*
 * Currently, to set and get all the vsx state, you need to call
 * the fp and VMX calls as well.  This only gets/sets the lower 32
 * 128bit VSX registers.
 */

static int vsr_active(struct task_struct *target,
		      const struct user_regset *regset)
{
	flush_vsx_to_thread(target);
	return target->thread.used_vsr ? regset->n : 0;
}

/*
 * Regardless of transactions, 'fp_state' holds the current running
 * value of all FPR registers and 'ckfp_state' holds the last
 * checkpointed value of all FPR registers for the current
 * transaction.
 *
 * Userspace interface buffer layout:
 *
 * struct data {
 *	u64	vsx[32];
 * };
 */
static int vsr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	u64 buf[32];
	int ret, i;

	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);
	flush_vsx_to_thread(target);

	for (i = 0; i < 32 ; i++)
		buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  buf, 0, 32 * sizeof(double));

	return ret;
}

/*
 * Regardless of transactions, 'fp_state' holds the current running
 * value of all FPR registers and 'ckfp_state' holds the last
 * checkpointed value of all FPR registers for the current
 * transaction.
 *
 * Userspace interface buffer layout:
 *
 * struct data {
 *	u64	vsx[32];
 * };
 */
static int vsr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	u64 buf[32];
	int ret, i;

	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);
	flush_vsx_to_thread(target);

	for (i = 0; i < 32 ; i++)
		buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 buf, 0, 32 * sizeof(double));
	if (!ret)
		for (i = 0; i < 32 ; i++)
			target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];

	return ret;
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE

/*
 * For get_evrregs/set_evrregs functions 'data' has the following layout:
 *
 * struct {
 *   u32 evr[32];
 *   u64 acc;
 *   u32 spefscr;
 * }
 */

static int evr_active(struct task_struct *target,
		      const struct user_regset *regset)
{
	flush_spe_to_thread(target);
	return target->thread.used_spe ? regset->n : 0;
}

static int evr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int ret;

	flush_spe_to_thread(target);

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.evr,
				  0, sizeof(target->thread.evr));

	BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
		     offsetof(struct thread_struct, spefscr));

	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &target->thread.acc,
					  sizeof(target->thread.evr), -1);

	return ret;
}

static int evr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;

	flush_spe_to_thread(target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.evr,
				 0, sizeof(target->thread.evr));

	BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
		     offsetof(struct thread_struct, spefscr));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.acc,
					 sizeof(target->thread.evr), -1);

	return ret;
}
#endif /* CONFIG_SPE */

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/**
 * tm_cgpr_active - get active number of registers in CGPR
 * @target:	The target task.
 * @regset:	The user regset structure.
 *
 * This function checks for the active number of available
 * registers in transaction checkpointed GPR category.
 */
static int tm_cgpr_active(struct task_struct *target,
			  const struct user_regset *regset)
{
	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return 0;

	return regset->n;
}

/**
 * tm_cgpr_get - get CGPR registers
 * @target:	The target task.
 * @regset:	The user regset structure.
 * @pos:	The buffer position.
 * @count:	Number of bytes to copy.
 * @kbuf:	Kernel buffer to copy from.
 * @ubuf:	User buffer to copy into.
 *
 * This function gets the transaction checkpointed GPR registers.
 *
 * When the transaction is active, 'ckpt_regs' holds all the checkpointed
 * GPR register values for the current transaction to fall back on if it
 * aborts in between.  This function gets those checkpointed GPR registers.
 * The userspace interface buffer layout is as follows.
 *
 * struct data {
 *	struct pt_regs ckpt_regs;
 * };
 */
static int tm_cgpr_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	int ret;

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return -ENODATA;

	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.ckpt_regs,
				  0, offsetof(struct pt_regs, msr));
	if (!ret) {
		unsigned long msr = get_user_ckpt_msr(target);

		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
					  offsetof(struct pt_regs, msr),
					  offsetof(struct pt_regs, msr) +
					  sizeof(msr));
	}

	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
		     offsetof(struct pt_regs, msr) + sizeof(long));

	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &target->thread.ckpt_regs.orig_gpr3,
					  offsetof(struct pt_regs, orig_gpr3),
					  sizeof(struct user_pt_regs));
	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       sizeof(struct user_pt_regs), -1);

	return ret;
}

/*
 * tm_cgpr_set - set the CGPR registers
 * @target:	The target task.
 * @regset:	The user regset structure.
 * @pos:	The buffer position.
 * @count:	Number of bytes to copy.
 * @kbuf:	Kernel buffer to copy into.
 * @ubuf:	User buffer to copy from.
 *
 * This function sets the transaction checkpointed GPR registers.
 *
 * When the transaction is active, 'ckpt_regs' holds the checkpointed
 * GPR register values for the current transaction to fall back on if it
 * aborts in between.  This function sets those checkpointed GPR registers.
 * The userspace interface buffer layout is as follows.
 *
 * struct data {
 *	struct pt_regs ckpt_regs;
 * };
 */
static int tm_cgpr_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned long reg;
	int ret;

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return -ENODATA;

	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.ckpt_regs,
				 0, PT_MSR * sizeof(reg));

	if (!ret && count > 0) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
					 PT_MSR * sizeof(reg),
					 (PT_MSR + 1) * sizeof(reg));
		if (!ret)
			ret = set_user_ckpt_msr(target, reg);
	}

	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
		     offsetof(struct pt_regs, msr) + sizeof(long));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.ckpt_regs.orig_gpr3,
					 PT_ORIG_R3 * sizeof(reg),
					 (PT_MAX_PUT_REG + 1) * sizeof(reg));

	if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
		ret = user_regset_copyin_ignore(
			&pos, &count, &kbuf, &ubuf,
			(PT_MAX_PUT_REG + 1) * sizeof(reg),
			PT_TRAP * sizeof(reg));

	if (!ret && count > 0) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
					 PT_TRAP * sizeof(reg),
					 (PT_TRAP + 1) * sizeof(reg));
		if (!ret)
			ret = set_user_ckpt_trap(target, reg);
	}

	if (!ret)
		ret = user_regset_copyin_ignore(
			&pos, &count, &kbuf, &ubuf,
			(PT_TRAP + 1) * sizeof(reg), -1);

	return ret;
}

/**
 * tm_cfpr_active - get active number of registers in CFPR
 * @target:	The target task.
 * @regset:	The user regset structure.
 *
 * This function checks for the active number of available
 * registers in transaction checkpointed FPR category.
 */
static int tm_cfpr_active(struct task_struct *target,
				const struct user_regset *regset)
{
	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return 0;

	return regset->n;
}

/**
 * tm_cfpr_get - get CFPR registers
 * @target:	The target task.
 * @regset:	The user regset structure.
 * @pos:	The buffer position.
 * @count:	Number of bytes to copy.
 * @kbuf:	Kernel buffer to copy from.
 * @ubuf:	User buffer to copy into.
 *
 * This function gets the transaction checkpointed FPR registers.
 *
 * When the transaction is active 'ckfp_state' holds the checkpointed
 * values for the current transaction to fall back on if it aborts
 * in between.  This function gets those checkpointed FPR registers.
 * The userspace interface buffer layout is as follows.
 *
 * struct data {
 *	u64	fpr[32];
 *	u64	fpscr;
 * };
 */
static int tm_cfpr_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	u64 buf[33];
	int i;

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return -ENODATA;

	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);

	/* copy to local buffer then write that out */
	for (i = 0; i < 32 ; i++)
		buf[i] = target->thread.TS_CKFPR(i);
	buf[32] = target->thread.ckfp_state.fpscr;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
}

/**
 * tm_cfpr_set - set CFPR registers
 * @target:	The target task.
 * @regset:	The user regset structure.
 * @pos:	The buffer position.
 * @count:	Number of bytes to copy.
 * @kbuf:	Kernel buffer to copy into.
 * @ubuf:	User buffer to copy from.
 *
 * This function sets the transaction checkpointed FPR registers.
 *
 * When the transaction is active 'ckfp_state' holds the checkpointed
 * FPR register values for the current transaction to fall back on
 * if it aborts in between.  This function sets these checkpointed
 * FPR registers.  The userspace interface buffer layout is as follows.
 *
 * struct data {
 *	u64	fpr[32];
 *	u64	fpscr;
 * };
 */
static int tm_cfpr_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	u64 buf[33];
	int i;

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return -ENODATA;

	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);

	for (i = 0; i < 32; i++)
		buf[i] = target->thread.TS_CKFPR(i);
	buf[32] = target->thread.ckfp_state.fpscr;

	/* copy to local buffer then write that out */
	i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
	if (i)
		return i;
	for (i = 0; i < 32 ; i++)
		target->thread.TS_CKFPR(i) = buf[i];
	target->thread.ckfp_state.fpscr = buf[32];
	return 0;
}

/**
 * tm_cvmx_active - get active number of registers in CVMX
 * @target:	The target task.
 * @regset:	The user regset structure.
 *
 * This function checks for the active number of available
 * registers in checkpointed VMX category.
 */
static int tm_cvmx_active(struct task_struct *target,
				const struct user_regset *regset)
{
	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return 0;

	return regset->n;
}

/**
 * tm_cvmx_get - get CVMX registers
 * @target:	The target task.
 * @regset:	The user regset structure.
 * @pos:	The buffer position.
 * @count:	Number of bytes to copy.
 * @kbuf:	Kernel buffer to copy from.
 * @ubuf:	User buffer to copy into.
 *
 * This function gets the transaction checkpointed VMX registers.
 *
 * When the transaction is active 'ckvr_state' and 'ckvrsave' hold
 * the checkpointed values for the current transaction to fall
 * back on if it aborts in between.  The userspace interface buffer
 * layout is as follows.
 *
 * struct data {
 *	vector128	vr[32];
 *	vector128	vscr;
 *	vector128	vrsave;
 * };
 */
static int tm_cvmx_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	int ret;

	BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return -ENODATA;

	/* Flush the state */
	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					&target->thread.ckvr_state, 0,
					33 * sizeof(vector128));
	if (!ret) {
		/*
		 * Copy out only the low-order word of vrsave.
		 */
		union {
			elf_vrreg_t reg;
			u32 word;
		} vrsave;
		memset(&vrsave, 0, sizeof(vrsave));
		vrsave.word = target->thread.ckvrsave;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
						33 * sizeof(vector128), -1);
	}

	return ret;
}

/**
 * tm_cvmx_set - set CVMX registers
 * @target:	The target task.
 * @regset:	The user regset structure.
 * @pos:	The buffer position.
 * @count:	Number of bytes to copy.
 * @kbuf:	Kernel buffer to copy into.
 * @ubuf:	User buffer to copy from.
 *
 * This function sets the transaction checkpointed VMX registers.
 *
 * When the transaction is active 'ckvr_state' and 'ckvrsave' hold
 * the checkpointed values for the current transaction to fall
 * back on if it aborts in between.  The userspace interface buffer
 * layout is as follows.
 *
 * struct data {
 *	vector128	vr[32];
 *	vector128	vscr;
 *	vector128	vrsave;
 * };
 */
static int tm_cvmx_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	int ret;

	BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return -ENODATA;

	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					&target->thread.ckvr_state, 0,
					33 * sizeof(vector128));
	if (!ret && count > 0) {
		/*
		 * We use only the low-order word of vrsave.
		 */
		union {
			elf_vrreg_t reg;
			u32 word;
		} vrsave;
		memset(&vrsave, 0, sizeof(vrsave));
		vrsave.word = target->thread.ckvrsave;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
						33 * sizeof(vector128), -1);
		if (!ret)
			target->thread.ckvrsave = vrsave.word;
	}

	return ret;
}

/**
 * tm_cvsx_active - get active number of registers in CVSX
 * @target:	The target task.
 * @regset:	The user regset structure.
 *
 * This function checks for the active number of available
 * registers in transaction checkpointed VSX category.
 */
static int tm_cvsx_active(struct task_struct *target,
				const struct user_regset *regset)
{
	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return 0;

	flush_vsx_to_thread(target);
	return target->thread.used_vsr ? regset->n : 0;
}

/**
 * tm_cvsx_get - get CVSX registers
 * @target:	The target task.
 * @regset:	The user regset structure.
 * @pos:	The buffer position.
 * @count:	Number of bytes to copy.
 * @kbuf:	Kernel buffer to copy from.
 * @ubuf:	User buffer to copy into.
 *
 * This function gets the transaction checkpointed VSX registers.
 *
 * When the transaction is active 'ckfp_state' holds the checkpointed
 * values for the current transaction to fall back on if it aborts
 * in between.  This function gets those checkpointed VSX registers.
 * The userspace interface buffer layout is as follows.
 *
 * struct data {
 *	u64	vsx[32];
 * };
 */
static int tm_cvsx_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	u64 buf[32];
	int ret, i;

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return -ENODATA;

	/* Flush the state */
	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);
	flush_vsx_to_thread(target);

	for (i = 0; i < 32 ; i++)
		buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  buf, 0, 32 * sizeof(double));

	return ret;
}

/**
 * tm_cvsx_set - set CVSX registers
 * @target:	The target task.
 * @regset:	The user regset structure.
 * @pos:	The buffer position.
 * @count:	Number of bytes to copy.
 * @kbuf:	Kernel buffer to copy into.
 * @ubuf:	User buffer to copy from.
 *
 * This function sets the transaction checkpointed VSX registers.
 *
 * When the transaction is active 'ckfp_state' holds the checkpointed
 * VSX register values for the current transaction to fall back on
 * if it aborts in between.  This function sets these checkpointed
 * VSX registers.  The userspace interface buffer layout is as follows.
 *
 * struct data {
 *	u64	vsx[32];
 * };
 */
static int tm_cvsx_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	u64 buf[32];
	int ret, i;

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return -ENODATA;

	/* Flush the state */
	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);
	flush_vsx_to_thread(target);

	for (i = 0; i < 32 ; i++)
		buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 buf, 0, 32 * sizeof(double));
	if (!ret)
		for (i = 0; i < 32 ; i++)
			target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];

	return ret;
}

/**
 * tm_spr_active - get active number of registers in TM SPR
 * @target:	The target task.
 * @regset:	The user regset structure.
 *
 * This function checks the active number of available
 * registers in the transactional memory SPR category.
 */
static int tm_spr_active(struct task_struct *target,
			 const struct user_regset *regset)
{
	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	return regset->n;
}

/**
 * tm_spr_get - get the TM related SPR registers
 * @target:	The target task.
 * @regset:	The user regset structure.
 * @pos:	The buffer position.
 * @count:	Number of bytes to copy.
 * @kbuf:	Kernel buffer to copy from.
 * @ubuf:	User buffer to copy into.
 *
 * This function gets transactional memory related SPR registers.
 * The userspace interface buffer layout is as follows.
 *
 * struct {
 *	u64		tm_tfhar;
 *	u64		tm_texasr;
 *	u64		tm_tfiar;
 * };
 */
static int tm_spr_get(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      void *kbuf, void __user *ubuf)
{
	int ret;

	/* Build tests */
	BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
	BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
	BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	/* Flush the states */
	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);

	/* TFHAR register */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.tm_tfhar, 0, sizeof(u64));

	/* TEXASR register */
	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &target->thread.tm_texasr, sizeof(u64),
					  2 * sizeof(u64));

	/* TFIAR register */
	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &target->thread.tm_tfiar,
					  2 * sizeof(u64), 3 * sizeof(u64));
	return ret;
}

/**
 * tm_spr_set - set the TM related SPR registers
 * @target:	The target task.
 * @regset:	The user regset structure.
 * @pos:	The buffer position.
 * @count:	Number of bytes to copy.
 * @kbuf:	Kernel buffer to copy into.
 * @ubuf:	User buffer to copy from.
 *
 * This function sets transactional memory related SPR registers.
 * The userspace interface buffer layout is as follows.
 *
 * struct {
 *	u64		tm_tfhar;
 *	u64		tm_texasr;
 *	u64		tm_tfiar;
 * };
 */
static int tm_spr_set(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      const void *kbuf, const void __user *ubuf)
{
	int ret;

	/* Build tests */
	BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
	BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
	BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	/* Flush the states */
	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);

	/* TFHAR register */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.tm_tfhar, 0, sizeof(u64));

	/* TEXASR register */
	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.tm_texasr, sizeof(u64),
					 2 * sizeof(u64));

	/* TFIAR register */
	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.tm_tfiar,
					 2 * sizeof(u64), 3 * sizeof(u64));
	return ret;
}

static int tm_tar_active(struct task_struct *target,
			 const struct user_regset *regset)
{
	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (MSR_TM_ACTIVE(target->thread.regs->msr))
		return regset->n;

	return 0;
}

static int tm_tar_get(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      void *kbuf, void __user *ubuf)
{
	int ret;

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return -ENODATA;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.tm_tar, 0, sizeof(u64));
	return ret;
}

static int tm_tar_set(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      const void *kbuf, const void __user *ubuf)
{
	int ret;

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return -ENODATA;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.tm_tar, 0, sizeof(u64));
	return ret;
}

static int tm_ppr_active(struct task_struct *target,
			 const struct user_regset *regset)
{
	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (MSR_TM_ACTIVE(target->thread.regs->msr))
		return regset->n;

	return 0;
}


static int tm_ppr_get(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      void *kbuf, void __user *ubuf)
{
	int ret;

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return -ENODATA;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.tm_ppr, 0, sizeof(u64));
	return ret;
}

static int tm_ppr_set(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      const void *kbuf, const void __user *ubuf)
{
	int ret;

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return -ENODATA;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.tm_ppr, 0, sizeof(u64));
	return ret;
}

static int tm_dscr_active(struct task_struct *target,
			  const struct user_regset *regset)
{
	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (MSR_TM_ACTIVE(target->thread.regs->msr))
		return regset->n;

	return 0;
}

static int tm_dscr_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	int ret;

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return -ENODATA;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.tm_dscr, 0, sizeof(u64));
	return ret;
}

static int tm_dscr_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	int ret;

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return -ENODATA;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.tm_dscr, 0, sizeof(u64));
	return ret;
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

Anshuman Khandualfa439812016-07-28 10:57:42 +08001611#ifdef CONFIG_PPC64
1612static int ppr_get(struct task_struct *target,
1613 const struct user_regset *regset,
1614 unsigned int pos, unsigned int count,
1615 void *kbuf, void __user *ubuf)
1616{
Masahiro Yamada7f2462a2016-09-06 20:21:50 +09001617 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
Nicholas Piggin4c2de742018-10-13 00:15:16 +11001618 &target->thread.regs->ppr, 0, sizeof(u64));
Anshuman Khandualfa439812016-07-28 10:57:42 +08001619}
1620
1621static int ppr_set(struct task_struct *target,
1622 const struct user_regset *regset,
1623 unsigned int pos, unsigned int count,
1624 const void *kbuf, const void __user *ubuf)
1625{
Masahiro Yamada7f2462a2016-09-06 20:21:50 +09001626 return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
Nicholas Piggin4c2de742018-10-13 00:15:16 +11001627 &target->thread.regs->ppr, 0, sizeof(u64));
Anshuman Khandualfa439812016-07-28 10:57:42 +08001628}
1629
1630static int dscr_get(struct task_struct *target,
1631 const struct user_regset *regset,
1632 unsigned int pos, unsigned int count,
1633 void *kbuf, void __user *ubuf)
1634{
Masahiro Yamada7f2462a2016-09-06 20:21:50 +09001635 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1636 &target->thread.dscr, 0, sizeof(u64));
Anshuman Khandualfa439812016-07-28 10:57:42 +08001637}
1638static int dscr_set(struct task_struct *target,
1639 const struct user_regset *regset,
1640 unsigned int pos, unsigned int count,
1641 const void *kbuf, const void __user *ubuf)
1642{
Masahiro Yamada7f2462a2016-09-06 20:21:50 +09001643 return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1644 &target->thread.dscr, 0, sizeof(u64));
Anshuman Khandualfa439812016-07-28 10:57:42 +08001645}
1646#endif
1647#ifdef CONFIG_PPC_BOOK3S_64
1648static int tar_get(struct task_struct *target,
1649 const struct user_regset *regset,
1650 unsigned int pos, unsigned int count,
1651 void *kbuf, void __user *ubuf)
1652{
Masahiro Yamada7f2462a2016-09-06 20:21:50 +09001653 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1654 &target->thread.tar, 0, sizeof(u64));
Anshuman Khandualfa439812016-07-28 10:57:42 +08001655}
1656static int tar_set(struct task_struct *target,
1657 const struct user_regset *regset,
1658 unsigned int pos, unsigned int count,
1659 const void *kbuf, const void __user *ubuf)
1660{
Masahiro Yamada7f2462a2016-09-06 20:21:50 +09001661 return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1662 &target->thread.tar, 0, sizeof(u64));
Anshuman Khandualfa439812016-07-28 10:57:42 +08001663}
Anshuman Khandualcf89d4e2016-07-28 10:57:43 +08001664
1665static int ebb_active(struct task_struct *target,
1666 const struct user_regset *regset)
1667{
1668 if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1669 return -ENODEV;
1670
1671 if (target->thread.used_ebb)
1672 return regset->n;
1673
1674 return 0;
1675}
1676
1677static int ebb_get(struct task_struct *target,
1678 const struct user_regset *regset,
1679 unsigned int pos, unsigned int count,
1680 void *kbuf, void __user *ubuf)
1681{
1682 /* Build tests */
1683 BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
1684 BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
1685
1686 if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1687 return -ENODEV;
1688
1689 if (!target->thread.used_ebb)
1690 return -ENODATA;
1691
1692 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1693 &target->thread.ebbrr, 0, 3 * sizeof(unsigned long));
1694}
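/*
 * For reference, a sketch of the NT_PPC_EBB layout implied by the copyout
 * above (informational only, not a separate ABI definition):
 *
 *	struct {
 *		unsigned long ebbrr;
 *		unsigned long ebbhr;
 *		unsigned long bescr;
 *	};
 *
 * The BUILD_BUG_ON()s check that these thread_struct fields stay adjacent
 * in exactly this order, so a single copyout/copyin covers all three.
 */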
1695
1696static int ebb_set(struct task_struct *target,
1697 const struct user_regset *regset,
1698 unsigned int pos, unsigned int count,
1699 const void *kbuf, const void __user *ubuf)
1700{
1701 int ret = 0;
1702
1703 /* Build tests */
1704 BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
1705 BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
1706
1707 if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1708 return -ENODEV;
1709
1710 if (target->thread.used_ebb)
1711 return -ENODATA;
1712
1713 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1714 &target->thread.ebbrr, 0, sizeof(unsigned long));
1715
1716 if (!ret)
1717 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1718 &target->thread.ebbhr, sizeof(unsigned long),
1719 2 * sizeof(unsigned long));
1720
1721 if (!ret)
1722 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1723 &target->thread.bescr,
1724 2 * sizeof(unsigned long), 3 * sizeof(unsigned long));
1725
1726 return ret;
1727}
Anshuman Khanduala67ae752016-07-28 10:57:44 +08001728static int pmu_active(struct task_struct *target,
1729 const struct user_regset *regset)
1730{
1731 if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1732 return -ENODEV;
1733
1734 return regset->n;
1735}
1736
1737static int pmu_get(struct task_struct *target,
1738 const struct user_regset *regset,
1739 unsigned int pos, unsigned int count,
1740 void *kbuf, void __user *ubuf)
1741{
1742 /* Build tests */
1743 BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
1744 BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
1745 BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
1746 BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
1747
1748 if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1749 return -ENODEV;
1750
1751 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1752 &target->thread.siar, 0,
1753 5 * sizeof(unsigned long));
1754}
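/*
 * Likewise, the NT_PPC_PMU buffer handed to userspace is simply five
 * consecutive registers (a sketch inferred from the offsets above):
 *
 *	struct {
 *		unsigned long siar;
 *		unsigned long sdar;
 *		unsigned long sier;
 *		unsigned long mmcr2;
 *		unsigned long mmcr0;
 *	};
 */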
1755
1756static int pmu_set(struct task_struct *target,
1757 const struct user_regset *regset,
1758 unsigned int pos, unsigned int count,
1759 const void *kbuf, const void __user *ubuf)
1760{
1761 int ret = 0;
1762
1763 /* Build tests */
1764 BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
1765 BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
1766 BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
1767 BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
1768
1769 if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1770 return -ENODEV;
1771
1772 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1773 &target->thread.siar, 0,
1774 sizeof(unsigned long));
1775
1776 if (!ret)
1777 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1778 &target->thread.sdar, sizeof(unsigned long),
1779 2 * sizeof(unsigned long));
1780
1781 if (!ret)
1782 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1783 &target->thread.sier, 2 * sizeof(unsigned long),
1784 3 * sizeof(unsigned long));
1785
1786 if (!ret)
1787 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1788 &target->thread.mmcr2, 3 * sizeof(unsigned long),
1789 4 * sizeof(unsigned long));
1790
1791 if (!ret)
1792 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1793 &target->thread.mmcr0, 4 * sizeof(unsigned long),
1794 5 * sizeof(unsigned long));
1795 return ret;
1796}
Anshuman Khandualfa439812016-07-28 10:57:42 +08001797#endif
Thiago Jung Bauermannc5cc1f42018-01-18 17:50:43 -08001798
1799#ifdef CONFIG_PPC_MEM_KEYS
1800static int pkey_active(struct task_struct *target,
1801 const struct user_regset *regset)
1802{
1803 if (!arch_pkeys_enabled())
1804 return -ENODEV;
1805
1806 return regset->n;
1807}
1808
1809static int pkey_get(struct task_struct *target,
1810 const struct user_regset *regset,
1811 unsigned int pos, unsigned int count,
1812 void *kbuf, void __user *ubuf)
1813{
1814 BUILD_BUG_ON(TSO(amr) + sizeof(unsigned long) != TSO(iamr));
1815 BUILD_BUG_ON(TSO(iamr) + sizeof(unsigned long) != TSO(uamor));
1816
1817 if (!arch_pkeys_enabled())
1818 return -ENODEV;
1819
1820 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1821 &target->thread.amr, 0,
1822 ELF_NPKEY * sizeof(unsigned long));
1823}
1824
1825static int pkey_set(struct task_struct *target,
1826 const struct user_regset *regset,
1827 unsigned int pos, unsigned int count,
1828 const void *kbuf, const void __user *ubuf)
1829{
1830 u64 new_amr;
1831 int ret;
1832
1833 if (!arch_pkeys_enabled())
1834 return -ENODEV;
1835
1836 /* Only the AMR can be set from userspace */
1837 if (pos != 0 || count != sizeof(new_amr))
1838 return -EINVAL;
1839
1840 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1841 &new_amr, 0, sizeof(new_amr));
1842 if (ret)
1843 return ret;
1844
1845 /* UAMOR determines which bits of the AMR can be set from userspace. */
1846 target->thread.amr = (new_amr & target->thread.uamor) |
1847 (target->thread.amr & ~target->thread.uamor);
1848
1849 return 0;
1850}
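/*
 * A small worked example of the masking above, with made-up values:
 *
 *	uamor   = 0x0c00000000000000	only these AMR bits are writable
 *	amr     = 0x0000000000000000	current value
 *	new_amr = 0xffffffffffffffff	value requested by the tracer
 *
 *	result  = (new_amr & uamor) | (amr & ~uamor)
 *	        = 0x0c00000000000000
 *
 * Bits outside UAMOR keep their old value, so ptrace cannot grant access
 * rights on keys the target itself was not allowed to change.
 */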
1851#endif /* CONFIG_PPC_MEM_KEYS */
1852
Roland McGrath80fdf472007-12-20 03:58:00 -08001853/*
1854 * These are our native regset flavors.
1855 */
1856enum powerpc_regset {
1857 REGSET_GPR,
1858 REGSET_FPR,
1859#ifdef CONFIG_ALTIVEC
1860 REGSET_VMX,
1861#endif
Michael Neulingce48b212008-06-25 14:07:18 +10001862#ifdef CONFIG_VSX
1863 REGSET_VSX,
1864#endif
Roland McGrath80fdf472007-12-20 03:58:00 -08001865#ifdef CONFIG_SPE
1866 REGSET_SPE,
1867#endif
Anshuman Khandual25847fb2016-07-28 10:57:36 +08001868#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1869 REGSET_TM_CGPR, /* TM checkpointed GPR registers */
Anshuman Khandual19cbcbf2016-07-28 10:57:37 +08001870 REGSET_TM_CFPR, /* TM checkpointed FPR registers */
Anshuman Khandual8c13f592016-07-28 10:57:38 +08001871 REGSET_TM_CVMX, /* TM checkpointed VMX registers */
Anshuman Khandual9d3918f2016-07-28 10:57:39 +08001872 REGSET_TM_CVSX, /* TM checkpointed VSX registers */
Anshuman Khandual08e1c012016-07-28 10:57:40 +08001873 REGSET_TM_SPR, /* TM specific SPR registers */
Anshuman Khandualc45dc902016-07-28 10:57:41 +08001874 REGSET_TM_CTAR, /* TM checkpointed TAR register */
1875 REGSET_TM_CPPR, /* TM checkpointed PPR register */
1876 REGSET_TM_CDSCR, /* TM checkpointed DSCR register */
Anshuman Khandual25847fb2016-07-28 10:57:36 +08001877#endif
Anshuman Khandualfa439812016-07-28 10:57:42 +08001878#ifdef CONFIG_PPC64
1879 REGSET_PPR, /* PPR register */
1880 REGSET_DSCR, /* DSCR register */
1881#endif
1882#ifdef CONFIG_PPC_BOOK3S_64
1883 REGSET_TAR, /* TAR register */
Anshuman Khandualcf89d4e2016-07-28 10:57:43 +08001884 REGSET_EBB, /* EBB registers */
Anshuman Khanduala67ae752016-07-28 10:57:44 +08001885 REGSET_PMR, /* Performance Monitor Registers */
Anshuman Khandualfa439812016-07-28 10:57:42 +08001886#endif
Thiago Jung Bauermannc5cc1f42018-01-18 17:50:43 -08001887#ifdef CONFIG_PPC_MEM_KEYS
1888 REGSET_PKEY, /* AMR register */
1889#endif
Roland McGrath80fdf472007-12-20 03:58:00 -08001890};
1891
1892static const struct user_regset native_regsets[] = {
1893 [REGSET_GPR] = {
1894 .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
1895 .size = sizeof(long), .align = sizeof(long),
1896 .get = gpr_get, .set = gpr_set
1897 },
1898 [REGSET_FPR] = {
1899 .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
1900 .size = sizeof(double), .align = sizeof(double),
1901 .get = fpr_get, .set = fpr_set
1902 },
1903#ifdef CONFIG_ALTIVEC
1904 [REGSET_VMX] = {
1905 .core_note_type = NT_PPC_VMX, .n = 34,
1906 .size = sizeof(vector128), .align = sizeof(vector128),
1907 .active = vr_active, .get = vr_get, .set = vr_set
1908 },
1909#endif
Michael Neulingce48b212008-06-25 14:07:18 +10001910#ifdef CONFIG_VSX
1911 [REGSET_VSX] = {
Michael Neulingf3e909c2008-07-01 14:01:39 +10001912 .core_note_type = NT_PPC_VSX, .n = 32,
1913 .size = sizeof(double), .align = sizeof(double),
Michael Neulingce48b212008-06-25 14:07:18 +10001914 .active = vsr_active, .get = vsr_get, .set = vsr_set
1915 },
1916#endif
Roland McGrath80fdf472007-12-20 03:58:00 -08001917#ifdef CONFIG_SPE
1918 [REGSET_SPE] = {
Suzuki Poulosea0b38b42013-08-27 13:22:14 +05301919 .core_note_type = NT_PPC_SPE, .n = 35,
Roland McGrath80fdf472007-12-20 03:58:00 -08001920 .size = sizeof(u32), .align = sizeof(u32),
1921 .active = evr_active, .get = evr_get, .set = evr_set
1922 },
1923#endif
Anshuman Khandual25847fb2016-07-28 10:57:36 +08001924#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1925 [REGSET_TM_CGPR] = {
1926 .core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
1927 .size = sizeof(long), .align = sizeof(long),
1928 .active = tm_cgpr_active, .get = tm_cgpr_get, .set = tm_cgpr_set
1929 },
Anshuman Khandual19cbcbf2016-07-28 10:57:37 +08001930 [REGSET_TM_CFPR] = {
1931 .core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
1932 .size = sizeof(double), .align = sizeof(double),
1933 .active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
1934 },
Anshuman Khandual8c13f592016-07-28 10:57:38 +08001935 [REGSET_TM_CVMX] = {
1936 .core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
1937 .size = sizeof(vector128), .align = sizeof(vector128),
1938 .active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
1939 },
Anshuman Khandual9d3918f2016-07-28 10:57:39 +08001940 [REGSET_TM_CVSX] = {
1941 .core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
1942 .size = sizeof(double), .align = sizeof(double),
1943 .active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
1944 },
Anshuman Khandual08e1c012016-07-28 10:57:40 +08001945 [REGSET_TM_SPR] = {
1946 .core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
1947 .size = sizeof(u64), .align = sizeof(u64),
1948 .active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
1949 },
Anshuman Khandualc45dc902016-07-28 10:57:41 +08001950 [REGSET_TM_CTAR] = {
1951 .core_note_type = NT_PPC_TM_CTAR, .n = 1,
1952 .size = sizeof(u64), .align = sizeof(u64),
1953 .active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
1954 },
1955 [REGSET_TM_CPPR] = {
1956 .core_note_type = NT_PPC_TM_CPPR, .n = 1,
1957 .size = sizeof(u64), .align = sizeof(u64),
1958 .active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
1959 },
1960 [REGSET_TM_CDSCR] = {
1961 .core_note_type = NT_PPC_TM_CDSCR, .n = 1,
1962 .size = sizeof(u64), .align = sizeof(u64),
1963 .active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
1964 },
Anshuman Khandual25847fb2016-07-28 10:57:36 +08001965#endif
Anshuman Khandualfa439812016-07-28 10:57:42 +08001966#ifdef CONFIG_PPC64
1967 [REGSET_PPR] = {
1968 .core_note_type = NT_PPC_PPR, .n = 1,
1969 .size = sizeof(u64), .align = sizeof(u64),
1970 .get = ppr_get, .set = ppr_set
1971 },
1972 [REGSET_DSCR] = {
1973 .core_note_type = NT_PPC_DSCR, .n = 1,
1974 .size = sizeof(u64), .align = sizeof(u64),
1975 .get = dscr_get, .set = dscr_set
1976 },
1977#endif
1978#ifdef CONFIG_PPC_BOOK3S_64
1979 [REGSET_TAR] = {
1980 .core_note_type = NT_PPC_TAR, .n = 1,
1981 .size = sizeof(u64), .align = sizeof(u64),
1982 .get = tar_get, .set = tar_set
1983 },
Anshuman Khandualcf89d4e2016-07-28 10:57:43 +08001984 [REGSET_EBB] = {
1985 .core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
1986 .size = sizeof(u64), .align = sizeof(u64),
1987 .active = ebb_active, .get = ebb_get, .set = ebb_set
1988 },
Anshuman Khanduala67ae752016-07-28 10:57:44 +08001989 [REGSET_PMR] = {
1990 .core_note_type = NT_PPC_PMU, .n = ELF_NPMU,
1991 .size = sizeof(u64), .align = sizeof(u64),
1992 .active = pmu_active, .get = pmu_get, .set = pmu_set
1993 },
Anshuman Khandualfa439812016-07-28 10:57:42 +08001994#endif
Thiago Jung Bauermannc5cc1f42018-01-18 17:50:43 -08001995#ifdef CONFIG_PPC_MEM_KEYS
1996 [REGSET_PKEY] = {
1997 .core_note_type = NT_PPC_PKEY, .n = ELF_NPKEY,
1998 .size = sizeof(u64), .align = sizeof(u64),
1999 .active = pkey_active, .get = pkey_get, .set = pkey_set
2000 },
2001#endif
Roland McGrath80fdf472007-12-20 03:58:00 -08002002};
2003
2004static const struct user_regset_view user_ppc_native_view = {
2005 .name = UTS_MACHINE, .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
2006 .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
2007};
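/*
 * Illustrative userspace sketch (pid is a stopped tracee, error handling
 * omitted): any regset above can be fetched with PTRACE_GETREGSET and an
 * iovec, e.g. the GPR set exported as NT_PRSTATUS.  The kernel trims
 * iov_len to the amount actually copied.
 *
 *	#include <elf.h>
 *	#include <stdio.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/uio.h>
 *	#include <asm/ptrace.h>		// userspace struct pt_regs
 *
 *	struct pt_regs regs;
 *	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *
 *	if (ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov) == 0)
 *		printf("nip = %lx\n", regs.nip);
 */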
2008
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08002009#ifdef CONFIG_PPC64
2010#include <linux/compat.h>
2011
Anshuman Khandual04fcadc2016-07-28 10:57:35 +08002012static int gpr32_get_common(struct task_struct *target,
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08002013 const struct user_regset *regset,
2014 unsigned int pos, unsigned int count,
Simon Guo26183112016-09-11 21:44:13 +08002015 void *kbuf, void __user *ubuf,
2016 unsigned long *regs)
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08002017{
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08002018 compat_ulong_t *k = kbuf;
2019 compat_ulong_t __user *u = ubuf;
2020 compat_ulong_t reg;
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08002021
2022 pos /= sizeof(reg);
2023 count /= sizeof(reg);
2024
2025 if (kbuf)
2026 for (; count > 0 && pos < PT_MSR; --count)
2027 *k++ = regs[pos++];
2028 else
2029 for (; count > 0 && pos < PT_MSR; --count)
2030 if (__put_user((compat_ulong_t) regs[pos++], u++))
2031 return -EFAULT;
2032
2033 if (count > 0 && pos == PT_MSR) {
2034 reg = get_user_msr(target);
2035 if (kbuf)
2036 *k++ = reg;
2037 else if (__put_user(reg, u++))
2038 return -EFAULT;
2039 ++pos;
2040 --count;
2041 }
2042
2043 if (kbuf)
2044 for (; count > 0 && pos < PT_REGS_COUNT; --count)
2045 *k++ = regs[pos++];
2046 else
2047 for (; count > 0 && pos < PT_REGS_COUNT; --count)
2048 if (__put_user((compat_ulong_t) regs[pos++], u++))
2049 return -EFAULT;
2050
2051 kbuf = k;
2052 ubuf = u;
2053 pos *= sizeof(reg);
2054 count *= sizeof(reg);
2055 return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
2056 PT_REGS_COUNT * sizeof(reg), -1);
2057}
2058
Anshuman Khandual04fcadc2016-07-28 10:57:35 +08002059static int gpr32_set_common(struct task_struct *target,
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08002060 const struct user_regset *regset,
2061 unsigned int pos, unsigned int count,
Simon Guo26183112016-09-11 21:44:13 +08002062 const void *kbuf, const void __user *ubuf,
2063 unsigned long *regs)
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08002064{
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08002065 const compat_ulong_t *k = kbuf;
2066 const compat_ulong_t __user *u = ubuf;
2067 compat_ulong_t reg;
2068
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08002069 pos /= sizeof(reg);
2070 count /= sizeof(reg);
2071
2072 if (kbuf)
2073 for (; count > 0 && pos < PT_MSR; --count)
2074 regs[pos++] = *k++;
2075 else
2076 for (; count > 0 && pos < PT_MSR; --count) {
2077 if (__get_user(reg, u++))
2078 return -EFAULT;
2079 regs[pos++] = reg;
2080 }
2081
2082
2083 if (count > 0 && pos == PT_MSR) {
2084 if (kbuf)
2085 reg = *k++;
2086 else if (__get_user(reg, u++))
2087 return -EFAULT;
2088 set_user_msr(target, reg);
2089 ++pos;
2090 --count;
2091 }
2092
Roland McGrathc2372eb2008-03-13 19:25:35 +11002093 if (kbuf) {
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08002094 for (; count > 0 && pos <= PT_MAX_PUT_REG; --count)
2095 regs[pos++] = *k++;
Roland McGrathc2372eb2008-03-13 19:25:35 +11002096 for (; count > 0 && pos < PT_TRAP; --count, ++pos)
2097 ++k;
2098 } else {
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08002099 for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) {
2100 if (__get_user(reg, u++))
2101 return -EFAULT;
2102 regs[pos++] = reg;
2103 }
Roland McGrathc2372eb2008-03-13 19:25:35 +11002104 for (; count > 0 && pos < PT_TRAP; --count, ++pos)
2105 if (__get_user(reg, u++))
2106 return -EFAULT;
2107 }
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08002108
2109 if (count > 0 && pos == PT_TRAP) {
2110 if (kbuf)
2111 reg = *k++;
2112 else if (__get_user(reg, u++))
2113 return -EFAULT;
2114 set_user_trap(target, reg);
2115 ++pos;
2116 --count;
2117 }
2118
2119 kbuf = k;
2120 ubuf = u;
2121 pos *= sizeof(reg);
2122 count *= sizeof(reg);
2123 return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
2124 (PT_TRAP + 1) * sizeof(reg), -1);
2125}
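/*
 * Rough picture of the 32-bit view built above, in units of compat_ulong_t
 * (informational only): indices 0..31 hold the low halves of gpr0..gpr31,
 * PT_MSR is routed through get_user_msr()/set_user_msr(), registers up to
 * PT_MAX_PUT_REG are writable, slots up to PT_TRAP are ignored on write
 * except PT_TRAP itself, which goes through set_user_trap().
 */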
2126
Anshuman Khandual25847fb2016-07-28 10:57:36 +08002127#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2128static int tm_cgpr32_get(struct task_struct *target,
2129 const struct user_regset *regset,
2130 unsigned int pos, unsigned int count,
2131 void *kbuf, void __user *ubuf)
2132{
Simon Guo26183112016-09-11 21:44:13 +08002133 return gpr32_get_common(target, regset, pos, count, kbuf, ubuf,
2134 &target->thread.ckpt_regs.gpr[0]);
Anshuman Khandual25847fb2016-07-28 10:57:36 +08002135}
2136
2137static int tm_cgpr32_set(struct task_struct *target,
2138 const struct user_regset *regset,
2139 unsigned int pos, unsigned int count,
2140 const void *kbuf, const void __user *ubuf)
2141{
Simon Guo26183112016-09-11 21:44:13 +08002142 return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
2143 &target->thread.ckpt_regs.gpr[0]);
Anshuman Khandual25847fb2016-07-28 10:57:36 +08002144}
2145#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
2146
Anshuman Khandual04fcadc2016-07-28 10:57:35 +08002147static int gpr32_get(struct task_struct *target,
2148 const struct user_regset *regset,
2149 unsigned int pos, unsigned int count,
2150 void *kbuf, void __user *ubuf)
2151{
Simon Guo26183112016-09-11 21:44:13 +08002152 int i;
2153
2154 if (target->thread.regs == NULL)
2155 return -EIO;
2156
2157 if (!FULL_REGS(target->thread.regs)) {
2158 /*
2159 * We have a partial register set.
2160 * Fill 14-31 with bogus values.
2161 */
2162 for (i = 14; i < 32; i++)
2163 target->thread.regs->gpr[i] = NV_REG_POISON;
2164 }
2165 return gpr32_get_common(target, regset, pos, count, kbuf, ubuf,
2166 &target->thread.regs->gpr[0]);
Anshuman Khandual04fcadc2016-07-28 10:57:35 +08002167}
2168
2169static int gpr32_set(struct task_struct *target,
2170 const struct user_regset *regset,
2171 unsigned int pos, unsigned int count,
2172 const void *kbuf, const void __user *ubuf)
2173{
Simon Guo26183112016-09-11 21:44:13 +08002174 if (target->thread.regs == NULL)
2175 return -EIO;
2176
2177 CHECK_FULL_REGS(target->thread.regs);
2178 return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
2179 &target->thread.regs->gpr[0]);
Anshuman Khandual04fcadc2016-07-28 10:57:35 +08002180}
2181
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08002182/*
2183 * These are the regset flavors matching the CONFIG_PPC32 native set.
2184 */
2185static const struct user_regset compat_regsets[] = {
2186 [REGSET_GPR] = {
2187 .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
2188 .size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
2189 .get = gpr32_get, .set = gpr32_set
2190 },
2191 [REGSET_FPR] = {
2192 .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
2193 .size = sizeof(double), .align = sizeof(double),
2194 .get = fpr_get, .set = fpr_set
2195 },
2196#ifdef CONFIG_ALTIVEC
2197 [REGSET_VMX] = {
2198 .core_note_type = NT_PPC_VMX, .n = 34,
2199 .size = sizeof(vector128), .align = sizeof(vector128),
2200 .active = vr_active, .get = vr_get, .set = vr_set
2201 },
2202#endif
2203#ifdef CONFIG_SPE
2204 [REGSET_SPE] = {
Roland McGrath24f1a842008-01-02 17:05:48 -08002205 .core_note_type = NT_PPC_SPE, .n = 35,
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08002206 .size = sizeof(u32), .align = sizeof(u32),
2207 .active = evr_active, .get = evr_get, .set = evr_set
2208 },
2209#endif
Anshuman Khandual25847fb2016-07-28 10:57:36 +08002210#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2211 [REGSET_TM_CGPR] = {
2212 .core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
2213 .size = sizeof(long), .align = sizeof(long),
2214 .active = tm_cgpr_active,
2215 .get = tm_cgpr32_get, .set = tm_cgpr32_set
2216 },
Anshuman Khandual19cbcbf2016-07-28 10:57:37 +08002217 [REGSET_TM_CFPR] = {
2218 .core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
2219 .size = sizeof(double), .align = sizeof(double),
2220 .active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
2221 },
Anshuman Khandual8c13f592016-07-28 10:57:38 +08002222 [REGSET_TM_CVMX] = {
2223 .core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
2224 .size = sizeof(vector128), .align = sizeof(vector128),
2225 .active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
2226 },
Anshuman Khandual9d3918f2016-07-28 10:57:39 +08002227 [REGSET_TM_CVSX] = {
2228 .core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
2229 .size = sizeof(double), .align = sizeof(double),
2230 .active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
2231 },
Anshuman Khandual08e1c012016-07-28 10:57:40 +08002232 [REGSET_TM_SPR] = {
2233 .core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
2234 .size = sizeof(u64), .align = sizeof(u64),
2235 .active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
2236 },
Anshuman Khandualc45dc902016-07-28 10:57:41 +08002237 [REGSET_TM_CTAR] = {
2238 .core_note_type = NT_PPC_TM_CTAR, .n = 1,
2239 .size = sizeof(u64), .align = sizeof(u64),
2240 .active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
2241 },
2242 [REGSET_TM_CPPR] = {
2243 .core_note_type = NT_PPC_TM_CPPR, .n = 1,
2244 .size = sizeof(u64), .align = sizeof(u64),
2245 .active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
2246 },
2247 [REGSET_TM_CDSCR] = {
2248 .core_note_type = NT_PPC_TM_CDSCR, .n = 1,
2249 .size = sizeof(u64), .align = sizeof(u64),
2250 .active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
2251 },
Anshuman Khandual25847fb2016-07-28 10:57:36 +08002252#endif
Anshuman Khandualfa439812016-07-28 10:57:42 +08002253#ifdef CONFIG_PPC64
2254 [REGSET_PPR] = {
2255 .core_note_type = NT_PPC_PPR, .n = 1,
2256 .size = sizeof(u64), .align = sizeof(u64),
2257 .get = ppr_get, .set = ppr_set
2258 },
2259 [REGSET_DSCR] = {
2260 .core_note_type = NT_PPC_DSCR, .n = 1,
2261 .size = sizeof(u64), .align = sizeof(u64),
2262 .get = dscr_get, .set = dscr_set
2263 },
2264#endif
2265#ifdef CONFIG_PPC_BOOK3S_64
2266 [REGSET_TAR] = {
2267 .core_note_type = NT_PPC_TAR, .n = 1,
2268 .size = sizeof(u64), .align = sizeof(u64),
2269 .get = tar_get, .set = tar_set
2270 },
Anshuman Khandualcf89d4e2016-07-28 10:57:43 +08002271 [REGSET_EBB] = {
2272 .core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
2273 .size = sizeof(u64), .align = sizeof(u64),
2274 .active = ebb_active, .get = ebb_get, .set = ebb_set
2275 },
Anshuman Khandualfa439812016-07-28 10:57:42 +08002276#endif
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08002277};
2278
2279static const struct user_regset_view user_ppc_compat_view = {
2280 .name = "ppc", .e_machine = EM_PPC, .ei_osabi = ELF_OSABI,
2281 .regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
2282};
2283#endif /* CONFIG_PPC64 */
2284
Roland McGrath80fdf472007-12-20 03:58:00 -08002285const struct user_regset_view *task_user_regset_view(struct task_struct *task)
2286{
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08002287#ifdef CONFIG_PPC64
2288 if (test_tsk_thread_flag(task, TIF_32BIT))
2289 return &user_ppc_compat_view;
2290#endif
Roland McGrath80fdf472007-12-20 03:58:00 -08002291 return &user_ppc_native_view;
2292}
2293
2294
Roland McGrath2a84b0d2008-01-30 13:30:51 +01002295void user_enable_single_step(struct task_struct *task)
Benjamin Herrenschmidt865418d2007-06-04 15:15:44 +10002296{
2297 struct pt_regs *regs = task->thread.regs;
2298
2299 if (regs != NULL) {
Dave Kleikamp172ae2e2010-02-08 11:50:57 +00002300#ifdef CONFIG_PPC_ADV_DEBUG_REGS
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302301 task->thread.debug.dbcr0 &= ~DBCR0_BT;
2302 task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
Benjamin Herrenschmidt865418d2007-06-04 15:15:44 +10002303 regs->msr |= MSR_DE;
2304#else
Roland McGrathec097c82009-05-28 21:26:38 +00002305 regs->msr &= ~MSR_BE;
Benjamin Herrenschmidt865418d2007-06-04 15:15:44 +10002306 regs->msr |= MSR_SE;
2307#endif
2308 }
2309 set_tsk_thread_flag(task, TIF_SINGLESTEP);
2310}
2311
Roland McGrathec097c82009-05-28 21:26:38 +00002312void user_enable_block_step(struct task_struct *task)
2313{
2314 struct pt_regs *regs = task->thread.regs;
2315
2316 if (regs != NULL) {
Dave Kleikamp172ae2e2010-02-08 11:50:57 +00002317#ifdef CONFIG_PPC_ADV_DEBUG_REGS
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302318 task->thread.debug.dbcr0 &= ~DBCR0_IC;
2319 task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT;
Roland McGrathec097c82009-05-28 21:26:38 +00002320 regs->msr |= MSR_DE;
2321#else
2322 regs->msr &= ~MSR_SE;
2323 regs->msr |= MSR_BE;
2324#endif
2325 }
2326 set_tsk_thread_flag(task, TIF_SINGLESTEP);
2327}
2328
Roland McGrath2a84b0d2008-01-30 13:30:51 +01002329void user_disable_single_step(struct task_struct *task)
Benjamin Herrenschmidt865418d2007-06-04 15:15:44 +10002330{
2331 struct pt_regs *regs = task->thread.regs;
2332
2333 if (regs != NULL) {
Dave Kleikamp172ae2e2010-02-08 11:50:57 +00002334#ifdef CONFIG_PPC_ADV_DEBUG_REGS
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002335 /*
2336 * The logic to disable single stepping should be as
2337 * simple as turning off the Instruction Complete flag.
2338 * And, after doing so, if all debug flags are off, turn
2339 * off DBCR0(IDM) and MSR(DE).  -- Torez
2340 */
James Yang682775b2013-07-05 14:49:43 -05002341 task->thread.debug.dbcr0 &= ~(DBCR0_IC|DBCR0_BT);
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002342 /*
2343 * Test to see if any of the DBCR_ACTIVE_EVENTS bits are set.
2344 */
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302345 if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
2346 task->thread.debug.dbcr1)) {
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002347 /*
2348 * All debug events were off.....
2349 */
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302350 task->thread.debug.dbcr0 &= ~DBCR0_IDM;
Dave Kleikamp28477fb2009-07-08 13:46:18 +00002351 regs->msr &= ~MSR_DE;
2352 }
Benjamin Herrenschmidt865418d2007-06-04 15:15:44 +10002353#else
Roland McGrathec097c82009-05-28 21:26:38 +00002354 regs->msr &= ~(MSR_SE | MSR_BE);
Benjamin Herrenschmidt865418d2007-06-04 15:15:44 +10002355#endif
2356 }
2357 clear_tsk_thread_flag(task, TIF_SINGLESTEP);
2358}
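/*
 * Userspace side, for illustration (pid is a stopped tracee, error
 * handling omitted): PTRACE_SINGLESTEP resumes the child with
 * user_enable_single_step() applied, and the child stops again with
 * SIGTRAP after one instruction.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *
 *	int status;
 *	ptrace(PTRACE_SINGLESTEP, pid, NULL, NULL);
 *	waitpid(pid, &status, 0);
 */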
2359
K.Prasad5aae8a52010-06-15 11:35:19 +05302360#ifdef CONFIG_HAVE_HW_BREAKPOINT
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02002361void ptrace_triggered(struct perf_event *bp,
K.Prasad5aae8a52010-06-15 11:35:19 +05302362 struct perf_sample_data *data, struct pt_regs *regs)
2363{
2364 struct perf_event_attr attr;
2365
2366 /*
2367 * Disable the breakpoint request here since ptrace has defined a
2368 * one-shot behaviour for breakpoint exceptions in PPC64.
2369 * The SIGTRAP signal is generated automatically for us in do_dabr().
2370 * We don't have to do anything about that here
2371 */
2372 attr = bp->attr;
2373 attr.disabled = true;
2374 modify_user_hw_breakpoint(bp, &attr);
2375}
2376#endif /* CONFIG_HAVE_HW_BREAKPOINT */
2377
Anton Blancharde51df2c2014-08-20 08:55:18 +10002378static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
Benjamin Herrenschmidtabd06502007-06-04 15:15:47 +10002379 unsigned long data)
2380{
K.Prasad5aae8a52010-06-15 11:35:19 +05302381#ifdef CONFIG_HAVE_HW_BREAKPOINT
2382 int ret;
2383 struct thread_struct *thread = &(task->thread);
2384 struct perf_event *bp;
2385 struct perf_event_attr attr;
2386#endif /* CONFIG_HAVE_HW_BREAKPOINT */
Michael Neuling9422de32012-12-20 14:06:44 +00002387#ifndef CONFIG_PPC_ADV_DEBUG_REGS
Michael Neuling85ce9a52018-03-27 15:37:18 +11002388 bool set_bp = true;
Michael Neuling9422de32012-12-20 14:06:44 +00002389 struct arch_hw_breakpoint hw_brk;
2390#endif
K.Prasad5aae8a52010-06-15 11:35:19 +05302391
Luis Machadod6a61bf2008-07-24 02:10:41 +10002392 /* For ppc64 we support one DABR and no IABRs at the moment.
2393 * For embedded processors we support one DAC and no IACs at the
2394 * moment.
2395 */
Benjamin Herrenschmidtabd06502007-06-04 15:15:47 +10002396 if (addr > 0)
2397 return -EINVAL;
2398
Kumar Gala2325f0a2008-07-26 05:27:33 +10002399 /* The bottom 3 bits in dabr are flags */
Benjamin Herrenschmidtabd06502007-06-04 15:15:47 +10002400 if ((data & ~0x7UL) >= TASK_SIZE)
2401 return -EIO;
2402
Dave Kleikamp172ae2e2010-02-08 11:50:57 +00002403#ifndef CONFIG_PPC_ADV_DEBUG_REGS
Luis Machadod6a61bf2008-07-24 02:10:41 +10002404 /* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
2405 * It was assumed, on previous implementations, that 3 bits were
2406 * passed together with the data address, fitting the design of the
2407 * DABR register, as follows:
2408 *
2409 * bit 0: Read flag
2410 * bit 1: Write flag
2411 * bit 2: Breakpoint translation
2412 *
2413 * Thus, we use them here as so.
2414 */
2415
2416 /* Ensure breakpoint translation bit is set */
Michael Neuling9422de32012-12-20 14:06:44 +00002417 if (data && !(data & HW_BRK_TYPE_TRANSLATE))
Benjamin Herrenschmidtabd06502007-06-04 15:15:47 +10002418 return -EIO;
Michael Neuling9422de32012-12-20 14:06:44 +00002419 hw_brk.address = data & (~HW_BRK_TYPE_DABR);
2420 hw_brk.type = (data & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL;
2421 hw_brk.len = 8;
Michael Neuling85ce9a52018-03-27 15:37:18 +11002422 set_bp = (data) && (hw_brk.type & HW_BRK_TYPE_RDWR);
K.Prasad5aae8a52010-06-15 11:35:19 +05302423#ifdef CONFIG_HAVE_HW_BREAKPOINT
2424 bp = thread->ptrace_bps[0];
Michael Neuling85ce9a52018-03-27 15:37:18 +11002425 if (!set_bp) {
K.Prasad5aae8a52010-06-15 11:35:19 +05302426 if (bp) {
2427 unregister_hw_breakpoint(bp);
2428 thread->ptrace_bps[0] = NULL;
2429 }
2430 return 0;
2431 }
2432 if (bp) {
2433 attr = bp->attr;
Michael Neuling9422de32012-12-20 14:06:44 +00002434 attr.bp_addr = hw_brk.address;
2435 arch_bp_generic_fields(hw_brk.type, &attr.bp_type);
Aravinda Prasada53fd612012-11-04 22:15:28 +00002436
2437 /* Enable breakpoint */
2438 attr.disabled = false;
2439
K.Prasad5aae8a52010-06-15 11:35:19 +05302440 ret = modify_user_hw_breakpoint(bp, &attr);
Frederic Weisbecker925f83c2011-05-06 01:53:18 +02002441 if (ret) {
K.Prasad5aae8a52010-06-15 11:35:19 +05302442 return ret;
Frederic Weisbecker925f83c2011-05-06 01:53:18 +02002443 }
K.Prasad5aae8a52010-06-15 11:35:19 +05302444 thread->ptrace_bps[0] = bp;
Michael Neuling9422de32012-12-20 14:06:44 +00002445 thread->hw_brk = hw_brk;
K.Prasad5aae8a52010-06-15 11:35:19 +05302446 return 0;
2447 }
2448
2449 /* Create a new breakpoint request if one doesn't exist already */
2450 hw_breakpoint_init(&attr);
Michael Neuling9422de32012-12-20 14:06:44 +00002451 attr.bp_addr = hw_brk.address;
Michael Neuling4f7c06e2018-05-17 15:37:15 +10002452 attr.bp_len = 8;
Michael Neuling9422de32012-12-20 14:06:44 +00002453 arch_bp_generic_fields(hw_brk.type,
2454 &attr.bp_type);
K.Prasad5aae8a52010-06-15 11:35:19 +05302455
2456 thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
Avi Kivity4dc0da82011-06-29 18:42:35 +03002457 ptrace_triggered, NULL, task);
K.Prasad5aae8a52010-06-15 11:35:19 +05302458 if (IS_ERR(bp)) {
2459 thread->ptrace_bps[0] = NULL;
2460 return PTR_ERR(bp);
2461 }
2462
Michael Neuling85ce9a52018-03-27 15:37:18 +11002463#else /* !CONFIG_HAVE_HW_BREAKPOINT */
2464 if (set_bp && (!ppc_breakpoint_available()))
2465 return -ENODEV;
K.Prasad5aae8a52010-06-15 11:35:19 +05302466#endif /* CONFIG_HAVE_HW_BREAKPOINT */
Michael Neuling9422de32012-12-20 14:06:44 +00002467 task->thread.hw_brk = hw_brk;
Dave Kleikamp172ae2e2010-02-08 11:50:57 +00002468#else /* CONFIG_PPC_ADV_DEBUG_REGS */
Luis Machadod6a61bf2008-07-24 02:10:41 +10002469 /* As described above, it was assumed 3 bits were passed with the data
2470 * address, but we will assume only the mode bits will be passed
2471 * so as not to cause alignment restrictions for DAC-based processors.
2472 */
2473
2474 /* DAC's hold the whole address without any mode flags */
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302475 task->thread.debug.dac1 = data & ~0x3UL;
Luis Machadod6a61bf2008-07-24 02:10:41 +10002476
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302477 if (task->thread.debug.dac1 == 0) {
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002478 dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302479 if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
2480 task->thread.debug.dbcr1)) {
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002481 task->thread.regs->msr &= ~MSR_DE;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302482 task->thread.debug.dbcr0 &= ~DBCR0_IDM;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002483 }
Luis Machadod6a61bf2008-07-24 02:10:41 +10002484 return 0;
2485 }
2486
2487 /* Read or Write bits must be set */
2488
2489 if (!(data & 0x3UL))
2490 return -EINVAL;
2491
2492 /* Set the Internal Debugging flag (IDM bit 1) for the DBCR0
2493 register */
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302494 task->thread.debug.dbcr0 |= DBCR0_IDM;
Luis Machadod6a61bf2008-07-24 02:10:41 +10002495
2496 /* Check for write and read flags and set DBCR0
2497 accordingly */
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002498 dbcr_dac(task) &= ~(DBCR_DAC1R|DBCR_DAC1W);
Luis Machadod6a61bf2008-07-24 02:10:41 +10002499 if (data & 0x1UL)
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002500 dbcr_dac(task) |= DBCR_DAC1R;
Luis Machadod6a61bf2008-07-24 02:10:41 +10002501 if (data & 0x2UL)
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002502 dbcr_dac(task) |= DBCR_DAC1W;
Luis Machadod6a61bf2008-07-24 02:10:41 +10002503 task->thread.regs->msr |= MSR_DE;
Dave Kleikamp172ae2e2010-02-08 11:50:57 +00002504#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
Benjamin Herrenschmidtabd06502007-06-04 15:15:47 +10002505 return 0;
2506}
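/*
 * Illustrative tracer usage of the legacy interface above (watch_addr and
 * pid are placeholders, error handling omitted).  On DABR-style hardware
 * the low three bits carry the read/write/translate flags described in the
 * comment above, and the translate bit must be set:
 *
 *	#include <sys/ptrace.h>
 *	#include <asm/ptrace.h>		// PTRACE_SET_DEBUGREG
 *
 *	unsigned long dabr = (watch_addr & ~7UL) | 0x7;	// R | W | translate
 *	ptrace(PTRACE_SET_DEBUGREG, pid, 0, dabr);
 *
 * Passing data == 0 clears the breakpoint again.
 */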
Benjamin Herrenschmidtabd06502007-06-04 15:15:47 +10002507
Benjamin Herrenschmidt865418d2007-06-04 15:15:44 +10002508/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002509 * Called by kernel/ptrace.c when detaching..
2510 *
2511 * Make sure single step bits etc are not set.
2512 */
2513void ptrace_disable(struct task_struct *child)
2514{
2515 /* make sure the single step bit is not set. */
Roland McGrath2a84b0d2008-01-30 13:30:51 +01002516 user_disable_single_step(child);
Breno Leitao5521eb42018-09-20 13:45:06 -03002517 clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002518}
2519
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002520#ifdef CONFIG_PPC_ADV_DEBUG_REGS
Michael Neuling84295df2012-10-28 15:13:16 +00002521static long set_instruction_bp(struct task_struct *child,
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002522 struct ppc_hw_breakpoint *bp_info)
2523{
2524 int slot;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302525 int slot1_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC1) != 0);
2526 int slot2_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC2) != 0);
2527 int slot3_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC3) != 0);
2528 int slot4_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC4) != 0);
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002529
2530 if (dbcr_iac_range(child) & DBCR_IAC12MODE)
2531 slot2_in_use = 1;
2532 if (dbcr_iac_range(child) & DBCR_IAC34MODE)
2533 slot4_in_use = 1;
2534
2535 if (bp_info->addr >= TASK_SIZE)
2536 return -EIO;
2537
2538 if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {
2539
2540 /* Make sure range is valid. */
2541 if (bp_info->addr2 >= TASK_SIZE)
2542 return -EIO;
2543
2544 /* We need a pair of IAC registers */
2545 if ((!slot1_in_use) && (!slot2_in_use)) {
2546 slot = 1;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302547 child->thread.debug.iac1 = bp_info->addr;
2548 child->thread.debug.iac2 = bp_info->addr2;
2549 child->thread.debug.dbcr0 |= DBCR0_IAC1;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002550 if (bp_info->addr_mode ==
2551 PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
2552 dbcr_iac_range(child) |= DBCR_IAC12X;
2553 else
2554 dbcr_iac_range(child) |= DBCR_IAC12I;
2555#if CONFIG_PPC_ADV_DEBUG_IACS > 2
2556 } else if ((!slot3_in_use) && (!slot4_in_use)) {
2557 slot = 3;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302558 child->thread.debug.iac3 = bp_info->addr;
2559 child->thread.debug.iac4 = bp_info->addr2;
2560 child->thread.debug.dbcr0 |= DBCR0_IAC3;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002561 if (bp_info->addr_mode ==
2562 PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
2563 dbcr_iac_range(child) |= DBCR_IAC34X;
2564 else
2565 dbcr_iac_range(child) |= DBCR_IAC34I;
2566#endif
2567 } else
2568 return -ENOSPC;
2569 } else {
2570 /* We only need one. If possible leave a pair free in
2571 * case a range is needed later.
2572 */
2573 if (!slot1_in_use) {
2574 /*
2575 * Don't use iac1 if iac1-iac2 are free and either
2576 * iac3 or iac4 (but not both) are free
2577 */
2578 if (slot2_in_use || (slot3_in_use == slot4_in_use)) {
2579 slot = 1;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302580 child->thread.debug.iac1 = bp_info->addr;
2581 child->thread.debug.dbcr0 |= DBCR0_IAC1;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002582 goto out;
2583 }
2584 }
2585 if (!slot2_in_use) {
2586 slot = 2;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302587 child->thread.debug.iac2 = bp_info->addr;
2588 child->thread.debug.dbcr0 |= DBCR0_IAC2;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002589#if CONFIG_PPC_ADV_DEBUG_IACS > 2
2590 } else if (!slot3_in_use) {
2591 slot = 3;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302592 child->thread.debug.iac3 = bp_info->addr;
2593 child->thread.debug.dbcr0 |= DBCR0_IAC3;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002594 } else if (!slot4_in_use) {
2595 slot = 4;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302596 child->thread.debug.iac4 = bp_info->addr;
2597 child->thread.debug.dbcr0 |= DBCR0_IAC4;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002598#endif
2599 } else
2600 return -ENOSPC;
2601 }
2602out:
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302603 child->thread.debug.dbcr0 |= DBCR0_IDM;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002604 child->thread.regs->msr |= MSR_DE;
2605
2606 return slot;
2607}
2608
2609static int del_instruction_bp(struct task_struct *child, int slot)
2610{
2611 switch (slot) {
2612 case 1:
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302613 if ((child->thread.debug.dbcr0 & DBCR0_IAC1) == 0)
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002614 return -ENOENT;
2615
2616 if (dbcr_iac_range(child) & DBCR_IAC12MODE) {
2617 /* address range - clear slots 1 & 2 */
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302618 child->thread.debug.iac2 = 0;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002619 dbcr_iac_range(child) &= ~DBCR_IAC12MODE;
2620 }
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302621 child->thread.debug.iac1 = 0;
2622 child->thread.debug.dbcr0 &= ~DBCR0_IAC1;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002623 break;
2624 case 2:
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302625 if ((child->thread.debug.dbcr0 & DBCR0_IAC2) == 0)
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002626 return -ENOENT;
2627
2628 if (dbcr_iac_range(child) & DBCR_IAC12MODE)
2629 /* used in a range */
2630 return -EINVAL;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302631 child->thread.debug.iac2 = 0;
2632 child->thread.debug.dbcr0 &= ~DBCR0_IAC2;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002633 break;
2634#if CONFIG_PPC_ADV_DEBUG_IACS > 2
2635 case 3:
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302636 if ((child->thread.debug.dbcr0 & DBCR0_IAC3) == 0)
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002637 return -ENOENT;
2638
2639 if (dbcr_iac_range(child) & DBCR_IAC34MODE) {
2640 /* address range - clear slots 3 & 4 */
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302641 child->thread.debug.iac4 = 0;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002642 dbcr_iac_range(child) &= ~DBCR_IAC34MODE;
2643 }
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302644 child->thread.debug.iac3 = 0;
2645 child->thread.debug.dbcr0 &= ~DBCR0_IAC3;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002646 break;
2647 case 4:
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302648 if ((child->thread.debug.dbcr0 & DBCR0_IAC4) == 0)
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002649 return -ENOENT;
2650
2651 if (dbcr_iac_range(child) & DBCR_IAC34MODE)
2652 /* Used in a range */
2653 return -EINVAL;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302654 child->thread.debug.iac4 = 0;
2655 child->thread.debug.dbcr0 &= ~DBCR0_IAC4;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002656 break;
2657#endif
2658 default:
2659 return -EINVAL;
2660 }
2661 return 0;
2662}
2663
2664static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
2665{
2666 int byte_enable =
2667 (bp_info->condition_mode >> PPC_BREAKPOINT_CONDITION_BE_SHIFT)
2668 & 0xf;
2669 int condition_mode =
2670 bp_info->condition_mode & PPC_BREAKPOINT_CONDITION_MODE;
2671 int slot;
2672
2673 if (byte_enable && (condition_mode == 0))
2674 return -EINVAL;
2675
2676 if (bp_info->addr >= TASK_SIZE)
2677 return -EIO;
2678
2679 if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) {
2680 slot = 1;
2681 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
2682 dbcr_dac(child) |= DBCR_DAC1R;
2683 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
2684 dbcr_dac(child) |= DBCR_DAC1W;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302685 child->thread.debug.dac1 = (unsigned long)bp_info->addr;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002686#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2687 if (byte_enable) {
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302688 child->thread.debug.dvc1 =
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002689 (unsigned long)bp_info->condition_value;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302690 child->thread.debug.dbcr2 |=
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002691 ((byte_enable << DBCR2_DVC1BE_SHIFT) |
2692 (condition_mode << DBCR2_DVC1M_SHIFT));
2693 }
2694#endif
2695#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302696 } else if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002697 /* Both dac1 and dac2 are part of a range */
2698 return -ENOSPC;
2699#endif
2700 } else if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) {
2701 slot = 2;
2702 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
2703 dbcr_dac(child) |= DBCR_DAC2R;
2704 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
2705 dbcr_dac(child) |= DBCR_DAC2W;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302706 child->thread.debug.dac2 = (unsigned long)bp_info->addr;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002707#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2708 if (byte_enable) {
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302709 child->thread.debug.dvc2 =
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002710 (unsigned long)bp_info->condition_value;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302711 child->thread.debug.dbcr2 |=
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002712 ((byte_enable << DBCR2_DVC2BE_SHIFT) |
2713 (condition_mode << DBCR2_DVC2M_SHIFT));
2714 }
2715#endif
2716 } else
2717 return -ENOSPC;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302718 child->thread.debug.dbcr0 |= DBCR0_IDM;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002719 child->thread.regs->msr |= MSR_DE;
2720
2721 return slot + 4;
2722}
2723
2724static int del_dac(struct task_struct *child, int slot)
2725{
2726 if (slot == 1) {
Dave Kleikamp30124d12010-03-01 04:57:34 +00002727 if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0)
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002728 return -ENOENT;
2729
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302730 child->thread.debug.dac1 = 0;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002731 dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W);
2732#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302733 if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
2734 child->thread.debug.dac2 = 0;
2735 child->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002736 }
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302737 child->thread.debug.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE);
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002738#endif
2739#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302740 child->thread.debug.dvc1 = 0;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002741#endif
2742 } else if (slot == 2) {
Dave Kleikamp30124d12010-03-01 04:57:34 +00002743 if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0)
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002744 return -ENOENT;
2745
2746#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302747 if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE)
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002748 /* Part of a range */
2749 return -EINVAL;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302750 child->thread.debug.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE);
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002751#endif
2752#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302753 child->thread.debug.dvc2 = 0;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002754#endif
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302755 child->thread.debug.dac2 = 0;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002756 dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W);
2757 } else
2758 return -EINVAL;
2759
2760 return 0;
2761}
2762#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
2763
2764#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2765static int set_dac_range(struct task_struct *child,
2766 struct ppc_hw_breakpoint *bp_info)
2767{
2768 int mode = bp_info->addr_mode & PPC_BREAKPOINT_MODE_MASK;
2769
2770 /* We don't allow range watchpoints to be used with DVC */
2771 if (bp_info->condition_mode)
2772 return -EINVAL;
2773
2774 /*
2775 * Best effort to verify the address range. The user/supervisor bits
2776 * prevent trapping in kernel space, but let's fail on an obvious bad
2777 * range. The simple test on the mask is not fool-proof, and any
2778 * exclusive range will spill over into kernel space.
2779 */
2780 if (bp_info->addr >= TASK_SIZE)
2781 return -EIO;
2782 if (mode == PPC_BREAKPOINT_MODE_MASK) {
2783 /*
2784 * dac2 is a bitmask. Don't allow a mask that makes a
2785 * kernel space address from a valid dac1 value
2786 */
2787 if (~((unsigned long)bp_info->addr2) >= TASK_SIZE)
2788 return -EIO;
2789 } else {
2790 /*
2791 * For range breakpoints, addr2 must also be a valid address
2792 */
2793 if (bp_info->addr2 >= TASK_SIZE)
2794 return -EIO;
2795 }
2796
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302797 if (child->thread.debug.dbcr0 &
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002798 (DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W))
2799 return -ENOSPC;
2800
2801 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302802 child->thread.debug.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002803 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302804 child->thread.debug.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
2805 child->thread.debug.dac1 = bp_info->addr;
2806 child->thread.debug.dac2 = bp_info->addr2;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002807 if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302808 child->thread.debug.dbcr2 |= DBCR2_DAC12M;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002809 else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302810 child->thread.debug.dbcr2 |= DBCR2_DAC12MX;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002811 else /* PPC_BREAKPOINT_MODE_MASK */
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302812 child->thread.debug.dbcr2 |= DBCR2_DAC12MM;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002813 child->thread.regs->msr |= MSR_DE;
2814
2815 return 5;
2816}
2817#endif /* CONFIG_PPC_ADV_DEBUG_DAC_RANGE */
2818
Dave Kleikamp3162d922010-02-08 11:51:05 +00002819static long ppc_set_hwdebug(struct task_struct *child,
2820 struct ppc_hw_breakpoint *bp_info)
2821{
K.Prasad6c7a2852012-10-28 15:13:15 +00002822#ifdef CONFIG_HAVE_HW_BREAKPOINT
2823 int len = 0;
2824 struct thread_struct *thread = &(child->thread);
2825 struct perf_event *bp;
2826 struct perf_event_attr attr;
2827#endif /* CONFIG_HAVE_HW_BREAKPOINT */
Andreas Schwab4dfbf292010-11-27 14:24:53 +00002828#ifndef CONFIG_PPC_ADV_DEBUG_REGS
Michael Neuling9422de32012-12-20 14:06:44 +00002829 struct arch_hw_breakpoint brk;
Andreas Schwab4dfbf292010-11-27 14:24:53 +00002830#endif
2831
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002832 if (bp_info->version != 1)
2833 return -ENOTSUPP;
2834#ifdef CONFIG_PPC_ADV_DEBUG_REGS
Dave Kleikamp3162d922010-02-08 11:51:05 +00002835 /*
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002836 * Check for invalid flags and combinations
2837 */
2838 if ((bp_info->trigger_type == 0) ||
2839 (bp_info->trigger_type & ~(PPC_BREAKPOINT_TRIGGER_EXECUTE |
2840 PPC_BREAKPOINT_TRIGGER_RW)) ||
2841 (bp_info->addr_mode & ~PPC_BREAKPOINT_MODE_MASK) ||
2842 (bp_info->condition_mode &
2843 ~(PPC_BREAKPOINT_CONDITION_MODE |
2844 PPC_BREAKPOINT_CONDITION_BE_ALL)))
2845 return -EINVAL;
2846#if CONFIG_PPC_ADV_DEBUG_DVCS == 0
2847 if (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
2848 return -EINVAL;
2849#endif
2850
2851 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_EXECUTE) {
2852 if ((bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_EXECUTE) ||
2853 (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE))
2854 return -EINVAL;
Michael Neuling84295df2012-10-28 15:13:16 +00002855 return set_instruction_bp(child, bp_info);
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002856 }
2857 if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
2858 return set_dac(child, bp_info);
2859
2860#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2861 return set_dac_range(child, bp_info);
2862#else
2863 return -EINVAL;
2864#endif
2865#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
2866 /*
2867 * We only support one data breakpoint
Dave Kleikamp3162d922010-02-08 11:51:05 +00002868 */
Andreas Schwab4dfbf292010-11-27 14:24:53 +00002869 if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 ||
2870 (bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 ||
Andreas Schwab4dfbf292010-11-27 14:24:53 +00002871 bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
Dave Kleikamp3162d922010-02-08 11:51:05 +00002872 return -EINVAL;
2873
Dave Kleikamp3162d922010-02-08 11:51:05 +00002874 if ((unsigned long)bp_info->addr >= TASK_SIZE)
2875 return -EIO;
2876
Michael Neuling9422de32012-12-20 14:06:44 +00002877 brk.address = bp_info->addr & ~7UL;
2878 brk.type = HW_BRK_TYPE_TRANSLATE;
Michael Neuling2bb78ef2013-03-11 16:42:49 +00002879 brk.len = 8;
Andreas Schwab4dfbf292010-11-27 14:24:53 +00002880 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
Michael Neuling9422de32012-12-20 14:06:44 +00002881 brk.type |= HW_BRK_TYPE_READ;
Andreas Schwab4dfbf292010-11-27 14:24:53 +00002882 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
Michael Neuling9422de32012-12-20 14:06:44 +00002883 brk.type |= HW_BRK_TYPE_WRITE;
K.Prasad6c7a2852012-10-28 15:13:15 +00002884#ifdef CONFIG_HAVE_HW_BREAKPOINT
K.Prasad6c7a2852012-10-28 15:13:15 +00002885 /*
2886 * Check if the request is for 'range' breakpoints. We can
2887 * support it if range < 8 bytes.
2888 */
Oleg Nesterov6961ed92013-07-08 16:00:49 -07002889 if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
K.Prasad6c7a2852012-10-28 15:13:15 +00002890 len = bp_info->addr2 - bp_info->addr;
Oleg Nesterov6961ed92013-07-08 16:00:49 -07002891 else if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
Michael Neulingb0b0aa92013-06-24 15:47:22 +10002892 len = 1;
Oleg Nesterov6961ed92013-07-08 16:00:49 -07002893 else
K.Prasad6c7a2852012-10-28 15:13:15 +00002894 return -EINVAL;
K.Prasad6c7a2852012-10-28 15:13:15 +00002895 bp = thread->ptrace_bps[0];
Oleg Nesterov6961ed92013-07-08 16:00:49 -07002896 if (bp)
K.Prasad6c7a2852012-10-28 15:13:15 +00002897 return -ENOSPC;
K.Prasad6c7a2852012-10-28 15:13:15 +00002898
2899 /* Create a new breakpoint request if one doesn't exist already */
2900 hw_breakpoint_init(&attr);
2901 attr.bp_addr = (unsigned long)bp_info->addr & ~HW_BREAKPOINT_ALIGN;
2902 attr.bp_len = len;
Michael Neuling9422de32012-12-20 14:06:44 +00002903 arch_bp_generic_fields(brk.type, &attr.bp_type);
K.Prasad6c7a2852012-10-28 15:13:15 +00002904
2905 thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
2906 ptrace_triggered, NULL, child);
2907 if (IS_ERR(bp)) {
2908 thread->ptrace_bps[0] = NULL;
K.Prasad6c7a2852012-10-28 15:13:15 +00002909 return PTR_ERR(bp);
2910 }
2911
K.Prasad6c7a2852012-10-28 15:13:15 +00002912 return 1;
2913#endif /* CONFIG_HAVE_HW_BREAKPOINT */
2914
2915 if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT)
2916 return -EINVAL;
2917
Michael Neuling9422de32012-12-20 14:06:44 +00002918 if (child->thread.hw_brk.address)
K.Prasad6c7a2852012-10-28 15:13:15 +00002919 return -ENOSPC;
Andreas Schwab4dfbf292010-11-27 14:24:53 +00002920
Michael Neuling85ce9a52018-03-27 15:37:18 +11002921 if (!ppc_breakpoint_available())
2922 return -ENODEV;
2923
Michael Neuling9422de32012-12-20 14:06:44 +00002924 child->thread.hw_brk = brk;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002925
Dave Kleikamp3162d922010-02-08 11:51:05 +00002926 return 1;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002927#endif /* !CONFIG_PPC_ADV_DEBUG_DVCS */
Dave Kleikamp3162d922010-02-08 11:51:05 +00002928}
2929
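/*
 * Remove a breakpoint previously installed via PPC_PTRACE_SETHWDEBUG.
 * The 'data' argument is the handle returned by ppc_set_hwdebug(): on
 * CONFIG_PPC_ADV_DEBUG_REGS cores, handles 1-4 select an instruction
 * breakpoint slot and larger handles select a DAC slot (handle - 4);
 * otherwise only a single data breakpoint exists, so the only valid
 * handle is 1.
 */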
static long ppc_del_hwdebug(struct task_struct *child, long data)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	int ret = 0;
	struct thread_struct *thread = &(child->thread);
	struct perf_event *bp;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	int rc;

	if (data <= 4)
		rc = del_instruction_bp(child, (int)data);
	else
		rc = del_dac(child, (int)data - 4);

	if (!rc) {
		if (!DBCR_ACTIVE_EVENTS(child->thread.debug.dbcr0,
					child->thread.debug.dbcr1)) {
			child->thread.debug.dbcr0 &= ~DBCR0_IDM;
			child->thread.regs->msr &= ~MSR_DE;
		}
	}
	return rc;
#else
	if (data != 1)
		return -EINVAL;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	bp = thread->ptrace_bps[0];
	if (bp) {
		unregister_hw_breakpoint(bp);
		thread->ptrace_bps[0] = NULL;
	} else
		ret = -ENOENT;
	return ret;
#else /* CONFIG_HAVE_HW_BREAKPOINT */
	if (child->thread.hw_brk.address == 0)
		return -ENOENT;

	child->thread.hw_brk.address = 0;
	child->thread.hw_brk.type = 0;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

	return 0;
#endif
}
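
/*
 * Roughly, the PPC_PTRACE_GETHWDBGINFO / SETHWDEBUG / DELHWDEBUG requests
 * handled below in arch_ptrace() are driven from userspace like the sketch
 * here (illustrative only: 'pid' is a stopped tracee, 'watch_addr' is the
 * address to watch, error handling omitted):
 *
 *	struct ppc_debug_info info;
 *	struct ppc_hw_breakpoint bp = {
 *		.version        = 1,
 *		.trigger_type   = PPC_BREAKPOINT_TRIGGER_WRITE,
 *		.addr_mode      = PPC_BREAKPOINT_MODE_EXACT,
 *		.condition_mode = PPC_BREAKPOINT_CONDITION_NONE,
 *		.addr           = (__u64)watch_addr,
 *	};
 *	long handle;
 *
 *	ptrace(PPC_PTRACE_GETHWDBGINFO, pid, NULL, &info);
 *	handle = ptrace(PPC_PTRACE_SETHWDEBUG, pid, NULL, &bp);
 *	...
 *	ptrace(PPC_PTRACE_DELHWDEBUG, pid, NULL, handle);
 */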

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret = -EPERM;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = datavp;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long index, tmp;

		ret = -EIO;
		/* convert to index and check */
#ifdef CONFIG_PPC32
		index = addr >> 2;
		if ((addr & 3) || (index > PT_FPSCR)
		    || (child->thread.regs == NULL))
#else
		index = addr >> 3;
		if ((addr & 7) || (index > PT_FPSCR))
#endif
			break;

		CHECK_FULL_REGS(child->thread.regs);
		if (index < PT_FPR0) {
			ret = ptrace_get_reg(child, (int) index, &tmp);
			if (ret)
				break;
		} else {
			unsigned int fpidx = index - PT_FPR0;

			flush_fp_to_thread(child);
			if (fpidx < (PT_FPSCR - PT_FPR0))
				memcpy(&tmp, &child->thread.TS_FPR(fpidx),
				       sizeof(long));
			else
				tmp = child->thread.fp_state.fpscr;
		}
		ret = put_user(tmp, datalp);
		break;
	}

	/* write the word at location addr in the USER area */
	case PTRACE_POKEUSR: {
		unsigned long index;

		ret = -EIO;
		/* convert to index and check */
#ifdef CONFIG_PPC32
		index = addr >> 2;
		if ((addr & 3) || (index > PT_FPSCR)
		    || (child->thread.regs == NULL))
#else
		index = addr >> 3;
		if ((addr & 7) || (index > PT_FPSCR))
#endif
			break;

		CHECK_FULL_REGS(child->thread.regs);
		if (index < PT_FPR0) {
			ret = ptrace_put_reg(child, index, data);
		} else {
			unsigned int fpidx = index - PT_FPR0;

			flush_fp_to_thread(child);
			if (fpidx < (PT_FPSCR - PT_FPR0))
				memcpy(&child->thread.TS_FPR(fpidx), &data,
				       sizeof(long));
			else
				child->thread.fp_state.fpscr = data;
			ret = 0;
		}
		break;
	}
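
	/*
	 * For PTRACE_PEEKUSR/POKEUSR above, 'addr' is a byte offset into the
	 * pseudo USER area: the PT_* register index from <asm/ptrace.h>
	 * scaled by sizeof(long). A rough userspace example (glibc's ptrace()
	 * returns the peeked value directly; 'pid' is a stopped tracee,
	 * error handling omitted):
	 *
	 *	long sp = ptrace(PTRACE_PEEKUSER, pid,
	 *			 PT_R1 * sizeof(long), NULL);
	 */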

	case PPC_PTRACE_GETHWDBGINFO: {
		struct ppc_debug_info dbginfo;

		dbginfo.version = 1;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		dbginfo.num_instruction_bps = CONFIG_PPC_ADV_DEBUG_IACS;
		dbginfo.num_data_bps = CONFIG_PPC_ADV_DEBUG_DACS;
		dbginfo.num_condition_regs = CONFIG_PPC_ADV_DEBUG_DVCS;
		dbginfo.data_bp_alignment = 4;
		dbginfo.sizeof_condition = 4;
		dbginfo.features = PPC_DEBUG_FEATURE_INSN_BP_RANGE |
				   PPC_DEBUG_FEATURE_INSN_BP_MASK;
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		dbginfo.features |=
			   PPC_DEBUG_FEATURE_DATA_BP_RANGE |
			   PPC_DEBUG_FEATURE_DATA_BP_MASK;
#endif
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
		dbginfo.num_instruction_bps = 0;
		if (ppc_breakpoint_available())
			dbginfo.num_data_bps = 1;
		else
			dbginfo.num_data_bps = 0;
		dbginfo.num_condition_regs = 0;
#ifdef CONFIG_PPC64
		dbginfo.data_bp_alignment = 8;
#else
		dbginfo.data_bp_alignment = 4;
#endif
		dbginfo.sizeof_condition = 0;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		dbginfo.features = PPC_DEBUG_FEATURE_DATA_BP_RANGE;
		if (cpu_has_feature(CPU_FTR_DAWR))
			dbginfo.features |= PPC_DEBUG_FEATURE_DATA_BP_DAWR;
#else
		dbginfo.features = 0;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */

		if (copy_to_user(datavp, &dbginfo,
				 sizeof(struct ppc_debug_info)))
			return -EFAULT;
		return 0;
	}

	case PPC_PTRACE_SETHWDEBUG: {
		struct ppc_hw_breakpoint bp_info;

		if (copy_from_user(&bp_info, datavp,
				   sizeof(struct ppc_hw_breakpoint)))
			return -EFAULT;
		return ppc_set_hwdebug(child, &bp_info);
	}

	case PPC_PTRACE_DELHWDEBUG: {
		ret = ppc_del_hwdebug(child, data);
		break;
	}

	case PTRACE_GET_DEBUGREG: {
#ifndef CONFIG_PPC_ADV_DEBUG_REGS
		unsigned long dabr_fake;
#endif
		ret = -EINVAL;
		/* We only support one DABR and no IABRs at the moment */
		if (addr > 0)
			break;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		ret = put_user(child->thread.debug.dac1, datalp);
#else
		dabr_fake = ((child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) |
			     (child->thread.hw_brk.type & HW_BRK_TYPE_DABR));
		ret = put_user(dabr_fake, datalp);
#endif
		break;
	}

	case PTRACE_SET_DEBUGREG:
		ret = ptrace_set_debugreg(child, addr, data);
		break;

#ifdef CONFIG_PPC64
	case PTRACE_GETREGS64:
#endif
	case PTRACE_GETREGS:	/* Get all pt_regs from the child. */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_GPR,
					   0, sizeof(struct user_pt_regs),
					   datavp);

#ifdef CONFIG_PPC64
	case PTRACE_SETREGS64:
#endif
	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_GPR,
					     0, sizeof(struct user_pt_regs),
					     datavp);

	case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_FPR,
					   0, sizeof(elf_fpregset_t),
					   datavp);

	case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_FPR,
					     0, sizeof(elf_fpregset_t),
					     datavp);

#ifdef CONFIG_ALTIVEC
	case PTRACE_GETVRREGS:
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_VMX,
					   0, (33 * sizeof(vector128) +
					       sizeof(u32)),
					   datavp);

	case PTRACE_SETVRREGS:
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_VMX,
					     0, (33 * sizeof(vector128) +
						 sizeof(u32)),
					     datavp);
#endif
#ifdef CONFIG_VSX
	case PTRACE_GETVSRREGS:
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_VSX,
					   0, 32 * sizeof(double),
					   datavp);

	case PTRACE_SETVSRREGS:
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_VSX,
					     0, 32 * sizeof(double),
					     datavp);
#endif
#ifdef CONFIG_SPE
	case PTRACE_GETEVRREGS:
		/* Get the child spe register state. */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_SPE, 0, 35 * sizeof(u32),
					   datavp);

	case PTRACE_SETEVRREGS:
		/* Set the child spe register state. */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_SPE, 0, 35 * sizeof(u32),
					     datavp);
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}
	return ret;
}
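
/*
 * As a rough userspace illustration of the regset cases above, fetching the
 * full GPR set of a stopped tracee 'pid' looks like this (using the pt_regs
 * layout exported via <asm/ptrace.h>, error handling omitted):
 *
 *	struct pt_regs regs;
 *
 *	ptrace(PTRACE_GETREGS, pid, NULL, &regs);
 *	printf("NIP %lx  MSR %lx\n", regs.nip, regs.msr);
 */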

#ifdef CONFIG_SECCOMP
static int do_seccomp(struct pt_regs *regs)
{
	if (!test_thread_flag(TIF_SECCOMP))
		return 0;

	/*
	 * The ABI we present to seccomp tracers is that r3 contains
	 * the syscall return value and orig_gpr3 contains the first
	 * syscall parameter. This is different to the ptrace ABI where
	 * both r3 and orig_gpr3 contain the first syscall parameter.
	 */
	regs->gpr[3] = -ENOSYS;

	/*
	 * We use the __ version here because we have already checked
	 * TIF_SECCOMP. If this fails, there is nothing left to do, we
	 * have already loaded -ENOSYS into r3, or seccomp has put
	 * something else in r3 (via SECCOMP_RET_ERRNO/TRACE).
	 */
	if (__secure_computing(NULL))
		return -1;

	/*
	 * The syscall was allowed by seccomp, restore the register
	 * state to what audit expects.
	 * Note that we use orig_gpr3, which means a seccomp tracer can
	 * modify the first syscall parameter (in orig_gpr3) and also
	 * allow the syscall to proceed.
	 */
	regs->gpr[3] = regs->orig_gpr3;

	return 0;
}
#else
static inline int do_seccomp(struct pt_regs *regs) { return 0; }
#endif /* CONFIG_SECCOMP */
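
/*
 * One practical consequence of the ABI described in do_seccomp(): a
 * SECCOMP_RET_TRACE tracer that wants to rewrite the first syscall argument
 * must poke orig_gpr3 rather than gpr[3]. Illustrative userspace sketch
 * ('pid' stopped at the seccomp ptrace event, 'new_arg0' is the replacement
 * value, error handling omitted):
 *
 *	ptrace(PTRACE_POKEUSER, pid, PT_ORIG_R3 * sizeof(long), new_arg0);
 */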

/**
 * do_syscall_trace_enter() - Do syscall tracing on kernel entry.
 * @regs: the pt_regs of the task to trace (current)
 *
 * Performs various types of tracing on syscall entry. This includes seccomp,
 * ptrace, syscall tracepoints and audit.
 *
 * The pt_regs are potentially visible to userspace via ptrace, so their
 * contents are ABI.
 *
 * One or more of the tracers may modify the contents of pt_regs, in particular
 * to modify arguments or even the syscall number itself.
 *
 * It's also possible that a tracer can choose to reject the system call. In
 * that case this function will return an illegal syscall number, and will put
 * an appropriate return value in regs->gpr[3].
 *
 * Return: the (possibly changed) syscall number.
 */
long do_syscall_trace_enter(struct pt_regs *regs)
{
	u32 flags;

	user_exit();

	flags = READ_ONCE(current_thread_info()->flags) &
		(_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE);

	if (flags) {
		int rc = tracehook_report_syscall_entry(regs);

		if (unlikely(flags & _TIF_SYSCALL_EMU)) {
			/*
			 * A nonzero return code from
			 * tracehook_report_syscall_entry() tells us to prevent
			 * the syscall execution, but we are not going to
			 * execute it anyway.
			 *
			 * Returning -1 will skip the syscall execution. We want
			 * to avoid clobbering any registers, so we don't goto
			 * the skip label below.
			 */
			return -1;
		}

		if (rc) {
			/*
			 * The tracer decided to abort the syscall. Note that
			 * the tracer may also just change regs->gpr[0] to an
			 * invalid syscall number; that case is handled below
			 * on the exit path.
			 */
			goto skip;
		}
	}

	/* Run seccomp after ptrace; allow it to set gpr[3]. */
	if (do_seccomp(regs))
		return -1;

	/* Avoid trace and audit when syscall is invalid. */
	if (regs->gpr[0] >= NR_syscalls)
		goto skip;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->gpr[0]);

#ifdef CONFIG_PPC64
	if (!is_32bit_task())
		audit_syscall_entry(regs->gpr[0], regs->gpr[3], regs->gpr[4],
				    regs->gpr[5], regs->gpr[6]);
	else
#endif
		audit_syscall_entry(regs->gpr[0],
				    regs->gpr[3] & 0xffffffff,
				    regs->gpr[4] & 0xffffffff,
				    regs->gpr[5] & 0xffffffff,
				    regs->gpr[6] & 0xffffffff);

	/* Return the possibly modified but valid syscall number */
	return regs->gpr[0];

skip:
	/*
	 * If we are aborting explicitly, or if the syscall number is
	 * now invalid, set the return value to -ENOSYS.
	 */
	regs->gpr[3] = -ENOSYS;
	return -1;
}

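/**
 * do_syscall_trace_leave() - Do syscall tracing on kernel exit.
 * @regs: the pt_regs of the task to trace (current)
 *
 * Mirrors do_syscall_trace_enter(): reports the syscall result to audit and
 * to the exit tracepoint, then gives ptrace a chance to see the exit (and
 * the single-step stop when TIF_SINGLESTEP is set) before returning to user
 * context.
 */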
void do_syscall_trace_leave(struct pt_regs *regs)
{
	int step;

	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->result);

	step = test_thread_flag(TIF_SINGLESTEP);
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, step);

	user_enter();
}

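/*
 * Build-time sanity check: every field that the GPR regset exports through
 * struct user_pt_regs must live at the same offset in the kernel's own
 * struct pt_regs, so the two layouts cannot silently diverge.
 */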
void __init pt_regs_check(void)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, gpr) !=
		     offsetof(struct user_pt_regs, gpr));
	BUILD_BUG_ON(offsetof(struct pt_regs, nip) !=
		     offsetof(struct user_pt_regs, nip));
	BUILD_BUG_ON(offsetof(struct pt_regs, msr) !=
		     offsetof(struct user_pt_regs, msr));
	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
		     offsetof(struct user_pt_regs, orig_gpr3));
	BUILD_BUG_ON(offsetof(struct pt_regs, ctr) !=
		     offsetof(struct user_pt_regs, ctr));
	BUILD_BUG_ON(offsetof(struct pt_regs, link) !=
		     offsetof(struct user_pt_regs, link));
	BUILD_BUG_ON(offsetof(struct pt_regs, xer) !=
		     offsetof(struct user_pt_regs, xer));
	BUILD_BUG_ON(offsetof(struct pt_regs, ccr) !=
		     offsetof(struct user_pt_regs, ccr));
#ifdef __powerpc64__
	BUILD_BUG_ON(offsetof(struct pt_regs, softe) !=
		     offsetof(struct user_pt_regs, softe));
#else
	BUILD_BUG_ON(offsetof(struct pt_regs, mq) !=
		     offsetof(struct user_pt_regs, mq));
#endif
	BUILD_BUG_ON(offsetof(struct pt_regs, trap) !=
		     offsetof(struct user_pt_regs, trap));
	BUILD_BUG_ON(offsetof(struct pt_regs, dar) !=
		     offsetof(struct user_pt_regs, dar));
	BUILD_BUG_ON(offsetof(struct pt_regs, dsisr) !=
		     offsetof(struct user_pt_regs, dsisr));
	BUILD_BUG_ON(offsetof(struct pt_regs, result) !=
		     offsetof(struct user_pt_regs, result));

	BUILD_BUG_ON(sizeof(struct user_pt_regs) > sizeof(struct pt_regs));
}