/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Derived from "arch/m68k/kernel/ptrace.c"
 * Copyright (C) 1994 by Hamish Macdonald
 * Taken from linux/kernel/ptrace.c and modified for M680x0.
 * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
 *
 * Modified by Cort Dougan (cort@hq.fsmlabs.com)
 * and Paul Mackerras (paulus@samba.org).
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file README.legal in the main directory of
 * this archive for more details.
 */
17
18#include <linux/kernel.h>
19#include <linux/sched.h>
20#include <linux/mm.h>
21#include <linux/smp.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070022#include <linux/errno.h>
23#include <linux/ptrace.h>
Roland McGrathf65255e2007-12-20 03:57:34 -080024#include <linux/regset.h>
Roland McGrath4f72c422008-07-27 16:51:03 +100025#include <linux/tracehook.h>
Roland McGrath3caf06c2007-12-20 03:57:39 -080026#include <linux/elf.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070027#include <linux/user.h>
28#include <linux/security.h>
Jesper Juhl7ed20e12005-05-01 08:59:14 -070029#include <linux/signal.h>
David Woodhouseea9c1022005-05-08 15:56:09 +010030#include <linux/seccomp.h>
31#include <linux/audit.h>
Ian Munsie02424d82011-02-02 17:27:24 +000032#include <trace/syscall.h>
K.Prasad5aae8a52010-06-15 11:35:19 +053033#include <linux/hw_breakpoint.h>
34#include <linux/perf_event.h>
Li Zhong22ecbe82013-05-13 16:16:40 +000035#include <linux/context_tracking.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070036
Linus Torvalds7c0f6ba2016-12-24 11:46:01 -080037#include <linux/uaccess.h>
Thiago Jung Bauermannc5cc1f42018-01-18 17:50:43 -080038#include <linux/pkeys.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039#include <asm/page.h>
40#include <asm/pgtable.h>
David Howellsae3a1972012-03-28 18:30:02 +010041#include <asm/switch_to.h>
Cyril Burc7a318b2016-08-10 15:44:46 +100042#include <asm/tm.h>
Daniel Axtens0545d542016-09-06 15:32:43 +100043#include <asm/asm-prototypes.h>
Michael Neuling85ce9a52018-03-27 15:37:18 +110044#include <asm/debug.h>
Paul Mackerras21a62902005-11-19 20:47:22 +110045
Ian Munsie02424d82011-02-02 17:27:24 +000046#define CREATE_TRACE_POINTS
47#include <trace/events/syscalls.h>
48
/*
 * The parameter save area on the stack is used to store arguments being
 * passed to a callee function and is located at a fixed offset from the
 * stack pointer.
 */
#ifdef CONFIG_PPC32
#define PARAMETER_SAVE_AREA_OFFSET      24  /* bytes */
#else /* CONFIG_PPC32 */
#define PARAMETER_SAVE_AREA_OFFSET      48  /* bytes */
#endif

struct pt_regs_offset {
        const char *name;
        int offset;
};

#define STR(s)  #s                      /* convert to string */
#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define GPR_OFFSET_NAME(num)    \
        {.name = STR(r##num), .offset = offsetof(struct pt_regs, gpr[num])}, \
        {.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
#define REG_OFFSET_END {.name = NULL, .offset = 0}

#define TVSO(f) (offsetof(struct thread_vr_state, f))
#define TFSO(f) (offsetof(struct thread_fp_state, f))
#define TSO(f)  (offsetof(struct thread_struct, f))

static const struct pt_regs_offset regoffset_table[] = {
        GPR_OFFSET_NAME(0),
        GPR_OFFSET_NAME(1),
        GPR_OFFSET_NAME(2),
        GPR_OFFSET_NAME(3),
        GPR_OFFSET_NAME(4),
        GPR_OFFSET_NAME(5),
        GPR_OFFSET_NAME(6),
        GPR_OFFSET_NAME(7),
        GPR_OFFSET_NAME(8),
        GPR_OFFSET_NAME(9),
        GPR_OFFSET_NAME(10),
        GPR_OFFSET_NAME(11),
        GPR_OFFSET_NAME(12),
        GPR_OFFSET_NAME(13),
        GPR_OFFSET_NAME(14),
        GPR_OFFSET_NAME(15),
        GPR_OFFSET_NAME(16),
        GPR_OFFSET_NAME(17),
        GPR_OFFSET_NAME(18),
        GPR_OFFSET_NAME(19),
        GPR_OFFSET_NAME(20),
        GPR_OFFSET_NAME(21),
        GPR_OFFSET_NAME(22),
        GPR_OFFSET_NAME(23),
        GPR_OFFSET_NAME(24),
        GPR_OFFSET_NAME(25),
        GPR_OFFSET_NAME(26),
        GPR_OFFSET_NAME(27),
        GPR_OFFSET_NAME(28),
        GPR_OFFSET_NAME(29),
        GPR_OFFSET_NAME(30),
        GPR_OFFSET_NAME(31),
        REG_OFFSET_NAME(nip),
        REG_OFFSET_NAME(msr),
        REG_OFFSET_NAME(ctr),
        REG_OFFSET_NAME(link),
        REG_OFFSET_NAME(xer),
        REG_OFFSET_NAME(ccr),
#ifdef CONFIG_PPC64
        REG_OFFSET_NAME(softe),
#else
        REG_OFFSET_NAME(mq),
#endif
        REG_OFFSET_NAME(trap),
        REG_OFFSET_NAME(dar),
        REG_OFFSET_NAME(dsisr),
        REG_OFFSET_END,
};

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static void flush_tmregs_to_thread(struct task_struct *tsk)
{
        /*
         * If task is not current, it will have been flushed already to
         * its thread_struct during __switch_to().
         *
         * A reclaim flushes ALL the state; otherwise, if we are not in a
         * transaction, only the TM SPRs need to be saved from the live
         * registers into the appropriate thread structures.
         */

        if ((!cpu_has_feature(CPU_FTR_TM)) || (tsk != current))
                return;

        if (MSR_TM_SUSPENDED(mfmsr())) {
                tm_reclaim_current(TM_CAUSE_SIGNAL);
        } else {
                tm_enable();
                tm_save_sprs(&(tsk->thread));
        }
}
#else
static inline void flush_tmregs_to_thread(struct task_struct *tsk) { }
#endif

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:       the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
        const struct pt_regs_offset *roff;
        for (roff = regoffset_table; roff->name != NULL; roff++)
                if (!strcmp(roff->name, name))
                        return roff->offset;
        return -EINVAL;
}

/**
 * regs_query_register_name() - query register name from its offset
 * @offset:     the offset of a register in struct pt_regs.
 *
 * regs_query_register_name() returns the name of a register from its
 * offset in struct pt_regs. If the @offset is invalid, this returns NULL.
 */
const char *regs_query_register_name(unsigned int offset)
{
        const struct pt_regs_offset *roff;
        for (roff = regoffset_table; roff->name != NULL; roff++)
                if (roff->offset == offset)
                        return roff->name;
        return NULL;
}

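/*
 * Illustrative sketch (not part of the original file): a tracing or
 * debugging helper could use the two query functions above to translate
 * between register names and pt_regs offsets. "dump_reg_by_name" is a
 * hypothetical function shown only as an example of the intended usage.
 *
 *      static void dump_reg_by_name(struct pt_regs *regs, const char *name)
 *      {
 *              int offset = regs_query_register_offset(name);
 *
 *              if (offset < 0)
 *                      return;
 *              pr_info("%s = 0x%lx\n", regs_query_register_name(offset),
 *                      *(unsigned long *)((unsigned long)regs + offset));
 *      }
 */
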
/*
 * This does not yet catch signals sent when the child dies;
 * that would have to be handled in exit.c or in signal.c.
 */

/*
 * Set of MSR bits that gdb can change on behalf of a process.
 */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
#define MSR_DEBUGCHANGE 0
#else
#define MSR_DEBUGCHANGE (MSR_SE | MSR_BE)
#endif

/*
 * Max register writeable via put_reg
 */
#ifdef CONFIG_PPC32
#define PT_MAX_PUT_REG  PT_MQ
#else
#define PT_MAX_PUT_REG  PT_CCR
#endif

static unsigned long get_user_msr(struct task_struct *task)
{
        return task->thread.regs->msr | task->thread.fpexc_mode;
}

static int set_user_msr(struct task_struct *task, unsigned long msr)
{
        task->thread.regs->msr &= ~MSR_DEBUGCHANGE;
        task->thread.regs->msr |= msr & MSR_DEBUGCHANGE;
        return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static unsigned long get_user_ckpt_msr(struct task_struct *task)
{
        return task->thread.ckpt_regs.msr | task->thread.fpexc_mode;
}

static int set_user_ckpt_msr(struct task_struct *task, unsigned long msr)
{
        task->thread.ckpt_regs.msr &= ~MSR_DEBUGCHANGE;
        task->thread.ckpt_regs.msr |= msr & MSR_DEBUGCHANGE;
        return 0;
}

static int set_user_ckpt_trap(struct task_struct *task, unsigned long trap)
{
        task->thread.ckpt_regs.trap = trap & 0xfff0;
        return 0;
}
#endif

#ifdef CONFIG_PPC64
static int get_user_dscr(struct task_struct *task, unsigned long *data)
{
        *data = task->thread.dscr;
        return 0;
}

static int set_user_dscr(struct task_struct *task, unsigned long dscr)
{
        task->thread.dscr = dscr;
        task->thread.dscr_inherit = 1;
        return 0;
}
#else
static int get_user_dscr(struct task_struct *task, unsigned long *data)
{
        return -EIO;
}

static int set_user_dscr(struct task_struct *task, unsigned long dscr)
{
        return -EIO;
}
#endif

/*
 * We prevent mucking around with the reserved area of trap
 * which is used internally by the kernel.
 */
static int set_user_trap(struct task_struct *task, unsigned long trap)
{
        task->thread.regs->trap = trap & 0xfff0;
        return 0;
}

/*
 * Get contents of register REGNO in task TASK.
 */
int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data)
{
        if ((task->thread.regs == NULL) || !data)
                return -EIO;

        if (regno == PT_MSR) {
                *data = get_user_msr(task);
                return 0;
        }

        if (regno == PT_DSCR)
                return get_user_dscr(task, data);

#ifdef CONFIG_PPC64
        /*
         * softe copies the paca->irq_soft_mask variable state. Since
         * irq_soft_mask is no longer used as a flag, force userspace to
         * always see the softe value as 1, which means interrupts are
         * not soft-disabled.
         */
        if (regno == PT_SOFTE) {
                *data = 1;
                return 0;
        }
#endif

        if (regno < (sizeof(struct user_pt_regs) / sizeof(unsigned long))) {
                *data = ((unsigned long *)task->thread.regs)[regno];
                return 0;
        }

        return -EIO;
}

/*
 * Write contents of register REGNO in task TASK.
 */
int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
{
        if (task->thread.regs == NULL)
                return -EIO;

        if (regno == PT_MSR)
                return set_user_msr(task, data);
        if (regno == PT_TRAP)
                return set_user_trap(task, data);
        if (regno == PT_DSCR)
                return set_user_dscr(task, data);

        if (regno <= PT_MAX_PUT_REG) {
                ((unsigned long *)task->thread.regs)[regno] = data;
                return 0;
        }
        return -EIO;
}

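/*
 * Illustrative userspace sketch (an assumption for this example: the
 * program is built against the powerpc uapi headers, where the PT_*
 * constants are word indexes into the user area). Reading a single
 * register of a stopped tracee with PTRACE_PEEKUSER ends up in
 * ptrace_get_reg() above; PTRACE_POKEUSER likewise reaches
 * ptrace_put_reg(). "peek_nip" is a hypothetical helper.
 *
 *      #include <errno.h>
 *      #include <sys/ptrace.h>
 *      #include <sys/types.h>
 *      #include <asm/ptrace.h>
 *
 *      static long peek_nip(pid_t pid)
 *      {
 *              errno = 0;
 *              return ptrace(PTRACE_PEEKUSER, pid,
 *                            (void *)(PT_NIP * sizeof(unsigned long)), NULL);
 *      }
 */
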
static int gpr_get(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   void *kbuf, void __user *ubuf)
{
        int i, ret;

        if (target->thread.regs == NULL)
                return -EIO;

        if (!FULL_REGS(target->thread.regs)) {
                /* We have a partial register set.  Fill 14-31 with bogus values */
                for (i = 14; i < 32; i++)
                        target->thread.regs->gpr[i] = NV_REG_POISON;
        }

        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                  target->thread.regs,
                                  0, offsetof(struct pt_regs, msr));
        if (!ret) {
                unsigned long msr = get_user_msr(target);
                ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
                                          offsetof(struct pt_regs, msr),
                                          offsetof(struct pt_regs, msr) +
                                          sizeof(msr));
        }

        BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
                     offsetof(struct pt_regs, msr) + sizeof(long));

        if (!ret)
                ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                          &target->thread.regs->orig_gpr3,
                                          offsetof(struct pt_regs, orig_gpr3),
                                          sizeof(struct user_pt_regs));
        if (!ret)
                ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
                                               sizeof(struct user_pt_regs), -1);

        return ret;
}

static int gpr_set(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   const void *kbuf, const void __user *ubuf)
{
        unsigned long reg;
        int ret;

        if (target->thread.regs == NULL)
                return -EIO;

        CHECK_FULL_REGS(target->thread.regs);

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                 target->thread.regs,
                                 0, PT_MSR * sizeof(reg));

        if (!ret && count > 0) {
                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
                                         PT_MSR * sizeof(reg),
                                         (PT_MSR + 1) * sizeof(reg));
                if (!ret)
                        ret = set_user_msr(target, reg);
        }

        BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
                     offsetof(struct pt_regs, msr) + sizeof(long));

        if (!ret)
                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                         &target->thread.regs->orig_gpr3,
                                         PT_ORIG_R3 * sizeof(reg),
                                         (PT_MAX_PUT_REG + 1) * sizeof(reg));

        if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
                ret = user_regset_copyin_ignore(
                        &pos, &count, &kbuf, &ubuf,
                        (PT_MAX_PUT_REG + 1) * sizeof(reg),
                        PT_TRAP * sizeof(reg));

        if (!ret && count > 0) {
                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
                                         PT_TRAP * sizeof(reg),
                                         (PT_TRAP + 1) * sizeof(reg));
                if (!ret)
                        ret = set_user_trap(target, reg);
        }

        if (!ret)
                ret = user_regset_copyin_ignore(
                        &pos, &count, &kbuf, &ubuf,
                        (PT_TRAP + 1) * sizeof(reg), -1);

        return ret;
}

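/*
 * Illustrative userspace sketch (assumptions for this example: glibc's
 * <sys/ptrace.h> and the powerpc uapi <asm/ptrace.h> are available, and
 * the tracee is already ptrace-stopped). A PTRACE_GETREGSET request for
 * NT_PRSTATUS is what ultimately lands in gpr_get() above, and
 * PTRACE_SETREGSET correspondingly reaches gpr_set(). "get_gprs" is a
 * hypothetical helper.
 *
 *      #include <elf.h>
 *      #include <sys/ptrace.h>
 *      #include <sys/types.h>
 *      #include <sys/uio.h>
 *      #include <asm/ptrace.h>
 *
 *      static long get_gprs(pid_t pid, struct pt_regs *regs)
 *      {
 *              struct iovec iov = {
 *                      .iov_base = regs,
 *                      .iov_len = sizeof(*regs),
 *              };
 *
 *              return ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
 *      }
 */
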
/*
 * Regardless of transactions, 'fp_state' holds the current running
 * value of all FPR registers and 'ckfp_state' holds the last checkpointed
 * value of all FPR registers for the current transaction.
 *
 * Userspace interface buffer layout:
 *
 * struct data {
 *      u64     fpr[32];
 *      u64     fpscr;
 * };
 */
static int fpr_get(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   void *kbuf, void __user *ubuf)
{
#ifdef CONFIG_VSX
        u64 buf[33];
        int i;

        flush_fp_to_thread(target);

        /* copy to local buffer then write that out */
        for (i = 0; i < 32 ; i++)
                buf[i] = target->thread.TS_FPR(i);
        buf[32] = target->thread.fp_state.fpscr;
        return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
#else
        BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
                     offsetof(struct thread_fp_state, fpr[32]));

        flush_fp_to_thread(target);

        return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                   &target->thread.fp_state, 0, -1);
#endif
}


/*
 * Regardless of transactions, 'fp_state' holds the current running
 * value of all FPR registers and 'ckfp_state' holds the last checkpointed
 * value of all FPR registers for the current transaction.
 *
 * Userspace interface buffer layout:
 *
 * struct data {
 *      u64     fpr[32];
 *      u64     fpscr;
 * };
 *
 */
static int fpr_set(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   const void *kbuf, const void __user *ubuf)
{
#ifdef CONFIG_VSX
        u64 buf[33];
        int i;

        flush_fp_to_thread(target);

        for (i = 0; i < 32 ; i++)
                buf[i] = target->thread.TS_FPR(i);
        buf[32] = target->thread.fp_state.fpscr;

        /* copy to local buffer then write that out */
        i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
        if (i)
                return i;

        for (i = 0; i < 32 ; i++)
                target->thread.TS_FPR(i) = buf[i];
        target->thread.fp_state.fpscr = buf[32];
        return 0;
#else
        BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
                     offsetof(struct thread_fp_state, fpr[32]));

        flush_fp_to_thread(target);

        return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                  &target->thread.fp_state, 0, -1);
#endif
}

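/*
 * Illustrative userspace sketch of the buffer layout documented above
 * (an assumption for this example: the regset is exposed to ptrace as
 * NT_PRFPREG). "ppc_fp_regs" and "get_fprs" are hypothetical names used
 * only to show how a tracer would mirror the layout.
 *
 *      #include <elf.h>
 *      #include <sys/ptrace.h>
 *      #include <sys/types.h>
 *      #include <sys/uio.h>
 *
 *      struct ppc_fp_regs {
 *              unsigned long long fpr[32];
 *              unsigned long long fpscr;
 *      };
 *
 *      static long get_fprs(pid_t pid, struct ppc_fp_regs *fp)
 *      {
 *              struct iovec iov = {
 *                      .iov_base = fp,
 *                      .iov_len = sizeof(*fp),
 *              };
 *
 *              return ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRFPREG, &iov);
 *      }
 */
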
#ifdef CONFIG_ALTIVEC
/*
 * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
 * The transfer totals 34 quadwords.  Quadwords 0-31 contain the
 * corresponding vector registers.  Quadword 32 contains the vscr as the
 * last word (offset 12) within that quadword.  Quadword 33 contains the
 * vrsave as the first word (offset 0) within the quadword.
 *
 * This definition of the VMX state is compatible with the current PPC32
 * ptrace interface.  This allows signal handling and ptrace to use the
 * same structures.  This also simplifies the implementation of a bi-arch
 * (combined 32- and 64-bit) gdb.
 */

static int vr_active(struct task_struct *target,
                     const struct user_regset *regset)
{
        flush_altivec_to_thread(target);
        return target->thread.used_vr ? regset->n : 0;
}

/*
 * Regardless of transactions, 'vr_state' holds the current running
 * value of all the VMX registers and 'ckvr_state' holds the last
 * checkpointed value of all the VMX registers for the current
 * transaction to fall back on in case it aborts.
 *
 * Userspace interface buffer layout:
 *
 * struct data {
 *      vector128       vr[32];
 *      vector128       vscr;
 *      vector128       vrsave;
 * };
 */
static int vr_get(struct task_struct *target, const struct user_regset *regset,
                  unsigned int pos, unsigned int count,
                  void *kbuf, void __user *ubuf)
{
        int ret;

        flush_altivec_to_thread(target);

        BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
                     offsetof(struct thread_vr_state, vr[32]));

        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                  &target->thread.vr_state, 0,
                                  33 * sizeof(vector128));
        if (!ret) {
                /*
                 * Copy out only the low-order word of vrsave.
                 */
                union {
                        elf_vrreg_t reg;
                        u32 word;
                } vrsave;
                memset(&vrsave, 0, sizeof(vrsave));

                vrsave.word = target->thread.vrsave;

                ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
                                          33 * sizeof(vector128), -1);
        }

        return ret;
}

/*
 * Regardless of transactions, 'vr_state' holds the current running
 * value of all the VMX registers and 'ckvr_state' holds the last
 * checkpointed value of all the VMX registers for the current
 * transaction to fall back on in case it aborts.
 *
 * Userspace interface buffer layout:
 *
 * struct data {
 *      vector128       vr[32];
 *      vector128       vscr;
 *      vector128       vrsave;
 * };
 */
static int vr_set(struct task_struct *target, const struct user_regset *regset,
                  unsigned int pos, unsigned int count,
                  const void *kbuf, const void __user *ubuf)
{
        int ret;

        flush_altivec_to_thread(target);

        BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
                     offsetof(struct thread_vr_state, vr[32]));

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                 &target->thread.vr_state, 0,
                                 33 * sizeof(vector128));
        if (!ret && count > 0) {
                /*
                 * We use only the first word of vrsave.
                 */
                union {
                        elf_vrreg_t reg;
                        u32 word;
                } vrsave;
                memset(&vrsave, 0, sizeof(vrsave));

                vrsave.word = target->thread.vrsave;

                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
                                         33 * sizeof(vector128), -1);
                if (!ret)
                        target->thread.vrsave = vrsave.word;
        }

        return ret;
}
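
/*
 * Illustrative userspace sketch of the 34-quadword layout described at
 * the top of this #ifdef block (an assumption for this example: the
 * regset is exposed to ptrace as NT_PPC_VMX). The vscr value sits in the
 * last word of its quadword and vrsave in the first word of its
 * quadword, as explained above. "ppc_vmx_regs" and "get_vmx" are
 * hypothetical names.
 *
 *      #include <elf.h>
 *      #include <sys/ptrace.h>
 *      #include <sys/types.h>
 *      #include <sys/uio.h>
 *
 *      struct ppc_vmx_regs {
 *              unsigned char vr[32][16];
 *              unsigned char vscr[16];
 *              unsigned char vrsave[16];
 *      };
 *
 *      static long get_vmx(pid_t pid, struct ppc_vmx_regs *vmx)
 *      {
 *              struct iovec iov = {
 *                      .iov_base = vmx,
 *                      .iov_len = sizeof(*vmx),
 *              };
 *
 *              return ptrace(PTRACE_GETREGSET, pid, (void *)NT_PPC_VMX, &iov);
 *      }
 */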
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
/*
 * Currently to set and get all the VSX state, you need to call the fp
 * and VMX calls as well.  This only gets/sets the lower 32 128-bit
 * VSX registers.
 */

static int vsr_active(struct task_struct *target,
                      const struct user_regset *regset)
{
        flush_vsx_to_thread(target);
        return target->thread.used_vsr ? regset->n : 0;
}

/*
 * Regardless of transactions, 'fp_state' holds the current running
 * value of all FPR registers and 'ckfp_state' holds the last
 * checkpointed value of all FPR registers for the current
 * transaction.
 *
 * Userspace interface buffer layout:
 *
 * struct data {
 *      u64     vsx[32];
 * };
 */
static int vsr_get(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   void *kbuf, void __user *ubuf)
{
        u64 buf[32];
        int ret, i;

        flush_tmregs_to_thread(target);
        flush_fp_to_thread(target);
        flush_altivec_to_thread(target);
        flush_vsx_to_thread(target);

        for (i = 0; i < 32 ; i++)
                buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];

        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                  buf, 0, 32 * sizeof(double));

        return ret;
}

/*
 * Regardless of transactions, 'fp_state' holds the current running
 * value of all FPR registers and 'ckfp_state' holds the last
 * checkpointed value of all FPR registers for the current
 * transaction.
 *
 * Userspace interface buffer layout:
 *
 * struct data {
 *      u64     vsx[32];
 * };
 */
static int vsr_set(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   const void *kbuf, const void __user *ubuf)
{
        u64 buf[32];
        int ret, i;

        flush_tmregs_to_thread(target);
        flush_fp_to_thread(target);
        flush_altivec_to_thread(target);
        flush_vsx_to_thread(target);

        for (i = 0; i < 32 ; i++)
                buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                 buf, 0, 32 * sizeof(double));
        if (!ret)
                for (i = 0; i < 32 ; i++)
                        target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];

        return ret;
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE

/*
 * For get_evrregs/set_evrregs functions 'data' has the following layout:
 *
 * struct {
 *      u32 evr[32];
 *      u64 acc;
 *      u32 spefscr;
 * }
 */

static int evr_active(struct task_struct *target,
                      const struct user_regset *regset)
{
        flush_spe_to_thread(target);
        return target->thread.used_spe ? regset->n : 0;
}

static int evr_get(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   void *kbuf, void __user *ubuf)
{
        int ret;

        flush_spe_to_thread(target);

        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                  &target->thread.evr,
                                  0, sizeof(target->thread.evr));

        BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
                     offsetof(struct thread_struct, spefscr));

        if (!ret)
                ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                          &target->thread.acc,
                                          sizeof(target->thread.evr), -1);

        return ret;
}

static int evr_set(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   const void *kbuf, const void __user *ubuf)
{
        int ret;

        flush_spe_to_thread(target);

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                 &target->thread.evr,
                                 0, sizeof(target->thread.evr));

        BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
                     offsetof(struct thread_struct, spefscr));

        if (!ret)
                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                         &target->thread.acc,
                                         sizeof(target->thread.evr), -1);

        return ret;
}
#endif /* CONFIG_SPE */

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/**
 * tm_cgpr_active - get active number of registers in CGPR
 * @target:     The target task.
 * @regset:     The user regset structure.
 *
 * This function checks for the active number of available
 * registers in the transaction checkpointed GPR category.
 */
static int tm_cgpr_active(struct task_struct *target,
                          const struct user_regset *regset)
{
        if (!cpu_has_feature(CPU_FTR_TM))
                return -ENODEV;

        if (!MSR_TM_ACTIVE(target->thread.regs->msr))
                return 0;

        return regset->n;
}

/**
 * tm_cgpr_get - get CGPR registers
 * @target:     The target task.
 * @regset:     The user regset structure.
 * @pos:        The buffer position.
 * @count:      Number of bytes to copy.
 * @kbuf:       Kernel buffer to copy from.
 * @ubuf:       User buffer to copy into.
 *
 * This function gets the transaction checkpointed GPR registers.
 *
 * When the transaction is active, 'ckpt_regs' holds all the checkpointed
 * GPR register values for the current transaction to fall back on if it
 * aborts in between. This function gets those checkpointed GPR registers.
 * The userspace interface buffer layout is as follows.
 *
 * struct data {
 *      struct pt_regs ckpt_regs;
 * };
 */
static int tm_cgpr_get(struct task_struct *target,
                        const struct user_regset *regset,
                        unsigned int pos, unsigned int count,
                        void *kbuf, void __user *ubuf)
{
        int ret;

        if (!cpu_has_feature(CPU_FTR_TM))
                return -ENODEV;

        if (!MSR_TM_ACTIVE(target->thread.regs->msr))
                return -ENODATA;

        flush_tmregs_to_thread(target);
        flush_fp_to_thread(target);
        flush_altivec_to_thread(target);

        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                  &target->thread.ckpt_regs,
                                  0, offsetof(struct pt_regs, msr));
        if (!ret) {
                unsigned long msr = get_user_ckpt_msr(target);

                ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
                                          offsetof(struct pt_regs, msr),
                                          offsetof(struct pt_regs, msr) +
                                          sizeof(msr));
        }

        BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
                     offsetof(struct pt_regs, msr) + sizeof(long));

        if (!ret)
                ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                          &target->thread.ckpt_regs.orig_gpr3,
                                          offsetof(struct pt_regs, orig_gpr3),
                                          sizeof(struct user_pt_regs));
        if (!ret)
                ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
                                               sizeof(struct user_pt_regs), -1);

        return ret;
}

/*
 * tm_cgpr_set - set the CGPR registers
 * @target:     The target task.
 * @regset:     The user regset structure.
 * @pos:        The buffer position.
 * @count:      Number of bytes to copy.
 * @kbuf:       Kernel buffer to copy into.
 * @ubuf:       User buffer to copy from.
 *
 * This function sets the in-transaction checkpointed GPR registers.
 *
 * When the transaction is active, 'ckpt_regs' holds the checkpointed
 * GPR register values for the current transaction to fall back on if it
 * aborts in between. This function sets those checkpointed GPR registers.
 * The userspace interface buffer layout is as follows.
 *
 * struct data {
 *      struct pt_regs ckpt_regs;
 * };
 */
static int tm_cgpr_set(struct task_struct *target,
                        const struct user_regset *regset,
                        unsigned int pos, unsigned int count,
                        const void *kbuf, const void __user *ubuf)
{
        unsigned long reg;
        int ret;

        if (!cpu_has_feature(CPU_FTR_TM))
                return -ENODEV;

        if (!MSR_TM_ACTIVE(target->thread.regs->msr))
                return -ENODATA;

        flush_tmregs_to_thread(target);
        flush_fp_to_thread(target);
        flush_altivec_to_thread(target);

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                 &target->thread.ckpt_regs,
                                 0, PT_MSR * sizeof(reg));

        if (!ret && count > 0) {
                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
                                         PT_MSR * sizeof(reg),
                                         (PT_MSR + 1) * sizeof(reg));
                if (!ret)
                        ret = set_user_ckpt_msr(target, reg);
        }

        BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
                     offsetof(struct pt_regs, msr) + sizeof(long));

        if (!ret)
                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                         &target->thread.ckpt_regs.orig_gpr3,
                                         PT_ORIG_R3 * sizeof(reg),
                                         (PT_MAX_PUT_REG + 1) * sizeof(reg));

        if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
                ret = user_regset_copyin_ignore(
                        &pos, &count, &kbuf, &ubuf,
                        (PT_MAX_PUT_REG + 1) * sizeof(reg),
                        PT_TRAP * sizeof(reg));

        if (!ret && count > 0) {
                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
                                         PT_TRAP * sizeof(reg),
                                         (PT_TRAP + 1) * sizeof(reg));
                if (!ret)
                        ret = set_user_ckpt_trap(target, reg);
        }

        if (!ret)
                ret = user_regset_copyin_ignore(
                        &pos, &count, &kbuf, &ubuf,
                        (PT_TRAP + 1) * sizeof(reg), -1);

        return ret;
}

/**
 * tm_cfpr_active - get active number of registers in CFPR
 * @target:     The target task.
 * @regset:     The user regset structure.
 *
 * This function checks for the active number of available
 * registers in the transaction checkpointed FPR category.
 */
static int tm_cfpr_active(struct task_struct *target,
                                const struct user_regset *regset)
{
        if (!cpu_has_feature(CPU_FTR_TM))
                return -ENODEV;

        if (!MSR_TM_ACTIVE(target->thread.regs->msr))
                return 0;

        return regset->n;
}

/**
 * tm_cfpr_get - get CFPR registers
 * @target:     The target task.
 * @regset:     The user regset structure.
 * @pos:        The buffer position.
 * @count:      Number of bytes to copy.
 * @kbuf:       Kernel buffer to copy from.
 * @ubuf:       User buffer to copy into.
 *
 * This function gets the in-transaction checkpointed FPR registers.
 *
 * When the transaction is active 'ckfp_state' holds the checkpointed
 * values for the current transaction to fall back on if it aborts
 * in between. This function gets those checkpointed FPR registers.
 * The userspace interface buffer layout is as follows.
 *
 * struct data {
 *      u64     fpr[32];
 *      u64     fpscr;
 * };
 */
static int tm_cfpr_get(struct task_struct *target,
                        const struct user_regset *regset,
                        unsigned int pos, unsigned int count,
                        void *kbuf, void __user *ubuf)
{
        u64 buf[33];
        int i;

        if (!cpu_has_feature(CPU_FTR_TM))
                return -ENODEV;

        if (!MSR_TM_ACTIVE(target->thread.regs->msr))
                return -ENODATA;

        flush_tmregs_to_thread(target);
        flush_fp_to_thread(target);
        flush_altivec_to_thread(target);

        /* copy to local buffer then write that out */
        for (i = 0; i < 32 ; i++)
                buf[i] = target->thread.TS_CKFPR(i);
        buf[32] = target->thread.ckfp_state.fpscr;
        return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
}

/**
 * tm_cfpr_set - set CFPR registers
 * @target:     The target task.
 * @regset:     The user regset structure.
 * @pos:        The buffer position.
 * @count:      Number of bytes to copy.
 * @kbuf:       Kernel buffer to copy into.
 * @ubuf:       User buffer to copy from.
 *
 * This function sets the in-transaction checkpointed FPR registers.
 *
 * When the transaction is active 'ckfp_state' holds the checkpointed
 * FPR register values for the current transaction to fall back on
 * if it aborts in between. This function sets these checkpointed
 * FPR registers. The userspace interface buffer layout is as follows.
 *
 * struct data {
 *      u64     fpr[32];
 *      u64     fpscr;
 * };
 */
static int tm_cfpr_set(struct task_struct *target,
                        const struct user_regset *regset,
                        unsigned int pos, unsigned int count,
                        const void *kbuf, const void __user *ubuf)
{
        u64 buf[33];
        int i;

        if (!cpu_has_feature(CPU_FTR_TM))
                return -ENODEV;

        if (!MSR_TM_ACTIVE(target->thread.regs->msr))
                return -ENODATA;

        flush_tmregs_to_thread(target);
        flush_fp_to_thread(target);
        flush_altivec_to_thread(target);

        for (i = 0; i < 32; i++)
                buf[i] = target->thread.TS_CKFPR(i);
        buf[32] = target->thread.ckfp_state.fpscr;

        /* copy to local buffer then write that out */
        i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
        if (i)
                return i;
        for (i = 0; i < 32 ; i++)
                target->thread.TS_CKFPR(i) = buf[i];
        target->thread.ckfp_state.fpscr = buf[32];
        return 0;
}

/**
 * tm_cvmx_active - get active number of registers in CVMX
 * @target:     The target task.
 * @regset:     The user regset structure.
 *
 * This function checks for the active number of available
 * registers in the checkpointed VMX category.
 */
static int tm_cvmx_active(struct task_struct *target,
                                const struct user_regset *regset)
{
        if (!cpu_has_feature(CPU_FTR_TM))
                return -ENODEV;

        if (!MSR_TM_ACTIVE(target->thread.regs->msr))
                return 0;

        return regset->n;
}

/**
 * tm_cvmx_get - get CVMX registers
 * @target:     The target task.
 * @regset:     The user regset structure.
 * @pos:        The buffer position.
 * @count:      Number of bytes to copy.
 * @kbuf:       Kernel buffer to copy from.
 * @ubuf:       User buffer to copy into.
 *
 * This function gets the in-transaction checkpointed VMX registers.
 *
 * When the transaction is active 'ckvr_state' and 'ckvrsave' hold
 * the checkpointed values for the current transaction to fall
 * back on if it aborts in between. The userspace interface buffer
 * layout is as follows.
 *
 * struct data {
 *      vector128       vr[32];
 *      vector128       vscr;
 *      vector128       vrsave;
 * };
 */
static int tm_cvmx_get(struct task_struct *target,
                        const struct user_regset *regset,
                        unsigned int pos, unsigned int count,
                        void *kbuf, void __user *ubuf)
{
        int ret;

        BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));

        if (!cpu_has_feature(CPU_FTR_TM))
                return -ENODEV;

        if (!MSR_TM_ACTIVE(target->thread.regs->msr))
                return -ENODATA;

        /* Flush the state */
        flush_tmregs_to_thread(target);
        flush_fp_to_thread(target);
        flush_altivec_to_thread(target);

        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                  &target->thread.ckvr_state, 0,
                                  33 * sizeof(vector128));
        if (!ret) {
                /*
                 * Copy out only the low-order word of vrsave.
                 */
                union {
                        elf_vrreg_t reg;
                        u32 word;
                } vrsave;
                memset(&vrsave, 0, sizeof(vrsave));
                vrsave.word = target->thread.ckvrsave;
                ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
                                          33 * sizeof(vector128), -1);
        }

        return ret;
}

/**
 * tm_cvmx_set - set CVMX registers
 * @target:     The target task.
 * @regset:     The user regset structure.
 * @pos:        The buffer position.
 * @count:      Number of bytes to copy.
 * @kbuf:       Kernel buffer to copy into.
 * @ubuf:       User buffer to copy from.
 *
 * This function sets the in-transaction checkpointed VMX registers.
 *
 * When the transaction is active 'ckvr_state' and 'ckvrsave' hold
 * the checkpointed values for the current transaction to fall
 * back on if it aborts in between. The userspace interface buffer
 * layout is as follows.
 *
 * struct data {
 *      vector128       vr[32];
 *      vector128       vscr;
 *      vector128       vrsave;
 * };
 */
static int tm_cvmx_set(struct task_struct *target,
                        const struct user_regset *regset,
                        unsigned int pos, unsigned int count,
                        const void *kbuf, const void __user *ubuf)
{
        int ret;

        BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));

        if (!cpu_has_feature(CPU_FTR_TM))
                return -ENODEV;

        if (!MSR_TM_ACTIVE(target->thread.regs->msr))
                return -ENODATA;

        flush_tmregs_to_thread(target);
        flush_fp_to_thread(target);
        flush_altivec_to_thread(target);

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                 &target->thread.ckvr_state, 0,
                                 33 * sizeof(vector128));
        if (!ret && count > 0) {
                /*
                 * We use only the low-order word of vrsave.
                 */
                union {
                        elf_vrreg_t reg;
                        u32 word;
                } vrsave;
                memset(&vrsave, 0, sizeof(vrsave));
                vrsave.word = target->thread.ckvrsave;
                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
                                         33 * sizeof(vector128), -1);
                if (!ret)
                        target->thread.ckvrsave = vrsave.word;
        }

        return ret;
}

/**
 * tm_cvsx_active - get active number of registers in CVSX
 * @target:     The target task.
 * @regset:     The user regset structure.
 *
 * This function checks for the active number of available
 * registers in the transaction checkpointed VSX category.
 */
static int tm_cvsx_active(struct task_struct *target,
                                const struct user_regset *regset)
{
        if (!cpu_has_feature(CPU_FTR_TM))
                return -ENODEV;

        if (!MSR_TM_ACTIVE(target->thread.regs->msr))
                return 0;

        flush_vsx_to_thread(target);
        return target->thread.used_vsr ? regset->n : 0;
}

/**
 * tm_cvsx_get - get CVSX registers
 * @target:     The target task.
 * @regset:     The user regset structure.
 * @pos:        The buffer position.
 * @count:      Number of bytes to copy.
 * @kbuf:       Kernel buffer to copy from.
 * @ubuf:       User buffer to copy into.
 *
 * This function gets the in-transaction checkpointed VSX registers.
 *
 * When the transaction is active 'ckfp_state' holds the checkpointed
 * values for the current transaction to fall back on if it aborts
 * in between. This function gets those checkpointed VSX registers.
 * The userspace interface buffer layout is as follows.
 *
 * struct data {
 *      u64     vsx[32];
 * };
 */
static int tm_cvsx_get(struct task_struct *target,
                        const struct user_regset *regset,
                        unsigned int pos, unsigned int count,
                        void *kbuf, void __user *ubuf)
{
        u64 buf[32];
        int ret, i;

        if (!cpu_has_feature(CPU_FTR_TM))
                return -ENODEV;

        if (!MSR_TM_ACTIVE(target->thread.regs->msr))
                return -ENODATA;

        /* Flush the state */
        flush_tmregs_to_thread(target);
        flush_fp_to_thread(target);
        flush_altivec_to_thread(target);
        flush_vsx_to_thread(target);

        for (i = 0; i < 32 ; i++)
                buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                  buf, 0, 32 * sizeof(double));

        return ret;
}

/**
 * tm_cvsx_set - set CVSX registers
 * @target:     The target task.
 * @regset:     The user regset structure.
 * @pos:        The buffer position.
 * @count:      Number of bytes to copy.
 * @kbuf:       Kernel buffer to copy into.
 * @ubuf:       User buffer to copy from.
 *
 * This function sets the in-transaction checkpointed VSX registers.
 *
 * When the transaction is active 'ckfp_state' holds the checkpointed
 * VSX register values for the current transaction to fall back on
 * if it aborts in between. This function sets these checkpointed
 * VSX registers. The userspace interface buffer layout is as follows.
 *
 * struct data {
 *      u64     vsx[32];
 * };
 */
static int tm_cvsx_set(struct task_struct *target,
                        const struct user_regset *regset,
                        unsigned int pos, unsigned int count,
                        const void *kbuf, const void __user *ubuf)
{
        u64 buf[32];
        int ret, i;

        if (!cpu_has_feature(CPU_FTR_TM))
                return -ENODEV;

        if (!MSR_TM_ACTIVE(target->thread.regs->msr))
                return -ENODATA;

        /* Flush the state */
        flush_tmregs_to_thread(target);
        flush_fp_to_thread(target);
        flush_altivec_to_thread(target);
        flush_vsx_to_thread(target);

        for (i = 0; i < 32 ; i++)
                buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                 buf, 0, 32 * sizeof(double));
        if (!ret)
                for (i = 0; i < 32 ; i++)
                        target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];

        return ret;
}

/**
 * tm_spr_active - get active number of registers in TM SPR
 * @target:     The target task.
 * @regset:     The user regset structure.
 *
 * This function checks the active number of available
 * registers in the transactional memory SPR category.
 */
static int tm_spr_active(struct task_struct *target,
                         const struct user_regset *regset)
{
        if (!cpu_has_feature(CPU_FTR_TM))
                return -ENODEV;

        return regset->n;
}

/**
 * tm_spr_get - get the TM related SPR registers
 * @target:     The target task.
 * @regset:     The user regset structure.
 * @pos:        The buffer position.
 * @count:      Number of bytes to copy.
 * @kbuf:       Kernel buffer to copy from.
 * @ubuf:       User buffer to copy into.
 *
 * This function gets transactional memory related SPR registers.
 * The userspace interface buffer layout is as follows.
 *
 * struct {
 *      u64             tm_tfhar;
 *      u64             tm_texasr;
 *      u64             tm_tfiar;
 * };
 */
static int tm_spr_get(struct task_struct *target,
                      const struct user_regset *regset,
                      unsigned int pos, unsigned int count,
                      void *kbuf, void __user *ubuf)
{
        int ret;

        /* Build tests */
        BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
        BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
        BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));

        if (!cpu_has_feature(CPU_FTR_TM))
                return -ENODEV;

        /* Flush the states */
        flush_tmregs_to_thread(target);
        flush_fp_to_thread(target);
        flush_altivec_to_thread(target);

        /* TFHAR register */
        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                  &target->thread.tm_tfhar, 0, sizeof(u64));

        /* TEXASR register */
        if (!ret)
                ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                          &target->thread.tm_texasr, sizeof(u64),
                                          2 * sizeof(u64));

        /* TFIAR register */
        if (!ret)
                ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                          &target->thread.tm_tfiar,
                                          2 * sizeof(u64), 3 * sizeof(u64));
        return ret;
}

/**
 * tm_spr_set - set the TM related SPR registers
 * @target:     The target task.
 * @regset:     The user regset structure.
 * @pos:        The buffer position.
 * @count:      Number of bytes to copy.
 * @kbuf:       Kernel buffer to copy into.
 * @ubuf:       User buffer to copy from.
 *
 * This function sets transactional memory related SPR registers.
 * The userspace interface buffer layout is as follows.
 *
 * struct {
 *      u64             tm_tfhar;
 *      u64             tm_texasr;
 *      u64             tm_tfiar;
 * };
 */
static int tm_spr_set(struct task_struct *target,
                      const struct user_regset *regset,
                      unsigned int pos, unsigned int count,
                      const void *kbuf, const void __user *ubuf)
{
        int ret;

        /* Build tests */
        BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
        BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
        BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));

        if (!cpu_has_feature(CPU_FTR_TM))
                return -ENODEV;

        /* Flush the states */
        flush_tmregs_to_thread(target);
        flush_fp_to_thread(target);
        flush_altivec_to_thread(target);

        /* TFHAR register */
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                 &target->thread.tm_tfhar, 0, sizeof(u64));

        /* TEXASR register */
        if (!ret)
                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                         &target->thread.tm_texasr, sizeof(u64),
                                         2 * sizeof(u64));

        /* TFIAR register */
        if (!ret)
                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                         &target->thread.tm_tfiar,
                                         2 * sizeof(u64), 3 * sizeof(u64));
        return ret;
}

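/*
 * Illustrative userspace sketch of the three-SPR layout documented above
 * (an assumption for this example: the regset is exposed to ptrace as
 * NT_PPC_TM_SPR, and it is only available when the kernel and CPU
 * support transactional memory). "ppc_tm_sprs" and "get_tm_sprs" are
 * hypothetical names.
 *
 *      #include <elf.h>
 *      #include <sys/ptrace.h>
 *      #include <sys/types.h>
 *      #include <sys/uio.h>
 *
 *      struct ppc_tm_sprs {
 *              unsigned long long tm_tfhar;
 *              unsigned long long tm_texasr;
 *              unsigned long long tm_tfiar;
 *      };
 *
 *      static long get_tm_sprs(pid_t pid, struct ppc_tm_sprs *sprs)
 *      {
 *              struct iovec iov = {
 *                      .iov_base = sprs,
 *                      .iov_len = sizeof(*sprs),
 *              };
 *
 *              return ptrace(PTRACE_GETREGSET, pid, (void *)NT_PPC_TM_SPR, &iov);
 *      }
 */
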
static int tm_tar_active(struct task_struct *target,
                         const struct user_regset *regset)
{
        if (!cpu_has_feature(CPU_FTR_TM))
                return -ENODEV;

        if (MSR_TM_ACTIVE(target->thread.regs->msr))
                return regset->n;

        return 0;
}

static int tm_tar_get(struct task_struct *target,
                      const struct user_regset *regset,
                      unsigned int pos, unsigned int count,
                      void *kbuf, void __user *ubuf)
{
        int ret;

        if (!cpu_has_feature(CPU_FTR_TM))
                return -ENODEV;

        if (!MSR_TM_ACTIVE(target->thread.regs->msr))
                return -ENODATA;

        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                  &target->thread.tm_tar, 0, sizeof(u64));
        return ret;
}

static int tm_tar_set(struct task_struct *target,
                      const struct user_regset *regset,
                      unsigned int pos, unsigned int count,
                      const void *kbuf, const void __user *ubuf)
{
        int ret;

        if (!cpu_has_feature(CPU_FTR_TM))
                return -ENODEV;

        if (!MSR_TM_ACTIVE(target->thread.regs->msr))
                return -ENODATA;

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                 &target->thread.tm_tar, 0, sizeof(u64));
        return ret;
}

static int tm_ppr_active(struct task_struct *target,
                         const struct user_regset *regset)
{
        if (!cpu_has_feature(CPU_FTR_TM))
                return -ENODEV;

        if (MSR_TM_ACTIVE(target->thread.regs->msr))
                return regset->n;

        return 0;
}


static int tm_ppr_get(struct task_struct *target,
                      const struct user_regset *regset,
                      unsigned int pos, unsigned int count,
                      void *kbuf, void __user *ubuf)
{
        int ret;

        if (!cpu_has_feature(CPU_FTR_TM))
                return -ENODEV;

        if (!MSR_TM_ACTIVE(target->thread.regs->msr))
                return -ENODATA;

        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                  &target->thread.tm_ppr, 0, sizeof(u64));
        return ret;
}

static int tm_ppr_set(struct task_struct *target,
                      const struct user_regset *regset,
                      unsigned int pos, unsigned int count,
                      const void *kbuf, const void __user *ubuf)
{
        int ret;

        if (!cpu_has_feature(CPU_FTR_TM))
                return -ENODEV;

        if (!MSR_TM_ACTIVE(target->thread.regs->msr))
                return -ENODATA;

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                 &target->thread.tm_ppr, 0, sizeof(u64));
        return ret;
}

static int tm_dscr_active(struct task_struct *target,
                          const struct user_regset *regset)
{
        if (!cpu_has_feature(CPU_FTR_TM))
                return -ENODEV;

        if (MSR_TM_ACTIVE(target->thread.regs->msr))
                return regset->n;

        return 0;
}

static int tm_dscr_get(struct task_struct *target,
                       const struct user_regset *regset,
                       unsigned int pos, unsigned int count,
                       void *kbuf, void __user *ubuf)
{
        int ret;

        if (!cpu_has_feature(CPU_FTR_TM))
                return -ENODEV;

        if (!MSR_TM_ACTIVE(target->thread.regs->msr))
                return -ENODATA;

        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                  &target->thread.tm_dscr, 0, sizeof(u64));
        return ret;
}

static int tm_dscr_set(struct task_struct *target,
                       const struct user_regset *regset,
                       unsigned int pos, unsigned int count,
                       const void *kbuf, const void __user *ubuf)
{
        int ret;

        if (!cpu_has_feature(CPU_FTR_TM))
                return -ENODEV;

        if (!MSR_TM_ACTIVE(target->thread.regs->msr))
                return -ENODATA;

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                 &target->thread.tm_dscr, 0, sizeof(u64));
        return ret;
}
#endif  /* CONFIG_PPC_TRANSACTIONAL_MEM */

Anshuman Khandualfa439812016-07-28 10:57:42 +08001605#ifdef CONFIG_PPC64
1606static int ppr_get(struct task_struct *target,
1607 const struct user_regset *regset,
1608 unsigned int pos, unsigned int count,
1609 void *kbuf, void __user *ubuf)
1610{
Masahiro Yamada7f2462a2016-09-06 20:21:50 +09001611 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
Nicholas Piggin4c2de742018-10-13 00:15:16 +11001612 &target->thread.regs->ppr, 0, sizeof(u64));
Anshuman Khandualfa439812016-07-28 10:57:42 +08001613}
1614
1615static int ppr_set(struct task_struct *target,
1616 const struct user_regset *regset,
1617 unsigned int pos, unsigned int count,
1618 const void *kbuf, const void __user *ubuf)
1619{
Masahiro Yamada7f2462a2016-09-06 20:21:50 +09001620 return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
Nicholas Piggin4c2de742018-10-13 00:15:16 +11001621 &target->thread.regs->ppr, 0, sizeof(u64));
Anshuman Khandualfa439812016-07-28 10:57:42 +08001622}
1623
1624static int dscr_get(struct task_struct *target,
1625 const struct user_regset *regset,
1626 unsigned int pos, unsigned int count,
1627 void *kbuf, void __user *ubuf)
1628{
Masahiro Yamada7f2462a2016-09-06 20:21:50 +09001629 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1630 &target->thread.dscr, 0, sizeof(u64));
Anshuman Khandualfa439812016-07-28 10:57:42 +08001631}
1632static int dscr_set(struct task_struct *target,
1633 const struct user_regset *regset,
1634 unsigned int pos, unsigned int count,
1635 const void *kbuf, const void __user *ubuf)
1636{
Masahiro Yamada7f2462a2016-09-06 20:21:50 +09001637 return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1638 &target->thread.dscr, 0, sizeof(u64));
Anshuman Khandualfa439812016-07-28 10:57:42 +08001639}
1640#endif
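/*
 * Illustrative userspace sketch (not part of this kernel file): reading the
 * DSCR of a stopped tracee through the single-u64 regset above. NT_PPC_DSCR
 * is assumed to be visible via <elf.h>; NT_PPC_PPR works the same way.
 */
#if 0
#include <elf.h>
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

static long read_dscr(pid_t pid, uint64_t *dscr)
{
	struct iovec iov = { .iov_base = dscr, .iov_len = sizeof(*dscr) };

	/* dscr_get() above copies thread.dscr straight into this buffer. */
	return ptrace(PTRACE_GETREGSET, pid, (void *)NT_PPC_DSCR, &iov);
}
#endif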
1641#ifdef CONFIG_PPC_BOOK3S_64
1642static int tar_get(struct task_struct *target,
1643 const struct user_regset *regset,
1644 unsigned int pos, unsigned int count,
1645 void *kbuf, void __user *ubuf)
1646{
Masahiro Yamada7f2462a2016-09-06 20:21:50 +09001647 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1648 &target->thread.tar, 0, sizeof(u64));
Anshuman Khandualfa439812016-07-28 10:57:42 +08001649}
1650static int tar_set(struct task_struct *target,
1651 const struct user_regset *regset,
1652 unsigned int pos, unsigned int count,
1653 const void *kbuf, const void __user *ubuf)
1654{
Masahiro Yamada7f2462a2016-09-06 20:21:50 +09001655 return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1656 &target->thread.tar, 0, sizeof(u64));
Anshuman Khandualfa439812016-07-28 10:57:42 +08001657}
Anshuman Khandualcf89d4e2016-07-28 10:57:43 +08001658
1659static int ebb_active(struct task_struct *target,
1660 const struct user_regset *regset)
1661{
1662 if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1663 return -ENODEV;
1664
1665 if (target->thread.used_ebb)
1666 return regset->n;
1667
1668 return 0;
1669}
1670
1671static int ebb_get(struct task_struct *target,
1672 const struct user_regset *regset,
1673 unsigned int pos, unsigned int count,
1674 void *kbuf, void __user *ubuf)
1675{
1676 /* Build tests */
1677 BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
1678 BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
1679
1680 if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1681 return -ENODEV;
1682
1683 if (!target->thread.used_ebb)
1684 return -ENODATA;
1685
1686 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1687 &target->thread.ebbrr, 0, 3 * sizeof(unsigned long));
1688}
1689
1690static int ebb_set(struct task_struct *target,
1691 const struct user_regset *regset,
1692 unsigned int pos, unsigned int count,
1693 const void *kbuf, const void __user *ubuf)
1694{
1695 int ret = 0;
1696
1697 /* Build tests */
1698 BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
1699 BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
1700
1701 if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1702 return -ENODEV;
1703
1704 if (target->thread.used_ebb)
1705 return -ENODATA;
1706
1707 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1708 &target->thread.ebbrr, 0, sizeof(unsigned long));
1709
1710 if (!ret)
1711 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1712 &target->thread.ebbhr, sizeof(unsigned long),
1713 2 * sizeof(unsigned long));
1714
1715 if (!ret)
1716 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1717 &target->thread.bescr,
1718 2 * sizeof(unsigned long), 3 * sizeof(unsigned long));
1719
1720 return ret;
1721}
Anshuman Khanduala67ae752016-07-28 10:57:44 +08001722static int pmu_active(struct task_struct *target,
1723 const struct user_regset *regset)
1724{
1725 if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1726 return -ENODEV;
1727
1728 return regset->n;
1729}
1730
1731static int pmu_get(struct task_struct *target,
1732 const struct user_regset *regset,
1733 unsigned int pos, unsigned int count,
1734 void *kbuf, void __user *ubuf)
1735{
1736 /* Build tests */
1737 BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
1738 BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
1739 BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
1740 BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
1741
1742 if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1743 return -ENODEV;
1744
1745 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1746 &target->thread.siar, 0,
1747 5 * sizeof(unsigned long));
1748}
1749
1750static int pmu_set(struct task_struct *target,
1751 const struct user_regset *regset,
1752 unsigned int pos, unsigned int count,
1753 const void *kbuf, const void __user *ubuf)
1754{
1755 int ret = 0;
1756
1757 /* Build tests */
1758 BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
1759 BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
1760 BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
1761 BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
1762
1763 if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1764 return -ENODEV;
1765
1766 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1767 &target->thread.siar, 0,
1768 sizeof(unsigned long));
1769
1770 if (!ret)
1771 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1772 &target->thread.sdar, sizeof(unsigned long),
1773 2 * sizeof(unsigned long));
1774
1775 if (!ret)
1776 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1777 &target->thread.sier, 2 * sizeof(unsigned long),
1778 3 * sizeof(unsigned long));
1779
1780 if (!ret)
1781 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1782 &target->thread.mmcr2, 3 * sizeof(unsigned long),
1783 4 * sizeof(unsigned long));
1784
1785 if (!ret)
1786 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1787 &target->thread.mmcr0, 4 * sizeof(unsigned long),
1788 5 * sizeof(unsigned long));
1789 return ret;
1790}
Anshuman Khandualfa439812016-07-28 10:57:42 +08001791#endif
Thiago Jung Bauermannc5cc1f42018-01-18 17:50:43 -08001792
1793#ifdef CONFIG_PPC_MEM_KEYS
1794static int pkey_active(struct task_struct *target,
1795 const struct user_regset *regset)
1796{
1797 if (!arch_pkeys_enabled())
1798 return -ENODEV;
1799
1800 return regset->n;
1801}
1802
1803static int pkey_get(struct task_struct *target,
1804 const struct user_regset *regset,
1805 unsigned int pos, unsigned int count,
1806 void *kbuf, void __user *ubuf)
1807{
1808 BUILD_BUG_ON(TSO(amr) + sizeof(unsigned long) != TSO(iamr));
1809 BUILD_BUG_ON(TSO(iamr) + sizeof(unsigned long) != TSO(uamor));
1810
1811 if (!arch_pkeys_enabled())
1812 return -ENODEV;
1813
1814 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1815 &target->thread.amr, 0,
1816 ELF_NPKEY * sizeof(unsigned long));
1817}
1818
1819static int pkey_set(struct task_struct *target,
1820 const struct user_regset *regset,
1821 unsigned int pos, unsigned int count,
1822 const void *kbuf, const void __user *ubuf)
1823{
1824 u64 new_amr;
1825 int ret;
1826
1827 if (!arch_pkeys_enabled())
1828 return -ENODEV;
1829
1830 /* Only the AMR can be set from userspace */
1831 if (pos != 0 || count != sizeof(new_amr))
1832 return -EINVAL;
1833
1834 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1835 &new_amr, 0, sizeof(new_amr));
1836 if (ret)
1837 return ret;
1838
1839 /* UAMOR determines which bits of the AMR can be set from userspace. */
1840 target->thread.amr = (new_amr & target->thread.uamor) |
1841 (target->thread.amr & ~target->thread.uamor);
1842
1843 return 0;
1844}
1845#endif /* CONFIG_PPC_MEM_KEYS */
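/*
 * Worked example of the UAMOR masking done in pkey_set() above (illustrative
 * only, not part of this file). With a hypothetical UAMOR that exposes a
 * single key, a tracer that tries to rewrite every AMR field only changes
 * that key's two bits.
 */
#if 0
#include <assert.h>
#include <stdint.h>

static uint64_t apply_amr(uint64_t amr, uint64_t new_amr, uint64_t uamor)
{
	/* Same expression as pkey_set(): writable bits come from new_amr,
	 * bits masked out by UAMOR keep their old value. */
	return (new_amr & uamor) | (amr & ~uamor);
}

static void demo(void)
{
	uint64_t uamor = 0x3ULL << 52;	/* hypothetically, one key writable */
	uint64_t amr = 0;

	/* Writing all-ones only sticks where UAMOR allows it. */
	assert(apply_amr(amr, ~0ULL, uamor) == uamor);
}
#endif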
1846
Roland McGrath80fdf472007-12-20 03:58:00 -08001847/*
1848 * These are our native regset flavors.
1849 */
1850enum powerpc_regset {
1851 REGSET_GPR,
1852 REGSET_FPR,
1853#ifdef CONFIG_ALTIVEC
1854 REGSET_VMX,
1855#endif
Michael Neulingce48b212008-06-25 14:07:18 +10001856#ifdef CONFIG_VSX
1857 REGSET_VSX,
1858#endif
Roland McGrath80fdf472007-12-20 03:58:00 -08001859#ifdef CONFIG_SPE
1860 REGSET_SPE,
1861#endif
Anshuman Khandual25847fb2016-07-28 10:57:36 +08001862#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1863 REGSET_TM_CGPR, /* TM checkpointed GPR registers */
Anshuman Khandual19cbcbf2016-07-28 10:57:37 +08001864 REGSET_TM_CFPR, /* TM checkpointed FPR registers */
Anshuman Khandual8c13f592016-07-28 10:57:38 +08001865 REGSET_TM_CVMX, /* TM checkpointed VMX registers */
Anshuman Khandual9d3918f2016-07-28 10:57:39 +08001866 REGSET_TM_CVSX, /* TM checkpointed VSX registers */
Anshuman Khandual08e1c012016-07-28 10:57:40 +08001867 REGSET_TM_SPR, /* TM specific SPR registers */
Anshuman Khandualc45dc902016-07-28 10:57:41 +08001868 REGSET_TM_CTAR, /* TM checkpointed TAR register */
1869 REGSET_TM_CPPR, /* TM checkpointed PPR register */
1870 REGSET_TM_CDSCR, /* TM checkpointed DSCR register */
Anshuman Khandual25847fb2016-07-28 10:57:36 +08001871#endif
Anshuman Khandualfa439812016-07-28 10:57:42 +08001872#ifdef CONFIG_PPC64
1873 REGSET_PPR, /* PPR register */
1874 REGSET_DSCR, /* DSCR register */
1875#endif
1876#ifdef CONFIG_PPC_BOOK3S_64
1877 REGSET_TAR, /* TAR register */
Anshuman Khandualcf89d4e2016-07-28 10:57:43 +08001878 REGSET_EBB, /* EBB registers */
Anshuman Khanduala67ae752016-07-28 10:57:44 +08001879 REGSET_PMR, /* Performance Monitor Registers */
Anshuman Khandualfa439812016-07-28 10:57:42 +08001880#endif
Thiago Jung Bauermannc5cc1f42018-01-18 17:50:43 -08001881#ifdef CONFIG_PPC_MEM_KEYS
1882 REGSET_PKEY, /* AMR register */
1883#endif
Roland McGrath80fdf472007-12-20 03:58:00 -08001884};
1885
1886static const struct user_regset native_regsets[] = {
1887 [REGSET_GPR] = {
1888 .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
1889 .size = sizeof(long), .align = sizeof(long),
1890 .get = gpr_get, .set = gpr_set
1891 },
1892 [REGSET_FPR] = {
1893 .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
1894 .size = sizeof(double), .align = sizeof(double),
1895 .get = fpr_get, .set = fpr_set
1896 },
1897#ifdef CONFIG_ALTIVEC
1898 [REGSET_VMX] = {
1899 .core_note_type = NT_PPC_VMX, .n = 34,
1900 .size = sizeof(vector128), .align = sizeof(vector128),
1901 .active = vr_active, .get = vr_get, .set = vr_set
1902 },
1903#endif
Michael Neulingce48b212008-06-25 14:07:18 +10001904#ifdef CONFIG_VSX
1905 [REGSET_VSX] = {
Michael Neulingf3e909c2008-07-01 14:01:39 +10001906 .core_note_type = NT_PPC_VSX, .n = 32,
1907 .size = sizeof(double), .align = sizeof(double),
Michael Neulingce48b212008-06-25 14:07:18 +10001908 .active = vsr_active, .get = vsr_get, .set = vsr_set
1909 },
1910#endif
Roland McGrath80fdf472007-12-20 03:58:00 -08001911#ifdef CONFIG_SPE
1912 [REGSET_SPE] = {
Suzuki Poulosea0b38b42013-08-27 13:22:14 +05301913 .core_note_type = NT_PPC_SPE, .n = 35,
Roland McGrath80fdf472007-12-20 03:58:00 -08001914 .size = sizeof(u32), .align = sizeof(u32),
1915 .active = evr_active, .get = evr_get, .set = evr_set
1916 },
1917#endif
Anshuman Khandual25847fb2016-07-28 10:57:36 +08001918#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1919 [REGSET_TM_CGPR] = {
1920 .core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
1921 .size = sizeof(long), .align = sizeof(long),
1922 .active = tm_cgpr_active, .get = tm_cgpr_get, .set = tm_cgpr_set
1923 },
Anshuman Khandual19cbcbf2016-07-28 10:57:37 +08001924 [REGSET_TM_CFPR] = {
1925 .core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
1926 .size = sizeof(double), .align = sizeof(double),
1927 .active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
1928 },
Anshuman Khandual8c13f592016-07-28 10:57:38 +08001929 [REGSET_TM_CVMX] = {
1930 .core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
1931 .size = sizeof(vector128), .align = sizeof(vector128),
1932 .active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
1933 },
Anshuman Khandual9d3918f2016-07-28 10:57:39 +08001934 [REGSET_TM_CVSX] = {
1935 .core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
1936 .size = sizeof(double), .align = sizeof(double),
1937 .active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
1938 },
Anshuman Khandual08e1c012016-07-28 10:57:40 +08001939 [REGSET_TM_SPR] = {
1940 .core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
1941 .size = sizeof(u64), .align = sizeof(u64),
1942 .active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
1943 },
Anshuman Khandualc45dc902016-07-28 10:57:41 +08001944 [REGSET_TM_CTAR] = {
1945 .core_note_type = NT_PPC_TM_CTAR, .n = 1,
1946 .size = sizeof(u64), .align = sizeof(u64),
1947 .active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
1948 },
1949 [REGSET_TM_CPPR] = {
1950 .core_note_type = NT_PPC_TM_CPPR, .n = 1,
1951 .size = sizeof(u64), .align = sizeof(u64),
1952 .active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
1953 },
1954 [REGSET_TM_CDSCR] = {
1955 .core_note_type = NT_PPC_TM_CDSCR, .n = 1,
1956 .size = sizeof(u64), .align = sizeof(u64),
1957 .active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
1958 },
Anshuman Khandual25847fb2016-07-28 10:57:36 +08001959#endif
Anshuman Khandualfa439812016-07-28 10:57:42 +08001960#ifdef CONFIG_PPC64
1961 [REGSET_PPR] = {
1962 .core_note_type = NT_PPC_PPR, .n = 1,
1963 .size = sizeof(u64), .align = sizeof(u64),
1964 .get = ppr_get, .set = ppr_set
1965 },
1966 [REGSET_DSCR] = {
1967 .core_note_type = NT_PPC_DSCR, .n = 1,
1968 .size = sizeof(u64), .align = sizeof(u64),
1969 .get = dscr_get, .set = dscr_set
1970 },
1971#endif
1972#ifdef CONFIG_PPC_BOOK3S_64
1973 [REGSET_TAR] = {
1974 .core_note_type = NT_PPC_TAR, .n = 1,
1975 .size = sizeof(u64), .align = sizeof(u64),
1976 .get = tar_get, .set = tar_set
1977 },
Anshuman Khandualcf89d4e2016-07-28 10:57:43 +08001978 [REGSET_EBB] = {
1979 .core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
1980 .size = sizeof(u64), .align = sizeof(u64),
1981 .active = ebb_active, .get = ebb_get, .set = ebb_set
1982 },
Anshuman Khanduala67ae752016-07-28 10:57:44 +08001983 [REGSET_PMR] = {
1984 .core_note_type = NT_PPC_PMU, .n = ELF_NPMU,
1985 .size = sizeof(u64), .align = sizeof(u64),
1986 .active = pmu_active, .get = pmu_get, .set = pmu_set
1987 },
Anshuman Khandualfa439812016-07-28 10:57:42 +08001988#endif
Thiago Jung Bauermannc5cc1f42018-01-18 17:50:43 -08001989#ifdef CONFIG_PPC_MEM_KEYS
1990 [REGSET_PKEY] = {
1991 .core_note_type = NT_PPC_PKEY, .n = ELF_NPKEY,
1992 .size = sizeof(u64), .align = sizeof(u64),
1993 .active = pkey_active, .get = pkey_get, .set = pkey_set
1994 },
1995#endif
Roland McGrath80fdf472007-12-20 03:58:00 -08001996};
1997
1998static const struct user_regset_view user_ppc_native_view = {
1999 .name = UTS_MACHINE, .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
2000 .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
2001};
2002
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08002003#ifdef CONFIG_PPC64
2004#include <linux/compat.h>
2005
Anshuman Khandual04fcadc2016-07-28 10:57:35 +08002006static int gpr32_get_common(struct task_struct *target,
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08002007 const struct user_regset *regset,
2008 unsigned int pos, unsigned int count,
Simon Guo26183112016-09-11 21:44:13 +08002009 void *kbuf, void __user *ubuf,
2010 unsigned long *regs)
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08002011{
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08002012 compat_ulong_t *k = kbuf;
2013 compat_ulong_t __user *u = ubuf;
2014 compat_ulong_t reg;
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08002015
2016 pos /= sizeof(reg);
2017 count /= sizeof(reg);
2018
2019 if (kbuf)
2020 for (; count > 0 && pos < PT_MSR; --count)
2021 *k++ = regs[pos++];
2022 else
2023 for (; count > 0 && pos < PT_MSR; --count)
2024 if (__put_user((compat_ulong_t) regs[pos++], u++))
2025 return -EFAULT;
2026
2027 if (count > 0 && pos == PT_MSR) {
2028 reg = get_user_msr(target);
2029 if (kbuf)
2030 *k++ = reg;
2031 else if (__put_user(reg, u++))
2032 return -EFAULT;
2033 ++pos;
2034 --count;
2035 }
2036
2037 if (kbuf)
2038 for (; count > 0 && pos < PT_REGS_COUNT; --count)
2039 *k++ = regs[pos++];
2040 else
2041 for (; count > 0 && pos < PT_REGS_COUNT; --count)
2042 if (__put_user((compat_ulong_t) regs[pos++], u++))
2043 return -EFAULT;
2044
2045 kbuf = k;
2046 ubuf = u;
2047 pos *= sizeof(reg);
2048 count *= sizeof(reg);
2049 return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
2050 PT_REGS_COUNT * sizeof(reg), -1);
2051}
2052
Anshuman Khandual04fcadc2016-07-28 10:57:35 +08002053static int gpr32_set_common(struct task_struct *target,
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08002054 const struct user_regset *regset,
2055 unsigned int pos, unsigned int count,
Simon Guo26183112016-09-11 21:44:13 +08002056 const void *kbuf, const void __user *ubuf,
2057 unsigned long *regs)
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08002058{
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08002059 const compat_ulong_t *k = kbuf;
2060 const compat_ulong_t __user *u = ubuf;
2061 compat_ulong_t reg;
2062
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08002063 pos /= sizeof(reg);
2064 count /= sizeof(reg);
2065
2066 if (kbuf)
2067 for (; count > 0 && pos < PT_MSR; --count)
2068 regs[pos++] = *k++;
2069 else
2070 for (; count > 0 && pos < PT_MSR; --count) {
2071 if (__get_user(reg, u++))
2072 return -EFAULT;
2073 regs[pos++] = reg;
2074 }
2075
2076
2077 if (count > 0 && pos == PT_MSR) {
2078 if (kbuf)
2079 reg = *k++;
2080 else if (__get_user(reg, u++))
2081 return -EFAULT;
2082 set_user_msr(target, reg);
2083 ++pos;
2084 --count;
2085 }
2086
Roland McGrathc2372eb2008-03-13 19:25:35 +11002087 if (kbuf) {
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08002088 for (; count > 0 && pos <= PT_MAX_PUT_REG; --count)
2089 regs[pos++] = *k++;
Roland McGrathc2372eb2008-03-13 19:25:35 +11002090 for (; count > 0 && pos < PT_TRAP; --count, ++pos)
2091 ++k;
2092 } else {
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08002093 for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) {
2094 if (__get_user(reg, u++))
2095 return -EFAULT;
2096 regs[pos++] = reg;
2097 }
Roland McGrathc2372eb2008-03-13 19:25:35 +11002098 for (; count > 0 && pos < PT_TRAP; --count, ++pos)
2099 if (__get_user(reg, u++))
2100 return -EFAULT;
2101 }
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08002102
2103 if (count > 0 && pos == PT_TRAP) {
2104 if (kbuf)
2105 reg = *k++;
2106 else if (__get_user(reg, u++))
2107 return -EFAULT;
2108 set_user_trap(target, reg);
2109 ++pos;
2110 --count;
2111 }
2112
2113 kbuf = k;
2114 ubuf = u;
2115 pos *= sizeof(reg);
2116 count *= sizeof(reg);
2117 return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
2118 (PT_TRAP + 1) * sizeof(reg), -1);
2119}
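/*
 * Illustrative userspace sketch (not part of this kernel file): a tracer
 * reading the GPR regset of a 32-bit tracee. The compat view above packs
 * each register into 32 bits, so the buffer is an array of uint32_t; slots
 * 32 and 33 are assumed to be PT_NIP and PT_MSR as in the uapi
 * <asm/ptrace.h>, and 48 to be the powerpc ELF_NGREG.
 */
#if 0
#include <elf.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

static long dump_compat_gprs(pid_t pid)
{
	uint32_t regs[48];	/* ELF_NGREG slots, 32 bits each in this view */
	struct iovec iov = { .iov_base = regs, .iov_len = sizeof(regs) };
	long rc = ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);

	if (rc == 0)		/* slots 32/33: PT_NIP and PT_MSR */
		printf("nip=%#lx msr=%#lx\n",
		       (unsigned long)regs[32], (unsigned long)regs[33]);
	return rc;
}
#endif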
2120
Anshuman Khandual25847fb2016-07-28 10:57:36 +08002121#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2122static int tm_cgpr32_get(struct task_struct *target,
2123 const struct user_regset *regset,
2124 unsigned int pos, unsigned int count,
2125 void *kbuf, void __user *ubuf)
2126{
Simon Guo26183112016-09-11 21:44:13 +08002127 return gpr32_get_common(target, regset, pos, count, kbuf, ubuf,
2128 &target->thread.ckpt_regs.gpr[0]);
Anshuman Khandual25847fb2016-07-28 10:57:36 +08002129}
2130
2131static int tm_cgpr32_set(struct task_struct *target,
2132 const struct user_regset *regset,
2133 unsigned int pos, unsigned int count,
2134 const void *kbuf, const void __user *ubuf)
2135{
Simon Guo26183112016-09-11 21:44:13 +08002136 return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
2137 &target->thread.ckpt_regs.gpr[0]);
Anshuman Khandual25847fb2016-07-28 10:57:36 +08002138}
2139#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
2140
Anshuman Khandual04fcadc2016-07-28 10:57:35 +08002141static int gpr32_get(struct task_struct *target,
2142 const struct user_regset *regset,
2143 unsigned int pos, unsigned int count,
2144 void *kbuf, void __user *ubuf)
2145{
Simon Guo26183112016-09-11 21:44:13 +08002146 int i;
2147
2148 if (target->thread.regs == NULL)
2149 return -EIO;
2150
2151 if (!FULL_REGS(target->thread.regs)) {
2152 /*
2153 * We have a partial register set.
2154 * Fill 14-31 with bogus values.
2155 */
2156 for (i = 14; i < 32; i++)
2157 target->thread.regs->gpr[i] = NV_REG_POISON;
2158 }
2159 return gpr32_get_common(target, regset, pos, count, kbuf, ubuf,
2160 &target->thread.regs->gpr[0]);
Anshuman Khandual04fcadc2016-07-28 10:57:35 +08002161}
2162
2163static int gpr32_set(struct task_struct *target,
2164 const struct user_regset *regset,
2165 unsigned int pos, unsigned int count,
2166 const void *kbuf, const void __user *ubuf)
2167{
Simon Guo26183112016-09-11 21:44:13 +08002168 if (target->thread.regs == NULL)
2169 return -EIO;
2170
2171 CHECK_FULL_REGS(target->thread.regs);
2172 return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
2173 &target->thread.regs->gpr[0]);
Anshuman Khandual04fcadc2016-07-28 10:57:35 +08002174}
2175
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08002176/*
2177 * These are the regset flavors matching the CONFIG_PPC32 native set.
2178 */
2179static const struct user_regset compat_regsets[] = {
2180 [REGSET_GPR] = {
2181 .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
2182 .size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
2183 .get = gpr32_get, .set = gpr32_set
2184 },
2185 [REGSET_FPR] = {
2186 .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
2187 .size = sizeof(double), .align = sizeof(double),
2188 .get = fpr_get, .set = fpr_set
2189 },
2190#ifdef CONFIG_ALTIVEC
2191 [REGSET_VMX] = {
2192 .core_note_type = NT_PPC_VMX, .n = 34,
2193 .size = sizeof(vector128), .align = sizeof(vector128),
2194 .active = vr_active, .get = vr_get, .set = vr_set
2195 },
2196#endif
2197#ifdef CONFIG_SPE
2198 [REGSET_SPE] = {
Roland McGrath24f1a842008-01-02 17:05:48 -08002199 .core_note_type = NT_PPC_SPE, .n = 35,
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08002200 .size = sizeof(u32), .align = sizeof(u32),
2201 .active = evr_active, .get = evr_get, .set = evr_set
2202 },
2203#endif
Anshuman Khandual25847fb2016-07-28 10:57:36 +08002204#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2205 [REGSET_TM_CGPR] = {
2206 .core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
2207 .size = sizeof(long), .align = sizeof(long),
2208 .active = tm_cgpr_active,
2209 .get = tm_cgpr32_get, .set = tm_cgpr32_set
2210 },
Anshuman Khandual19cbcbf2016-07-28 10:57:37 +08002211 [REGSET_TM_CFPR] = {
2212 .core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
2213 .size = sizeof(double), .align = sizeof(double),
2214 .active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
2215 },
Anshuman Khandual8c13f592016-07-28 10:57:38 +08002216 [REGSET_TM_CVMX] = {
2217 .core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
2218 .size = sizeof(vector128), .align = sizeof(vector128),
2219 .active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
2220 },
Anshuman Khandual9d3918f2016-07-28 10:57:39 +08002221 [REGSET_TM_CVSX] = {
2222 .core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
2223 .size = sizeof(double), .align = sizeof(double),
2224 .active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
2225 },
Anshuman Khandual08e1c012016-07-28 10:57:40 +08002226 [REGSET_TM_SPR] = {
2227 .core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
2228 .size = sizeof(u64), .align = sizeof(u64),
2229 .active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
2230 },
Anshuman Khandualc45dc902016-07-28 10:57:41 +08002231 [REGSET_TM_CTAR] = {
2232 .core_note_type = NT_PPC_TM_CTAR, .n = 1,
2233 .size = sizeof(u64), .align = sizeof(u64),
2234 .active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
2235 },
2236 [REGSET_TM_CPPR] = {
2237 .core_note_type = NT_PPC_TM_CPPR, .n = 1,
2238 .size = sizeof(u64), .align = sizeof(u64),
2239 .active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
2240 },
2241 [REGSET_TM_CDSCR] = {
2242 .core_note_type = NT_PPC_TM_CDSCR, .n = 1,
2243 .size = sizeof(u64), .align = sizeof(u64),
2244 .active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
2245 },
Anshuman Khandual25847fb2016-07-28 10:57:36 +08002246#endif
Anshuman Khandualfa439812016-07-28 10:57:42 +08002247#ifdef CONFIG_PPC64
2248 [REGSET_PPR] = {
2249 .core_note_type = NT_PPC_PPR, .n = 1,
2250 .size = sizeof(u64), .align = sizeof(u64),
2251 .get = ppr_get, .set = ppr_set
2252 },
2253 [REGSET_DSCR] = {
2254 .core_note_type = NT_PPC_DSCR, .n = 1,
2255 .size = sizeof(u64), .align = sizeof(u64),
2256 .get = dscr_get, .set = dscr_set
2257 },
2258#endif
2259#ifdef CONFIG_PPC_BOOK3S_64
2260 [REGSET_TAR] = {
2261 .core_note_type = NT_PPC_TAR, .n = 1,
2262 .size = sizeof(u64), .align = sizeof(u64),
2263 .get = tar_get, .set = tar_set
2264 },
Anshuman Khandualcf89d4e2016-07-28 10:57:43 +08002265 [REGSET_EBB] = {
2266 .core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
2267 .size = sizeof(u64), .align = sizeof(u64),
2268 .active = ebb_active, .get = ebb_get, .set = ebb_set
2269 },
Anshuman Khandualfa439812016-07-28 10:57:42 +08002270#endif
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08002271};
2272
2273static const struct user_regset_view user_ppc_compat_view = {
2274 .name = "ppc", .e_machine = EM_PPC, .ei_osabi = ELF_OSABI,
2275 .regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
2276};
2277#endif /* CONFIG_PPC64 */
2278
Roland McGrath80fdf472007-12-20 03:58:00 -08002279const struct user_regset_view *task_user_regset_view(struct task_struct *task)
2280{
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08002281#ifdef CONFIG_PPC64
2282 if (test_tsk_thread_flag(task, TIF_32BIT))
2283 return &user_ppc_compat_view;
2284#endif
Roland McGrath80fdf472007-12-20 03:58:00 -08002285 return &user_ppc_native_view;
2286}
2287
2288
Roland McGrath2a84b0d2008-01-30 13:30:51 +01002289void user_enable_single_step(struct task_struct *task)
Benjamin Herrenschmidt865418d2007-06-04 15:15:44 +10002290{
2291 struct pt_regs *regs = task->thread.regs;
2292
2293 if (regs != NULL) {
Dave Kleikamp172ae2e2010-02-08 11:50:57 +00002294#ifdef CONFIG_PPC_ADV_DEBUG_REGS
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302295 task->thread.debug.dbcr0 &= ~DBCR0_BT;
2296 task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
Benjamin Herrenschmidt865418d2007-06-04 15:15:44 +10002297 regs->msr |= MSR_DE;
2298#else
Roland McGrathec097c82009-05-28 21:26:38 +00002299 regs->msr &= ~MSR_BE;
Benjamin Herrenschmidt865418d2007-06-04 15:15:44 +10002300 regs->msr |= MSR_SE;
2301#endif
2302 }
2303 set_tsk_thread_flag(task, TIF_SINGLESTEP);
2304}
2305
Roland McGrathec097c82009-05-28 21:26:38 +00002306void user_enable_block_step(struct task_struct *task)
2307{
2308 struct pt_regs *regs = task->thread.regs;
2309
2310 if (regs != NULL) {
Dave Kleikamp172ae2e2010-02-08 11:50:57 +00002311#ifdef CONFIG_PPC_ADV_DEBUG_REGS
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302312 task->thread.debug.dbcr0 &= ~DBCR0_IC;
2313 task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT;
Roland McGrathec097c82009-05-28 21:26:38 +00002314 regs->msr |= MSR_DE;
2315#else
2316 regs->msr &= ~MSR_SE;
2317 regs->msr |= MSR_BE;
2318#endif
2319 }
2320 set_tsk_thread_flag(task, TIF_SINGLESTEP);
2321}
2322
Roland McGrath2a84b0d2008-01-30 13:30:51 +01002323void user_disable_single_step(struct task_struct *task)
Benjamin Herrenschmidt865418d2007-06-04 15:15:44 +10002324{
2325 struct pt_regs *regs = task->thread.regs;
2326
2327 if (regs != NULL) {
Dave Kleikamp172ae2e2010-02-08 11:50:57 +00002328#ifdef CONFIG_PPC_ADV_DEBUG_REGS
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002329 /*
2330 * The logic to disable single stepping should be as
2331 * simple as turning off the Instruction Complete flag.
2332 * And, after doing so, if all debug flags are off, turn
2333 * off DBCR0(IDM) and MSR(DE) .... Torez
2334 */
James Yang682775b2013-07-05 14:49:43 -05002335 task->thread.debug.dbcr0 &= ~(DBCR0_IC|DBCR0_BT);
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002336 /*
2337 * Test to see if any of the DBCR_ACTIVE_EVENTS bits are set.
2338 */
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302339 if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
2340 task->thread.debug.dbcr1)) {
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002341 /*
2342 * All debug events were off.....
2343 */
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302344 task->thread.debug.dbcr0 &= ~DBCR0_IDM;
Dave Kleikamp28477fb2009-07-08 13:46:18 +00002345 regs->msr &= ~MSR_DE;
2346 }
Benjamin Herrenschmidt865418d2007-06-04 15:15:44 +10002347#else
Roland McGrathec097c82009-05-28 21:26:38 +00002348 regs->msr &= ~(MSR_SE | MSR_BE);
Benjamin Herrenschmidt865418d2007-06-04 15:15:44 +10002349#endif
2350 }
2351 clear_tsk_thread_flag(task, TIF_SINGLESTEP);
2352}
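/*
 * Illustrative userspace sketch (not part of this kernel file): the tracer
 * side that lands in user_enable_single_step() above. PTRACE_SINGLESTEP arms
 * MSR_SE (or DBCR0_IC on ADV_DEBUG parts), resumes the child, and the very
 * next instruction traps back to the tracer with SIGTRAP.
 */
#if 0
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static int step_once(pid_t pid)
{
	int status;

	if (ptrace(PTRACE_SINGLESTEP, pid, NULL, NULL) == -1)
		return -1;
	if (waitpid(pid, &status, 0) == -1)
		return -1;

	/* Expect a SIGTRAP stop after exactly one instruction. */
	return (WIFSTOPPED(status) && WSTOPSIG(status) == SIGTRAP) ? 0 : -1;
}
#endif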
2353
K.Prasad5aae8a52010-06-15 11:35:19 +05302354#ifdef CONFIG_HAVE_HW_BREAKPOINT
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02002355void ptrace_triggered(struct perf_event *bp,
K.Prasad5aae8a52010-06-15 11:35:19 +05302356 struct perf_sample_data *data, struct pt_regs *regs)
2357{
2358 struct perf_event_attr attr;
2359
2360 /*
2361 * Disable the breakpoint request here since ptrace has defined a
2362 * one-shot behaviour for breakpoint exceptions in PPC64.
2363 * The SIGTRAP signal is generated automatically for us in do_dabr().
 2364	 * We don't have to do anything about that here.
2365 */
2366 attr = bp->attr;
2367 attr.disabled = true;
2368 modify_user_hw_breakpoint(bp, &attr);
2369}
2370#endif /* CONFIG_HAVE_HW_BREAKPOINT */
2371
Anton Blancharde51df2c2014-08-20 08:55:18 +10002372static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
Benjamin Herrenschmidtabd06502007-06-04 15:15:47 +10002373 unsigned long data)
2374{
K.Prasad5aae8a52010-06-15 11:35:19 +05302375#ifdef CONFIG_HAVE_HW_BREAKPOINT
2376 int ret;
2377 struct thread_struct *thread = &(task->thread);
2378 struct perf_event *bp;
2379 struct perf_event_attr attr;
2380#endif /* CONFIG_HAVE_HW_BREAKPOINT */
Michael Neuling9422de32012-12-20 14:06:44 +00002381#ifndef CONFIG_PPC_ADV_DEBUG_REGS
Michael Neuling85ce9a52018-03-27 15:37:18 +11002382 bool set_bp = true;
Michael Neuling9422de32012-12-20 14:06:44 +00002383 struct arch_hw_breakpoint hw_brk;
2384#endif
K.Prasad5aae8a52010-06-15 11:35:19 +05302385
Luis Machadod6a61bf2008-07-24 02:10:41 +10002386	/* For ppc64 we support one DABR and no IABRs at the moment.
 2387	 * For embedded processors we support one DAC and no IACs at the
2388 * moment.
2389 */
Benjamin Herrenschmidtabd06502007-06-04 15:15:47 +10002390 if (addr > 0)
2391 return -EINVAL;
2392
Kumar Gala2325f0a2008-07-26 05:27:33 +10002393 /* The bottom 3 bits in dabr are flags */
Benjamin Herrenschmidtabd06502007-06-04 15:15:47 +10002394 if ((data & ~0x7UL) >= TASK_SIZE)
2395 return -EIO;
2396
Dave Kleikamp172ae2e2010-02-08 11:50:57 +00002397#ifndef CONFIG_PPC_ADV_DEBUG_REGS
Luis Machadod6a61bf2008-07-24 02:10:41 +10002398 /* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
2399 * It was assumed, on previous implementations, that 3 bits were
2400 * passed together with the data address, fitting the design of the
2401 * DABR register, as follows:
2402 *
2403 * bit 0: Read flag
2404 * bit 1: Write flag
2405 * bit 2: Breakpoint translation
2406 *
2407 * Thus, we use them here as so.
2408 */
2409
2410 /* Ensure breakpoint translation bit is set */
Michael Neuling9422de32012-12-20 14:06:44 +00002411 if (data && !(data & HW_BRK_TYPE_TRANSLATE))
Benjamin Herrenschmidtabd06502007-06-04 15:15:47 +10002412 return -EIO;
Michael Neuling9422de32012-12-20 14:06:44 +00002413 hw_brk.address = data & (~HW_BRK_TYPE_DABR);
2414 hw_brk.type = (data & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL;
2415 hw_brk.len = 8;
Michael Neuling85ce9a52018-03-27 15:37:18 +11002416 set_bp = (data) && (hw_brk.type & HW_BRK_TYPE_RDWR);
K.Prasad5aae8a52010-06-15 11:35:19 +05302417#ifdef CONFIG_HAVE_HW_BREAKPOINT
2418 bp = thread->ptrace_bps[0];
Michael Neuling85ce9a52018-03-27 15:37:18 +11002419 if (!set_bp) {
K.Prasad5aae8a52010-06-15 11:35:19 +05302420 if (bp) {
2421 unregister_hw_breakpoint(bp);
2422 thread->ptrace_bps[0] = NULL;
2423 }
2424 return 0;
2425 }
2426 if (bp) {
2427 attr = bp->attr;
Michael Neuling9422de32012-12-20 14:06:44 +00002428 attr.bp_addr = hw_brk.address;
2429 arch_bp_generic_fields(hw_brk.type, &attr.bp_type);
Aravinda Prasada53fd612012-11-04 22:15:28 +00002430
2431 /* Enable breakpoint */
2432 attr.disabled = false;
2433
K.Prasad5aae8a52010-06-15 11:35:19 +05302434 ret = modify_user_hw_breakpoint(bp, &attr);
Frederic Weisbecker925f83c2011-05-06 01:53:18 +02002435 if (ret) {
K.Prasad5aae8a52010-06-15 11:35:19 +05302436 return ret;
Frederic Weisbecker925f83c2011-05-06 01:53:18 +02002437 }
K.Prasad5aae8a52010-06-15 11:35:19 +05302438 thread->ptrace_bps[0] = bp;
Michael Neuling9422de32012-12-20 14:06:44 +00002439 thread->hw_brk = hw_brk;
K.Prasad5aae8a52010-06-15 11:35:19 +05302440 return 0;
2441 }
2442
2443 /* Create a new breakpoint request if one doesn't exist already */
2444 hw_breakpoint_init(&attr);
Michael Neuling9422de32012-12-20 14:06:44 +00002445 attr.bp_addr = hw_brk.address;
Michael Neuling4f7c06e2018-05-17 15:37:15 +10002446 attr.bp_len = 8;
Michael Neuling9422de32012-12-20 14:06:44 +00002447 arch_bp_generic_fields(hw_brk.type,
2448 &attr.bp_type);
K.Prasad5aae8a52010-06-15 11:35:19 +05302449
2450 thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
Avi Kivity4dc0da82011-06-29 18:42:35 +03002451 ptrace_triggered, NULL, task);
K.Prasad5aae8a52010-06-15 11:35:19 +05302452 if (IS_ERR(bp)) {
2453 thread->ptrace_bps[0] = NULL;
2454 return PTR_ERR(bp);
2455 }
2456
Michael Neuling85ce9a52018-03-27 15:37:18 +11002457#else /* !CONFIG_HAVE_HW_BREAKPOINT */
2458 if (set_bp && (!ppc_breakpoint_available()))
2459 return -ENODEV;
K.Prasad5aae8a52010-06-15 11:35:19 +05302460#endif /* CONFIG_HAVE_HW_BREAKPOINT */
Michael Neuling9422de32012-12-20 14:06:44 +00002461 task->thread.hw_brk = hw_brk;
Dave Kleikamp172ae2e2010-02-08 11:50:57 +00002462#else /* CONFIG_PPC_ADV_DEBUG_REGS */
Luis Machadod6a61bf2008-07-24 02:10:41 +10002463 /* As described above, it was assumed 3 bits were passed with the data
2464 * address, but we will assume only the mode bits will be passed
 2465	 * so as not to cause alignment restrictions for DAC-based processors.
2466 */
2467
2468 /* DAC's hold the whole address without any mode flags */
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302469 task->thread.debug.dac1 = data & ~0x3UL;
Luis Machadod6a61bf2008-07-24 02:10:41 +10002470
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302471 if (task->thread.debug.dac1 == 0) {
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002472 dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302473 if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
2474 task->thread.debug.dbcr1)) {
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002475 task->thread.regs->msr &= ~MSR_DE;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302476 task->thread.debug.dbcr0 &= ~DBCR0_IDM;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002477 }
Luis Machadod6a61bf2008-07-24 02:10:41 +10002478 return 0;
2479 }
2480
2481 /* Read or Write bits must be set */
2482
2483 if (!(data & 0x3UL))
2484 return -EINVAL;
2485
2486 /* Set the Internal Debugging flag (IDM bit 1) for the DBCR0
2487 register */
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302488 task->thread.debug.dbcr0 |= DBCR0_IDM;
Luis Machadod6a61bf2008-07-24 02:10:41 +10002489
2490 /* Check for write and read flags and set DBCR0
2491 accordingly */
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002492 dbcr_dac(task) &= ~(DBCR_DAC1R|DBCR_DAC1W);
Luis Machadod6a61bf2008-07-24 02:10:41 +10002493 if (data & 0x1UL)
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002494 dbcr_dac(task) |= DBCR_DAC1R;
Luis Machadod6a61bf2008-07-24 02:10:41 +10002495 if (data & 0x2UL)
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002496 dbcr_dac(task) |= DBCR_DAC1W;
Luis Machadod6a61bf2008-07-24 02:10:41 +10002497 task->thread.regs->msr |= MSR_DE;
Dave Kleikamp172ae2e2010-02-08 11:50:57 +00002498#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
Benjamin Herrenschmidtabd06502007-06-04 15:15:47 +10002499 return 0;
2500}
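/*
 * Illustrative userspace sketch (not part of this kernel file): setting and
 * clearing the single DABR-style watchpoint through the legacy
 * PTRACE_SET_DEBUGREG request handled above. The low three bits of the data
 * word carry the read/write/translate flags spelled out in the comment, and
 * because ptrace_triggered() makes the breakpoint one-shot, the tracer has
 * to re-arm it after every hit. PTRACE_SET_DEBUGREG is taken from the
 * powerpc uapi <asm/ptrace.h>.
 */
#if 0
#include <sys/ptrace.h>		/* keep before asm/ptrace.h */
#include <sys/types.h>
#include <asm/ptrace.h>

static long watch_write(pid_t pid, unsigned long addr)
{
	/* bit 0 = read, bit 1 = write, bit 2 = breakpoint translation */
	unsigned long data = (addr & ~7UL) | 0x6UL;	/* write + translate */

	return ptrace(PTRACE_SET_DEBUGREG, pid, 0UL, data);
}

static long watch_clear(pid_t pid)
{
	/* data == 0 unregisters the breakpoint, as handled above */
	return ptrace(PTRACE_SET_DEBUGREG, pid, 0UL, 0UL);
}
#endif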
Benjamin Herrenschmidtabd06502007-06-04 15:15:47 +10002501
Benjamin Herrenschmidt865418d2007-06-04 15:15:44 +10002502/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002503 * Called by kernel/ptrace.c when detaching..
2504 *
2505 * Make sure single step bits etc are not set.
2506 */
2507void ptrace_disable(struct task_struct *child)
2508{
2509 /* make sure the single step bit is not set. */
Roland McGrath2a84b0d2008-01-30 13:30:51 +01002510 user_disable_single_step(child);
Breno Leitao5521eb42018-09-20 13:45:06 -03002511 clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002512}
2513
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002514#ifdef CONFIG_PPC_ADV_DEBUG_REGS
Michael Neuling84295df2012-10-28 15:13:16 +00002515static long set_instruction_bp(struct task_struct *child,
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002516 struct ppc_hw_breakpoint *bp_info)
2517{
2518 int slot;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302519 int slot1_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC1) != 0);
2520 int slot2_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC2) != 0);
2521 int slot3_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC3) != 0);
2522 int slot4_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC4) != 0);
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002523
2524 if (dbcr_iac_range(child) & DBCR_IAC12MODE)
2525 slot2_in_use = 1;
2526 if (dbcr_iac_range(child) & DBCR_IAC34MODE)
2527 slot4_in_use = 1;
2528
2529 if (bp_info->addr >= TASK_SIZE)
2530 return -EIO;
2531
2532 if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {
2533
2534 /* Make sure range is valid. */
2535 if (bp_info->addr2 >= TASK_SIZE)
2536 return -EIO;
2537
 2538		/* We need a pair of IAC registers */
2539 if ((!slot1_in_use) && (!slot2_in_use)) {
2540 slot = 1;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302541 child->thread.debug.iac1 = bp_info->addr;
2542 child->thread.debug.iac2 = bp_info->addr2;
2543 child->thread.debug.dbcr0 |= DBCR0_IAC1;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002544 if (bp_info->addr_mode ==
2545 PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
2546 dbcr_iac_range(child) |= DBCR_IAC12X;
2547 else
2548 dbcr_iac_range(child) |= DBCR_IAC12I;
2549#if CONFIG_PPC_ADV_DEBUG_IACS > 2
2550 } else if ((!slot3_in_use) && (!slot4_in_use)) {
2551 slot = 3;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302552 child->thread.debug.iac3 = bp_info->addr;
2553 child->thread.debug.iac4 = bp_info->addr2;
2554 child->thread.debug.dbcr0 |= DBCR0_IAC3;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002555 if (bp_info->addr_mode ==
2556 PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
2557 dbcr_iac_range(child) |= DBCR_IAC34X;
2558 else
2559 dbcr_iac_range(child) |= DBCR_IAC34I;
2560#endif
2561 } else
2562 return -ENOSPC;
2563 } else {
2564 /* We only need one. If possible leave a pair free in
2565 * case a range is needed later
2566 */
2567 if (!slot1_in_use) {
2568 /*
2569 * Don't use iac1 if iac1-iac2 are free and either
2570 * iac3 or iac4 (but not both) are free
2571 */
2572 if (slot2_in_use || (slot3_in_use == slot4_in_use)) {
2573 slot = 1;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302574 child->thread.debug.iac1 = bp_info->addr;
2575 child->thread.debug.dbcr0 |= DBCR0_IAC1;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002576 goto out;
2577 }
2578 }
2579 if (!slot2_in_use) {
2580 slot = 2;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302581 child->thread.debug.iac2 = bp_info->addr;
2582 child->thread.debug.dbcr0 |= DBCR0_IAC2;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002583#if CONFIG_PPC_ADV_DEBUG_IACS > 2
2584 } else if (!slot3_in_use) {
2585 slot = 3;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302586 child->thread.debug.iac3 = bp_info->addr;
2587 child->thread.debug.dbcr0 |= DBCR0_IAC3;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002588 } else if (!slot4_in_use) {
2589 slot = 4;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302590 child->thread.debug.iac4 = bp_info->addr;
2591 child->thread.debug.dbcr0 |= DBCR0_IAC4;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002592#endif
2593 } else
2594 return -ENOSPC;
2595 }
2596out:
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302597 child->thread.debug.dbcr0 |= DBCR0_IDM;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002598 child->thread.regs->msr |= MSR_DE;
2599
2600 return slot;
2601}
2602
2603static int del_instruction_bp(struct task_struct *child, int slot)
2604{
2605 switch (slot) {
2606 case 1:
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302607 if ((child->thread.debug.dbcr0 & DBCR0_IAC1) == 0)
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002608 return -ENOENT;
2609
2610 if (dbcr_iac_range(child) & DBCR_IAC12MODE) {
2611 /* address range - clear slots 1 & 2 */
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302612 child->thread.debug.iac2 = 0;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002613 dbcr_iac_range(child) &= ~DBCR_IAC12MODE;
2614 }
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302615 child->thread.debug.iac1 = 0;
2616 child->thread.debug.dbcr0 &= ~DBCR0_IAC1;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002617 break;
2618 case 2:
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302619 if ((child->thread.debug.dbcr0 & DBCR0_IAC2) == 0)
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002620 return -ENOENT;
2621
2622 if (dbcr_iac_range(child) & DBCR_IAC12MODE)
2623 /* used in a range */
2624 return -EINVAL;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302625 child->thread.debug.iac2 = 0;
2626 child->thread.debug.dbcr0 &= ~DBCR0_IAC2;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002627 break;
2628#if CONFIG_PPC_ADV_DEBUG_IACS > 2
2629 case 3:
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302630 if ((child->thread.debug.dbcr0 & DBCR0_IAC3) == 0)
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002631 return -ENOENT;
2632
2633 if (dbcr_iac_range(child) & DBCR_IAC34MODE) {
2634 /* address range - clear slots 3 & 4 */
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302635 child->thread.debug.iac4 = 0;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002636 dbcr_iac_range(child) &= ~DBCR_IAC34MODE;
2637 }
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302638 child->thread.debug.iac3 = 0;
2639 child->thread.debug.dbcr0 &= ~DBCR0_IAC3;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002640 break;
2641 case 4:
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302642 if ((child->thread.debug.dbcr0 & DBCR0_IAC4) == 0)
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002643 return -ENOENT;
2644
2645 if (dbcr_iac_range(child) & DBCR_IAC34MODE)
2646 /* Used in a range */
2647 return -EINVAL;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302648 child->thread.debug.iac4 = 0;
2649 child->thread.debug.dbcr0 &= ~DBCR0_IAC4;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002650 break;
2651#endif
2652 default:
2653 return -EINVAL;
2654 }
2655 return 0;
2656}
2657
2658static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
2659{
2660 int byte_enable =
2661 (bp_info->condition_mode >> PPC_BREAKPOINT_CONDITION_BE_SHIFT)
2662 & 0xf;
2663 int condition_mode =
2664 bp_info->condition_mode & PPC_BREAKPOINT_CONDITION_MODE;
2665 int slot;
2666
2667 if (byte_enable && (condition_mode == 0))
2668 return -EINVAL;
2669
2670 if (bp_info->addr >= TASK_SIZE)
2671 return -EIO;
2672
2673 if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) {
2674 slot = 1;
2675 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
2676 dbcr_dac(child) |= DBCR_DAC1R;
2677 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
2678 dbcr_dac(child) |= DBCR_DAC1W;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302679 child->thread.debug.dac1 = (unsigned long)bp_info->addr;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002680#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2681 if (byte_enable) {
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302682 child->thread.debug.dvc1 =
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002683 (unsigned long)bp_info->condition_value;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302684 child->thread.debug.dbcr2 |=
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002685 ((byte_enable << DBCR2_DVC1BE_SHIFT) |
2686 (condition_mode << DBCR2_DVC1M_SHIFT));
2687 }
2688#endif
2689#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302690 } else if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002691 /* Both dac1 and dac2 are part of a range */
2692 return -ENOSPC;
2693#endif
2694 } else if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) {
2695 slot = 2;
2696 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
2697 dbcr_dac(child) |= DBCR_DAC2R;
2698 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
2699 dbcr_dac(child) |= DBCR_DAC2W;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302700 child->thread.debug.dac2 = (unsigned long)bp_info->addr;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002701#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2702 if (byte_enable) {
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302703 child->thread.debug.dvc2 =
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002704 (unsigned long)bp_info->condition_value;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302705 child->thread.debug.dbcr2 |=
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002706 ((byte_enable << DBCR2_DVC2BE_SHIFT) |
2707 (condition_mode << DBCR2_DVC2M_SHIFT));
2708 }
2709#endif
2710 } else
2711 return -ENOSPC;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302712 child->thread.debug.dbcr0 |= DBCR0_IDM;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002713 child->thread.regs->msr |= MSR_DE;
2714
2715 return slot + 4;
2716}
2717
2718static int del_dac(struct task_struct *child, int slot)
2719{
2720 if (slot == 1) {
Dave Kleikamp30124d12010-03-01 04:57:34 +00002721 if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0)
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002722 return -ENOENT;
2723
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302724 child->thread.debug.dac1 = 0;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002725 dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W);
2726#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302727 if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
2728 child->thread.debug.dac2 = 0;
2729 child->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002730 }
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302731 child->thread.debug.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE);
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002732#endif
2733#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302734 child->thread.debug.dvc1 = 0;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002735#endif
2736 } else if (slot == 2) {
Dave Kleikamp30124d12010-03-01 04:57:34 +00002737 if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0)
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002738 return -ENOENT;
2739
2740#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302741 if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE)
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002742 /* Part of a range */
2743 return -EINVAL;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302744 child->thread.debug.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE);
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002745#endif
2746#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302747 child->thread.debug.dvc2 = 0;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002748#endif
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302749 child->thread.debug.dac2 = 0;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002750 dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W);
2751 } else
2752 return -EINVAL;
2753
2754 return 0;
2755}
2756#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
2757
2758#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2759static int set_dac_range(struct task_struct *child,
2760 struct ppc_hw_breakpoint *bp_info)
2761{
2762 int mode = bp_info->addr_mode & PPC_BREAKPOINT_MODE_MASK;
2763
2764 /* We don't allow range watchpoints to be used with DVC */
2765 if (bp_info->condition_mode)
2766 return -EINVAL;
2767
2768 /*
2769 * Best effort to verify the address range. The user/supervisor bits
2770 * prevent trapping in kernel space, but let's fail on an obvious bad
2771 * range. The simple test on the mask is not fool-proof, and any
2772 * exclusive range will spill over into kernel space.
2773 */
2774 if (bp_info->addr >= TASK_SIZE)
2775 return -EIO;
2776 if (mode == PPC_BREAKPOINT_MODE_MASK) {
2777 /*
2778 * dac2 is a bitmask. Don't allow a mask that makes a
2779 * kernel space address from a valid dac1 value
2780 */
2781 if (~((unsigned long)bp_info->addr2) >= TASK_SIZE)
2782 return -EIO;
2783 } else {
2784 /*
2785 * For range breakpoints, addr2 must also be a valid address
2786 */
2787 if (bp_info->addr2 >= TASK_SIZE)
2788 return -EIO;
2789 }
2790
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302791 if (child->thread.debug.dbcr0 &
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002792 (DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W))
2793 return -ENOSPC;
2794
2795 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302796 child->thread.debug.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002797 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302798 child->thread.debug.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
2799 child->thread.debug.dac1 = bp_info->addr;
2800 child->thread.debug.dac2 = bp_info->addr2;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002801 if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302802 child->thread.debug.dbcr2 |= DBCR2_DAC12M;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002803 else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302804 child->thread.debug.dbcr2 |= DBCR2_DAC12MX;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002805 else /* PPC_BREAKPOINT_MODE_MASK */
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302806 child->thread.debug.dbcr2 |= DBCR2_DAC12MM;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002807 child->thread.regs->msr |= MSR_DE;
2808
2809 return 5;
2810}
2811#endif /* CONFIG_PPC_ADV_DEBUG_DAC_RANGE */
2812
Dave Kleikamp3162d922010-02-08 11:51:05 +00002813static long ppc_set_hwdebug(struct task_struct *child,
2814 struct ppc_hw_breakpoint *bp_info)
2815{
K.Prasad6c7a2852012-10-28 15:13:15 +00002816#ifdef CONFIG_HAVE_HW_BREAKPOINT
2817 int len = 0;
2818 struct thread_struct *thread = &(child->thread);
2819 struct perf_event *bp;
2820 struct perf_event_attr attr;
2821#endif /* CONFIG_HAVE_HW_BREAKPOINT */
Andreas Schwab4dfbf292010-11-27 14:24:53 +00002822#ifndef CONFIG_PPC_ADV_DEBUG_REGS
Michael Neuling9422de32012-12-20 14:06:44 +00002823 struct arch_hw_breakpoint brk;
Andreas Schwab4dfbf292010-11-27 14:24:53 +00002824#endif
2825
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002826 if (bp_info->version != 1)
2827 return -ENOTSUPP;
2828#ifdef CONFIG_PPC_ADV_DEBUG_REGS
Dave Kleikamp3162d922010-02-08 11:51:05 +00002829 /*
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002830 * Check for invalid flags and combinations
2831 */
2832 if ((bp_info->trigger_type == 0) ||
2833 (bp_info->trigger_type & ~(PPC_BREAKPOINT_TRIGGER_EXECUTE |
2834 PPC_BREAKPOINT_TRIGGER_RW)) ||
2835 (bp_info->addr_mode & ~PPC_BREAKPOINT_MODE_MASK) ||
2836 (bp_info->condition_mode &
2837 ~(PPC_BREAKPOINT_CONDITION_MODE |
2838 PPC_BREAKPOINT_CONDITION_BE_ALL)))
2839 return -EINVAL;
2840#if CONFIG_PPC_ADV_DEBUG_DVCS == 0
2841 if (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
2842 return -EINVAL;
2843#endif
2844
2845 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_EXECUTE) {
2846 if ((bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_EXECUTE) ||
2847 (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE))
2848 return -EINVAL;
Michael Neuling84295df2012-10-28 15:13:16 +00002849 return set_instruction_bp(child, bp_info);
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002850 }
2851 if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
2852 return set_dac(child, bp_info);
2853
2854#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2855 return set_dac_range(child, bp_info);
2856#else
2857 return -EINVAL;
2858#endif
2859#else /* !CONFIG_PPC_ADV_DEBUG_DVCS */
2860 /*
2861 * We only support one data breakpoint
Dave Kleikamp3162d922010-02-08 11:51:05 +00002862 */
Andreas Schwab4dfbf292010-11-27 14:24:53 +00002863 if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 ||
2864 (bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 ||
Andreas Schwab4dfbf292010-11-27 14:24:53 +00002865 bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
Dave Kleikamp3162d922010-02-08 11:51:05 +00002866 return -EINVAL;
2867
Dave Kleikamp3162d922010-02-08 11:51:05 +00002868 if ((unsigned long)bp_info->addr >= TASK_SIZE)
2869 return -EIO;
2870
Michael Neuling9422de32012-12-20 14:06:44 +00002871 brk.address = bp_info->addr & ~7UL;
2872 brk.type = HW_BRK_TYPE_TRANSLATE;
Michael Neuling2bb78ef2013-03-11 16:42:49 +00002873 brk.len = 8;
Andreas Schwab4dfbf292010-11-27 14:24:53 +00002874 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
Michael Neuling9422de32012-12-20 14:06:44 +00002875 brk.type |= HW_BRK_TYPE_READ;
Andreas Schwab4dfbf292010-11-27 14:24:53 +00002876 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
Michael Neuling9422de32012-12-20 14:06:44 +00002877 brk.type |= HW_BRK_TYPE_WRITE;
K.Prasad6c7a2852012-10-28 15:13:15 +00002878#ifdef CONFIG_HAVE_HW_BREAKPOINT
K.Prasad6c7a2852012-10-28 15:13:15 +00002879 /*
2880 * Check if the request is for 'range' breakpoints. We can
2881 * support it if range < 8 bytes.
2882 */
Oleg Nesterov6961ed92013-07-08 16:00:49 -07002883 if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
K.Prasad6c7a2852012-10-28 15:13:15 +00002884 len = bp_info->addr2 - bp_info->addr;
Oleg Nesterov6961ed92013-07-08 16:00:49 -07002885 else if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
Michael Neulingb0b0aa92013-06-24 15:47:22 +10002886 len = 1;
Oleg Nesterov6961ed92013-07-08 16:00:49 -07002887 else
K.Prasad6c7a2852012-10-28 15:13:15 +00002888 return -EINVAL;
K.Prasad6c7a2852012-10-28 15:13:15 +00002889 bp = thread->ptrace_bps[0];
Oleg Nesterov6961ed92013-07-08 16:00:49 -07002890 if (bp)
K.Prasad6c7a2852012-10-28 15:13:15 +00002891 return -ENOSPC;
K.Prasad6c7a2852012-10-28 15:13:15 +00002892
2893 /* Create a new breakpoint request if one doesn't exist already */
2894 hw_breakpoint_init(&attr);
2895 attr.bp_addr = (unsigned long)bp_info->addr & ~HW_BREAKPOINT_ALIGN;
2896 attr.bp_len = len;
Michael Neuling9422de32012-12-20 14:06:44 +00002897 arch_bp_generic_fields(brk.type, &attr.bp_type);
K.Prasad6c7a2852012-10-28 15:13:15 +00002898
2899 thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
2900 ptrace_triggered, NULL, child);
2901 if (IS_ERR(bp)) {
2902 thread->ptrace_bps[0] = NULL;
K.Prasad6c7a2852012-10-28 15:13:15 +00002903 return PTR_ERR(bp);
2904 }
2905
K.Prasad6c7a2852012-10-28 15:13:15 +00002906 return 1;
2907#endif /* CONFIG_HAVE_HW_BREAKPOINT */
2908
2909 if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT)
2910 return -EINVAL;
2911
Michael Neuling9422de32012-12-20 14:06:44 +00002912 if (child->thread.hw_brk.address)
K.Prasad6c7a2852012-10-28 15:13:15 +00002913 return -ENOSPC;
Andreas Schwab4dfbf292010-11-27 14:24:53 +00002914
Michael Neuling85ce9a52018-03-27 15:37:18 +11002915 if (!ppc_breakpoint_available())
2916 return -ENODEV;
2917
Michael Neuling9422de32012-12-20 14:06:44 +00002918 child->thread.hw_brk = brk;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002919
Dave Kleikamp3162d922010-02-08 11:51:05 +00002920 return 1;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002921#endif /* !CONFIG_PPC_ADV_DEBUG_REGS */
Dave Kleikamp3162d922010-02-08 11:51:05 +00002922}
2923
Michael Neulingec1b33d2012-10-28 15:13:17 +00002924static long ppc_del_hwdebug(struct task_struct *child, long data)
Dave Kleikamp3162d922010-02-08 11:51:05 +00002925{
K.Prasad6c7a2852012-10-28 15:13:15 +00002926#ifdef CONFIG_HAVE_HW_BREAKPOINT
2927 int ret = 0;
2928 struct thread_struct *thread = &(child->thread);
2929 struct perf_event *bp;
2930#endif /* CONFIG_HAVE_HW_BREAKPOINT */
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002931#ifdef CONFIG_PPC_ADV_DEBUG_REGS
2932 int rc;
2933
2934 if (data <= 4)
2935 rc = del_instruction_bp(child, (int)data);
2936 else
2937 rc = del_dac(child, (int)data - 4);
2938
2939 if (!rc) {
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302940 if (!DBCR_ACTIVE_EVENTS(child->thread.debug.dbcr0,
2941 child->thread.debug.dbcr1)) {
2942 child->thread.debug.dbcr0 &= ~DBCR0_IDM;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002943 child->thread.regs->msr &= ~MSR_DE;
2944 }
2945 }
2946 return rc;
2947#else
Dave Kleikamp3162d922010-02-08 11:51:05 +00002948 if (data != 1)
2949 return -EINVAL;
K.Prasad6c7a2852012-10-28 15:13:15 +00002950
2951#ifdef CONFIG_HAVE_HW_BREAKPOINT
K.Prasad6c7a2852012-10-28 15:13:15 +00002952 bp = thread->ptrace_bps[0];
2953 if (bp) {
2954 unregister_hw_breakpoint(bp);
2955 thread->ptrace_bps[0] = NULL;
2956 } else
2957 ret = -ENOENT;
K.Prasad6c7a2852012-10-28 15:13:15 +00002958 return ret;
2959#else /* CONFIG_HAVE_HW_BREAKPOINT */
Michael Neuling9422de32012-12-20 14:06:44 +00002960 if (child->thread.hw_brk.address == 0)
Dave Kleikamp3162d922010-02-08 11:51:05 +00002961 return -ENOENT;
2962
Michael Neuling9422de32012-12-20 14:06:44 +00002963 child->thread.hw_brk.address = 0;
2964 child->thread.hw_brk.type = 0;
K.Prasad6c7a2852012-10-28 15:13:15 +00002965#endif /* CONFIG_HAVE_HW_BREAKPOINT */
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002966
Dave Kleikamp3162d922010-02-08 11:51:05 +00002967 return 0;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002968#endif
Dave Kleikamp3162d922010-02-08 11:51:05 +00002969}
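/*
 * Illustrative sketch only (not built as part of this file): roughly how a
 * userspace tracer is expected to drive PPC_PTRACE_GETHWDBGINFO,
 * PPC_PTRACE_SETHWDEBUG and PPC_PTRACE_DELHWDEBUG. struct ppc_debug_info,
 * struct ppc_hw_breakpoint and the PPC_BREAKPOINT_* constants come from the
 * powerpc uapi <asm/ptrace.h>; error handling is omitted and the child is
 * assumed to be ptrace-attached and stopped.
 *
 *	#include <string.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *	#include <asm/ptrace.h>
 *
 *	static long set_write_watchpoint(pid_t pid, unsigned long addr)
 *	{
 *		struct ppc_debug_info info;
 *		struct ppc_hw_breakpoint bp;
 *
 *		ptrace(PPC_PTRACE_GETHWDBGINFO, pid, NULL, &info);
 *		if (info.num_data_bps == 0)
 *			return -1;		// no data breakpoint hardware
 *
 *		memset(&bp, 0, sizeof(bp));
 *		bp.version        = 1;
 *		bp.trigger_type   = PPC_BREAKPOINT_TRIGGER_WRITE;
 *		bp.addr_mode      = PPC_BREAKPOINT_MODE_EXACT;
 *		bp.condition_mode = PPC_BREAKPOINT_CONDITION_NONE;
 *		bp.addr           = addr;
 *
 *		// On success this returns a positive handle which is later
 *		// passed back as 'data':
 *		// ptrace(PPC_PTRACE_DELHWDEBUG, pid, NULL, handle);
 *		return ptrace(PPC_PTRACE_SETHWDEBUG, pid, NULL, &bp);
 *	}
 */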
2970
Namhyung Kim9b05a692010-10-27 15:33:47 -07002971long arch_ptrace(struct task_struct *child, long request,
2972 unsigned long addr, unsigned long data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002973{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002974 int ret = -EPERM;
Namhyung Kimf68d2042010-10-27 15:34:01 -07002975 void __user *datavp = (void __user *) data;
2976 unsigned long __user *datalp = datavp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002977
Linus Torvalds1da177e2005-04-16 15:20:36 -07002978 switch (request) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002979 /* read the word at location addr in the USER area. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002980 case PTRACE_PEEKUSR: {
2981 unsigned long index, tmp;
2982
2983 ret = -EIO;
2984 /* convert to index and check */
Stephen Rothwelle8a30302005-10-13 15:52:04 +10002985#ifdef CONFIG_PPC32
Namhyung Kim9b05a692010-10-27 15:33:47 -07002986 index = addr >> 2;
Stephen Rothwelle8a30302005-10-13 15:52:04 +10002987 if ((addr & 3) || (index > PT_FPSCR)
2988 || (child->thread.regs == NULL))
2989#else
Namhyung Kim9b05a692010-10-27 15:33:47 -07002990 index = addr >> 3;
Stephen Rothwelle8a30302005-10-13 15:52:04 +10002991 if ((addr & 7) || (index > PT_FPSCR))
2992#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002993 break;
2994
2995 CHECK_FULL_REGS(child->thread.regs);
2996 if (index < PT_FPR0) {
Alexey Kardashevskiyee4a3912013-02-14 17:44:23 +00002997 ret = ptrace_get_reg(child, (int) index, &tmp);
2998 if (ret)
2999 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003000 } else {
Benjamin Herrenschmidte69b7422011-09-26 19:37:57 +00003001 unsigned int fpidx = index - PT_FPR0;
3002
Stephen Rothwelle8a30302005-10-13 15:52:04 +10003003 flush_fp_to_thread(child);
Benjamin Herrenschmidte69b7422011-09-26 19:37:57 +00003004 if (fpidx < (PT_FPSCR - PT_FPR0))
Ulrich Weigand36aa1b12013-12-12 15:59:34 +11003005 memcpy(&tmp, &child->thread.TS_FPR(fpidx),
Anton Blanchard87fec052013-09-23 12:04:38 +10003006 sizeof(long));
Benjamin Herrenschmidte69b7422011-09-26 19:37:57 +00003007 else
Paul Mackerrasde79f7b2013-09-10 20:20:42 +10003008 tmp = child->thread.fp_state.fpscr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003009 }
Namhyung Kimf68d2042010-10-27 15:34:01 -07003010 ret = put_user(tmp, datalp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003011 break;
3012 }
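	/*
	 * Illustrative sketch only: from the tracer's side, the USER-area
	 * offset passed to PTRACE_PEEKUSR/POKEUSR is the register index from
	 * the powerpc uapi <asm/ptrace.h> scaled by sizeof(long), matching
	 * the addr >> 2 / addr >> 3 conversion above. E.g. reading the
	 * child's stack pointer (r1):
	 *
	 *	errno = 0;
	 *	long sp = ptrace(PTRACE_PEEKUSER, pid,
	 *			 (void *)(PT_R1 * sizeof(long)), NULL);
	 *	// check errno afterwards; -1 is also a valid register value
	 */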
3013
Linus Torvalds1da177e2005-04-16 15:20:36 -07003014 /* write the word at location addr in the USER area */
3015 case PTRACE_POKEUSR: {
3016 unsigned long index;
3017
3018 ret = -EIO;
3019 /* convert to index and check */
Stephen Rothwelle8a30302005-10-13 15:52:04 +10003020#ifdef CONFIG_PPC32
Namhyung Kim9b05a692010-10-27 15:33:47 -07003021 index = addr >> 2;
Stephen Rothwelle8a30302005-10-13 15:52:04 +10003022 if ((addr & 3) || (index > PT_FPSCR)
3023 || (child->thread.regs == NULL))
3024#else
Namhyung Kim9b05a692010-10-27 15:33:47 -07003025 index = addr >> 3;
Stephen Rothwelle8a30302005-10-13 15:52:04 +10003026 if ((addr & 7) || (index > PT_FPSCR))
3027#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003028 break;
3029
3030 CHECK_FULL_REGS(child->thread.regs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003031 if (index < PT_FPR0) {
Benjamin Herrenschmidt865418d2007-06-04 15:15:44 +10003032 ret = ptrace_put_reg(child, index, data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003033 } else {
Benjamin Herrenschmidte69b7422011-09-26 19:37:57 +00003034 unsigned int fpidx = index - PT_FPR0;
3035
Stephen Rothwelle8a30302005-10-13 15:52:04 +10003036 flush_fp_to_thread(child);
Benjamin Herrenschmidte69b7422011-09-26 19:37:57 +00003037 if (fpidx < (PT_FPSCR - PT_FPR0))
Ulrich Weigand36aa1b12013-12-12 15:59:34 +11003038 memcpy(&child->thread.TS_FPR(fpidx), &data,
Anton Blanchard87fec052013-09-23 12:04:38 +10003039 sizeof(long));
Benjamin Herrenschmidte69b7422011-09-26 19:37:57 +00003040 else
Paul Mackerrasde79f7b2013-09-10 20:20:42 +10003041 child->thread.fp_state.fpscr = data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003042 ret = 0;
3043 }
3044 break;
3045 }
3046
Dave Kleikamp3162d922010-02-08 11:51:05 +00003047 case PPC_PTRACE_GETHWDBGINFO: {
3048 struct ppc_debug_info dbginfo;
3049
3050 dbginfo.version = 1;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00003051#ifdef CONFIG_PPC_ADV_DEBUG_REGS
3052 dbginfo.num_instruction_bps = CONFIG_PPC_ADV_DEBUG_IACS;
3053 dbginfo.num_data_bps = CONFIG_PPC_ADV_DEBUG_DACS;
3054 dbginfo.num_condition_regs = CONFIG_PPC_ADV_DEBUG_DVCS;
3055 dbginfo.data_bp_alignment = 4;
3056 dbginfo.sizeof_condition = 4;
3057 dbginfo.features = PPC_DEBUG_FEATURE_INSN_BP_RANGE |
3058 PPC_DEBUG_FEATURE_INSN_BP_MASK;
3059#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
3060 dbginfo.features |=
3061 PPC_DEBUG_FEATURE_DATA_BP_RANGE |
3062 PPC_DEBUG_FEATURE_DATA_BP_MASK;
3063#endif
3064#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
Dave Kleikamp3162d922010-02-08 11:51:05 +00003065 dbginfo.num_instruction_bps = 0;
Michael Neuling85ce9a52018-03-27 15:37:18 +11003066 if (ppc_breakpoint_available())
3067 dbginfo.num_data_bps = 1;
3068 else
3069 dbginfo.num_data_bps = 0;
Dave Kleikamp3162d922010-02-08 11:51:05 +00003070 dbginfo.num_condition_regs = 0;
3071#ifdef CONFIG_PPC64
3072 dbginfo.data_bp_alignment = 8;
3073#else
3074 dbginfo.data_bp_alignment = 4;
3075#endif
3076 dbginfo.sizeof_condition = 0;
K.Prasad6c7a2852012-10-28 15:13:15 +00003077#ifdef CONFIG_HAVE_HW_BREAKPOINT
3078 dbginfo.features = PPC_DEBUG_FEATURE_DATA_BP_RANGE;
Michael Neuling517b7312013-03-21 20:12:33 +00003079 if (cpu_has_feature(CPU_FTR_DAWR))
3080 dbginfo.features |= PPC_DEBUG_FEATURE_DATA_BP_DAWR;
K.Prasad6c7a2852012-10-28 15:13:15 +00003081#else
Dave Kleikamp3162d922010-02-08 11:51:05 +00003082 dbginfo.features = 0;
K.Prasad6c7a2852012-10-28 15:13:15 +00003083#endif /* CONFIG_HAVE_HW_BREAKPOINT */
Dave Kleikamp3bffb652010-02-08 11:51:18 +00003084#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
Dave Kleikamp3162d922010-02-08 11:51:05 +00003085
Al Viro6bcdd292018-05-29 22:57:38 +10003086 if (copy_to_user(datavp, &dbginfo,
3087 sizeof(struct ppc_debug_info)))
Dave Kleikamp3162d922010-02-08 11:51:05 +00003088 return -EFAULT;
Al Viro6bcdd292018-05-29 22:57:38 +10003089 return 0;
Dave Kleikamp3162d922010-02-08 11:51:05 +00003090 }
3091
3092 case PPC_PTRACE_SETHWDEBUG: {
3093 struct ppc_hw_breakpoint bp_info;
3094
Al Viro6bcdd292018-05-29 22:57:38 +10003095 if (copy_from_user(&bp_info, datavp,
3096 sizeof(struct ppc_hw_breakpoint)))
Dave Kleikamp3162d922010-02-08 11:51:05 +00003097 return -EFAULT;
Al Viro6bcdd292018-05-29 22:57:38 +10003098 return ppc_set_hwdebug(child, &bp_info);
Dave Kleikamp3162d922010-02-08 11:51:05 +00003099 }
3100
3101 case PPC_PTRACE_DELHWDEBUG: {
Michael Neulingec1b33d2012-10-28 15:13:17 +00003102 ret = ppc_del_hwdebug(child, data);
Dave Kleikamp3162d922010-02-08 11:51:05 +00003103 break;
3104 }
3105
Stephen Rothwelle8a30302005-10-13 15:52:04 +10003106 case PTRACE_GET_DEBUGREG: {
Michael Neuling9422de32012-12-20 14:06:44 +00003107#ifndef CONFIG_PPC_ADV_DEBUG_REGS
3108 unsigned long dabr_fake;
3109#endif
Stephen Rothwelle8a30302005-10-13 15:52:04 +10003110 ret = -EINVAL;
3111 /* We only support one DABR and no IABRs at the moment */
3112 if (addr > 0)
3113 break;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00003114#ifdef CONFIG_PPC_ADV_DEBUG_REGS
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05303115 ret = put_user(child->thread.debug.dac1, datalp);
Dave Kleikamp3bffb652010-02-08 11:51:18 +00003116#else
Michael Neuling9422de32012-12-20 14:06:44 +00003117 dabr_fake = ((child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) |
3118 (child->thread.hw_brk.type & HW_BRK_TYPE_DABR));
3119 ret = put_user(dabr_fake, datalp);
Dave Kleikamp3bffb652010-02-08 11:51:18 +00003120#endif
Stephen Rothwelle8a30302005-10-13 15:52:04 +10003121 break;
3122 }
3123
3124 case PTRACE_SET_DEBUGREG:
3125 ret = ptrace_set_debugreg(child, addr, data);
3126 break;
Stephen Rothwelle8a30302005-10-13 15:52:04 +10003127
Benjamin Herrenschmidte17666b2007-06-04 15:15:43 +10003128#ifdef CONFIG_PPC64
3129 case PTRACE_GETREGS64:
3130#endif
Roland McGrathc391cd02007-12-20 03:58:36 -08003131 case PTRACE_GETREGS: /* Get all pt_regs from the child. */
3132 return copy_regset_to_user(child, &user_ppc_native_view,
3133 REGSET_GPR,
Michael Ellerman3eeacd92018-10-13 00:39:31 +11003134 0, sizeof(struct user_pt_regs),
Namhyung Kimf68d2042010-10-27 15:34:01 -07003135 datavp);
Stephen Rothwelle8a30302005-10-13 15:52:04 +10003136
Benjamin Herrenschmidt0b3d5c42007-06-04 15:15:39 +10003137#ifdef CONFIG_PPC64
Benjamin Herrenschmidte17666b2007-06-04 15:15:43 +10003138 case PTRACE_SETREGS64:
3139#endif
Roland McGrathc391cd02007-12-20 03:58:36 -08003140 case PTRACE_SETREGS: /* Set all gp regs in the child. */
3141 return copy_regset_from_user(child, &user_ppc_native_view,
3142 REGSET_GPR,
Michael Ellerman3eeacd92018-10-13 00:39:31 +11003143 0, sizeof(struct user_pt_regs),
Namhyung Kimf68d2042010-10-27 15:34:01 -07003144 datavp);
Stephen Rothwelle8a30302005-10-13 15:52:04 +10003145
Roland McGrathc391cd02007-12-20 03:58:36 -08003146 case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
3147 return copy_regset_to_user(child, &user_ppc_native_view,
3148 REGSET_FPR,
3149 0, sizeof(elf_fpregset_t),
Namhyung Kimf68d2042010-10-27 15:34:01 -07003150 datavp);
Benjamin Herrenschmidte17666b2007-06-04 15:15:43 +10003151
Roland McGrathc391cd02007-12-20 03:58:36 -08003152 case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
3153 return copy_regset_from_user(child, &user_ppc_native_view,
3154 REGSET_FPR,
3155 0, sizeof(elf_fpregset_t),
Namhyung Kimf68d2042010-10-27 15:34:01 -07003156 datavp);
Stephen Rothwelle8a30302005-10-13 15:52:04 +10003157
Linus Torvalds1da177e2005-04-16 15:20:36 -07003158#ifdef CONFIG_ALTIVEC
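	/*
	 * The VMX regset seen by userspace is 32 vector128 VRs, followed by
	 * VSCR (kept in its own vector128 slot) and the 32-bit VRSAVE, which
	 * is where the 33 * sizeof(vector128) + sizeof(u32) size below comes
	 * from.
	 */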
3159 case PTRACE_GETVRREGS:
Roland McGrathc391cd02007-12-20 03:58:36 -08003160 return copy_regset_to_user(child, &user_ppc_native_view,
3161 REGSET_VMX,
3162 0, (33 * sizeof(vector128) +
3163 sizeof(u32)),
Namhyung Kimf68d2042010-10-27 15:34:01 -07003164 datavp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003165
3166 case PTRACE_SETVRREGS:
Roland McGrathc391cd02007-12-20 03:58:36 -08003167 return copy_regset_from_user(child, &user_ppc_native_view,
3168 REGSET_VMX,
3169 0, (33 * sizeof(vector128) +
3170 sizeof(u32)),
Namhyung Kimf68d2042010-10-27 15:34:01 -07003171 datavp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003172#endif
Michael Neulingce48b212008-06-25 14:07:18 +10003173#ifdef CONFIG_VSX
3174 case PTRACE_GETVSRREGS:
3175 return copy_regset_to_user(child, &user_ppc_native_view,
3176 REGSET_VSX,
Michael Neuling1ac42ef82008-07-29 01:13:14 +10003177 0, 32 * sizeof(double),
Namhyung Kimf68d2042010-10-27 15:34:01 -07003178 datavp);
Michael Neulingce48b212008-06-25 14:07:18 +10003179
3180 case PTRACE_SETVSRREGS:
3181 return copy_regset_from_user(child, &user_ppc_native_view,
3182 REGSET_VSX,
Michael Neuling1ac42ef82008-07-29 01:13:14 +10003183 0, 32 * sizeof(double),
Namhyung Kimf68d2042010-10-27 15:34:01 -07003184 datavp);
Michael Neulingce48b212008-06-25 14:07:18 +10003185#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003186#ifdef CONFIG_SPE
3187 case PTRACE_GETEVRREGS:
3188 /* Get the child spe register state. */
Roland McGrathc391cd02007-12-20 03:58:36 -08003189 return copy_regset_to_user(child, &user_ppc_native_view,
3190 REGSET_SPE, 0, 35 * sizeof(u32),
Namhyung Kimf68d2042010-10-27 15:34:01 -07003191 datavp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003192
3193 case PTRACE_SETEVRREGS:
3194 /* Set the child spe register state. */
Roland McGrathc391cd02007-12-20 03:58:36 -08003195 return copy_regset_from_user(child, &user_ppc_native_view,
3196 REGSET_SPE, 0, 35 * sizeof(u32),
Namhyung Kimf68d2042010-10-27 15:34:01 -07003197 datavp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003198#endif
3199
3200 default:
3201 ret = ptrace_request(child, request, addr, data);
3202 break;
3203 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003204 return ret;
3205}
3206
Michael Ellerman2449acc2015-07-23 20:21:09 +10003207#ifdef CONFIG_SECCOMP
3208static int do_seccomp(struct pt_regs *regs)
3209{
3210 if (!test_thread_flag(TIF_SECCOMP))
3211 return 0;
3212
3213 /*
3214 * The ABI we present to seccomp tracers is that r3 contains
3215 * the syscall return value and orig_gpr3 contains the first
3216 * syscall parameter. This is different to the ptrace ABI where
3217 * both r3 and orig_gpr3 contain the first syscall parameter.
3218 */
3219 regs->gpr[3] = -ENOSYS;
3220
3221 /*
3222 * We use the __ version here because we have already checked
3223 * TIF_SECCOMP. If this fails, there is nothing left to do, we
3224 * have already loaded -ENOSYS into r3, or seccomp has put
3225 * something else in r3 (via SECCOMP_RET_ERRNO/TRACE).
3226 */
Andy Lutomirski2f275de2016-05-27 12:57:02 -07003227 if (__secure_computing(NULL))
Michael Ellerman2449acc2015-07-23 20:21:09 +10003228 return -1;
3229
3230 /*
3231 * The syscall was allowed by seccomp, restore the register
Kees Cook1addc572016-06-02 19:55:09 -07003232 * state to what audit expects.
Michael Ellerman2449acc2015-07-23 20:21:09 +10003233 * Note that we use orig_gpr3, which means a seccomp tracer can
3234 * modify the first syscall parameter (in orig_gpr3) and also
3235 * allow the syscall to proceed.
3236 */
3237 regs->gpr[3] = regs->orig_gpr3;
3238
3239 return 0;
3240}
3241#else
3242static inline int do_seccomp(struct pt_regs *regs) { return 0; }
3243#endif /* CONFIG_SECCOMP */
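/*
 * Illustrative sketch only: what a seccomp tracer (a ptracer using
 * PTRACE_O_TRACESECCOMP) sees at a PTRACE_EVENT_SECCOMP stop, given the ABI
 * described in do_seccomp() above. struct pt_regs is the uapi view from
 * <asm/ptrace.h>; new_first_arg is a placeholder and error handling is
 * omitted.
 *
 *	struct pt_regs regs;
 *
 *	ptrace(PTRACE_GETREGS, pid, NULL, &regs);
 *	// regs.gpr[0]    - syscall number
 *	// regs.gpr[3]    - (unsigned long)-ENOSYS, the return value slot
 *	// regs.orig_gpr3 - first syscall argument
 *
 *	regs.orig_gpr3 = new_first_arg;		// rewrite argument 0
 *	ptrace(PTRACE_SETREGS, pid, NULL, &regs);
 */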
3244
Michael Ellermand3837412015-07-23 20:21:02 +10003245/**
3246 * do_syscall_trace_enter() - Do syscall tracing on kernel entry.
3247 * @regs: the pt_regs of the task to trace (current)
3248 *
3249 * Performs various types of tracing on syscall entry. This includes seccomp,
3250 * ptrace, syscall tracepoints and audit.
3251 *
3252 * The pt_regs are potentially visible to userspace via ptrace, so their
3253 * contents are ABI.
3254 *
3255 * One or more of the tracers may modify the contents of pt_regs, in particular
3256 * to modify arguments or even the syscall number itself.
3257 *
3258 * It's also possible that a tracer can choose to reject the system call. In
3259 * that case this function will return an illegal syscall number, and will put
3260 * an appropriate return value in regs->gpr[3].
3261 *
3262 * Return: the (possibly changed) syscall number.
Roland McGrath4f72c422008-07-27 16:51:03 +10003263 */
3264long do_syscall_trace_enter(struct pt_regs *regs)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003265{
Li Zhong22ecbe82013-05-13 16:16:40 +00003266 user_exit();
3267
Breno Leitao5521eb42018-09-20 13:45:06 -03003268 if (test_thread_flag(TIF_SYSCALL_EMU)) {
3269 ptrace_report_syscall(regs);
3270 /*
3271 * Returning -1 will skip the syscall execution. We also want to
3272 * avoid clobbering any registers, so we return here rather than
3273 * branching to the skip label (which would overwrite gpr[3]).
3274 */
3275 return -1;
3276 }
3277
Kees Cook1addc572016-06-02 19:55:09 -07003278 /*
3279 * The tracer may decide to abort the syscall; if so, tracehook
3280 * will return nonzero. Note that the tracer may also simply change
3281 * regs->gpr[0] to an invalid syscall number; that case is caught
3282 * by the NR_syscalls check below.
3283 */
3284 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
3285 tracehook_report_syscall_entry(regs))
3286 goto skip;
3287
3288 /* Run seccomp after ptrace; allow it to set gpr[3]. */
Michael Ellerman2449acc2015-07-23 20:21:09 +10003289 if (do_seccomp(regs))
3290 return -1;
Stephen Rothwelle8a30302005-10-13 15:52:04 +10003291
Kees Cook1addc572016-06-02 19:55:09 -07003292 /* Avoid trace and audit when syscall is invalid. */
3293 if (regs->gpr[0] >= NR_syscalls)
3294 goto skip;
David Woodhouseea9c1022005-05-08 15:56:09 +01003295
Ian Munsie02424d82011-02-02 17:27:24 +00003296 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
3297 trace_sys_enter(regs, regs->gpr[0]);
3298
David Woodhousecfcd1702007-01-14 09:38:18 +08003299#ifdef CONFIG_PPC64
Eric Parisb05d8442012-01-03 14:23:06 -05003300 if (!is_32bit_task())
Eric Paris91397402014-03-11 13:29:28 -04003301 audit_syscall_entry(regs->gpr[0], regs->gpr[3], regs->gpr[4],
Eric Parisb05d8442012-01-03 14:23:06 -05003302 regs->gpr[5], regs->gpr[6]);
3303 else
Stephen Rothwelle8a30302005-10-13 15:52:04 +10003304#endif
Eric Paris91397402014-03-11 13:29:28 -04003305 audit_syscall_entry(regs->gpr[0],
Eric Parisb05d8442012-01-03 14:23:06 -05003306 regs->gpr[3] & 0xffffffff,
3307 regs->gpr[4] & 0xffffffff,
3308 regs->gpr[5] & 0xffffffff,
3309 regs->gpr[6] & 0xffffffff);
Roland McGrath4f72c422008-07-27 16:51:03 +10003310
Michael Ellermand3837412015-07-23 20:21:02 +10003311 /* Return the possibly modified but valid syscall number */
3312 return regs->gpr[0];
Kees Cook1addc572016-06-02 19:55:09 -07003313
3314skip:
3315 /*
3316 * If we are aborting explicitly, or if the syscall number is
3317 * now invalid, set the return value to -ENOSYS.
3318 */
3319 regs->gpr[3] = -ENOSYS;
3320 return -1;
David Woodhouseea9c1022005-05-08 15:56:09 +01003321}
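/*
 * Illustrative sketch only: how a plain PTRACE_SYSCALL tracer can use the
 * contract above to cancel a system call at the entry stop. Rewriting
 * gpr[0] to an out-of-range number makes the NR_syscalls check above take
 * the skip path, so the child observes -ENOSYS. Uses the uapi struct
 * pt_regs; error handling is omitted.
 *
 *	struct pt_regs regs;
 *
 *	ptrace(PTRACE_GETREGS, pid, NULL, &regs);
 *	regs.gpr[0] = (unsigned long)-1;	// invalid syscall number
 *	ptrace(PTRACE_SETREGS, pid, NULL, &regs);
 *	ptrace(PTRACE_SYSCALL, pid, NULL, NULL);	// run to the exit stop
 */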
3322
3323void do_syscall_trace_leave(struct pt_regs *regs)
3324{
Roland McGrath4f72c422008-07-27 16:51:03 +10003325 int step;
3326
Eric Parisd7e75282012-01-03 14:23:06 -05003327 audit_syscall_exit(regs);
David Woodhouseea9c1022005-05-08 15:56:09 +01003328
Ian Munsie02424d82011-02-02 17:27:24 +00003329 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
3330 trace_sys_exit(regs, regs->result);
3331
Roland McGrath4f72c422008-07-27 16:51:03 +10003332 step = test_thread_flag(TIF_SINGLESTEP);
3333 if (step || test_thread_flag(TIF_SYSCALL_TRACE))
3334 tracehook_report_syscall_exit(regs, step);
Li Zhong22ecbe82013-05-13 16:16:40 +00003335
3336 user_enter();
David Woodhouseea9c1022005-05-08 15:56:09 +01003337}
Michael Ellerman002af932018-10-12 23:13:17 +11003338
3339void __init pt_regs_check(void)
3340{
3341 BUILD_BUG_ON(offsetof(struct pt_regs, gpr) !=
3342 offsetof(struct user_pt_regs, gpr));
3343 BUILD_BUG_ON(offsetof(struct pt_regs, nip) !=
3344 offsetof(struct user_pt_regs, nip));
3345 BUILD_BUG_ON(offsetof(struct pt_regs, msr) !=
3346 offsetof(struct user_pt_regs, msr));
3349 BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
3350 offsetof(struct user_pt_regs, orig_gpr3));
3351 BUILD_BUG_ON(offsetof(struct pt_regs, ctr) !=
3352 offsetof(struct user_pt_regs, ctr));
3353 BUILD_BUG_ON(offsetof(struct pt_regs, link) !=
3354 offsetof(struct user_pt_regs, link));
3355 BUILD_BUG_ON(offsetof(struct pt_regs, xer) !=
3356 offsetof(struct user_pt_regs, xer));
3357 BUILD_BUG_ON(offsetof(struct pt_regs, ccr) !=
3358 offsetof(struct user_pt_regs, ccr));
3359#ifdef __powerpc64__
3360 BUILD_BUG_ON(offsetof(struct pt_regs, softe) !=
3361 offsetof(struct user_pt_regs, softe));
3362#else
3363 BUILD_BUG_ON(offsetof(struct pt_regs, mq) !=
3364 offsetof(struct user_pt_regs, mq));
3365#endif
3366 BUILD_BUG_ON(offsetof(struct pt_regs, trap) !=
3367 offsetof(struct user_pt_regs, trap));
3368 BUILD_BUG_ON(offsetof(struct pt_regs, dar) !=
3369 offsetof(struct user_pt_regs, dar));
3370 BUILD_BUG_ON(offsetof(struct pt_regs, dsisr) !=
3371 offsetof(struct user_pt_regs, dsisr));
3372 BUILD_BUG_ON(offsetof(struct pt_regs, result) !=
3373 offsetof(struct user_pt_regs, result));
3374
3375 BUILD_BUG_ON(sizeof(struct user_pt_regs) > sizeof(struct pt_regs));
3376}