blob: 10543eb1f8f391971d1cab26a29e670ce56eed8b [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *
5 * Derived from "arch/m68k/kernel/ptrace.c"
6 * Copyright (C) 1994 by Hamish Macdonald
7 * Taken from linux/kernel/ptrace.c and modified for M680x0.
8 * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
9 *
10 * Modified by Cort Dougan (cort@hq.fsmlabs.com)
Paul Mackerrasb1239232005-10-20 09:11:29 +100011 * and Paul Mackerras (paulus@samba.org).
Linus Torvalds1da177e2005-04-16 15:20:36 -070012 *
13 * This file is subject to the terms and conditions of the GNU General
14 * Public License. See the file README.legal in the main directory of
15 * this archive for more details.
16 */
17
18#include <linux/kernel.h>
19#include <linux/sched.h>
20#include <linux/mm.h>
21#include <linux/smp.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070022#include <linux/errno.h>
23#include <linux/ptrace.h>
Roland McGrathf65255e2007-12-20 03:57:34 -080024#include <linux/regset.h>
Roland McGrath4f72c422008-07-27 16:51:03 +100025#include <linux/tracehook.h>
Roland McGrath3caf06c2007-12-20 03:57:39 -080026#include <linux/elf.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070027#include <linux/user.h>
28#include <linux/security.h>
Jesper Juhl7ed20e12005-05-01 08:59:14 -070029#include <linux/signal.h>
David Woodhouseea9c1022005-05-08 15:56:09 +010030#include <linux/seccomp.h>
31#include <linux/audit.h>
Ian Munsie02424d82011-02-02 17:27:24 +000032#include <trace/syscall.h>
K.Prasad5aae8a52010-06-15 11:35:19 +053033#include <linux/hw_breakpoint.h>
34#include <linux/perf_event.h>
Li Zhong22ecbe82013-05-13 16:16:40 +000035#include <linux/context_tracking.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070036
Linus Torvalds7c0f6ba2016-12-24 11:46:01 -080037#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070038#include <asm/page.h>
39#include <asm/pgtable.h>
David Howellsae3a1972012-03-28 18:30:02 +010040#include <asm/switch_to.h>
Cyril Burc7a318b2016-08-10 15:44:46 +100041#include <asm/tm.h>
Daniel Axtens0545d542016-09-06 15:32:43 +100042#include <asm/asm-prototypes.h>
Paul Mackerras21a62902005-11-19 20:47:22 +110043
Ian Munsie02424d82011-02-02 17:27:24 +000044#define CREATE_TRACE_POINTS
45#include <trace/events/syscalls.h>
46
Linus Torvalds1da177e2005-04-16 15:20:36 -070047/*
Mahesh Salgaonkar359e4282010-04-07 18:10:20 +100048 * The parameter save area on the stack is used to store arguments being passed
49 * to callee function and is located at fixed offset from stack pointer.
50 */
51#ifdef CONFIG_PPC32
52#define PARAMETER_SAVE_AREA_OFFSET 24 /* bytes */
53#else /* CONFIG_PPC32 */
54#define PARAMETER_SAVE_AREA_OFFSET 48 /* bytes */
55#endif
56
57struct pt_regs_offset {
58 const char *name;
59 int offset;
60};
61
62#define STR(s) #s /* convert to string */
63#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
64#define GPR_OFFSET_NAME(num) \
Rashmica Gupta343c3322015-11-21 17:08:16 +110065 {.name = STR(r##num), .offset = offsetof(struct pt_regs, gpr[num])}, \
Mahesh Salgaonkar359e4282010-04-07 18:10:20 +100066 {.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
67#define REG_OFFSET_END {.name = NULL, .offset = 0}
68
Anshuman Khandual8c13f592016-07-28 10:57:38 +080069#define TVSO(f) (offsetof(struct thread_vr_state, f))
Anshuman Khandual9d3918f2016-07-28 10:57:39 +080070#define TFSO(f) (offsetof(struct thread_fp_state, f))
Anshuman Khandual08e1c012016-07-28 10:57:40 +080071#define TSO(f) (offsetof(struct thread_struct, f))
Anshuman Khandual8c13f592016-07-28 10:57:38 +080072
Mahesh Salgaonkar359e4282010-04-07 18:10:20 +100073static const struct pt_regs_offset regoffset_table[] = {
74 GPR_OFFSET_NAME(0),
75 GPR_OFFSET_NAME(1),
76 GPR_OFFSET_NAME(2),
77 GPR_OFFSET_NAME(3),
78 GPR_OFFSET_NAME(4),
79 GPR_OFFSET_NAME(5),
80 GPR_OFFSET_NAME(6),
81 GPR_OFFSET_NAME(7),
82 GPR_OFFSET_NAME(8),
83 GPR_OFFSET_NAME(9),
84 GPR_OFFSET_NAME(10),
85 GPR_OFFSET_NAME(11),
86 GPR_OFFSET_NAME(12),
87 GPR_OFFSET_NAME(13),
88 GPR_OFFSET_NAME(14),
89 GPR_OFFSET_NAME(15),
90 GPR_OFFSET_NAME(16),
91 GPR_OFFSET_NAME(17),
92 GPR_OFFSET_NAME(18),
93 GPR_OFFSET_NAME(19),
94 GPR_OFFSET_NAME(20),
95 GPR_OFFSET_NAME(21),
96 GPR_OFFSET_NAME(22),
97 GPR_OFFSET_NAME(23),
98 GPR_OFFSET_NAME(24),
99 GPR_OFFSET_NAME(25),
100 GPR_OFFSET_NAME(26),
101 GPR_OFFSET_NAME(27),
102 GPR_OFFSET_NAME(28),
103 GPR_OFFSET_NAME(29),
104 GPR_OFFSET_NAME(30),
105 GPR_OFFSET_NAME(31),
106 REG_OFFSET_NAME(nip),
107 REG_OFFSET_NAME(msr),
108 REG_OFFSET_NAME(ctr),
109 REG_OFFSET_NAME(link),
110 REG_OFFSET_NAME(xer),
111 REG_OFFSET_NAME(ccr),
112#ifdef CONFIG_PPC64
113 REG_OFFSET_NAME(softe),
114#else
115 REG_OFFSET_NAME(mq),
116#endif
117 REG_OFFSET_NAME(trap),
118 REG_OFFSET_NAME(dar),
119 REG_OFFSET_NAME(dsisr),
120 REG_OFFSET_END,
121};
122
Cyril Burc7a318b2016-08-10 15:44:46 +1000123#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
124static void flush_tmregs_to_thread(struct task_struct *tsk)
125{
126 /*
127 * If task is not current, it will have been flushed already to
128 * it's thread_struct during __switch_to().
129 *
130 * A reclaim flushes ALL the state.
131 */
132
133 if (tsk == current && MSR_TM_SUSPENDED(mfmsr()))
134 tm_reclaim_current(TM_CAUSE_SIGNAL);
135
136}
137#else
138static inline void flush_tmregs_to_thread(struct task_struct *tsk) { }
139#endif
140
Mahesh Salgaonkar359e4282010-04-07 18:10:20 +1000141/**
142 * regs_query_register_offset() - query register offset from its name
143 * @name: the name of a register
144 *
145 * regs_query_register_offset() returns the offset of a register in struct
146 * pt_regs from its name. If the name is invalid, this returns -EINVAL;
147 */
148int regs_query_register_offset(const char *name)
149{
150 const struct pt_regs_offset *roff;
151 for (roff = regoffset_table; roff->name != NULL; roff++)
152 if (!strcmp(roff->name, name))
153 return roff->offset;
154 return -EINVAL;
155}
156
157/**
158 * regs_query_register_name() - query register name from its offset
159 * @offset: the offset of a register in struct pt_regs.
160 *
161 * regs_query_register_name() returns the name of a register from its
162 * offset in struct pt_regs. If the @offset is invalid, this returns NULL;
163 */
164const char *regs_query_register_name(unsigned int offset)
165{
166 const struct pt_regs_offset *roff;
167 for (roff = regoffset_table; roff->name != NULL; roff++)
168 if (roff->offset == offset)
169 return roff->name;
170 return NULL;
171}
172
173/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700174 * does not yet catch signals sent when the child dies.
175 * in exit.c or in signal.c.
176 */
177
Linus Torvalds1da177e2005-04-16 15:20:36 -0700178/*
Benjamin Herrenschmidtabd06502007-06-04 15:15:47 +1000179 * Set of msr bits that gdb can change on behalf of a process.
180 */
Dave Kleikamp172ae2e2010-02-08 11:50:57 +0000181#ifdef CONFIG_PPC_ADV_DEBUG_REGS
Benjamin Herrenschmidtabd06502007-06-04 15:15:47 +1000182#define MSR_DEBUGCHANGE 0
183#else
184#define MSR_DEBUGCHANGE (MSR_SE | MSR_BE)
185#endif
186
187/*
188 * Max register writeable via put_reg
189 */
190#ifdef CONFIG_PPC32
191#define PT_MAX_PUT_REG PT_MQ
192#else
193#define PT_MAX_PUT_REG PT_CCR
194#endif
195
Roland McGrath26f77132007-12-20 03:57:51 -0800196static unsigned long get_user_msr(struct task_struct *task)
197{
198 return task->thread.regs->msr | task->thread.fpexc_mode;
199}
200
201static int set_user_msr(struct task_struct *task, unsigned long msr)
202{
203 task->thread.regs->msr &= ~MSR_DEBUGCHANGE;
204 task->thread.regs->msr |= msr & MSR_DEBUGCHANGE;
205 return 0;
206}
207
Anshuman Khandual25847fb2016-07-28 10:57:36 +0800208#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
209static unsigned long get_user_ckpt_msr(struct task_struct *task)
210{
211 return task->thread.ckpt_regs.msr | task->thread.fpexc_mode;
212}
213
214static int set_user_ckpt_msr(struct task_struct *task, unsigned long msr)
215{
216 task->thread.ckpt_regs.msr &= ~MSR_DEBUGCHANGE;
217 task->thread.ckpt_regs.msr |= msr & MSR_DEBUGCHANGE;
218 return 0;
219}
220
221static int set_user_ckpt_trap(struct task_struct *task, unsigned long trap)
222{
223 task->thread.ckpt_regs.trap = trap & 0xfff0;
224 return 0;
225}
226#endif
227
Alexey Kardashevskiy1715a822013-01-10 20:29:09 +0000228#ifdef CONFIG_PPC64
Alexey Kardashevskiyee4a3912013-02-14 17:44:23 +0000229static int get_user_dscr(struct task_struct *task, unsigned long *data)
Alexey Kardashevskiy1715a822013-01-10 20:29:09 +0000230{
Alexey Kardashevskiyee4a3912013-02-14 17:44:23 +0000231 *data = task->thread.dscr;
232 return 0;
Alexey Kardashevskiy1715a822013-01-10 20:29:09 +0000233}
234
235static int set_user_dscr(struct task_struct *task, unsigned long dscr)
236{
237 task->thread.dscr = dscr;
238 task->thread.dscr_inherit = 1;
239 return 0;
240}
241#else
Alexey Kardashevskiyee4a3912013-02-14 17:44:23 +0000242static int get_user_dscr(struct task_struct *task, unsigned long *data)
Alexey Kardashevskiy1715a822013-01-10 20:29:09 +0000243{
244 return -EIO;
245}
246
247static int set_user_dscr(struct task_struct *task, unsigned long dscr)
248{
249 return -EIO;
250}
251#endif
252
Roland McGrath26f77132007-12-20 03:57:51 -0800253/*
254 * We prevent mucking around with the reserved area of trap
255 * which are used internally by the kernel.
256 */
257static int set_user_trap(struct task_struct *task, unsigned long trap)
258{
259 task->thread.regs->trap = trap & 0xfff0;
260 return 0;
261}
262
Benjamin Herrenschmidtabd06502007-06-04 15:15:47 +1000263/*
Benjamin Herrenschmidt865418d2007-06-04 15:15:44 +1000264 * Get contents of register REGNO in task TASK.
265 */
Alexey Kardashevskiyee4a3912013-02-14 17:44:23 +0000266int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data)
Benjamin Herrenschmidt865418d2007-06-04 15:15:44 +1000267{
Alexey Kardashevskiyee4a3912013-02-14 17:44:23 +0000268 if ((task->thread.regs == NULL) || !data)
Benjamin Herrenschmidt865418d2007-06-04 15:15:44 +1000269 return -EIO;
270
Alexey Kardashevskiyee4a3912013-02-14 17:44:23 +0000271 if (regno == PT_MSR) {
272 *data = get_user_msr(task);
273 return 0;
274 }
Benjamin Herrenschmidt865418d2007-06-04 15:15:44 +1000275
Alexey Kardashevskiy1715a822013-01-10 20:29:09 +0000276 if (regno == PT_DSCR)
Alexey Kardashevskiyee4a3912013-02-14 17:44:23 +0000277 return get_user_dscr(task, data);
Alexey Kardashevskiy1715a822013-01-10 20:29:09 +0000278
Alexey Kardashevskiyee4a3912013-02-14 17:44:23 +0000279 if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long))) {
280 *data = ((unsigned long *)task->thread.regs)[regno];
281 return 0;
282 }
Benjamin Herrenschmidt865418d2007-06-04 15:15:44 +1000283
284 return -EIO;
285}
286
287/*
288 * Write contents of register REGNO in task TASK.
289 */
290int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
291{
292 if (task->thread.regs == NULL)
293 return -EIO;
294
Roland McGrath26f77132007-12-20 03:57:51 -0800295 if (regno == PT_MSR)
296 return set_user_msr(task, data);
297 if (regno == PT_TRAP)
298 return set_user_trap(task, data);
Alexey Kardashevskiy1715a822013-01-10 20:29:09 +0000299 if (regno == PT_DSCR)
300 return set_user_dscr(task, data);
Roland McGrath26f77132007-12-20 03:57:51 -0800301
302 if (regno <= PT_MAX_PUT_REG) {
Benjamin Herrenschmidt865418d2007-06-04 15:15:44 +1000303 ((unsigned long *)task->thread.regs)[regno] = data;
304 return 0;
305 }
306 return -EIO;
307}
308
Roland McGrath44dd3f52007-12-20 03:57:55 -0800309static int gpr_get(struct task_struct *target, const struct user_regset *regset,
310 unsigned int pos, unsigned int count,
311 void *kbuf, void __user *ubuf)
312{
Mike Wolfa71f5d52011-03-21 11:14:53 +1100313 int i, ret;
Roland McGrath44dd3f52007-12-20 03:57:55 -0800314
315 if (target->thread.regs == NULL)
316 return -EIO;
317
Mike Wolfa71f5d52011-03-21 11:14:53 +1100318 if (!FULL_REGS(target->thread.regs)) {
319 /* We have a partial register set. Fill 14-31 with bogus values */
320 for (i = 14; i < 32; i++)
321 target->thread.regs->gpr[i] = NV_REG_POISON;
322 }
Roland McGrath44dd3f52007-12-20 03:57:55 -0800323
324 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
325 target->thread.regs,
326 0, offsetof(struct pt_regs, msr));
327 if (!ret) {
328 unsigned long msr = get_user_msr(target);
329 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
330 offsetof(struct pt_regs, msr),
331 offsetof(struct pt_regs, msr) +
332 sizeof(msr));
333 }
334
335 BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
336 offsetof(struct pt_regs, msr) + sizeof(long));
337
338 if (!ret)
339 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
340 &target->thread.regs->orig_gpr3,
341 offsetof(struct pt_regs, orig_gpr3),
342 sizeof(struct pt_regs));
343 if (!ret)
344 ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
345 sizeof(struct pt_regs), -1);
346
347 return ret;
348}
349
350static int gpr_set(struct task_struct *target, const struct user_regset *regset,
351 unsigned int pos, unsigned int count,
352 const void *kbuf, const void __user *ubuf)
353{
354 unsigned long reg;
355 int ret;
356
357 if (target->thread.regs == NULL)
358 return -EIO;
359
360 CHECK_FULL_REGS(target->thread.regs);
361
362 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
363 target->thread.regs,
364 0, PT_MSR * sizeof(reg));
365
366 if (!ret && count > 0) {
367 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
368 PT_MSR * sizeof(reg),
369 (PT_MSR + 1) * sizeof(reg));
370 if (!ret)
371 ret = set_user_msr(target, reg);
372 }
373
374 BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
375 offsetof(struct pt_regs, msr) + sizeof(long));
376
377 if (!ret)
378 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
379 &target->thread.regs->orig_gpr3,
380 PT_ORIG_R3 * sizeof(reg),
381 (PT_MAX_PUT_REG + 1) * sizeof(reg));
382
383 if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
384 ret = user_regset_copyin_ignore(
385 &pos, &count, &kbuf, &ubuf,
386 (PT_MAX_PUT_REG + 1) * sizeof(reg),
387 PT_TRAP * sizeof(reg));
388
389 if (!ret && count > 0) {
390 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
391 PT_TRAP * sizeof(reg),
392 (PT_TRAP + 1) * sizeof(reg));
393 if (!ret)
394 ret = set_user_trap(target, reg);
395 }
396
397 if (!ret)
398 ret = user_regset_copyin_ignore(
399 &pos, &count, &kbuf, &ubuf,
400 (PT_TRAP + 1) * sizeof(reg), -1);
401
402 return ret;
403}
Benjamin Herrenschmidt865418d2007-06-04 15:15:44 +1000404
Anshuman Khandual1ec85492016-07-28 10:57:32 +0800405/*
Cyril Burdc310662016-09-23 16:18:24 +1000406 * Regardless of transactions, 'fp_state' holds the current running
Cyril Bur000ec282016-09-23 16:18:25 +1000407 * value of all FPR registers and 'ckfp_state' holds the last checkpointed
Cyril Burdc310662016-09-23 16:18:24 +1000408 * value of all FPR registers for the current transaction.
Anshuman Khandual1ec85492016-07-28 10:57:32 +0800409 *
410 * Userspace interface buffer layout:
411 *
412 * struct data {
413 * u64 fpr[32];
414 * u64 fpscr;
415 * };
Anshuman Khandual1ec85492016-07-28 10:57:32 +0800416 */
Roland McGrathf65255e2007-12-20 03:57:34 -0800417static int fpr_get(struct task_struct *target, const struct user_regset *regset,
418 unsigned int pos, unsigned int count,
419 void *kbuf, void __user *ubuf)
420{
Michael Neulingc6e67712008-06-25 14:07:18 +1000421#ifdef CONFIG_VSX
Paul Mackerrasde79f7b2013-09-10 20:20:42 +1000422 u64 buf[33];
Michael Neulingc6e67712008-06-25 14:07:18 +1000423 int i;
Cyril Burdc310662016-09-23 16:18:24 +1000424
Roland McGrathf65255e2007-12-20 03:57:34 -0800425 flush_fp_to_thread(target);
426
Michael Neulingc6e67712008-06-25 14:07:18 +1000427 /* copy to local buffer then write that out */
428 for (i = 0; i < 32 ; i++)
429 buf[i] = target->thread.TS_FPR(i);
Paul Mackerrasde79f7b2013-09-10 20:20:42 +1000430 buf[32] = target->thread.fp_state.fpscr;
Michael Neulingc6e67712008-06-25 14:07:18 +1000431 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
Cyril Burdc310662016-09-23 16:18:24 +1000432#else
Paul Mackerrasde79f7b2013-09-10 20:20:42 +1000433 BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
Khem Raj1e407ee2016-04-25 09:19:17 -0700434 offsetof(struct thread_fp_state, fpr[32]));
Roland McGrathf65255e2007-12-20 03:57:34 -0800435
Cyril Burdc310662016-09-23 16:18:24 +1000436 flush_fp_to_thread(target);
437
Roland McGrathf65255e2007-12-20 03:57:34 -0800438 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
Paul Mackerrasde79f7b2013-09-10 20:20:42 +1000439 &target->thread.fp_state, 0, -1);
Michael Neulingc6e67712008-06-25 14:07:18 +1000440#endif
Roland McGrathf65255e2007-12-20 03:57:34 -0800441}
442
Anshuman Khandual1ec85492016-07-28 10:57:32 +0800443/*
Cyril Burdc310662016-09-23 16:18:24 +1000444 * Regardless of transactions, 'fp_state' holds the current running
Cyril Bur000ec282016-09-23 16:18:25 +1000445 * value of all FPR registers and 'ckfp_state' holds the last checkpointed
Cyril Burdc310662016-09-23 16:18:24 +1000446 * value of all FPR registers for the current transaction.
Anshuman Khandual1ec85492016-07-28 10:57:32 +0800447 *
448 * Userspace interface buffer layout:
449 *
450 * struct data {
451 * u64 fpr[32];
452 * u64 fpscr;
453 * };
454 *
Anshuman Khandual1ec85492016-07-28 10:57:32 +0800455 */
Roland McGrathf65255e2007-12-20 03:57:34 -0800456static int fpr_set(struct task_struct *target, const struct user_regset *regset,
457 unsigned int pos, unsigned int count,
458 const void *kbuf, const void __user *ubuf)
459{
Michael Neulingc6e67712008-06-25 14:07:18 +1000460#ifdef CONFIG_VSX
Paul Mackerrasde79f7b2013-09-10 20:20:42 +1000461 u64 buf[33];
Michael Neulingc6e67712008-06-25 14:07:18 +1000462 int i;
Cyril Burdc310662016-09-23 16:18:24 +1000463
Roland McGrathf65255e2007-12-20 03:57:34 -0800464 flush_fp_to_thread(target);
465
Dave Martin99dfe802017-01-05 16:50:57 +0000466 for (i = 0; i < 32 ; i++)
467 buf[i] = target->thread.TS_FPR(i);
468 buf[32] = target->thread.fp_state.fpscr;
469
Anshuman Khandual1ec85492016-07-28 10:57:32 +0800470 /* copy to local buffer then write that out */
471 i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
472 if (i)
473 return i;
474
Michael Neulingc6e67712008-06-25 14:07:18 +1000475 for (i = 0; i < 32 ; i++)
476 target->thread.TS_FPR(i) = buf[i];
Paul Mackerrasde79f7b2013-09-10 20:20:42 +1000477 target->thread.fp_state.fpscr = buf[32];
Michael Neulingc6e67712008-06-25 14:07:18 +1000478 return 0;
Cyril Burdc310662016-09-23 16:18:24 +1000479#else
Paul Mackerrasde79f7b2013-09-10 20:20:42 +1000480 BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
Khem Raj1e407ee2016-04-25 09:19:17 -0700481 offsetof(struct thread_fp_state, fpr[32]));
Roland McGrathf65255e2007-12-20 03:57:34 -0800482
Cyril Burdc310662016-09-23 16:18:24 +1000483 flush_fp_to_thread(target);
484
Roland McGrathf65255e2007-12-20 03:57:34 -0800485 return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
Paul Mackerrasde79f7b2013-09-10 20:20:42 +1000486 &target->thread.fp_state, 0, -1);
Michael Neulingc6e67712008-06-25 14:07:18 +1000487#endif
Roland McGrathf65255e2007-12-20 03:57:34 -0800488}
489
Benjamin Herrenschmidt865418d2007-06-04 15:15:44 +1000490#ifdef CONFIG_ALTIVEC
491/*
492 * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
493 * The transfer totals 34 quadword. Quadwords 0-31 contain the
494 * corresponding vector registers. Quadword 32 contains the vscr as the
495 * last word (offset 12) within that quadword. Quadword 33 contains the
496 * vrsave as the first word (offset 0) within the quadword.
497 *
498 * This definition of the VMX state is compatible with the current PPC32
499 * ptrace interface. This allows signal handling and ptrace to use the
500 * same structures. This also simplifies the implementation of a bi-arch
501 * (combined (32- and 64-bit) gdb.
502 */
503
Roland McGrath3caf06c2007-12-20 03:57:39 -0800504static int vr_active(struct task_struct *target,
505 const struct user_regset *regset)
506{
507 flush_altivec_to_thread(target);
508 return target->thread.used_vr ? regset->n : 0;
509}
510
Anshuman Khanduald844e272016-07-28 10:57:33 +0800511/*
Cyril Burdc310662016-09-23 16:18:24 +1000512 * Regardless of transactions, 'vr_state' holds the current running
Cyril Bur000ec282016-09-23 16:18:25 +1000513 * value of all the VMX registers and 'ckvr_state' holds the last
Cyril Burdc310662016-09-23 16:18:24 +1000514 * checkpointed value of all the VMX registers for the current
515 * transaction to fall back on in case it aborts.
Anshuman Khanduald844e272016-07-28 10:57:33 +0800516 *
517 * Userspace interface buffer layout:
518 *
519 * struct data {
520 * vector128 vr[32];
521 * vector128 vscr;
522 * vector128 vrsave;
523 * };
524 */
Roland McGrath3caf06c2007-12-20 03:57:39 -0800525static int vr_get(struct task_struct *target, const struct user_regset *regset,
526 unsigned int pos, unsigned int count,
527 void *kbuf, void __user *ubuf)
528{
529 int ret;
530
531 flush_altivec_to_thread(target);
532
Paul Mackerrasde79f7b2013-09-10 20:20:42 +1000533 BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
534 offsetof(struct thread_vr_state, vr[32]));
Roland McGrath3caf06c2007-12-20 03:57:39 -0800535
536 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
Cyril Burdc310662016-09-23 16:18:24 +1000537 &target->thread.vr_state, 0,
Roland McGrath3caf06c2007-12-20 03:57:39 -0800538 33 * sizeof(vector128));
539 if (!ret) {
540 /*
541 * Copy out only the low-order word of vrsave.
542 */
543 union {
544 elf_vrreg_t reg;
545 u32 word;
546 } vrsave;
547 memset(&vrsave, 0, sizeof(vrsave));
Anshuman Khanduald844e272016-07-28 10:57:33 +0800548
Roland McGrath3caf06c2007-12-20 03:57:39 -0800549 vrsave.word = target->thread.vrsave;
Anshuman Khanduald844e272016-07-28 10:57:33 +0800550
Roland McGrath3caf06c2007-12-20 03:57:39 -0800551 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
552 33 * sizeof(vector128), -1);
553 }
554
555 return ret;
556}
557
Anshuman Khanduald844e272016-07-28 10:57:33 +0800558/*
Cyril Burdc310662016-09-23 16:18:24 +1000559 * Regardless of transactions, 'vr_state' holds the current running
Cyril Bur000ec282016-09-23 16:18:25 +1000560 * value of all the VMX registers and 'ckvr_state' holds the last
Cyril Burdc310662016-09-23 16:18:24 +1000561 * checkpointed value of all the VMX registers for the current
562 * transaction to fall back on in case it aborts.
Anshuman Khanduald844e272016-07-28 10:57:33 +0800563 *
564 * Userspace interface buffer layout:
565 *
566 * struct data {
567 * vector128 vr[32];
568 * vector128 vscr;
569 * vector128 vrsave;
570 * };
571 */
Roland McGrath3caf06c2007-12-20 03:57:39 -0800572static int vr_set(struct task_struct *target, const struct user_regset *regset,
573 unsigned int pos, unsigned int count,
574 const void *kbuf, const void __user *ubuf)
575{
576 int ret;
577
578 flush_altivec_to_thread(target);
579
Paul Mackerrasde79f7b2013-09-10 20:20:42 +1000580 BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
581 offsetof(struct thread_vr_state, vr[32]));
Roland McGrath3caf06c2007-12-20 03:57:39 -0800582
583 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
Cyril Burdc310662016-09-23 16:18:24 +1000584 &target->thread.vr_state, 0,
Paul Mackerrasde79f7b2013-09-10 20:20:42 +1000585 33 * sizeof(vector128));
Roland McGrath3caf06c2007-12-20 03:57:39 -0800586 if (!ret && count > 0) {
587 /*
588 * We use only the first word of vrsave.
589 */
590 union {
591 elf_vrreg_t reg;
592 u32 word;
593 } vrsave;
594 memset(&vrsave, 0, sizeof(vrsave));
Anshuman Khanduald844e272016-07-28 10:57:33 +0800595
Roland McGrath3caf06c2007-12-20 03:57:39 -0800596 vrsave.word = target->thread.vrsave;
Cyril Burdc310662016-09-23 16:18:24 +1000597
Roland McGrath3caf06c2007-12-20 03:57:39 -0800598 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
599 33 * sizeof(vector128), -1);
Cyril Burdc310662016-09-23 16:18:24 +1000600 if (!ret)
Roland McGrath3caf06c2007-12-20 03:57:39 -0800601 target->thread.vrsave = vrsave.word;
602 }
603
604 return ret;
605}
Benjamin Herrenschmidt865418d2007-06-04 15:15:44 +1000606#endif /* CONFIG_ALTIVEC */
607
Michael Neulingce48b212008-06-25 14:07:18 +1000608#ifdef CONFIG_VSX
609/*
610 * Currently to set and and get all the vsx state, you need to call
Lucas De Marchi25985ed2011-03-30 22:57:33 -0300611 * the fp and VMX calls as well. This only get/sets the lower 32
Michael Neulingce48b212008-06-25 14:07:18 +1000612 * 128bit VSX registers.
613 */
614
615static int vsr_active(struct task_struct *target,
616 const struct user_regset *regset)
617{
618 flush_vsx_to_thread(target);
619 return target->thread.used_vsr ? regset->n : 0;
620}
621
Anshuman Khandual94b7d362016-07-28 10:57:34 +0800622/*
Cyril Burdc310662016-09-23 16:18:24 +1000623 * Regardless of transactions, 'fp_state' holds the current running
Cyril Bur000ec282016-09-23 16:18:25 +1000624 * value of all FPR registers and 'ckfp_state' holds the last
Cyril Burdc310662016-09-23 16:18:24 +1000625 * checkpointed value of all FPR registers for the current
626 * transaction.
Anshuman Khandual94b7d362016-07-28 10:57:34 +0800627 *
628 * Userspace interface buffer layout:
629 *
630 * struct data {
631 * u64 vsx[32];
632 * };
633 */
Michael Neulingce48b212008-06-25 14:07:18 +1000634static int vsr_get(struct task_struct *target, const struct user_regset *regset,
635 unsigned int pos, unsigned int count,
636 void *kbuf, void __user *ubuf)
637{
Paul Mackerrasde79f7b2013-09-10 20:20:42 +1000638 u64 buf[32];
Michael Neulingf3e909c2008-07-01 14:01:39 +1000639 int ret, i;
Michael Neulingce48b212008-06-25 14:07:18 +1000640
Cyril Burdc310662016-09-23 16:18:24 +1000641 flush_tmregs_to_thread(target);
Anshuman Khandual94b7d362016-07-28 10:57:34 +0800642 flush_fp_to_thread(target);
643 flush_altivec_to_thread(target);
Michael Neulingce48b212008-06-25 14:07:18 +1000644 flush_vsx_to_thread(target);
645
Michael Neulingf3e909c2008-07-01 14:01:39 +1000646 for (i = 0; i < 32 ; i++)
Paul Mackerrasde79f7b2013-09-10 20:20:42 +1000647 buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
Cyril Burdc310662016-09-23 16:18:24 +1000648
Michael Neulingce48b212008-06-25 14:07:18 +1000649 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
Michael Neulingf3e909c2008-07-01 14:01:39 +1000650 buf, 0, 32 * sizeof(double));
Michael Neulingce48b212008-06-25 14:07:18 +1000651
652 return ret;
653}
654
Anshuman Khandual94b7d362016-07-28 10:57:34 +0800655/*
Cyril Burdc310662016-09-23 16:18:24 +1000656 * Regardless of transactions, 'fp_state' holds the current running
Cyril Bur000ec282016-09-23 16:18:25 +1000657 * value of all FPR registers and 'ckfp_state' holds the last
Cyril Burdc310662016-09-23 16:18:24 +1000658 * checkpointed value of all FPR registers for the current
659 * transaction.
Anshuman Khandual94b7d362016-07-28 10:57:34 +0800660 *
661 * Userspace interface buffer layout:
662 *
663 * struct data {
664 * u64 vsx[32];
665 * };
666 */
Michael Neulingce48b212008-06-25 14:07:18 +1000667static int vsr_set(struct task_struct *target, const struct user_regset *regset,
668 unsigned int pos, unsigned int count,
669 const void *kbuf, const void __user *ubuf)
670{
Paul Mackerrasde79f7b2013-09-10 20:20:42 +1000671 u64 buf[32];
Michael Neulingf3e909c2008-07-01 14:01:39 +1000672 int ret,i;
Michael Neulingce48b212008-06-25 14:07:18 +1000673
Cyril Burdc310662016-09-23 16:18:24 +1000674 flush_tmregs_to_thread(target);
Anshuman Khandual94b7d362016-07-28 10:57:34 +0800675 flush_fp_to_thread(target);
676 flush_altivec_to_thread(target);
Michael Neulingce48b212008-06-25 14:07:18 +1000677 flush_vsx_to_thread(target);
678
Dave Martin99dfe802017-01-05 16:50:57 +0000679 for (i = 0; i < 32 ; i++)
680 buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
681
Michael Neulingce48b212008-06-25 14:07:18 +1000682 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
Michael Neulingf3e909c2008-07-01 14:01:39 +1000683 buf, 0, 32 * sizeof(double));
Cyril Burdc310662016-09-23 16:18:24 +1000684 if (!ret)
Anshuman Khandual94b7d362016-07-28 10:57:34 +0800685 for (i = 0; i < 32 ; i++)
Cyril Burdc310662016-09-23 16:18:24 +1000686 target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
Michael Neulingce48b212008-06-25 14:07:18 +1000687
688 return ret;
689}
690#endif /* CONFIG_VSX */
691
Benjamin Herrenschmidt865418d2007-06-04 15:15:44 +1000692#ifdef CONFIG_SPE
693
694/*
695 * For get_evrregs/set_evrregs functions 'data' has the following layout:
696 *
697 * struct {
698 * u32 evr[32];
699 * u64 acc;
700 * u32 spefscr;
701 * }
702 */
703
Roland McGratha4e4b172007-12-20 03:57:48 -0800704static int evr_active(struct task_struct *target,
705 const struct user_regset *regset)
706{
707 flush_spe_to_thread(target);
708 return target->thread.used_spe ? regset->n : 0;
709}
710
711static int evr_get(struct task_struct *target, const struct user_regset *regset,
712 unsigned int pos, unsigned int count,
713 void *kbuf, void __user *ubuf)
714{
715 int ret;
716
717 flush_spe_to_thread(target);
718
719 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
720 &target->thread.evr,
721 0, sizeof(target->thread.evr));
722
723 BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
724 offsetof(struct thread_struct, spefscr));
725
726 if (!ret)
727 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
728 &target->thread.acc,
729 sizeof(target->thread.evr), -1);
730
731 return ret;
732}
733
734static int evr_set(struct task_struct *target, const struct user_regset *regset,
735 unsigned int pos, unsigned int count,
736 const void *kbuf, const void __user *ubuf)
737{
738 int ret;
739
740 flush_spe_to_thread(target);
741
742 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
743 &target->thread.evr,
744 0, sizeof(target->thread.evr));
745
746 BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
747 offsetof(struct thread_struct, spefscr));
748
749 if (!ret)
750 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
751 &target->thread.acc,
752 sizeof(target->thread.evr), -1);
753
754 return ret;
755}
Benjamin Herrenschmidt865418d2007-06-04 15:15:44 +1000756#endif /* CONFIG_SPE */
757
Anshuman Khandual25847fb2016-07-28 10:57:36 +0800758#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
759/**
760 * tm_cgpr_active - get active number of registers in CGPR
761 * @target: The target task.
762 * @regset: The user regset structure.
763 *
764 * This function checks for the active number of available
765 * regisers in transaction checkpointed GPR category.
766 */
767static int tm_cgpr_active(struct task_struct *target,
768 const struct user_regset *regset)
769{
770 if (!cpu_has_feature(CPU_FTR_TM))
771 return -ENODEV;
772
773 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
774 return 0;
775
776 return regset->n;
777}
778
779/**
780 * tm_cgpr_get - get CGPR registers
781 * @target: The target task.
782 * @regset: The user regset structure.
783 * @pos: The buffer position.
784 * @count: Number of bytes to copy.
785 * @kbuf: Kernel buffer to copy from.
786 * @ubuf: User buffer to copy into.
787 *
788 * This function gets transaction checkpointed GPR registers.
789 *
790 * When the transaction is active, 'ckpt_regs' holds all the checkpointed
791 * GPR register values for the current transaction to fall back on if it
792 * aborts in between. This function gets those checkpointed GPR registers.
793 * The userspace interface buffer layout is as follows.
794 *
795 * struct data {
796 * struct pt_regs ckpt_regs;
797 * };
798 */
799static int tm_cgpr_get(struct task_struct *target,
800 const struct user_regset *regset,
801 unsigned int pos, unsigned int count,
802 void *kbuf, void __user *ubuf)
803{
804 int ret;
805
806 if (!cpu_has_feature(CPU_FTR_TM))
807 return -ENODEV;
808
809 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
810 return -ENODATA;
811
Cyril Burdc310662016-09-23 16:18:24 +1000812 flush_tmregs_to_thread(target);
Anshuman Khandual25847fb2016-07-28 10:57:36 +0800813 flush_fp_to_thread(target);
814 flush_altivec_to_thread(target);
Anshuman Khandual25847fb2016-07-28 10:57:36 +0800815
816 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
817 &target->thread.ckpt_regs,
818 0, offsetof(struct pt_regs, msr));
819 if (!ret) {
820 unsigned long msr = get_user_ckpt_msr(target);
821
822 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
823 offsetof(struct pt_regs, msr),
824 offsetof(struct pt_regs, msr) +
825 sizeof(msr));
826 }
827
828 BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
829 offsetof(struct pt_regs, msr) + sizeof(long));
830
831 if (!ret)
832 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
833 &target->thread.ckpt_regs.orig_gpr3,
834 offsetof(struct pt_regs, orig_gpr3),
835 sizeof(struct pt_regs));
836 if (!ret)
837 ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
838 sizeof(struct pt_regs), -1);
839
840 return ret;
841}
842
843/*
844 * tm_cgpr_set - set the CGPR registers
845 * @target: The target task.
846 * @regset: The user regset structure.
847 * @pos: The buffer position.
848 * @count: Number of bytes to copy.
849 * @kbuf: Kernel buffer to copy into.
850 * @ubuf: User buffer to copy from.
851 *
852 * This function sets in transaction checkpointed GPR registers.
853 *
854 * When the transaction is active, 'ckpt_regs' holds the checkpointed
855 * GPR register values for the current transaction to fall back on if it
856 * aborts in between. This function sets those checkpointed GPR registers.
857 * The userspace interface buffer layout is as follows.
858 *
859 * struct data {
860 * struct pt_regs ckpt_regs;
861 * };
862 */
863static int tm_cgpr_set(struct task_struct *target,
864 const struct user_regset *regset,
865 unsigned int pos, unsigned int count,
866 const void *kbuf, const void __user *ubuf)
867{
868 unsigned long reg;
869 int ret;
870
871 if (!cpu_has_feature(CPU_FTR_TM))
872 return -ENODEV;
873
874 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
875 return -ENODATA;
876
Cyril Burdc310662016-09-23 16:18:24 +1000877 flush_tmregs_to_thread(target);
Anshuman Khandual25847fb2016-07-28 10:57:36 +0800878 flush_fp_to_thread(target);
879 flush_altivec_to_thread(target);
Anshuman Khandual25847fb2016-07-28 10:57:36 +0800880
881 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
882 &target->thread.ckpt_regs,
883 0, PT_MSR * sizeof(reg));
884
885 if (!ret && count > 0) {
886 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
887 PT_MSR * sizeof(reg),
888 (PT_MSR + 1) * sizeof(reg));
889 if (!ret)
890 ret = set_user_ckpt_msr(target, reg);
891 }
892
893 BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
894 offsetof(struct pt_regs, msr) + sizeof(long));
895
896 if (!ret)
897 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
898 &target->thread.ckpt_regs.orig_gpr3,
899 PT_ORIG_R3 * sizeof(reg),
900 (PT_MAX_PUT_REG + 1) * sizeof(reg));
901
902 if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
903 ret = user_regset_copyin_ignore(
904 &pos, &count, &kbuf, &ubuf,
905 (PT_MAX_PUT_REG + 1) * sizeof(reg),
906 PT_TRAP * sizeof(reg));
907
908 if (!ret && count > 0) {
909 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
910 PT_TRAP * sizeof(reg),
911 (PT_TRAP + 1) * sizeof(reg));
912 if (!ret)
913 ret = set_user_ckpt_trap(target, reg);
914 }
915
916 if (!ret)
917 ret = user_regset_copyin_ignore(
918 &pos, &count, &kbuf, &ubuf,
919 (PT_TRAP + 1) * sizeof(reg), -1);
920
921 return ret;
922}
Anshuman Khandual19cbcbf2016-07-28 10:57:37 +0800923
924/**
925 * tm_cfpr_active - get active number of registers in CFPR
926 * @target: The target task.
927 * @regset: The user regset structure.
928 *
929 * This function checks for the active number of available
930 * regisers in transaction checkpointed FPR category.
931 */
932static int tm_cfpr_active(struct task_struct *target,
933 const struct user_regset *regset)
934{
935 if (!cpu_has_feature(CPU_FTR_TM))
936 return -ENODEV;
937
938 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
939 return 0;
940
941 return regset->n;
942}
943
944/**
945 * tm_cfpr_get - get CFPR registers
946 * @target: The target task.
947 * @regset: The user regset structure.
948 * @pos: The buffer position.
949 * @count: Number of bytes to copy.
950 * @kbuf: Kernel buffer to copy from.
951 * @ubuf: User buffer to copy into.
952 *
953 * This function gets in transaction checkpointed FPR registers.
954 *
Cyril Bur000ec282016-09-23 16:18:25 +1000955 * When the transaction is active 'ckfp_state' holds the checkpointed
Anshuman Khandual19cbcbf2016-07-28 10:57:37 +0800956 * values for the current transaction to fall back on if it aborts
957 * in between. This function gets those checkpointed FPR registers.
958 * The userspace interface buffer layout is as follows.
959 *
960 * struct data {
961 * u64 fpr[32];
962 * u64 fpscr;
963 *};
964 */
965static int tm_cfpr_get(struct task_struct *target,
966 const struct user_regset *regset,
967 unsigned int pos, unsigned int count,
968 void *kbuf, void __user *ubuf)
969{
970 u64 buf[33];
971 int i;
972
973 if (!cpu_has_feature(CPU_FTR_TM))
974 return -ENODEV;
975
976 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
977 return -ENODATA;
978
Cyril Burdc310662016-09-23 16:18:24 +1000979 flush_tmregs_to_thread(target);
Anshuman Khandual19cbcbf2016-07-28 10:57:37 +0800980 flush_fp_to_thread(target);
981 flush_altivec_to_thread(target);
Anshuman Khandual19cbcbf2016-07-28 10:57:37 +0800982
983 /* copy to local buffer then write that out */
984 for (i = 0; i < 32 ; i++)
Cyril Bur000ec282016-09-23 16:18:25 +1000985 buf[i] = target->thread.TS_CKFPR(i);
986 buf[32] = target->thread.ckfp_state.fpscr;
Anshuman Khandual19cbcbf2016-07-28 10:57:37 +0800987 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
988}
989
990/**
991 * tm_cfpr_set - set CFPR registers
992 * @target: The target task.
993 * @regset: The user regset structure.
994 * @pos: The buffer position.
995 * @count: Number of bytes to copy.
996 * @kbuf: Kernel buffer to copy into.
997 * @ubuf: User buffer to copy from.
998 *
999 * This function sets in transaction checkpointed FPR registers.
1000 *
Cyril Bur000ec282016-09-23 16:18:25 +10001001 * When the transaction is active 'ckfp_state' holds the checkpointed
Anshuman Khandual19cbcbf2016-07-28 10:57:37 +08001002 * FPR register values for the current transaction to fall back on
1003 * if it aborts in between. This function sets these checkpointed
1004 * FPR registers. The userspace interface buffer layout is as follows.
1005 *
1006 * struct data {
1007 * u64 fpr[32];
1008 * u64 fpscr;
1009 *};
1010 */
1011static int tm_cfpr_set(struct task_struct *target,
1012 const struct user_regset *regset,
1013 unsigned int pos, unsigned int count,
1014 const void *kbuf, const void __user *ubuf)
1015{
1016 u64 buf[33];
1017 int i;
1018
1019 if (!cpu_has_feature(CPU_FTR_TM))
1020 return -ENODEV;
1021
1022 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1023 return -ENODATA;
1024
Cyril Burdc310662016-09-23 16:18:24 +10001025 flush_tmregs_to_thread(target);
Anshuman Khandual19cbcbf2016-07-28 10:57:37 +08001026 flush_fp_to_thread(target);
1027 flush_altivec_to_thread(target);
Anshuman Khandual19cbcbf2016-07-28 10:57:37 +08001028
1029 /* copy to local buffer then write that out */
1030 i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
1031 if (i)
1032 return i;
1033 for (i = 0; i < 32 ; i++)
Cyril Bur000ec282016-09-23 16:18:25 +10001034 target->thread.TS_CKFPR(i) = buf[i];
1035 target->thread.ckfp_state.fpscr = buf[32];
Anshuman Khandual19cbcbf2016-07-28 10:57:37 +08001036 return 0;
1037}
Anshuman Khandual8c13f592016-07-28 10:57:38 +08001038
1039/**
1040 * tm_cvmx_active - get active number of registers in CVMX
1041 * @target: The target task.
1042 * @regset: The user regset structure.
1043 *
1044 * This function checks for the active number of available
1045 * regisers in checkpointed VMX category.
1046 */
1047static int tm_cvmx_active(struct task_struct *target,
1048 const struct user_regset *regset)
1049{
1050 if (!cpu_has_feature(CPU_FTR_TM))
1051 return -ENODEV;
1052
1053 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1054 return 0;
1055
1056 return regset->n;
1057}
1058
1059/**
1060 * tm_cvmx_get - get CMVX registers
1061 * @target: The target task.
1062 * @regset: The user regset structure.
1063 * @pos: The buffer position.
1064 * @count: Number of bytes to copy.
1065 * @kbuf: Kernel buffer to copy from.
1066 * @ubuf: User buffer to copy into.
1067 *
1068 * This function gets in transaction checkpointed VMX registers.
1069 *
Cyril Bur000ec282016-09-23 16:18:25 +10001070 * When the transaction is active 'ckvr_state' and 'ckvrsave' hold
Anshuman Khandual8c13f592016-07-28 10:57:38 +08001071 * the checkpointed values for the current transaction to fall
1072 * back on if it aborts in between. The userspace interface buffer
1073 * layout is as follows.
1074 *
1075 * struct data {
1076 * vector128 vr[32];
1077 * vector128 vscr;
1078 * vector128 vrsave;
1079 *};
1080 */
1081static int tm_cvmx_get(struct task_struct *target,
1082 const struct user_regset *regset,
1083 unsigned int pos, unsigned int count,
1084 void *kbuf, void __user *ubuf)
1085{
1086 int ret;
1087
1088 BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));
1089
1090 if (!cpu_has_feature(CPU_FTR_TM))
1091 return -ENODEV;
1092
1093 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1094 return -ENODATA;
1095
1096 /* Flush the state */
Cyril Burdc310662016-09-23 16:18:24 +10001097 flush_tmregs_to_thread(target);
Anshuman Khandual8c13f592016-07-28 10:57:38 +08001098 flush_fp_to_thread(target);
1099 flush_altivec_to_thread(target);
Anshuman Khandual8c13f592016-07-28 10:57:38 +08001100
1101 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
Cyril Bur000ec282016-09-23 16:18:25 +10001102 &target->thread.ckvr_state, 0,
Anshuman Khandual8c13f592016-07-28 10:57:38 +08001103 33 * sizeof(vector128));
1104 if (!ret) {
1105 /*
1106 * Copy out only the low-order word of vrsave.
1107 */
1108 union {
1109 elf_vrreg_t reg;
1110 u32 word;
1111 } vrsave;
1112 memset(&vrsave, 0, sizeof(vrsave));
Cyril Bur000ec282016-09-23 16:18:25 +10001113 vrsave.word = target->thread.ckvrsave;
Anshuman Khandual8c13f592016-07-28 10:57:38 +08001114 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
1115 33 * sizeof(vector128), -1);
1116 }
1117
1118 return ret;
1119}
1120
1121/**
1122 * tm_cvmx_set - set CMVX registers
1123 * @target: The target task.
1124 * @regset: The user regset structure.
1125 * @pos: The buffer position.
1126 * @count: Number of bytes to copy.
1127 * @kbuf: Kernel buffer to copy into.
1128 * @ubuf: User buffer to copy from.
1129 *
1130 * This function sets in transaction checkpointed VMX registers.
1131 *
Cyril Bur000ec282016-09-23 16:18:25 +10001132 * When the transaction is active 'ckvr_state' and 'ckvrsave' hold
Anshuman Khandual8c13f592016-07-28 10:57:38 +08001133 * the checkpointed values for the current transaction to fall
1134 * back on if it aborts in between. The userspace interface buffer
1135 * layout is as follows.
1136 *
1137 * struct data {
1138 * vector128 vr[32];
1139 * vector128 vscr;
1140 * vector128 vrsave;
1141 *};
1142 */
1143static int tm_cvmx_set(struct task_struct *target,
1144 const struct user_regset *regset,
1145 unsigned int pos, unsigned int count,
1146 const void *kbuf, const void __user *ubuf)
1147{
1148 int ret;
1149
1150 BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));
1151
1152 if (!cpu_has_feature(CPU_FTR_TM))
1153 return -ENODEV;
1154
1155 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1156 return -ENODATA;
1157
Cyril Burdc310662016-09-23 16:18:24 +10001158 flush_tmregs_to_thread(target);
Anshuman Khandual8c13f592016-07-28 10:57:38 +08001159 flush_fp_to_thread(target);
1160 flush_altivec_to_thread(target);
Anshuman Khandual8c13f592016-07-28 10:57:38 +08001161
1162 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
Cyril Bur000ec282016-09-23 16:18:25 +10001163 &target->thread.ckvr_state, 0,
Anshuman Khandual8c13f592016-07-28 10:57:38 +08001164 33 * sizeof(vector128));
1165 if (!ret && count > 0) {
1166 /*
1167 * We use only the low-order word of vrsave.
1168 */
1169 union {
1170 elf_vrreg_t reg;
1171 u32 word;
1172 } vrsave;
1173 memset(&vrsave, 0, sizeof(vrsave));
Cyril Bur000ec282016-09-23 16:18:25 +10001174 vrsave.word = target->thread.ckvrsave;
Anshuman Khandual8c13f592016-07-28 10:57:38 +08001175 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
1176 33 * sizeof(vector128), -1);
1177 if (!ret)
Cyril Bur000ec282016-09-23 16:18:25 +10001178 target->thread.ckvrsave = vrsave.word;
Anshuman Khandual8c13f592016-07-28 10:57:38 +08001179 }
1180
1181 return ret;
1182}
Anshuman Khandual9d3918f2016-07-28 10:57:39 +08001183
1184/**
1185 * tm_cvsx_active - get active number of registers in CVSX
1186 * @target: The target task.
1187 * @regset: The user regset structure.
1188 *
1189 * This function checks for the active number of available
1190 * regisers in transaction checkpointed VSX category.
1191 */
1192static int tm_cvsx_active(struct task_struct *target,
1193 const struct user_regset *regset)
1194{
1195 if (!cpu_has_feature(CPU_FTR_TM))
1196 return -ENODEV;
1197
1198 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1199 return 0;
1200
1201 flush_vsx_to_thread(target);
1202 return target->thread.used_vsr ? regset->n : 0;
1203}
1204
1205/**
1206 * tm_cvsx_get - get CVSX registers
1207 * @target: The target task.
1208 * @regset: The user regset structure.
1209 * @pos: The buffer position.
1210 * @count: Number of bytes to copy.
1211 * @kbuf: Kernel buffer to copy from.
1212 * @ubuf: User buffer to copy into.
1213 *
1214 * This function gets in transaction checkpointed VSX registers.
1215 *
Cyril Bur000ec282016-09-23 16:18:25 +10001216 * When the transaction is active 'ckfp_state' holds the checkpointed
Anshuman Khandual9d3918f2016-07-28 10:57:39 +08001217 * values for the current transaction to fall back on if it aborts
1218 * in between. This function gets those checkpointed VSX registers.
1219 * The userspace interface buffer layout is as follows.
1220 *
1221 * struct data {
1222 * u64 vsx[32];
1223 *};
1224 */
1225static int tm_cvsx_get(struct task_struct *target,
1226 const struct user_regset *regset,
1227 unsigned int pos, unsigned int count,
1228 void *kbuf, void __user *ubuf)
1229{
1230 u64 buf[32];
1231 int ret, i;
1232
1233 if (!cpu_has_feature(CPU_FTR_TM))
1234 return -ENODEV;
1235
1236 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1237 return -ENODATA;
1238
1239 /* Flush the state */
Cyril Burdc310662016-09-23 16:18:24 +10001240 flush_tmregs_to_thread(target);
Anshuman Khandual9d3918f2016-07-28 10:57:39 +08001241 flush_fp_to_thread(target);
1242 flush_altivec_to_thread(target);
Anshuman Khandual9d3918f2016-07-28 10:57:39 +08001243 flush_vsx_to_thread(target);
1244
1245 for (i = 0; i < 32 ; i++)
Cyril Bur000ec282016-09-23 16:18:25 +10001246 buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
Anshuman Khandual9d3918f2016-07-28 10:57:39 +08001247 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1248 buf, 0, 32 * sizeof(double));
1249
1250 return ret;
1251}
1252
1253/**
1254 * tm_cvsx_set - set CFPR registers
1255 * @target: The target task.
1256 * @regset: The user regset structure.
1257 * @pos: The buffer position.
1258 * @count: Number of bytes to copy.
1259 * @kbuf: Kernel buffer to copy into.
1260 * @ubuf: User buffer to copy from.
1261 *
1262 * This function sets in transaction checkpointed VSX registers.
1263 *
Cyril Bur000ec282016-09-23 16:18:25 +10001264 * When the transaction is active 'ckfp_state' holds the checkpointed
Anshuman Khandual9d3918f2016-07-28 10:57:39 +08001265 * VSX register values for the current transaction to fall back on
1266 * if it aborts in between. This function sets these checkpointed
1267 * FPR registers. The userspace interface buffer layout is as follows.
1268 *
1269 * struct data {
1270 * u64 vsx[32];
1271 *};
1272 */
1273static int tm_cvsx_set(struct task_struct *target,
1274 const struct user_regset *regset,
1275 unsigned int pos, unsigned int count,
1276 const void *kbuf, const void __user *ubuf)
1277{
1278 u64 buf[32];
1279 int ret, i;
1280
1281 if (!cpu_has_feature(CPU_FTR_TM))
1282 return -ENODEV;
1283
1284 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1285 return -ENODATA;
1286
1287 /* Flush the state */
Cyril Burdc310662016-09-23 16:18:24 +10001288 flush_tmregs_to_thread(target);
Anshuman Khandual9d3918f2016-07-28 10:57:39 +08001289 flush_fp_to_thread(target);
1290 flush_altivec_to_thread(target);
Anshuman Khandual9d3918f2016-07-28 10:57:39 +08001291 flush_vsx_to_thread(target);
1292
1293 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1294 buf, 0, 32 * sizeof(double));
Cyril Burdc310662016-09-23 16:18:24 +10001295 if (!ret)
1296 for (i = 0; i < 32 ; i++)
Cyril Bur000ec282016-09-23 16:18:25 +10001297 target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
Anshuman Khandual9d3918f2016-07-28 10:57:39 +08001298
1299 return ret;
1300}
Anshuman Khandual08e1c012016-07-28 10:57:40 +08001301
1302/**
1303 * tm_spr_active - get active number of registers in TM SPR
1304 * @target: The target task.
1305 * @regset: The user regset structure.
1306 *
1307 * This function checks the active number of available
1308 * regisers in the transactional memory SPR category.
1309 */
1310static int tm_spr_active(struct task_struct *target,
1311 const struct user_regset *regset)
1312{
1313 if (!cpu_has_feature(CPU_FTR_TM))
1314 return -ENODEV;
1315
1316 return regset->n;
1317}
1318
1319/**
1320 * tm_spr_get - get the TM related SPR registers
1321 * @target: The target task.
1322 * @regset: The user regset structure.
1323 * @pos: The buffer position.
1324 * @count: Number of bytes to copy.
1325 * @kbuf: Kernel buffer to copy from.
1326 * @ubuf: User buffer to copy into.
1327 *
1328 * This function gets transactional memory related SPR registers.
1329 * The userspace interface buffer layout is as follows.
1330 *
1331 * struct {
1332 * u64 tm_tfhar;
1333 * u64 tm_texasr;
1334 * u64 tm_tfiar;
1335 * };
1336 */
1337static int tm_spr_get(struct task_struct *target,
1338 const struct user_regset *regset,
1339 unsigned int pos, unsigned int count,
1340 void *kbuf, void __user *ubuf)
1341{
1342 int ret;
1343
1344 /* Build tests */
1345 BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
1346 BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
1347 BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));
1348
1349 if (!cpu_has_feature(CPU_FTR_TM))
1350 return -ENODEV;
1351
1352 /* Flush the states */
Cyril Burdc310662016-09-23 16:18:24 +10001353 flush_tmregs_to_thread(target);
Anshuman Khandual08e1c012016-07-28 10:57:40 +08001354 flush_fp_to_thread(target);
1355 flush_altivec_to_thread(target);
Anshuman Khandual08e1c012016-07-28 10:57:40 +08001356
1357 /* TFHAR register */
1358 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1359 &target->thread.tm_tfhar, 0, sizeof(u64));
1360
1361 /* TEXASR register */
1362 if (!ret)
1363 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1364 &target->thread.tm_texasr, sizeof(u64),
1365 2 * sizeof(u64));
1366
1367 /* TFIAR register */
1368 if (!ret)
1369 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1370 &target->thread.tm_tfiar,
1371 2 * sizeof(u64), 3 * sizeof(u64));
1372 return ret;
1373}
1374
1375/**
1376 * tm_spr_set - set the TM related SPR registers
1377 * @target: The target task.
1378 * @regset: The user regset structure.
1379 * @pos: The buffer position.
1380 * @count: Number of bytes to copy.
1381 * @kbuf: Kernel buffer to copy into.
1382 * @ubuf: User buffer to copy from.
1383 *
1384 * This function sets transactional memory related SPR registers.
1385 * The userspace interface buffer layout is as follows.
1386 *
1387 * struct {
1388 * u64 tm_tfhar;
1389 * u64 tm_texasr;
1390 * u64 tm_tfiar;
1391 * };
1392 */
1393static int tm_spr_set(struct task_struct *target,
1394 const struct user_regset *regset,
1395 unsigned int pos, unsigned int count,
1396 const void *kbuf, const void __user *ubuf)
1397{
1398 int ret;
1399
1400 /* Build tests */
1401 BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
1402 BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
1403 BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));
1404
1405 if (!cpu_has_feature(CPU_FTR_TM))
1406 return -ENODEV;
1407
1408 /* Flush the states */
Cyril Burdc310662016-09-23 16:18:24 +10001409 flush_tmregs_to_thread(target);
Anshuman Khandual08e1c012016-07-28 10:57:40 +08001410 flush_fp_to_thread(target);
1411 flush_altivec_to_thread(target);
Anshuman Khandual08e1c012016-07-28 10:57:40 +08001412
1413 /* TFHAR register */
1414 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1415 &target->thread.tm_tfhar, 0, sizeof(u64));
1416
1417 /* TEXASR register */
1418 if (!ret)
1419 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1420 &target->thread.tm_texasr, sizeof(u64),
1421 2 * sizeof(u64));
1422
1423 /* TFIAR register */
1424 if (!ret)
1425 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1426 &target->thread.tm_tfiar,
1427 2 * sizeof(u64), 3 * sizeof(u64));
1428 return ret;
1429}
Anshuman Khandualc45dc902016-07-28 10:57:41 +08001430
1431static int tm_tar_active(struct task_struct *target,
1432 const struct user_regset *regset)
1433{
1434 if (!cpu_has_feature(CPU_FTR_TM))
1435 return -ENODEV;
1436
1437 if (MSR_TM_ACTIVE(target->thread.regs->msr))
1438 return regset->n;
1439
1440 return 0;
1441}
1442
1443static int tm_tar_get(struct task_struct *target,
1444 const struct user_regset *regset,
1445 unsigned int pos, unsigned int count,
1446 void *kbuf, void __user *ubuf)
1447{
1448 int ret;
1449
1450 if (!cpu_has_feature(CPU_FTR_TM))
1451 return -ENODEV;
1452
1453 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1454 return -ENODATA;
1455
1456 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1457 &target->thread.tm_tar, 0, sizeof(u64));
1458 return ret;
1459}
1460
1461static int tm_tar_set(struct task_struct *target,
1462 const struct user_regset *regset,
1463 unsigned int pos, unsigned int count,
1464 const void *kbuf, const void __user *ubuf)
1465{
1466 int ret;
1467
1468 if (!cpu_has_feature(CPU_FTR_TM))
1469 return -ENODEV;
1470
1471 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1472 return -ENODATA;
1473
1474 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1475 &target->thread.tm_tar, 0, sizeof(u64));
1476 return ret;
1477}
1478
1479static int tm_ppr_active(struct task_struct *target,
1480 const struct user_regset *regset)
1481{
1482 if (!cpu_has_feature(CPU_FTR_TM))
1483 return -ENODEV;
1484
1485 if (MSR_TM_ACTIVE(target->thread.regs->msr))
1486 return regset->n;
1487
1488 return 0;
1489}
1490
1491
1492static int tm_ppr_get(struct task_struct *target,
1493 const struct user_regset *regset,
1494 unsigned int pos, unsigned int count,
1495 void *kbuf, void __user *ubuf)
1496{
1497 int ret;
1498
1499 if (!cpu_has_feature(CPU_FTR_TM))
1500 return -ENODEV;
1501
1502 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1503 return -ENODATA;
1504
1505 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1506 &target->thread.tm_ppr, 0, sizeof(u64));
1507 return ret;
1508}
1509
1510static int tm_ppr_set(struct task_struct *target,
1511 const struct user_regset *regset,
1512 unsigned int pos, unsigned int count,
1513 const void *kbuf, const void __user *ubuf)
1514{
1515 int ret;
1516
1517 if (!cpu_has_feature(CPU_FTR_TM))
1518 return -ENODEV;
1519
1520 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1521 return -ENODATA;
1522
1523 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1524 &target->thread.tm_ppr, 0, sizeof(u64));
1525 return ret;
1526}
1527
1528static int tm_dscr_active(struct task_struct *target,
1529 const struct user_regset *regset)
1530{
1531 if (!cpu_has_feature(CPU_FTR_TM))
1532 return -ENODEV;
1533
1534 if (MSR_TM_ACTIVE(target->thread.regs->msr))
1535 return regset->n;
1536
1537 return 0;
1538}
1539
1540static int tm_dscr_get(struct task_struct *target,
1541 const struct user_regset *regset,
1542 unsigned int pos, unsigned int count,
1543 void *kbuf, void __user *ubuf)
1544{
1545 int ret;
1546
1547 if (!cpu_has_feature(CPU_FTR_TM))
1548 return -ENODEV;
1549
1550 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1551 return -ENODATA;
1552
1553 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1554 &target->thread.tm_dscr, 0, sizeof(u64));
1555 return ret;
1556}
1557
1558static int tm_dscr_set(struct task_struct *target,
1559 const struct user_regset *regset,
1560 unsigned int pos, unsigned int count,
1561 const void *kbuf, const void __user *ubuf)
1562{
1563 int ret;
1564
1565 if (!cpu_has_feature(CPU_FTR_TM))
1566 return -ENODEV;
1567
1568 if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1569 return -ENODATA;
1570
1571 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1572 &target->thread.tm_dscr, 0, sizeof(u64));
1573 return ret;
1574}
Anshuman Khandual08e1c012016-07-28 10:57:40 +08001575#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
Benjamin Herrenschmidt865418d2007-06-04 15:15:44 +10001576
Anshuman Khandualfa439812016-07-28 10:57:42 +08001577#ifdef CONFIG_PPC64
1578static int ppr_get(struct task_struct *target,
1579 const struct user_regset *regset,
1580 unsigned int pos, unsigned int count,
1581 void *kbuf, void __user *ubuf)
1582{
1583 int ret;
1584
1585 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1586 &target->thread.ppr, 0, sizeof(u64));
1587 return ret;
1588}
1589
1590static int ppr_set(struct task_struct *target,
1591 const struct user_regset *regset,
1592 unsigned int pos, unsigned int count,
1593 const void *kbuf, const void __user *ubuf)
1594{
1595 int ret;
1596
1597 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1598 &target->thread.ppr, 0, sizeof(u64));
1599 return ret;
1600}
1601
1602static int dscr_get(struct task_struct *target,
1603 const struct user_regset *regset,
1604 unsigned int pos, unsigned int count,
1605 void *kbuf, void __user *ubuf)
1606{
1607 int ret;
1608
1609 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1610 &target->thread.dscr, 0, sizeof(u64));
1611 return ret;
1612}
1613static int dscr_set(struct task_struct *target,
1614 const struct user_regset *regset,
1615 unsigned int pos, unsigned int count,
1616 const void *kbuf, const void __user *ubuf)
1617{
1618 int ret;
1619
1620 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1621 &target->thread.dscr, 0, sizeof(u64));
1622 return ret;
1623}
1624#endif
1625#ifdef CONFIG_PPC_BOOK3S_64
1626static int tar_get(struct task_struct *target,
1627 const struct user_regset *regset,
1628 unsigned int pos, unsigned int count,
1629 void *kbuf, void __user *ubuf)
1630{
1631 int ret;
1632
1633 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1634 &target->thread.tar, 0, sizeof(u64));
1635 return ret;
1636}
1637static int tar_set(struct task_struct *target,
1638 const struct user_regset *regset,
1639 unsigned int pos, unsigned int count,
1640 const void *kbuf, const void __user *ubuf)
1641{
1642 int ret;
1643
1644 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1645 &target->thread.tar, 0, sizeof(u64));
1646 return ret;
1647}
Anshuman Khandualcf89d4e2016-07-28 10:57:43 +08001648
1649static int ebb_active(struct task_struct *target,
1650 const struct user_regset *regset)
1651{
1652 if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1653 return -ENODEV;
1654
1655 if (target->thread.used_ebb)
1656 return regset->n;
1657
1658 return 0;
1659}
1660
1661static int ebb_get(struct task_struct *target,
1662 const struct user_regset *regset,
1663 unsigned int pos, unsigned int count,
1664 void *kbuf, void __user *ubuf)
1665{
1666 /* Build tests */
1667 BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
1668 BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
1669
1670 if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1671 return -ENODEV;
1672
1673 if (!target->thread.used_ebb)
1674 return -ENODATA;
1675
1676 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1677 &target->thread.ebbrr, 0, 3 * sizeof(unsigned long));
1678}
1679
1680static int ebb_set(struct task_struct *target,
1681 const struct user_regset *regset,
1682 unsigned int pos, unsigned int count,
1683 const void *kbuf, const void __user *ubuf)
1684{
1685 int ret = 0;
1686
1687 /* Build tests */
1688 BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
1689 BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
1690
1691 if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1692 return -ENODEV;
1693
1694 if (target->thread.used_ebb)
1695 return -ENODATA;
1696
1697 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1698 &target->thread.ebbrr, 0, sizeof(unsigned long));
1699
1700 if (!ret)
1701 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1702 &target->thread.ebbhr, sizeof(unsigned long),
1703 2 * sizeof(unsigned long));
1704
1705 if (!ret)
1706 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1707 &target->thread.bescr,
1708 2 * sizeof(unsigned long), 3 * sizeof(unsigned long));
1709
1710 return ret;
1711}
Anshuman Khanduala67ae752016-07-28 10:57:44 +08001712static int pmu_active(struct task_struct *target,
1713 const struct user_regset *regset)
1714{
1715 if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1716 return -ENODEV;
1717
1718 return regset->n;
1719}
1720
1721static int pmu_get(struct task_struct *target,
1722 const struct user_regset *regset,
1723 unsigned int pos, unsigned int count,
1724 void *kbuf, void __user *ubuf)
1725{
1726 /* Build tests */
1727 BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
1728 BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
1729 BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
1730 BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
1731
1732 if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1733 return -ENODEV;
1734
1735 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1736 &target->thread.siar, 0,
1737 5 * sizeof(unsigned long));
1738}
1739
1740static int pmu_set(struct task_struct *target,
1741 const struct user_regset *regset,
1742 unsigned int pos, unsigned int count,
1743 const void *kbuf, const void __user *ubuf)
1744{
1745 int ret = 0;
1746
1747 /* Build tests */
1748 BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
1749 BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
1750 BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
1751 BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
1752
1753 if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1754 return -ENODEV;
1755
1756 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1757 &target->thread.siar, 0,
1758 sizeof(unsigned long));
1759
1760 if (!ret)
1761 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1762 &target->thread.sdar, sizeof(unsigned long),
1763 2 * sizeof(unsigned long));
1764
1765 if (!ret)
1766 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1767 &target->thread.sier, 2 * sizeof(unsigned long),
1768 3 * sizeof(unsigned long));
1769
1770 if (!ret)
1771 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1772 &target->thread.mmcr2, 3 * sizeof(unsigned long),
1773 4 * sizeof(unsigned long));
1774
1775 if (!ret)
1776 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1777 &target->thread.mmcr0, 4 * sizeof(unsigned long),
1778 5 * sizeof(unsigned long));
1779 return ret;
1780}
Anshuman Khandualfa439812016-07-28 10:57:42 +08001781#endif
Roland McGrath80fdf472007-12-20 03:58:00 -08001782/*
1783 * These are our native regset flavors.
1784 */
1785enum powerpc_regset {
1786 REGSET_GPR,
1787 REGSET_FPR,
1788#ifdef CONFIG_ALTIVEC
1789 REGSET_VMX,
1790#endif
Michael Neulingce48b212008-06-25 14:07:18 +10001791#ifdef CONFIG_VSX
1792 REGSET_VSX,
1793#endif
Roland McGrath80fdf472007-12-20 03:58:00 -08001794#ifdef CONFIG_SPE
1795 REGSET_SPE,
1796#endif
Anshuman Khandual25847fb2016-07-28 10:57:36 +08001797#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1798 REGSET_TM_CGPR, /* TM checkpointed GPR registers */
Anshuman Khandual19cbcbf2016-07-28 10:57:37 +08001799 REGSET_TM_CFPR, /* TM checkpointed FPR registers */
Anshuman Khandual8c13f592016-07-28 10:57:38 +08001800 REGSET_TM_CVMX, /* TM checkpointed VMX registers */
Anshuman Khandual9d3918f2016-07-28 10:57:39 +08001801 REGSET_TM_CVSX, /* TM checkpointed VSX registers */
Anshuman Khandual08e1c012016-07-28 10:57:40 +08001802 REGSET_TM_SPR, /* TM specific SPR registers */
Anshuman Khandualc45dc902016-07-28 10:57:41 +08001803 REGSET_TM_CTAR, /* TM checkpointed TAR register */
1804 REGSET_TM_CPPR, /* TM checkpointed PPR register */
1805 REGSET_TM_CDSCR, /* TM checkpointed DSCR register */
Anshuman Khandual25847fb2016-07-28 10:57:36 +08001806#endif
Anshuman Khandualfa439812016-07-28 10:57:42 +08001807#ifdef CONFIG_PPC64
1808 REGSET_PPR, /* PPR register */
1809 REGSET_DSCR, /* DSCR register */
1810#endif
1811#ifdef CONFIG_PPC_BOOK3S_64
1812 REGSET_TAR, /* TAR register */
Anshuman Khandualcf89d4e2016-07-28 10:57:43 +08001813 REGSET_EBB, /* EBB registers */
Anshuman Khanduala67ae752016-07-28 10:57:44 +08001814 REGSET_PMR, /* Performance Monitor Registers */
Anshuman Khandualfa439812016-07-28 10:57:42 +08001815#endif
Roland McGrath80fdf472007-12-20 03:58:00 -08001816};
1817
1818static const struct user_regset native_regsets[] = {
1819 [REGSET_GPR] = {
1820 .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
1821 .size = sizeof(long), .align = sizeof(long),
1822 .get = gpr_get, .set = gpr_set
1823 },
1824 [REGSET_FPR] = {
1825 .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
1826 .size = sizeof(double), .align = sizeof(double),
1827 .get = fpr_get, .set = fpr_set
1828 },
1829#ifdef CONFIG_ALTIVEC
1830 [REGSET_VMX] = {
1831 .core_note_type = NT_PPC_VMX, .n = 34,
1832 .size = sizeof(vector128), .align = sizeof(vector128),
1833 .active = vr_active, .get = vr_get, .set = vr_set
1834 },
1835#endif
Michael Neulingce48b212008-06-25 14:07:18 +10001836#ifdef CONFIG_VSX
1837 [REGSET_VSX] = {
Michael Neulingf3e909c2008-07-01 14:01:39 +10001838 .core_note_type = NT_PPC_VSX, .n = 32,
1839 .size = sizeof(double), .align = sizeof(double),
Michael Neulingce48b212008-06-25 14:07:18 +10001840 .active = vsr_active, .get = vsr_get, .set = vsr_set
1841 },
1842#endif
Roland McGrath80fdf472007-12-20 03:58:00 -08001843#ifdef CONFIG_SPE
1844 [REGSET_SPE] = {
Suzuki Poulosea0b38b42013-08-27 13:22:14 +05301845 .core_note_type = NT_PPC_SPE, .n = 35,
Roland McGrath80fdf472007-12-20 03:58:00 -08001846 .size = sizeof(u32), .align = sizeof(u32),
1847 .active = evr_active, .get = evr_get, .set = evr_set
1848 },
1849#endif
Anshuman Khandual25847fb2016-07-28 10:57:36 +08001850#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1851 [REGSET_TM_CGPR] = {
1852 .core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
1853 .size = sizeof(long), .align = sizeof(long),
1854 .active = tm_cgpr_active, .get = tm_cgpr_get, .set = tm_cgpr_set
1855 },
Anshuman Khandual19cbcbf2016-07-28 10:57:37 +08001856 [REGSET_TM_CFPR] = {
1857 .core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
1858 .size = sizeof(double), .align = sizeof(double),
1859 .active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
1860 },
Anshuman Khandual8c13f592016-07-28 10:57:38 +08001861 [REGSET_TM_CVMX] = {
1862 .core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
1863 .size = sizeof(vector128), .align = sizeof(vector128),
1864 .active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
1865 },
Anshuman Khandual9d3918f2016-07-28 10:57:39 +08001866 [REGSET_TM_CVSX] = {
1867 .core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
1868 .size = sizeof(double), .align = sizeof(double),
1869 .active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
1870 },
Anshuman Khandual08e1c012016-07-28 10:57:40 +08001871 [REGSET_TM_SPR] = {
1872 .core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
1873 .size = sizeof(u64), .align = sizeof(u64),
1874 .active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
1875 },
Anshuman Khandualc45dc902016-07-28 10:57:41 +08001876 [REGSET_TM_CTAR] = {
1877 .core_note_type = NT_PPC_TM_CTAR, .n = 1,
1878 .size = sizeof(u64), .align = sizeof(u64),
1879 .active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
1880 },
1881 [REGSET_TM_CPPR] = {
1882 .core_note_type = NT_PPC_TM_CPPR, .n = 1,
1883 .size = sizeof(u64), .align = sizeof(u64),
1884 .active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
1885 },
1886 [REGSET_TM_CDSCR] = {
1887 .core_note_type = NT_PPC_TM_CDSCR, .n = 1,
1888 .size = sizeof(u64), .align = sizeof(u64),
1889 .active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
1890 },
Anshuman Khandual25847fb2016-07-28 10:57:36 +08001891#endif
Anshuman Khandualfa439812016-07-28 10:57:42 +08001892#ifdef CONFIG_PPC64
1893 [REGSET_PPR] = {
1894 .core_note_type = NT_PPC_PPR, .n = 1,
1895 .size = sizeof(u64), .align = sizeof(u64),
1896 .get = ppr_get, .set = ppr_set
1897 },
1898 [REGSET_DSCR] = {
1899 .core_note_type = NT_PPC_DSCR, .n = 1,
1900 .size = sizeof(u64), .align = sizeof(u64),
1901 .get = dscr_get, .set = dscr_set
1902 },
1903#endif
1904#ifdef CONFIG_PPC_BOOK3S_64
1905 [REGSET_TAR] = {
1906 .core_note_type = NT_PPC_TAR, .n = 1,
1907 .size = sizeof(u64), .align = sizeof(u64),
1908 .get = tar_get, .set = tar_set
1909 },
Anshuman Khandualcf89d4e2016-07-28 10:57:43 +08001910 [REGSET_EBB] = {
1911 .core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
1912 .size = sizeof(u64), .align = sizeof(u64),
1913 .active = ebb_active, .get = ebb_get, .set = ebb_set
1914 },
Anshuman Khanduala67ae752016-07-28 10:57:44 +08001915 [REGSET_PMR] = {
1916 .core_note_type = NT_PPC_PMU, .n = ELF_NPMU,
1917 .size = sizeof(u64), .align = sizeof(u64),
1918 .active = pmu_active, .get = pmu_get, .set = pmu_set
1919 },
Anshuman Khandualfa439812016-07-28 10:57:42 +08001920#endif
Roland McGrath80fdf472007-12-20 03:58:00 -08001921};
1922
1923static const struct user_regset_view user_ppc_native_view = {
1924 .name = UTS_MACHINE, .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
1925 .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
1926};
1927
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08001928#ifdef CONFIG_PPC64
1929#include <linux/compat.h>
1930
Anshuman Khandual04fcadc2016-07-28 10:57:35 +08001931static int gpr32_get_common(struct task_struct *target,
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08001932 const struct user_regset *regset,
1933 unsigned int pos, unsigned int count,
Simon Guo26183112016-09-11 21:44:13 +08001934 void *kbuf, void __user *ubuf,
1935 unsigned long *regs)
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08001936{
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08001937 compat_ulong_t *k = kbuf;
1938 compat_ulong_t __user *u = ubuf;
1939 compat_ulong_t reg;
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08001940
1941 pos /= sizeof(reg);
1942 count /= sizeof(reg);
1943
1944 if (kbuf)
1945 for (; count > 0 && pos < PT_MSR; --count)
1946 *k++ = regs[pos++];
1947 else
1948 for (; count > 0 && pos < PT_MSR; --count)
1949 if (__put_user((compat_ulong_t) regs[pos++], u++))
1950 return -EFAULT;
1951
1952 if (count > 0 && pos == PT_MSR) {
1953 reg = get_user_msr(target);
1954 if (kbuf)
1955 *k++ = reg;
1956 else if (__put_user(reg, u++))
1957 return -EFAULT;
1958 ++pos;
1959 --count;
1960 }
1961
1962 if (kbuf)
1963 for (; count > 0 && pos < PT_REGS_COUNT; --count)
1964 *k++ = regs[pos++];
1965 else
1966 for (; count > 0 && pos < PT_REGS_COUNT; --count)
1967 if (__put_user((compat_ulong_t) regs[pos++], u++))
1968 return -EFAULT;
1969
1970 kbuf = k;
1971 ubuf = u;
1972 pos *= sizeof(reg);
1973 count *= sizeof(reg);
1974 return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
1975 PT_REGS_COUNT * sizeof(reg), -1);
1976}
1977
Anshuman Khandual04fcadc2016-07-28 10:57:35 +08001978static int gpr32_set_common(struct task_struct *target,
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08001979 const struct user_regset *regset,
1980 unsigned int pos, unsigned int count,
Simon Guo26183112016-09-11 21:44:13 +08001981 const void *kbuf, const void __user *ubuf,
1982 unsigned long *regs)
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08001983{
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08001984 const compat_ulong_t *k = kbuf;
1985 const compat_ulong_t __user *u = ubuf;
1986 compat_ulong_t reg;
1987
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08001988 pos /= sizeof(reg);
1989 count /= sizeof(reg);
1990
1991 if (kbuf)
1992 for (; count > 0 && pos < PT_MSR; --count)
1993 regs[pos++] = *k++;
1994 else
1995 for (; count > 0 && pos < PT_MSR; --count) {
1996 if (__get_user(reg, u++))
1997 return -EFAULT;
1998 regs[pos++] = reg;
1999 }
2000
2001
2002 if (count > 0 && pos == PT_MSR) {
2003 if (kbuf)
2004 reg = *k++;
2005 else if (__get_user(reg, u++))
2006 return -EFAULT;
2007 set_user_msr(target, reg);
2008 ++pos;
2009 --count;
2010 }
2011
Roland McGrathc2372eb2008-03-13 19:25:35 +11002012 if (kbuf) {
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08002013 for (; count > 0 && pos <= PT_MAX_PUT_REG; --count)
2014 regs[pos++] = *k++;
Roland McGrathc2372eb2008-03-13 19:25:35 +11002015 for (; count > 0 && pos < PT_TRAP; --count, ++pos)
2016 ++k;
2017 } else {
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08002018 for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) {
2019 if (__get_user(reg, u++))
2020 return -EFAULT;
2021 regs[pos++] = reg;
2022 }
Roland McGrathc2372eb2008-03-13 19:25:35 +11002023 for (; count > 0 && pos < PT_TRAP; --count, ++pos)
2024 if (__get_user(reg, u++))
2025 return -EFAULT;
2026 }
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08002027
2028 if (count > 0 && pos == PT_TRAP) {
2029 if (kbuf)
2030 reg = *k++;
2031 else if (__get_user(reg, u++))
2032 return -EFAULT;
2033 set_user_trap(target, reg);
2034 ++pos;
2035 --count;
2036 }
2037
2038 kbuf = k;
2039 ubuf = u;
2040 pos *= sizeof(reg);
2041 count *= sizeof(reg);
2042 return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
2043 (PT_TRAP + 1) * sizeof(reg), -1);
2044}
2045
Anshuman Khandual25847fb2016-07-28 10:57:36 +08002046#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2047static int tm_cgpr32_get(struct task_struct *target,
2048 const struct user_regset *regset,
2049 unsigned int pos, unsigned int count,
2050 void *kbuf, void __user *ubuf)
2051{
Simon Guo26183112016-09-11 21:44:13 +08002052 return gpr32_get_common(target, regset, pos, count, kbuf, ubuf,
2053 &target->thread.ckpt_regs.gpr[0]);
Anshuman Khandual25847fb2016-07-28 10:57:36 +08002054}
2055
2056static int tm_cgpr32_set(struct task_struct *target,
2057 const struct user_regset *regset,
2058 unsigned int pos, unsigned int count,
2059 const void *kbuf, const void __user *ubuf)
2060{
Simon Guo26183112016-09-11 21:44:13 +08002061 return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
2062 &target->thread.ckpt_regs.gpr[0]);
Anshuman Khandual25847fb2016-07-28 10:57:36 +08002063}
2064#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
2065
Anshuman Khandual04fcadc2016-07-28 10:57:35 +08002066static int gpr32_get(struct task_struct *target,
2067 const struct user_regset *regset,
2068 unsigned int pos, unsigned int count,
2069 void *kbuf, void __user *ubuf)
2070{
Simon Guo26183112016-09-11 21:44:13 +08002071 int i;
2072
2073 if (target->thread.regs == NULL)
2074 return -EIO;
2075
2076 if (!FULL_REGS(target->thread.regs)) {
2077 /*
2078 * We have a partial register set.
2079 * Fill 14-31 with bogus values.
2080 */
2081 for (i = 14; i < 32; i++)
2082 target->thread.regs->gpr[i] = NV_REG_POISON;
2083 }
2084 return gpr32_get_common(target, regset, pos, count, kbuf, ubuf,
2085 &target->thread.regs->gpr[0]);
Anshuman Khandual04fcadc2016-07-28 10:57:35 +08002086}
2087
2088static int gpr32_set(struct task_struct *target,
2089 const struct user_regset *regset,
2090 unsigned int pos, unsigned int count,
2091 const void *kbuf, const void __user *ubuf)
2092{
Simon Guo26183112016-09-11 21:44:13 +08002093 if (target->thread.regs == NULL)
2094 return -EIO;
2095
2096 CHECK_FULL_REGS(target->thread.regs);
2097 return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
2098 &target->thread.regs->gpr[0]);
Anshuman Khandual04fcadc2016-07-28 10:57:35 +08002099}
2100
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08002101/*
2102 * These are the regset flavors matching the CONFIG_PPC32 native set.
2103 */
2104static const struct user_regset compat_regsets[] = {
2105 [REGSET_GPR] = {
2106 .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
2107 .size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
2108 .get = gpr32_get, .set = gpr32_set
2109 },
2110 [REGSET_FPR] = {
2111 .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
2112 .size = sizeof(double), .align = sizeof(double),
2113 .get = fpr_get, .set = fpr_set
2114 },
2115#ifdef CONFIG_ALTIVEC
2116 [REGSET_VMX] = {
2117 .core_note_type = NT_PPC_VMX, .n = 34,
2118 .size = sizeof(vector128), .align = sizeof(vector128),
2119 .active = vr_active, .get = vr_get, .set = vr_set
2120 },
2121#endif
2122#ifdef CONFIG_SPE
2123 [REGSET_SPE] = {
Roland McGrath24f1a842008-01-02 17:05:48 -08002124 .core_note_type = NT_PPC_SPE, .n = 35,
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08002125 .size = sizeof(u32), .align = sizeof(u32),
2126 .active = evr_active, .get = evr_get, .set = evr_set
2127 },
2128#endif
Anshuman Khandual25847fb2016-07-28 10:57:36 +08002129#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2130 [REGSET_TM_CGPR] = {
2131 .core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
2132 .size = sizeof(long), .align = sizeof(long),
2133 .active = tm_cgpr_active,
2134 .get = tm_cgpr32_get, .set = tm_cgpr32_set
2135 },
Anshuman Khandual19cbcbf2016-07-28 10:57:37 +08002136 [REGSET_TM_CFPR] = {
2137 .core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
2138 .size = sizeof(double), .align = sizeof(double),
2139 .active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
2140 },
Anshuman Khandual8c13f592016-07-28 10:57:38 +08002141 [REGSET_TM_CVMX] = {
2142 .core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
2143 .size = sizeof(vector128), .align = sizeof(vector128),
2144 .active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
2145 },
Anshuman Khandual9d3918f2016-07-28 10:57:39 +08002146 [REGSET_TM_CVSX] = {
2147 .core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
2148 .size = sizeof(double), .align = sizeof(double),
2149 .active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
2150 },
Anshuman Khandual08e1c012016-07-28 10:57:40 +08002151 [REGSET_TM_SPR] = {
2152 .core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
2153 .size = sizeof(u64), .align = sizeof(u64),
2154 .active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
2155 },
Anshuman Khandualc45dc902016-07-28 10:57:41 +08002156 [REGSET_TM_CTAR] = {
2157 .core_note_type = NT_PPC_TM_CTAR, .n = 1,
2158 .size = sizeof(u64), .align = sizeof(u64),
2159 .active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
2160 },
2161 [REGSET_TM_CPPR] = {
2162 .core_note_type = NT_PPC_TM_CPPR, .n = 1,
2163 .size = sizeof(u64), .align = sizeof(u64),
2164 .active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
2165 },
2166 [REGSET_TM_CDSCR] = {
2167 .core_note_type = NT_PPC_TM_CDSCR, .n = 1,
2168 .size = sizeof(u64), .align = sizeof(u64),
2169 .active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
2170 },
Anshuman Khandual25847fb2016-07-28 10:57:36 +08002171#endif
Anshuman Khandualfa439812016-07-28 10:57:42 +08002172#ifdef CONFIG_PPC64
2173 [REGSET_PPR] = {
2174 .core_note_type = NT_PPC_PPR, .n = 1,
2175 .size = sizeof(u64), .align = sizeof(u64),
2176 .get = ppr_get, .set = ppr_set
2177 },
2178 [REGSET_DSCR] = {
2179 .core_note_type = NT_PPC_DSCR, .n = 1,
2180 .size = sizeof(u64), .align = sizeof(u64),
2181 .get = dscr_get, .set = dscr_set
2182 },
2183#endif
2184#ifdef CONFIG_PPC_BOOK3S_64
2185 [REGSET_TAR] = {
2186 .core_note_type = NT_PPC_TAR, .n = 1,
2187 .size = sizeof(u64), .align = sizeof(u64),
2188 .get = tar_get, .set = tar_set
2189 },
Anshuman Khandualcf89d4e2016-07-28 10:57:43 +08002190 [REGSET_EBB] = {
2191 .core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
2192 .size = sizeof(u64), .align = sizeof(u64),
2193 .active = ebb_active, .get = ebb_get, .set = ebb_set
2194 },
Anshuman Khandualfa439812016-07-28 10:57:42 +08002195#endif
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08002196};
2197
2198static const struct user_regset_view user_ppc_compat_view = {
2199 .name = "ppc", .e_machine = EM_PPC, .ei_osabi = ELF_OSABI,
2200 .regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
2201};
2202#endif /* CONFIG_PPC64 */
2203
Roland McGrath80fdf472007-12-20 03:58:00 -08002204const struct user_regset_view *task_user_regset_view(struct task_struct *task)
2205{
Roland McGrathfa8f5cb2007-12-20 03:58:08 -08002206#ifdef CONFIG_PPC64
2207 if (test_tsk_thread_flag(task, TIF_32BIT))
2208 return &user_ppc_compat_view;
2209#endif
Roland McGrath80fdf472007-12-20 03:58:00 -08002210 return &user_ppc_native_view;
2211}
2212
2213
Roland McGrath2a84b0d2008-01-30 13:30:51 +01002214void user_enable_single_step(struct task_struct *task)
Benjamin Herrenschmidt865418d2007-06-04 15:15:44 +10002215{
2216 struct pt_regs *regs = task->thread.regs;
2217
2218 if (regs != NULL) {
Dave Kleikamp172ae2e2010-02-08 11:50:57 +00002219#ifdef CONFIG_PPC_ADV_DEBUG_REGS
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302220 task->thread.debug.dbcr0 &= ~DBCR0_BT;
2221 task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
Benjamin Herrenschmidt865418d2007-06-04 15:15:44 +10002222 regs->msr |= MSR_DE;
2223#else
Roland McGrathec097c82009-05-28 21:26:38 +00002224 regs->msr &= ~MSR_BE;
Benjamin Herrenschmidt865418d2007-06-04 15:15:44 +10002225 regs->msr |= MSR_SE;
2226#endif
2227 }
2228 set_tsk_thread_flag(task, TIF_SINGLESTEP);
2229}
2230
Roland McGrathec097c82009-05-28 21:26:38 +00002231void user_enable_block_step(struct task_struct *task)
2232{
2233 struct pt_regs *regs = task->thread.regs;
2234
2235 if (regs != NULL) {
Dave Kleikamp172ae2e2010-02-08 11:50:57 +00002236#ifdef CONFIG_PPC_ADV_DEBUG_REGS
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302237 task->thread.debug.dbcr0 &= ~DBCR0_IC;
2238 task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT;
Roland McGrathec097c82009-05-28 21:26:38 +00002239 regs->msr |= MSR_DE;
2240#else
2241 regs->msr &= ~MSR_SE;
2242 regs->msr |= MSR_BE;
2243#endif
2244 }
2245 set_tsk_thread_flag(task, TIF_SINGLESTEP);
2246}
2247
Roland McGrath2a84b0d2008-01-30 13:30:51 +01002248void user_disable_single_step(struct task_struct *task)
Benjamin Herrenschmidt865418d2007-06-04 15:15:44 +10002249{
2250 struct pt_regs *regs = task->thread.regs;
2251
2252 if (regs != NULL) {
Dave Kleikamp172ae2e2010-02-08 11:50:57 +00002253#ifdef CONFIG_PPC_ADV_DEBUG_REGS
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002254 /*
2255 * The logic to disable single stepping should be as
2256 * simple as turning off the Instruction Complete flag.
2257 * And, after doing so, if all debug flags are off, turn
2258 * off DBCR0(IDM) and MSR(DE) .... Torez
2259 */
James Yang682775b2013-07-05 14:49:43 -05002260 task->thread.debug.dbcr0 &= ~(DBCR0_IC|DBCR0_BT);
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002261 /*
2262 * Test to see if any of the DBCR_ACTIVE_EVENTS bits are set.
2263 */
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302264 if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
2265 task->thread.debug.dbcr1)) {
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002266 /*
2267 * All debug events were off.....
2268 */
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302269 task->thread.debug.dbcr0 &= ~DBCR0_IDM;
Dave Kleikamp28477fb2009-07-08 13:46:18 +00002270 regs->msr &= ~MSR_DE;
2271 }
Benjamin Herrenschmidt865418d2007-06-04 15:15:44 +10002272#else
Roland McGrathec097c82009-05-28 21:26:38 +00002273 regs->msr &= ~(MSR_SE | MSR_BE);
Benjamin Herrenschmidt865418d2007-06-04 15:15:44 +10002274#endif
2275 }
2276 clear_tsk_thread_flag(task, TIF_SINGLESTEP);
2277}
2278
K.Prasad5aae8a52010-06-15 11:35:19 +05302279#ifdef CONFIG_HAVE_HW_BREAKPOINT
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02002280void ptrace_triggered(struct perf_event *bp,
K.Prasad5aae8a52010-06-15 11:35:19 +05302281 struct perf_sample_data *data, struct pt_regs *regs)
2282{
2283 struct perf_event_attr attr;
2284
2285 /*
2286 * Disable the breakpoint request here since ptrace has defined a
2287 * one-shot behaviour for breakpoint exceptions in PPC64.
2288 * The SIGTRAP signal is generated automatically for us in do_dabr().
2289 * We don't have to do anything about that here
2290 */
2291 attr = bp->attr;
2292 attr.disabled = true;
2293 modify_user_hw_breakpoint(bp, &attr);
2294}
2295#endif /* CONFIG_HAVE_HW_BREAKPOINT */
2296
Anton Blancharde51df2c2014-08-20 08:55:18 +10002297static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
Benjamin Herrenschmidtabd06502007-06-04 15:15:47 +10002298 unsigned long data)
2299{
K.Prasad5aae8a52010-06-15 11:35:19 +05302300#ifdef CONFIG_HAVE_HW_BREAKPOINT
2301 int ret;
2302 struct thread_struct *thread = &(task->thread);
2303 struct perf_event *bp;
2304 struct perf_event_attr attr;
2305#endif /* CONFIG_HAVE_HW_BREAKPOINT */
Michael Neuling9422de32012-12-20 14:06:44 +00002306#ifndef CONFIG_PPC_ADV_DEBUG_REGS
2307 struct arch_hw_breakpoint hw_brk;
2308#endif
K.Prasad5aae8a52010-06-15 11:35:19 +05302309
Luis Machadod6a61bf2008-07-24 02:10:41 +10002310 /* For ppc64 we support one DABR and no IABR's at the moment (ppc64).
2311 * For embedded processors we support one DAC and no IAC's at the
2312 * moment.
2313 */
Benjamin Herrenschmidtabd06502007-06-04 15:15:47 +10002314 if (addr > 0)
2315 return -EINVAL;
2316
Kumar Gala2325f0a2008-07-26 05:27:33 +10002317 /* The bottom 3 bits in dabr are flags */
Benjamin Herrenschmidtabd06502007-06-04 15:15:47 +10002318 if ((data & ~0x7UL) >= TASK_SIZE)
2319 return -EIO;
2320
Dave Kleikamp172ae2e2010-02-08 11:50:57 +00002321#ifndef CONFIG_PPC_ADV_DEBUG_REGS
Luis Machadod6a61bf2008-07-24 02:10:41 +10002322 /* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
2323 * It was assumed, on previous implementations, that 3 bits were
2324 * passed together with the data address, fitting the design of the
2325 * DABR register, as follows:
2326 *
2327 * bit 0: Read flag
2328 * bit 1: Write flag
2329 * bit 2: Breakpoint translation
2330 *
2331 * Thus, we use them here as so.
2332 */
2333
2334 /* Ensure breakpoint translation bit is set */
Michael Neuling9422de32012-12-20 14:06:44 +00002335 if (data && !(data & HW_BRK_TYPE_TRANSLATE))
Benjamin Herrenschmidtabd06502007-06-04 15:15:47 +10002336 return -EIO;
Michael Neuling9422de32012-12-20 14:06:44 +00002337 hw_brk.address = data & (~HW_BRK_TYPE_DABR);
2338 hw_brk.type = (data & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL;
2339 hw_brk.len = 8;
K.Prasad5aae8a52010-06-15 11:35:19 +05302340#ifdef CONFIG_HAVE_HW_BREAKPOINT
2341 bp = thread->ptrace_bps[0];
Michael Neuling9422de32012-12-20 14:06:44 +00002342 if ((!data) || !(hw_brk.type & HW_BRK_TYPE_RDWR)) {
K.Prasad5aae8a52010-06-15 11:35:19 +05302343 if (bp) {
2344 unregister_hw_breakpoint(bp);
2345 thread->ptrace_bps[0] = NULL;
2346 }
2347 return 0;
2348 }
2349 if (bp) {
2350 attr = bp->attr;
Michael Neuling9422de32012-12-20 14:06:44 +00002351 attr.bp_addr = hw_brk.address;
2352 arch_bp_generic_fields(hw_brk.type, &attr.bp_type);
Aravinda Prasada53fd612012-11-04 22:15:28 +00002353
2354 /* Enable breakpoint */
2355 attr.disabled = false;
2356
K.Prasad5aae8a52010-06-15 11:35:19 +05302357 ret = modify_user_hw_breakpoint(bp, &attr);
Frederic Weisbecker925f83c2011-05-06 01:53:18 +02002358 if (ret) {
K.Prasad5aae8a52010-06-15 11:35:19 +05302359 return ret;
Frederic Weisbecker925f83c2011-05-06 01:53:18 +02002360 }
K.Prasad5aae8a52010-06-15 11:35:19 +05302361 thread->ptrace_bps[0] = bp;
Michael Neuling9422de32012-12-20 14:06:44 +00002362 thread->hw_brk = hw_brk;
K.Prasad5aae8a52010-06-15 11:35:19 +05302363 return 0;
2364 }
2365
2366 /* Create a new breakpoint request if one doesn't exist already */
2367 hw_breakpoint_init(&attr);
Michael Neuling9422de32012-12-20 14:06:44 +00002368 attr.bp_addr = hw_brk.address;
2369 arch_bp_generic_fields(hw_brk.type,
2370 &attr.bp_type);
K.Prasad5aae8a52010-06-15 11:35:19 +05302371
2372 thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
Avi Kivity4dc0da82011-06-29 18:42:35 +03002373 ptrace_triggered, NULL, task);
K.Prasad5aae8a52010-06-15 11:35:19 +05302374 if (IS_ERR(bp)) {
2375 thread->ptrace_bps[0] = NULL;
2376 return PTR_ERR(bp);
2377 }
2378
2379#endif /* CONFIG_HAVE_HW_BREAKPOINT */
Michael Neuling9422de32012-12-20 14:06:44 +00002380 task->thread.hw_brk = hw_brk;
Dave Kleikamp172ae2e2010-02-08 11:50:57 +00002381#else /* CONFIG_PPC_ADV_DEBUG_REGS */
Luis Machadod6a61bf2008-07-24 02:10:41 +10002382 /* As described above, it was assumed 3 bits were passed with the data
2383 * address, but we will assume only the mode bits will be passed
2384 * as to not cause alignment restrictions for DAC-based processors.
2385 */
2386
2387 /* DAC's hold the whole address without any mode flags */
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302388 task->thread.debug.dac1 = data & ~0x3UL;
Luis Machadod6a61bf2008-07-24 02:10:41 +10002389
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302390 if (task->thread.debug.dac1 == 0) {
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002391 dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302392 if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
2393 task->thread.debug.dbcr1)) {
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002394 task->thread.regs->msr &= ~MSR_DE;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302395 task->thread.debug.dbcr0 &= ~DBCR0_IDM;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002396 }
Luis Machadod6a61bf2008-07-24 02:10:41 +10002397 return 0;
2398 }
2399
2400 /* Read or Write bits must be set */
2401
2402 if (!(data & 0x3UL))
2403 return -EINVAL;
2404
2405 /* Set the Internal Debugging flag (IDM bit 1) for the DBCR0
2406 register */
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302407 task->thread.debug.dbcr0 |= DBCR0_IDM;
Luis Machadod6a61bf2008-07-24 02:10:41 +10002408
2409 /* Check for write and read flags and set DBCR0
2410 accordingly */
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002411 dbcr_dac(task) &= ~(DBCR_DAC1R|DBCR_DAC1W);
Luis Machadod6a61bf2008-07-24 02:10:41 +10002412 if (data & 0x1UL)
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002413 dbcr_dac(task) |= DBCR_DAC1R;
Luis Machadod6a61bf2008-07-24 02:10:41 +10002414 if (data & 0x2UL)
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002415 dbcr_dac(task) |= DBCR_DAC1W;
Luis Machadod6a61bf2008-07-24 02:10:41 +10002416 task->thread.regs->msr |= MSR_DE;
Dave Kleikamp172ae2e2010-02-08 11:50:57 +00002417#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
Benjamin Herrenschmidtabd06502007-06-04 15:15:47 +10002418 return 0;
2419}
Benjamin Herrenschmidtabd06502007-06-04 15:15:47 +10002420
Benjamin Herrenschmidt865418d2007-06-04 15:15:44 +10002421/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002422 * Called by kernel/ptrace.c when detaching..
2423 *
2424 * Make sure single step bits etc are not set.
2425 */
2426void ptrace_disable(struct task_struct *child)
2427{
2428 /* make sure the single step bit is not set. */
Roland McGrath2a84b0d2008-01-30 13:30:51 +01002429 user_disable_single_step(child);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002430}
2431
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002432#ifdef CONFIG_PPC_ADV_DEBUG_REGS
Michael Neuling84295df2012-10-28 15:13:16 +00002433static long set_instruction_bp(struct task_struct *child,
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002434 struct ppc_hw_breakpoint *bp_info)
2435{
2436 int slot;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302437 int slot1_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC1) != 0);
2438 int slot2_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC2) != 0);
2439 int slot3_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC3) != 0);
2440 int slot4_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC4) != 0);
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002441
2442 if (dbcr_iac_range(child) & DBCR_IAC12MODE)
2443 slot2_in_use = 1;
2444 if (dbcr_iac_range(child) & DBCR_IAC34MODE)
2445 slot4_in_use = 1;
2446
2447 if (bp_info->addr >= TASK_SIZE)
2448 return -EIO;
2449
2450 if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {
2451
2452 /* Make sure range is valid. */
2453 if (bp_info->addr2 >= TASK_SIZE)
2454 return -EIO;
2455
2456 /* We need a pair of IAC regsisters */
2457 if ((!slot1_in_use) && (!slot2_in_use)) {
2458 slot = 1;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302459 child->thread.debug.iac1 = bp_info->addr;
2460 child->thread.debug.iac2 = bp_info->addr2;
2461 child->thread.debug.dbcr0 |= DBCR0_IAC1;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002462 if (bp_info->addr_mode ==
2463 PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
2464 dbcr_iac_range(child) |= DBCR_IAC12X;
2465 else
2466 dbcr_iac_range(child) |= DBCR_IAC12I;
2467#if CONFIG_PPC_ADV_DEBUG_IACS > 2
2468 } else if ((!slot3_in_use) && (!slot4_in_use)) {
2469 slot = 3;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302470 child->thread.debug.iac3 = bp_info->addr;
2471 child->thread.debug.iac4 = bp_info->addr2;
2472 child->thread.debug.dbcr0 |= DBCR0_IAC3;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002473 if (bp_info->addr_mode ==
2474 PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
2475 dbcr_iac_range(child) |= DBCR_IAC34X;
2476 else
2477 dbcr_iac_range(child) |= DBCR_IAC34I;
2478#endif
2479 } else
2480 return -ENOSPC;
2481 } else {
2482 /* We only need one. If possible leave a pair free in
2483 * case a range is needed later
2484 */
2485 if (!slot1_in_use) {
2486 /*
2487 * Don't use iac1 if iac1-iac2 are free and either
2488 * iac3 or iac4 (but not both) are free
2489 */
2490 if (slot2_in_use || (slot3_in_use == slot4_in_use)) {
2491 slot = 1;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302492 child->thread.debug.iac1 = bp_info->addr;
2493 child->thread.debug.dbcr0 |= DBCR0_IAC1;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002494 goto out;
2495 }
2496 }
2497 if (!slot2_in_use) {
2498 slot = 2;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302499 child->thread.debug.iac2 = bp_info->addr;
2500 child->thread.debug.dbcr0 |= DBCR0_IAC2;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002501#if CONFIG_PPC_ADV_DEBUG_IACS > 2
2502 } else if (!slot3_in_use) {
2503 slot = 3;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302504 child->thread.debug.iac3 = bp_info->addr;
2505 child->thread.debug.dbcr0 |= DBCR0_IAC3;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002506 } else if (!slot4_in_use) {
2507 slot = 4;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302508 child->thread.debug.iac4 = bp_info->addr;
2509 child->thread.debug.dbcr0 |= DBCR0_IAC4;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002510#endif
2511 } else
2512 return -ENOSPC;
2513 }
2514out:
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302515 child->thread.debug.dbcr0 |= DBCR0_IDM;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002516 child->thread.regs->msr |= MSR_DE;
2517
2518 return slot;
2519}
2520
2521static int del_instruction_bp(struct task_struct *child, int slot)
2522{
2523 switch (slot) {
2524 case 1:
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302525 if ((child->thread.debug.dbcr0 & DBCR0_IAC1) == 0)
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002526 return -ENOENT;
2527
2528 if (dbcr_iac_range(child) & DBCR_IAC12MODE) {
2529 /* address range - clear slots 1 & 2 */
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302530 child->thread.debug.iac2 = 0;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002531 dbcr_iac_range(child) &= ~DBCR_IAC12MODE;
2532 }
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302533 child->thread.debug.iac1 = 0;
2534 child->thread.debug.dbcr0 &= ~DBCR0_IAC1;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002535 break;
2536 case 2:
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302537 if ((child->thread.debug.dbcr0 & DBCR0_IAC2) == 0)
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002538 return -ENOENT;
2539
2540 if (dbcr_iac_range(child) & DBCR_IAC12MODE)
2541 /* used in a range */
2542 return -EINVAL;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302543 child->thread.debug.iac2 = 0;
2544 child->thread.debug.dbcr0 &= ~DBCR0_IAC2;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002545 break;
2546#if CONFIG_PPC_ADV_DEBUG_IACS > 2
2547 case 3:
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302548 if ((child->thread.debug.dbcr0 & DBCR0_IAC3) == 0)
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002549 return -ENOENT;
2550
2551 if (dbcr_iac_range(child) & DBCR_IAC34MODE) {
2552 /* address range - clear slots 3 & 4 */
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302553 child->thread.debug.iac4 = 0;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002554 dbcr_iac_range(child) &= ~DBCR_IAC34MODE;
2555 }
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302556 child->thread.debug.iac3 = 0;
2557 child->thread.debug.dbcr0 &= ~DBCR0_IAC3;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002558 break;
2559 case 4:
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302560 if ((child->thread.debug.dbcr0 & DBCR0_IAC4) == 0)
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002561 return -ENOENT;
2562
2563 if (dbcr_iac_range(child) & DBCR_IAC34MODE)
2564 /* Used in a range */
2565 return -EINVAL;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302566 child->thread.debug.iac4 = 0;
2567 child->thread.debug.dbcr0 &= ~DBCR0_IAC4;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002568 break;
2569#endif
2570 default:
2571 return -EINVAL;
2572 }
2573 return 0;
2574}
2575
2576static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
2577{
2578 int byte_enable =
2579 (bp_info->condition_mode >> PPC_BREAKPOINT_CONDITION_BE_SHIFT)
2580 & 0xf;
2581 int condition_mode =
2582 bp_info->condition_mode & PPC_BREAKPOINT_CONDITION_MODE;
2583 int slot;
2584
2585 if (byte_enable && (condition_mode == 0))
2586 return -EINVAL;
2587
2588 if (bp_info->addr >= TASK_SIZE)
2589 return -EIO;
2590
2591 if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) {
2592 slot = 1;
2593 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
2594 dbcr_dac(child) |= DBCR_DAC1R;
2595 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
2596 dbcr_dac(child) |= DBCR_DAC1W;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302597 child->thread.debug.dac1 = (unsigned long)bp_info->addr;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002598#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2599 if (byte_enable) {
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302600 child->thread.debug.dvc1 =
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002601 (unsigned long)bp_info->condition_value;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302602 child->thread.debug.dbcr2 |=
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002603 ((byte_enable << DBCR2_DVC1BE_SHIFT) |
2604 (condition_mode << DBCR2_DVC1M_SHIFT));
2605 }
2606#endif
2607#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302608 } else if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002609 /* Both dac1 and dac2 are part of a range */
2610 return -ENOSPC;
2611#endif
2612 } else if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) {
2613 slot = 2;
2614 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
2615 dbcr_dac(child) |= DBCR_DAC2R;
2616 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
2617 dbcr_dac(child) |= DBCR_DAC2W;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302618 child->thread.debug.dac2 = (unsigned long)bp_info->addr;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002619#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2620 if (byte_enable) {
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302621 child->thread.debug.dvc2 =
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002622 (unsigned long)bp_info->condition_value;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302623 child->thread.debug.dbcr2 |=
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002624 ((byte_enable << DBCR2_DVC2BE_SHIFT) |
2625 (condition_mode << DBCR2_DVC2M_SHIFT));
2626 }
2627#endif
2628 } else
2629 return -ENOSPC;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302630 child->thread.debug.dbcr0 |= DBCR0_IDM;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002631 child->thread.regs->msr |= MSR_DE;
2632
2633 return slot + 4;
2634}
2635
2636static int del_dac(struct task_struct *child, int slot)
2637{
2638 if (slot == 1) {
Dave Kleikamp30124d12010-03-01 04:57:34 +00002639 if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0)
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002640 return -ENOENT;
2641
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302642 child->thread.debug.dac1 = 0;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002643 dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W);
2644#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302645 if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
2646 child->thread.debug.dac2 = 0;
2647 child->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002648 }
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302649 child->thread.debug.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE);
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002650#endif
2651#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302652 child->thread.debug.dvc1 = 0;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002653#endif
2654 } else if (slot == 2) {
Dave Kleikamp30124d12010-03-01 04:57:34 +00002655 if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0)
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002656 return -ENOENT;
2657
2658#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302659 if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE)
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002660 /* Part of a range */
2661 return -EINVAL;
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302662 child->thread.debug.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE);
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002663#endif
2664#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302665 child->thread.debug.dvc2 = 0;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002666#endif
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302667 child->thread.debug.dac2 = 0;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002668 dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W);
2669 } else
2670 return -EINVAL;
2671
2672 return 0;
2673}
2674#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
2675
2676#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2677static int set_dac_range(struct task_struct *child,
2678 struct ppc_hw_breakpoint *bp_info)
2679{
2680 int mode = bp_info->addr_mode & PPC_BREAKPOINT_MODE_MASK;
2681
2682 /* We don't allow range watchpoints to be used with DVC */
2683 if (bp_info->condition_mode)
2684 return -EINVAL;
2685
2686 /*
2687 * Best effort to verify the address range. The user/supervisor bits
2688 * prevent trapping in kernel space, but let's fail on an obvious bad
2689 * range. The simple test on the mask is not fool-proof, and any
2690 * exclusive range will spill over into kernel space.
2691 */
2692 if (bp_info->addr >= TASK_SIZE)
2693 return -EIO;
2694 if (mode == PPC_BREAKPOINT_MODE_MASK) {
2695 /*
2696 * dac2 is a bitmask. Don't allow a mask that makes a
2697 * kernel space address from a valid dac1 value
2698 */
2699 if (~((unsigned long)bp_info->addr2) >= TASK_SIZE)
2700 return -EIO;
2701 } else {
2702 /*
2703 * For range breakpoints, addr2 must also be a valid address
2704 */
2705 if (bp_info->addr2 >= TASK_SIZE)
2706 return -EIO;
2707 }
2708
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302709 if (child->thread.debug.dbcr0 &
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002710 (DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W))
2711 return -ENOSPC;
2712
2713 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302714 child->thread.debug.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002715 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302716 child->thread.debug.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
2717 child->thread.debug.dac1 = bp_info->addr;
2718 child->thread.debug.dac2 = bp_info->addr2;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002719 if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302720 child->thread.debug.dbcr2 |= DBCR2_DAC12M;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002721 else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302722 child->thread.debug.dbcr2 |= DBCR2_DAC12MX;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002723 else /* PPC_BREAKPOINT_MODE_MASK */
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302724 child->thread.debug.dbcr2 |= DBCR2_DAC12MM;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002725 child->thread.regs->msr |= MSR_DE;
2726
2727 return 5;
2728}
2729#endif /* CONFIG_PPC_ADV_DEBUG_DAC_RANGE */
2730
Dave Kleikamp3162d922010-02-08 11:51:05 +00002731static long ppc_set_hwdebug(struct task_struct *child,
2732 struct ppc_hw_breakpoint *bp_info)
2733{
K.Prasad6c7a2852012-10-28 15:13:15 +00002734#ifdef CONFIG_HAVE_HW_BREAKPOINT
2735 int len = 0;
2736 struct thread_struct *thread = &(child->thread);
2737 struct perf_event *bp;
2738 struct perf_event_attr attr;
2739#endif /* CONFIG_HAVE_HW_BREAKPOINT */
Andreas Schwab4dfbf292010-11-27 14:24:53 +00002740#ifndef CONFIG_PPC_ADV_DEBUG_REGS
Michael Neuling9422de32012-12-20 14:06:44 +00002741 struct arch_hw_breakpoint brk;
Andreas Schwab4dfbf292010-11-27 14:24:53 +00002742#endif
2743
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002744 if (bp_info->version != 1)
2745 return -ENOTSUPP;
2746#ifdef CONFIG_PPC_ADV_DEBUG_REGS
Dave Kleikamp3162d922010-02-08 11:51:05 +00002747 /*
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002748 * Check for invalid flags and combinations
2749 */
2750 if ((bp_info->trigger_type == 0) ||
2751 (bp_info->trigger_type & ~(PPC_BREAKPOINT_TRIGGER_EXECUTE |
2752 PPC_BREAKPOINT_TRIGGER_RW)) ||
2753 (bp_info->addr_mode & ~PPC_BREAKPOINT_MODE_MASK) ||
2754 (bp_info->condition_mode &
2755 ~(PPC_BREAKPOINT_CONDITION_MODE |
2756 PPC_BREAKPOINT_CONDITION_BE_ALL)))
2757 return -EINVAL;
2758#if CONFIG_PPC_ADV_DEBUG_DVCS == 0
2759 if (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
2760 return -EINVAL;
2761#endif
2762
2763 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_EXECUTE) {
2764 if ((bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_EXECUTE) ||
2765 (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE))
2766 return -EINVAL;
Michael Neuling84295df2012-10-28 15:13:16 +00002767 return set_instruction_bp(child, bp_info);
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002768 }
2769 if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
2770 return set_dac(child, bp_info);
2771
2772#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2773 return set_dac_range(child, bp_info);
2774#else
2775 return -EINVAL;
2776#endif
2777#else /* !CONFIG_PPC_ADV_DEBUG_DVCS */
2778 /*
2779 * We only support one data breakpoint
Dave Kleikamp3162d922010-02-08 11:51:05 +00002780 */
Andreas Schwab4dfbf292010-11-27 14:24:53 +00002781 if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 ||
2782 (bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 ||
Andreas Schwab4dfbf292010-11-27 14:24:53 +00002783 bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
Dave Kleikamp3162d922010-02-08 11:51:05 +00002784 return -EINVAL;
2785
Dave Kleikamp3162d922010-02-08 11:51:05 +00002786 if ((unsigned long)bp_info->addr >= TASK_SIZE)
2787 return -EIO;
2788
Michael Neuling9422de32012-12-20 14:06:44 +00002789 brk.address = bp_info->addr & ~7UL;
2790 brk.type = HW_BRK_TYPE_TRANSLATE;
Michael Neuling2bb78ef2013-03-11 16:42:49 +00002791 brk.len = 8;
Andreas Schwab4dfbf292010-11-27 14:24:53 +00002792 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
Michael Neuling9422de32012-12-20 14:06:44 +00002793 brk.type |= HW_BRK_TYPE_READ;
Andreas Schwab4dfbf292010-11-27 14:24:53 +00002794 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
Michael Neuling9422de32012-12-20 14:06:44 +00002795 brk.type |= HW_BRK_TYPE_WRITE;
K.Prasad6c7a2852012-10-28 15:13:15 +00002796#ifdef CONFIG_HAVE_HW_BREAKPOINT
K.Prasad6c7a2852012-10-28 15:13:15 +00002797 /*
2798 * Check if the request is for 'range' breakpoints. We can
2799 * support it if range < 8 bytes.
2800 */
Oleg Nesterov6961ed92013-07-08 16:00:49 -07002801 if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
K.Prasad6c7a2852012-10-28 15:13:15 +00002802 len = bp_info->addr2 - bp_info->addr;
Oleg Nesterov6961ed92013-07-08 16:00:49 -07002803 else if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
Michael Neulingb0b0aa92013-06-24 15:47:22 +10002804 len = 1;
Oleg Nesterov6961ed92013-07-08 16:00:49 -07002805 else
K.Prasad6c7a2852012-10-28 15:13:15 +00002806 return -EINVAL;
K.Prasad6c7a2852012-10-28 15:13:15 +00002807 bp = thread->ptrace_bps[0];
Oleg Nesterov6961ed92013-07-08 16:00:49 -07002808 if (bp)
K.Prasad6c7a2852012-10-28 15:13:15 +00002809 return -ENOSPC;
K.Prasad6c7a2852012-10-28 15:13:15 +00002810
2811 /* Create a new breakpoint request if one doesn't exist already */
2812 hw_breakpoint_init(&attr);
2813 attr.bp_addr = (unsigned long)bp_info->addr & ~HW_BREAKPOINT_ALIGN;
2814 attr.bp_len = len;
Michael Neuling9422de32012-12-20 14:06:44 +00002815 arch_bp_generic_fields(brk.type, &attr.bp_type);
K.Prasad6c7a2852012-10-28 15:13:15 +00002816
2817 thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
2818 ptrace_triggered, NULL, child);
2819 if (IS_ERR(bp)) {
2820 thread->ptrace_bps[0] = NULL;
K.Prasad6c7a2852012-10-28 15:13:15 +00002821 return PTR_ERR(bp);
2822 }
2823
K.Prasad6c7a2852012-10-28 15:13:15 +00002824 return 1;
2825#endif /* CONFIG_HAVE_HW_BREAKPOINT */
2826
2827 if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT)
2828 return -EINVAL;
2829
Michael Neuling9422de32012-12-20 14:06:44 +00002830 if (child->thread.hw_brk.address)
K.Prasad6c7a2852012-10-28 15:13:15 +00002831 return -ENOSPC;
Andreas Schwab4dfbf292010-11-27 14:24:53 +00002832
Michael Neuling9422de32012-12-20 14:06:44 +00002833 child->thread.hw_brk = brk;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002834
Dave Kleikamp3162d922010-02-08 11:51:05 +00002835 return 1;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002836#endif /* !CONFIG_PPC_ADV_DEBUG_DVCS */
Dave Kleikamp3162d922010-02-08 11:51:05 +00002837}
2838
Michael Neulingec1b33d2012-10-28 15:13:17 +00002839static long ppc_del_hwdebug(struct task_struct *child, long data)
Dave Kleikamp3162d922010-02-08 11:51:05 +00002840{
K.Prasad6c7a2852012-10-28 15:13:15 +00002841#ifdef CONFIG_HAVE_HW_BREAKPOINT
2842 int ret = 0;
2843 struct thread_struct *thread = &(child->thread);
2844 struct perf_event *bp;
2845#endif /* CONFIG_HAVE_HW_BREAKPOINT */
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002846#ifdef CONFIG_PPC_ADV_DEBUG_REGS
2847 int rc;
2848
2849 if (data <= 4)
2850 rc = del_instruction_bp(child, (int)data);
2851 else
2852 rc = del_dac(child, (int)data - 4);
2853
2854 if (!rc) {
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05302855 if (!DBCR_ACTIVE_EVENTS(child->thread.debug.dbcr0,
2856 child->thread.debug.dbcr1)) {
2857 child->thread.debug.dbcr0 &= ~DBCR0_IDM;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002858 child->thread.regs->msr &= ~MSR_DE;
2859 }
2860 }
2861 return rc;
2862#else
Dave Kleikamp3162d922010-02-08 11:51:05 +00002863 if (data != 1)
2864 return -EINVAL;
K.Prasad6c7a2852012-10-28 15:13:15 +00002865
2866#ifdef CONFIG_HAVE_HW_BREAKPOINT
K.Prasad6c7a2852012-10-28 15:13:15 +00002867 bp = thread->ptrace_bps[0];
2868 if (bp) {
2869 unregister_hw_breakpoint(bp);
2870 thread->ptrace_bps[0] = NULL;
2871 } else
2872 ret = -ENOENT;
K.Prasad6c7a2852012-10-28 15:13:15 +00002873 return ret;
2874#else /* CONFIG_HAVE_HW_BREAKPOINT */
Michael Neuling9422de32012-12-20 14:06:44 +00002875 if (child->thread.hw_brk.address == 0)
Dave Kleikamp3162d922010-02-08 11:51:05 +00002876 return -ENOENT;
2877
Michael Neuling9422de32012-12-20 14:06:44 +00002878 child->thread.hw_brk.address = 0;
2879 child->thread.hw_brk.type = 0;
K.Prasad6c7a2852012-10-28 15:13:15 +00002880#endif /* CONFIG_HAVE_HW_BREAKPOINT */
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002881
Dave Kleikamp3162d922010-02-08 11:51:05 +00002882 return 0;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002883#endif
Dave Kleikamp3162d922010-02-08 11:51:05 +00002884}
2885
Namhyung Kim9b05a692010-10-27 15:33:47 -07002886long arch_ptrace(struct task_struct *child, long request,
2887 unsigned long addr, unsigned long data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002888{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002889 int ret = -EPERM;
Namhyung Kimf68d2042010-10-27 15:34:01 -07002890 void __user *datavp = (void __user *) data;
2891 unsigned long __user *datalp = datavp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002892
Linus Torvalds1da177e2005-04-16 15:20:36 -07002893 switch (request) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002894 /* read the word at location addr in the USER area. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002895 case PTRACE_PEEKUSR: {
2896 unsigned long index, tmp;
2897
2898 ret = -EIO;
2899 /* convert to index and check */
Stephen Rothwelle8a30302005-10-13 15:52:04 +10002900#ifdef CONFIG_PPC32
Namhyung Kim9b05a692010-10-27 15:33:47 -07002901 index = addr >> 2;
Stephen Rothwelle8a30302005-10-13 15:52:04 +10002902 if ((addr & 3) || (index > PT_FPSCR)
2903 || (child->thread.regs == NULL))
2904#else
Namhyung Kim9b05a692010-10-27 15:33:47 -07002905 index = addr >> 3;
Stephen Rothwelle8a30302005-10-13 15:52:04 +10002906 if ((addr & 7) || (index > PT_FPSCR))
2907#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002908 break;
2909
2910 CHECK_FULL_REGS(child->thread.regs);
2911 if (index < PT_FPR0) {
Alexey Kardashevskiyee4a3912013-02-14 17:44:23 +00002912 ret = ptrace_get_reg(child, (int) index, &tmp);
2913 if (ret)
2914 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002915 } else {
Benjamin Herrenschmidte69b7422011-09-26 19:37:57 +00002916 unsigned int fpidx = index - PT_FPR0;
2917
Stephen Rothwelle8a30302005-10-13 15:52:04 +10002918 flush_fp_to_thread(child);
Benjamin Herrenschmidte69b7422011-09-26 19:37:57 +00002919 if (fpidx < (PT_FPSCR - PT_FPR0))
Ulrich Weigand36aa1b12013-12-12 15:59:34 +11002920 memcpy(&tmp, &child->thread.TS_FPR(fpidx),
Anton Blanchard87fec052013-09-23 12:04:38 +10002921 sizeof(long));
Benjamin Herrenschmidte69b7422011-09-26 19:37:57 +00002922 else
Paul Mackerrasde79f7b2013-09-10 20:20:42 +10002923 tmp = child->thread.fp_state.fpscr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002924 }
Namhyung Kimf68d2042010-10-27 15:34:01 -07002925 ret = put_user(tmp, datalp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002926 break;
2927 }
2928
Linus Torvalds1da177e2005-04-16 15:20:36 -07002929 /* write the word at location addr in the USER area */
2930 case PTRACE_POKEUSR: {
2931 unsigned long index;
2932
2933 ret = -EIO;
2934 /* convert to index and check */
Stephen Rothwelle8a30302005-10-13 15:52:04 +10002935#ifdef CONFIG_PPC32
Namhyung Kim9b05a692010-10-27 15:33:47 -07002936 index = addr >> 2;
Stephen Rothwelle8a30302005-10-13 15:52:04 +10002937 if ((addr & 3) || (index > PT_FPSCR)
2938 || (child->thread.regs == NULL))
2939#else
Namhyung Kim9b05a692010-10-27 15:33:47 -07002940 index = addr >> 3;
Stephen Rothwelle8a30302005-10-13 15:52:04 +10002941 if ((addr & 7) || (index > PT_FPSCR))
2942#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002943 break;
2944
2945 CHECK_FULL_REGS(child->thread.regs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002946 if (index < PT_FPR0) {
Benjamin Herrenschmidt865418d2007-06-04 15:15:44 +10002947 ret = ptrace_put_reg(child, index, data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002948 } else {
Benjamin Herrenschmidte69b7422011-09-26 19:37:57 +00002949 unsigned int fpidx = index - PT_FPR0;
2950
Stephen Rothwelle8a30302005-10-13 15:52:04 +10002951 flush_fp_to_thread(child);
Benjamin Herrenschmidte69b7422011-09-26 19:37:57 +00002952 if (fpidx < (PT_FPSCR - PT_FPR0))
Ulrich Weigand36aa1b12013-12-12 15:59:34 +11002953 memcpy(&child->thread.TS_FPR(fpidx), &data,
Anton Blanchard87fec052013-09-23 12:04:38 +10002954 sizeof(long));
Benjamin Herrenschmidte69b7422011-09-26 19:37:57 +00002955 else
Paul Mackerrasde79f7b2013-09-10 20:20:42 +10002956 child->thread.fp_state.fpscr = data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002957 ret = 0;
2958 }
2959 break;
2960 }
2961
Dave Kleikamp3162d922010-02-08 11:51:05 +00002962 case PPC_PTRACE_GETHWDBGINFO: {
2963 struct ppc_debug_info dbginfo;
2964
2965 dbginfo.version = 1;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002966#ifdef CONFIG_PPC_ADV_DEBUG_REGS
2967 dbginfo.num_instruction_bps = CONFIG_PPC_ADV_DEBUG_IACS;
2968 dbginfo.num_data_bps = CONFIG_PPC_ADV_DEBUG_DACS;
2969 dbginfo.num_condition_regs = CONFIG_PPC_ADV_DEBUG_DVCS;
2970 dbginfo.data_bp_alignment = 4;
2971 dbginfo.sizeof_condition = 4;
2972 dbginfo.features = PPC_DEBUG_FEATURE_INSN_BP_RANGE |
2973 PPC_DEBUG_FEATURE_INSN_BP_MASK;
2974#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2975 dbginfo.features |=
2976 PPC_DEBUG_FEATURE_DATA_BP_RANGE |
2977 PPC_DEBUG_FEATURE_DATA_BP_MASK;
2978#endif
2979#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
Dave Kleikamp3162d922010-02-08 11:51:05 +00002980 dbginfo.num_instruction_bps = 0;
2981 dbginfo.num_data_bps = 1;
2982 dbginfo.num_condition_regs = 0;
2983#ifdef CONFIG_PPC64
2984 dbginfo.data_bp_alignment = 8;
2985#else
2986 dbginfo.data_bp_alignment = 4;
2987#endif
2988 dbginfo.sizeof_condition = 0;
K.Prasad6c7a2852012-10-28 15:13:15 +00002989#ifdef CONFIG_HAVE_HW_BREAKPOINT
2990 dbginfo.features = PPC_DEBUG_FEATURE_DATA_BP_RANGE;
Michael Neuling517b7312013-03-21 20:12:33 +00002991 if (cpu_has_feature(CPU_FTR_DAWR))
2992 dbginfo.features |= PPC_DEBUG_FEATURE_DATA_BP_DAWR;
K.Prasad6c7a2852012-10-28 15:13:15 +00002993#else
Dave Kleikamp3162d922010-02-08 11:51:05 +00002994 dbginfo.features = 0;
K.Prasad6c7a2852012-10-28 15:13:15 +00002995#endif /* CONFIG_HAVE_HW_BREAKPOINT */
Dave Kleikamp3bffb652010-02-08 11:51:18 +00002996#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
Dave Kleikamp3162d922010-02-08 11:51:05 +00002997
Namhyung Kimf68d2042010-10-27 15:34:01 -07002998 if (!access_ok(VERIFY_WRITE, datavp,
Dave Kleikamp3162d922010-02-08 11:51:05 +00002999 sizeof(struct ppc_debug_info)))
3000 return -EFAULT;
Namhyung Kimf68d2042010-10-27 15:34:01 -07003001 ret = __copy_to_user(datavp, &dbginfo,
3002 sizeof(struct ppc_debug_info)) ?
Dave Kleikamp3162d922010-02-08 11:51:05 +00003003 -EFAULT : 0;
3004 break;
3005 }
3006
3007 case PPC_PTRACE_SETHWDEBUG: {
3008 struct ppc_hw_breakpoint bp_info;
3009
Namhyung Kimf68d2042010-10-27 15:34:01 -07003010 if (!access_ok(VERIFY_READ, datavp,
Dave Kleikamp3162d922010-02-08 11:51:05 +00003011 sizeof(struct ppc_hw_breakpoint)))
3012 return -EFAULT;
Namhyung Kimf68d2042010-10-27 15:34:01 -07003013 ret = __copy_from_user(&bp_info, datavp,
Dave Kleikamp3162d922010-02-08 11:51:05 +00003014 sizeof(struct ppc_hw_breakpoint)) ?
3015 -EFAULT : 0;
3016 if (!ret)
3017 ret = ppc_set_hwdebug(child, &bp_info);
3018 break;
3019 }
3020
3021 case PPC_PTRACE_DELHWDEBUG: {
Michael Neulingec1b33d2012-10-28 15:13:17 +00003022 ret = ppc_del_hwdebug(child, data);
Dave Kleikamp3162d922010-02-08 11:51:05 +00003023 break;
3024 }
3025
Stephen Rothwelle8a30302005-10-13 15:52:04 +10003026 case PTRACE_GET_DEBUGREG: {
Michael Neuling9422de32012-12-20 14:06:44 +00003027#ifndef CONFIG_PPC_ADV_DEBUG_REGS
3028 unsigned long dabr_fake;
3029#endif
Stephen Rothwelle8a30302005-10-13 15:52:04 +10003030 ret = -EINVAL;
3031 /* We only support one DABR and no IABRS at the moment */
3032 if (addr > 0)
3033 break;
Dave Kleikamp3bffb652010-02-08 11:51:18 +00003034#ifdef CONFIG_PPC_ADV_DEBUG_REGS
Bharat Bhushan51ae8d42013-07-04 11:45:46 +05303035 ret = put_user(child->thread.debug.dac1, datalp);
Dave Kleikamp3bffb652010-02-08 11:51:18 +00003036#else
Michael Neuling9422de32012-12-20 14:06:44 +00003037 dabr_fake = ((child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) |
3038 (child->thread.hw_brk.type & HW_BRK_TYPE_DABR));
3039 ret = put_user(dabr_fake, datalp);
Dave Kleikamp3bffb652010-02-08 11:51:18 +00003040#endif
Stephen Rothwelle8a30302005-10-13 15:52:04 +10003041 break;
3042 }
3043
3044 case PTRACE_SET_DEBUGREG:
3045 ret = ptrace_set_debugreg(child, addr, data);
3046 break;
Stephen Rothwelle8a30302005-10-13 15:52:04 +10003047
Benjamin Herrenschmidte17666b2007-06-04 15:15:43 +10003048#ifdef CONFIG_PPC64
3049 case PTRACE_GETREGS64:
3050#endif
Roland McGrathc391cd02007-12-20 03:58:36 -08003051 case PTRACE_GETREGS: /* Get all pt_regs from the child. */
3052 return copy_regset_to_user(child, &user_ppc_native_view,
3053 REGSET_GPR,
3054 0, sizeof(struct pt_regs),
Namhyung Kimf68d2042010-10-27 15:34:01 -07003055 datavp);
Stephen Rothwelle8a30302005-10-13 15:52:04 +10003056
Benjamin Herrenschmidt0b3d5c42007-06-04 15:15:39 +10003057#ifdef CONFIG_PPC64
Benjamin Herrenschmidte17666b2007-06-04 15:15:43 +10003058 case PTRACE_SETREGS64:
3059#endif
Roland McGrathc391cd02007-12-20 03:58:36 -08003060 case PTRACE_SETREGS: /* Set all gp regs in the child. */
3061 return copy_regset_from_user(child, &user_ppc_native_view,
3062 REGSET_GPR,
3063 0, sizeof(struct pt_regs),
Namhyung Kimf68d2042010-10-27 15:34:01 -07003064 datavp);
Stephen Rothwelle8a30302005-10-13 15:52:04 +10003065
Roland McGrathc391cd02007-12-20 03:58:36 -08003066 case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
3067 return copy_regset_to_user(child, &user_ppc_native_view,
3068 REGSET_FPR,
3069 0, sizeof(elf_fpregset_t),
Namhyung Kimf68d2042010-10-27 15:34:01 -07003070 datavp);
Benjamin Herrenschmidte17666b2007-06-04 15:15:43 +10003071
Roland McGrathc391cd02007-12-20 03:58:36 -08003072 case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
3073 return copy_regset_from_user(child, &user_ppc_native_view,
3074 REGSET_FPR,
3075 0, sizeof(elf_fpregset_t),
Namhyung Kimf68d2042010-10-27 15:34:01 -07003076 datavp);
Stephen Rothwelle8a30302005-10-13 15:52:04 +10003077
Linus Torvalds1da177e2005-04-16 15:20:36 -07003078#ifdef CONFIG_ALTIVEC
3079 case PTRACE_GETVRREGS:
Roland McGrathc391cd02007-12-20 03:58:36 -08003080 return copy_regset_to_user(child, &user_ppc_native_view,
3081 REGSET_VMX,
3082 0, (33 * sizeof(vector128) +
3083 sizeof(u32)),
Namhyung Kimf68d2042010-10-27 15:34:01 -07003084 datavp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003085
3086 case PTRACE_SETVRREGS:
Roland McGrathc391cd02007-12-20 03:58:36 -08003087 return copy_regset_from_user(child, &user_ppc_native_view,
3088 REGSET_VMX,
3089 0, (33 * sizeof(vector128) +
3090 sizeof(u32)),
Namhyung Kimf68d2042010-10-27 15:34:01 -07003091 datavp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003092#endif
Michael Neulingce48b212008-06-25 14:07:18 +10003093#ifdef CONFIG_VSX
3094 case PTRACE_GETVSRREGS:
3095 return copy_regset_to_user(child, &user_ppc_native_view,
3096 REGSET_VSX,
Michael Neuling1ac42ef82008-07-29 01:13:14 +10003097 0, 32 * sizeof(double),
Namhyung Kimf68d2042010-10-27 15:34:01 -07003098 datavp);
Michael Neulingce48b212008-06-25 14:07:18 +10003099
3100 case PTRACE_SETVSRREGS:
3101 return copy_regset_from_user(child, &user_ppc_native_view,
3102 REGSET_VSX,
Michael Neuling1ac42ef82008-07-29 01:13:14 +10003103 0, 32 * sizeof(double),
Namhyung Kimf68d2042010-10-27 15:34:01 -07003104 datavp);
Michael Neulingce48b212008-06-25 14:07:18 +10003105#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003106#ifdef CONFIG_SPE
3107 case PTRACE_GETEVRREGS:
3108 /* Get the child spe register state. */
Roland McGrathc391cd02007-12-20 03:58:36 -08003109 return copy_regset_to_user(child, &user_ppc_native_view,
3110 REGSET_SPE, 0, 35 * sizeof(u32),
Namhyung Kimf68d2042010-10-27 15:34:01 -07003111 datavp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003112
3113 case PTRACE_SETEVRREGS:
3114 /* Set the child spe register state. */
Roland McGrathc391cd02007-12-20 03:58:36 -08003115 return copy_regset_from_user(child, &user_ppc_native_view,
3116 REGSET_SPE, 0, 35 * sizeof(u32),
Namhyung Kimf68d2042010-10-27 15:34:01 -07003117 datavp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003118#endif
3119
3120 default:
3121 ret = ptrace_request(child, request, addr, data);
3122 break;
3123 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003124 return ret;
3125}
3126
Michael Ellerman2449acc2015-07-23 20:21:09 +10003127#ifdef CONFIG_SECCOMP
3128static int do_seccomp(struct pt_regs *regs)
3129{
3130 if (!test_thread_flag(TIF_SECCOMP))
3131 return 0;
3132
3133 /*
3134 * The ABI we present to seccomp tracers is that r3 contains
3135 * the syscall return value and orig_gpr3 contains the first
3136 * syscall parameter. This is different to the ptrace ABI where
3137 * both r3 and orig_gpr3 contain the first syscall parameter.
3138 */
3139 regs->gpr[3] = -ENOSYS;
3140
3141 /*
3142 * We use the __ version here because we have already checked
3143 * TIF_SECCOMP. If this fails, there is nothing left to do, we
3144 * have already loaded -ENOSYS into r3, or seccomp has put
3145 * something else in r3 (via SECCOMP_RET_ERRNO/TRACE).
3146 */
Andy Lutomirski2f275de2016-05-27 12:57:02 -07003147 if (__secure_computing(NULL))
Michael Ellerman2449acc2015-07-23 20:21:09 +10003148 return -1;
3149
3150 /*
3151 * The syscall was allowed by seccomp, restore the register
Kees Cook1addc572016-06-02 19:55:09 -07003152 * state to what audit expects.
Michael Ellerman2449acc2015-07-23 20:21:09 +10003153 * Note that we use orig_gpr3, which means a seccomp tracer can
3154 * modify the first syscall parameter (in orig_gpr3) and also
3155 * allow the syscall to proceed.
3156 */
3157 regs->gpr[3] = regs->orig_gpr3;
3158
3159 return 0;
3160}
3161#else
3162static inline int do_seccomp(struct pt_regs *regs) { return 0; }
3163#endif /* CONFIG_SECCOMP */
3164
Michael Ellermand3837412015-07-23 20:21:02 +10003165/**
3166 * do_syscall_trace_enter() - Do syscall tracing on kernel entry.
3167 * @regs: the pt_regs of the task to trace (current)
3168 *
3169 * Performs various types of tracing on syscall entry. This includes seccomp,
3170 * ptrace, syscall tracepoints and audit.
3171 *
3172 * The pt_regs are potentially visible to userspace via ptrace, so their
3173 * contents is ABI.
3174 *
3175 * One or more of the tracers may modify the contents of pt_regs, in particular
3176 * to modify arguments or even the syscall number itself.
3177 *
3178 * It's also possible that a tracer can choose to reject the system call. In
3179 * that case this function will return an illegal syscall number, and will put
3180 * an appropriate return value in regs->r3.
3181 *
3182 * Return: the (possibly changed) syscall number.
Roland McGrath4f72c422008-07-27 16:51:03 +10003183 */
3184long do_syscall_trace_enter(struct pt_regs *regs)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003185{
Li Zhong22ecbe82013-05-13 16:16:40 +00003186 user_exit();
3187
Kees Cook1addc572016-06-02 19:55:09 -07003188 /*
3189 * The tracer may decide to abort the syscall, if so tracehook
3190 * will return !0. Note that the tracer may also just change
3191 * regs->gpr[0] to an invalid syscall number, that is handled
3192 * below on the exit path.
3193 */
3194 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
3195 tracehook_report_syscall_entry(regs))
3196 goto skip;
3197
3198 /* Run seccomp after ptrace; allow it to set gpr[3]. */
Michael Ellerman2449acc2015-07-23 20:21:09 +10003199 if (do_seccomp(regs))
3200 return -1;
Stephen Rothwelle8a30302005-10-13 15:52:04 +10003201
Kees Cook1addc572016-06-02 19:55:09 -07003202 /* Avoid trace and audit when syscall is invalid. */
3203 if (regs->gpr[0] >= NR_syscalls)
3204 goto skip;
David Woodhouseea9c1022005-05-08 15:56:09 +01003205
Ian Munsie02424d82011-02-02 17:27:24 +00003206 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
3207 trace_sys_enter(regs, regs->gpr[0]);
3208
David Woodhousecfcd1702007-01-14 09:38:18 +08003209#ifdef CONFIG_PPC64
Eric Parisb05d8442012-01-03 14:23:06 -05003210 if (!is_32bit_task())
Eric Paris91397402014-03-11 13:29:28 -04003211 audit_syscall_entry(regs->gpr[0], regs->gpr[3], regs->gpr[4],
Eric Parisb05d8442012-01-03 14:23:06 -05003212 regs->gpr[5], regs->gpr[6]);
3213 else
Stephen Rothwelle8a30302005-10-13 15:52:04 +10003214#endif
Eric Paris91397402014-03-11 13:29:28 -04003215 audit_syscall_entry(regs->gpr[0],
Eric Parisb05d8442012-01-03 14:23:06 -05003216 regs->gpr[3] & 0xffffffff,
3217 regs->gpr[4] & 0xffffffff,
3218 regs->gpr[5] & 0xffffffff,
3219 regs->gpr[6] & 0xffffffff);
Roland McGrath4f72c422008-07-27 16:51:03 +10003220
Michael Ellermand3837412015-07-23 20:21:02 +10003221 /* Return the possibly modified but valid syscall number */
3222 return regs->gpr[0];
Kees Cook1addc572016-06-02 19:55:09 -07003223
3224skip:
3225 /*
3226 * If we are aborting explicitly, or if the syscall number is
3227 * now invalid, set the return value to -ENOSYS.
3228 */
3229 regs->gpr[3] = -ENOSYS;
3230 return -1;
David Woodhouseea9c1022005-05-08 15:56:09 +01003231}
3232
3233void do_syscall_trace_leave(struct pt_regs *regs)
3234{
Roland McGrath4f72c422008-07-27 16:51:03 +10003235 int step;
3236
Eric Parisd7e75282012-01-03 14:23:06 -05003237 audit_syscall_exit(regs);
David Woodhouseea9c1022005-05-08 15:56:09 +01003238
Ian Munsie02424d82011-02-02 17:27:24 +00003239 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
3240 trace_sys_exit(regs, regs->result);
3241
Roland McGrath4f72c422008-07-27 16:51:03 +10003242 step = test_thread_flag(TIF_SINGLESTEP);
3243 if (step || test_thread_flag(TIF_SYSCALL_TRACE))
3244 tracehook_report_syscall_exit(regs, step);
Li Zhong22ecbe82013-05-13 16:16:40 +00003245
3246 user_enter();
David Woodhouseea9c1022005-05-08 15:56:09 +01003247}