1 /* By Ross Biro 1/23/92 */
3 * Pentium III FXSR, SSE support
4 * Gareth Hughes <gareth@valinux.com>, May 2000
7 * Markus Metzger <markus.t.metzger@intel.com>, Dec 2007
10 #include <linux/kernel.h>
11 #include <linux/sched.h>
13 #include <linux/smp.h>
14 #include <linux/errno.h>
15 #include <linux/ptrace.h>
16 #include <linux/regset.h>
17 #include <linux/tracehook.h>
18 #include <linux/user.h>
19 #include <linux/elf.h>
20 #include <linux/security.h>
21 #include <linux/audit.h>
22 #include <linux/seccomp.h>
23 #include <linux/signal.h>
24 #include <linux/workqueue.h>
25 #include <linux/perf_event.h>
26 #include <linux/hw_breakpoint.h>
28 #include <asm/uaccess.h>
29 #include <asm/pgtable.h>
30 #include <asm/system.h>
31 #include <asm/processor.h>
33 #include <asm/debugreg.h>
36 #include <asm/prctl.h>
37 #include <asm/proto.h>
39 #include <asm/hw_breakpoint.h>
43 #define CREATE_TRACE_POINTS
44 #include <trace/events/syscalls.h>
50 REGSET_IOPERM64 = REGSET_XFP,
55 struct pt_regs_offset {
60 #define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
61 #define REG_OFFSET_END {.name = NULL, .offset = 0}
63 static const struct pt_regs_offset regoffset_table[] = {
87 REG_OFFSET_NAME(orig_ax),
90 REG_OFFSET_NAME(flags),
97 * regs_query_register_offset() - query register offset from its name
98 * @name: the name of a register
100 * regs_query_register_offset() returns the offset of a register in struct
101 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
103 int regs_query_register_offset(const char *name)
105 const struct pt_regs_offset *roff;
106 for (roff = regoffset_table; roff->name != NULL; roff++)
107 if (!strcmp(roff->name, name))
113 * regs_query_register_name() - query register name from its offset
114 * @offset: the offset of a register in struct pt_regs.
116 * regs_query_register_name() returns the name of a register from its
117 * offset in struct pt_regs. If the @offset is invalid, this returns NULL.
119 const char *regs_query_register_name(unsigned int offset)
121 const struct pt_regs_offset *roff;
122 for (roff = regoffset_table; roff->name != NULL; roff++)
123 if (roff->offset == offset)
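/*
 * Illustrative sketch: how a caller (for example the kprobe-based event
 * tracer) might use the lookup helpers above. The helper name
 * regs_example_read_by_name() is hypothetical, not an existing kernel API;
 * the raw fetch through pt_regs mirrors the pattern used by
 * regs_get_argument_nth() below.
 */
static unsigned long regs_example_read_by_name(struct pt_regs *regs,
					       const char *name)
{
	int offs = regs_query_register_offset(name);	/* e.g. "ax" */

	if (offs < 0)
		return 0;	/* unknown register name */

	/* Read the saved register value at that offset within pt_regs. */
	return *(unsigned long *)((char *)regs + offs);
}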
128 static const int arg_offs_table[] = {
130 [0] = offsetof(struct pt_regs, ax),
131 [1] = offsetof(struct pt_regs, dx),
132 [2] = offsetof(struct pt_regs, cx)
133 #else /* CONFIG_X86_64 */
134 [0] = offsetof(struct pt_regs, di),
135 [1] = offsetof(struct pt_regs, si),
136 [2] = offsetof(struct pt_regs, dx),
137 [3] = offsetof(struct pt_regs, cx),
138 [4] = offsetof(struct pt_regs, r8),
139 [5] = offsetof(struct pt_regs, r9)
144 * regs_get_argument_nth() - get Nth argument at function call
145 * @regs: pt_regs which contains registers at function entry.
146 * @n: argument number.
148 * regs_get_argument_nth() returns the @n th argument of a function call.
149 * Since the kernel stack is usually changed right after function entry,
150 * you must use this at function entry. If the @n th entry is NOT in the
151 * kernel stack or pt_regs, this returns 0.
153 unsigned long regs_get_argument_nth(struct pt_regs *regs, unsigned int n)
155 if (n < ARRAY_SIZE(arg_offs_table))
156 return *(unsigned long *)((char *)regs + arg_offs_table[n]);
159 * The typical case: arg n is on the stack.
160 * (Note: stack[0] = return address, so skip it)
162 n -= ARRAY_SIZE(arg_offs_table);
163 return regs_get_kernel_stack_nth(regs, 1 + n);
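/*
 * Illustrative sketch: calling regs_get_argument_nth() from something like a
 * kprobe pre-handler placed on a function's entry point. The handler name is
 * hypothetical; only the regs_get_argument_nth() calls are real.
 */
static void example_entry_handler(struct pt_regs *regs)
{
	/* First and fourth arguments of the probed function. */
	unsigned long arg0 = regs_get_argument_nth(regs, 0);
	unsigned long arg3 = regs_get_argument_nth(regs, 3);

	pr_debug("arg0=%lx arg3=%lx\n", arg0, arg3);
}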
168 * Does not yet catch signals sent when the child dies
169 * in exit.c or in signal.c.
173 * Determines which flags the user has access to [1 = access, 0 = no access].
175 #define FLAG_MASK_32 ((unsigned long) \
176 (X86_EFLAGS_CF | X86_EFLAGS_PF | \
177 X86_EFLAGS_AF | X86_EFLAGS_ZF | \
178 X86_EFLAGS_SF | X86_EFLAGS_TF | \
179 X86_EFLAGS_DF | X86_EFLAGS_OF | \
180 X86_EFLAGS_RF | X86_EFLAGS_AC))
183 * Determines whether a value may be installed in a segment register.
185 static inline bool invalid_selector(u16 value)
187 return unlikely(value != 0 && (value & SEGMENT_RPL_MASK) != USER_RPL);
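/*
 * Worked example (illustrative): with USER_RPL == 3 and SEGMENT_RPL_MASK ==
 * 0x3, a null selector (0) or a user selector such as 0x2b (RPL 3) is
 * accepted, while a kernel selector such as 0x10 (RPL 0) is rejected.
 */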
192 #define FLAG_MASK FLAG_MASK_32
194 static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
196 BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
197 return &regs->bx + (regno >> 2);
200 static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
203 * Returning the value truncates it to 16 bits.
206 if (offset != offsetof(struct user_regs_struct, gs))
207 retval = *pt_regs_access(task_pt_regs(task), offset);
210 retval = get_user_gs(task_pt_regs(task));
212 retval = task_user_gs(task);
217 static int set_segment_reg(struct task_struct *task,
218 unsigned long offset, u16 value)
221 * The value argument was already truncated to 16 bits.
223 if (invalid_selector(value))
227 * For %cs and %ss we cannot permit a null selector.
228 * We can permit a bogus selector as long as it has USER_RPL.
229 * Null selectors are fine for other segment registers, but
230 * we will never get back to user mode with invalid %cs or %ss
231 * and will take the trap in iret instead. Much code relies
232 * on user_mode() to distinguish a user trap frame (which can
233 * safely use invalid selectors) from a kernel trap frame.
236 case offsetof(struct user_regs_struct, cs):
237 case offsetof(struct user_regs_struct, ss):
238 if (unlikely(value == 0))
242 *pt_regs_access(task_pt_regs(task), offset) = value;
245 case offsetof(struct user_regs_struct, gs):
247 set_user_gs(task_pt_regs(task), value);
249 task_user_gs(task) = value;
255 #else /* CONFIG_X86_64 */
257 #define FLAG_MASK (FLAG_MASK_32 | X86_EFLAGS_NT)
259 static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long offset)
261 BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0);
262 return &regs->r15 + (offset / sizeof(regs->r15));
265 static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
268 * Returning the value truncates it to 16 bits.
273 case offsetof(struct user_regs_struct, fs):
274 if (task == current) {
275 /* Older gas can't assemble movq %?s,%r?? */
276 asm("movl %%fs,%0" : "=r" (seg));
279 return task->thread.fsindex;
280 case offsetof(struct user_regs_struct, gs):
281 if (task == current) {
282 asm("movl %%gs,%0" : "=r" (seg));
285 return task->thread.gsindex;
286 case offsetof(struct user_regs_struct, ds):
287 if (task == current) {
288 asm("movl %%ds,%0" : "=r" (seg));
291 return task->thread.ds;
292 case offsetof(struct user_regs_struct, es):
293 if (task == current) {
294 asm("movl %%es,%0" : "=r" (seg));
297 return task->thread.es;
299 case offsetof(struct user_regs_struct, cs):
300 case offsetof(struct user_regs_struct, ss):
303 return *pt_regs_access(task_pt_regs(task), offset);
306 static int set_segment_reg(struct task_struct *task,
307 unsigned long offset, u16 value)
310 * The value argument was already truncated to 16 bits.
312 if (invalid_selector(value))
316 case offsetof(struct user_regs_struct,fs):
318 * If this is setting fs as for normal 64-bit use but
319 * setting fs_base has implicitly changed it, leave it.
321 if ((value == FS_TLS_SEL && task->thread.fsindex == 0 &&
322 task->thread.fs != 0) ||
323 (value == 0 && task->thread.fsindex == FS_TLS_SEL &&
324 task->thread.fs == 0))
326 task->thread.fsindex = value;
328 loadsegment(fs, task->thread.fsindex);
330 case offsetof(struct user_regs_struct,gs):
332 * If this is setting gs as for normal 64-bit use but
333 * setting gs_base has implicitly changed it, leave it.
335 if ((value == GS_TLS_SEL && task->thread.gsindex == 0 &&
336 task->thread.gs != 0) ||
337 (value == 0 && task->thread.gsindex == GS_TLS_SEL &&
338 task->thread.gs == 0))
340 task->thread.gsindex = value;
342 load_gs_index(task->thread.gsindex);
344 case offsetof(struct user_regs_struct,ds):
345 task->thread.ds = value;
347 loadsegment(ds, task->thread.ds);
349 case offsetof(struct user_regs_struct,es):
350 task->thread.es = value;
352 loadsegment(es, task->thread.es);
356 * Can't actually change these in 64-bit mode.
358 case offsetof(struct user_regs_struct,cs):
359 if (unlikely(value == 0))
361 #ifdef CONFIG_IA32_EMULATION
362 if (test_tsk_thread_flag(task, TIF_IA32))
363 task_pt_regs(task)->cs = value;
366 case offsetof(struct user_regs_struct,ss):
367 if (unlikely(value == 0))
369 #ifdef CONFIG_IA32_EMULATION
370 if (test_tsk_thread_flag(task, TIF_IA32))
371 task_pt_regs(task)->ss = value;
379 #endif /* CONFIG_X86_32 */
381 static unsigned long get_flags(struct task_struct *task)
383 unsigned long retval = task_pt_regs(task)->flags;
386 * If the debugger set TF, hide it from the readout.
388 if (test_tsk_thread_flag(task, TIF_FORCED_TF))
389 retval &= ~X86_EFLAGS_TF;
394 static int set_flags(struct task_struct *task, unsigned long value)
396 struct pt_regs *regs = task_pt_regs(task);
399 * If the user value contains TF, mark that
400 * it was not "us" (the debugger) that set it.
401 * If not, make sure it stays set if we had set it ourselves.
403 if (value & X86_EFLAGS_TF)
404 clear_tsk_thread_flag(task, TIF_FORCED_TF);
405 else if (test_tsk_thread_flag(task, TIF_FORCED_TF))
406 value |= X86_EFLAGS_TF;
408 regs->flags = (regs->flags & ~FLAG_MASK) | (value & FLAG_MASK);
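/*
 * Worked example (illustrative): if a debugger writes a flags value with the
 * IOPL bits set, those bits are outside FLAG_MASK and the task's existing
 * IOPL is silently preserved; only bits in FLAG_MASK (CF, TF, DF, ...) are
 * taken from the debugger-supplied value.
 */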
413 static int putreg(struct task_struct *child,
414 unsigned long offset, unsigned long value)
417 case offsetof(struct user_regs_struct, cs):
418 case offsetof(struct user_regs_struct, ds):
419 case offsetof(struct user_regs_struct, es):
420 case offsetof(struct user_regs_struct, fs):
421 case offsetof(struct user_regs_struct, gs):
422 case offsetof(struct user_regs_struct, ss):
423 return set_segment_reg(child, offset, value);
425 case offsetof(struct user_regs_struct, flags):
426 return set_flags(child, value);
429 case offsetof(struct user_regs_struct,fs_base):
430 if (value >= TASK_SIZE_OF(child))
433 * When changing the segment base, use do_arch_prctl
434 * to set either thread.fs or thread.fsindex and the
435 * corresponding GDT slot.
437 if (child->thread.fs != value)
438 return do_arch_prctl(child, ARCH_SET_FS, value);
440 case offsetof(struct user_regs_struct,gs_base):
442 * Exactly the same here as the %fs handling above.
444 if (value >= TASK_SIZE_OF(child))
446 if (child->thread.gs != value)
447 return do_arch_prctl(child, ARCH_SET_GS, value);
452 *pt_regs_access(task_pt_regs(child), offset) = value;
456 static unsigned long getreg(struct task_struct *task, unsigned long offset)
459 case offsetof(struct user_regs_struct, cs):
460 case offsetof(struct user_regs_struct, ds):
461 case offsetof(struct user_regs_struct, es):
462 case offsetof(struct user_regs_struct, fs):
463 case offsetof(struct user_regs_struct, gs):
464 case offsetof(struct user_regs_struct, ss):
465 return get_segment_reg(task, offset);
467 case offsetof(struct user_regs_struct, flags):
468 return get_flags(task);
471 case offsetof(struct user_regs_struct, fs_base): {
473 * do_arch_prctl may have used a GDT slot instead of
474 * the MSR. To userland, it appears the same either
475 * way, except the %fs segment selector might not be 0.
477 unsigned int seg = task->thread.fsindex;
478 if (task->thread.fs != 0)
479 return task->thread.fs;
481 asm("movl %%fs,%0" : "=r" (seg));
482 if (seg != FS_TLS_SEL)
484 return get_desc_base(&task->thread.tls_array[FS_TLS]);
486 case offsetof(struct user_regs_struct, gs_base): {
488 * Exactly the same here as the %fs handling above.
490 unsigned int seg = task->thread.gsindex;
491 if (task->thread.gs != 0)
492 return task->thread.gs;
494 asm("movl %%gs,%0" : "=r" (seg));
495 if (seg != GS_TLS_SEL)
497 return get_desc_base(&task->thread.tls_array[GS_TLS]);
502 return *pt_regs_access(task_pt_regs(task), offset);
505 static int genregs_get(struct task_struct *target,
506 const struct user_regset *regset,
507 unsigned int pos, unsigned int count,
508 void *kbuf, void __user *ubuf)
511 unsigned long *k = kbuf;
513 *k++ = getreg(target, pos);
518 unsigned long __user *u = ubuf;
520 if (__put_user(getreg(target, pos), u++))
530 static int genregs_set(struct task_struct *target,
531 const struct user_regset *regset,
532 unsigned int pos, unsigned int count,
533 const void *kbuf, const void __user *ubuf)
537 const unsigned long *k = kbuf;
538 while (count > 0 && !ret) {
539 ret = putreg(target, pos, *k++);
544 const unsigned long __user *u = ubuf;
545 while (count > 0 && !ret) {
547 ret = __get_user(word, u++);
550 ret = putreg(target, pos, word);
558 static void ptrace_triggered(struct perf_event *bp, void *data)
561 struct thread_struct *thread = &(current->thread);
564 * Store in the virtual DR6 register the fact that the breakpoint
565 * was hit so the thread's debugger will see it.
567 for (i = 0; i < HBP_NUM; i++) {
568 if (thread->ptrace_bps[i] == bp)
572 thread->debugreg6 |= (DR_TRAP0 << i);
576 * Walk through all the ptrace breakpoints for this thread and
577 * build the dr7 value from their attributes.
580 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
584 struct arch_hw_breakpoint *info;
586 for (i = 0; i < HBP_NUM; i++) {
587 if (bp[i] && !bp[i]->attr.disabled) {
588 info = counter_arch_bp(bp[i]);
589 dr7 |= encode_dr7(i, info->len, info->type);
596 static struct perf_event *
597 ptrace_modify_breakpoint(struct perf_event *bp, int len, int type,
598 struct task_struct *tsk, int disabled)
601 int gen_len, gen_type;
602 DEFINE_BREAKPOINT_ATTR(attr);
605 * We should have at least an inactive breakpoint at this
606 * slot. It means the user is writing dr7 without having
607 * written the address register first.
610 return ERR_PTR(-EINVAL);
612 err = arch_bp_generic_fields(len, type, &gen_len, &gen_type);
617 attr.bp_len = gen_len;
618 attr.bp_type = gen_type;
619 attr.disabled = disabled;
621 return modify_user_hw_breakpoint(bp, &attr);
625 * Handle ptrace writes to debug register 7.
627 static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data)
629 struct thread_struct *thread = &(tsk->thread);
630 unsigned long old_dr7;
631 int i, orig_ret = 0, rc = 0;
632 int enabled, second_pass = 0;
634 struct perf_event *bp;
636 data &= ~DR_CONTROL_RESERVED;
637 old_dr7 = ptrace_get_dr7(thread->ptrace_bps);
640 * Loop through all the hardware breakpoints, making the
641 * appropriate changes to each.
643 for (i = 0; i < HBP_NUM; i++) {
644 enabled = decode_dr7(data, i, &len, &type);
645 bp = thread->ptrace_bps[i];
650 * Don't unregister the breakpoints right away,
651 * unless all register_user_hw_breakpoint()
652 * requests have succeeded. This prevents
653 * any window of opportunity for debug
654 * register grabbing by other users.
659 thread->ptrace_bps[i] = NULL;
660 bp = ptrace_modify_breakpoint(bp, len, type,
664 thread->ptrace_bps[i] = NULL;
667 thread->ptrace_bps[i] = bp;
672 bp = ptrace_modify_breakpoint(bp, len, type, tsk, 0);
674 /* Incorrect bp, or we have a bug in bp API */
677 thread->ptrace_bps[i] = NULL;
680 thread->ptrace_bps[i] = bp;
683 * Make a second pass to free the remaining unused breakpoints
684 * or to restore the original breakpoints if an error occurred.
694 return ((orig_ret < 0) ? orig_ret : rc);
698 * Handle PTRACE_PEEKUSR calls for the debug register area.
700 static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
702 struct thread_struct *thread = &(tsk->thread);
703 unsigned long val = 0;
706 struct perf_event *bp;
707 bp = thread->ptrace_bps[n];
710 val = bp->hw.info.address;
712 val = thread->debugreg6;
714 val = ptrace_get_dr7(thread->ptrace_bps);
719 static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr,
722 struct perf_event *bp;
723 struct thread_struct *t = &tsk->thread;
724 DEFINE_BREAKPOINT_ATTR(attr);
726 if (!t->ptrace_bps[nr]) {
728 * Use stub len and type values to register (reserve) an inactive but
732 attr.bp_len = HW_BREAKPOINT_LEN_1;
733 attr.bp_type = HW_BREAKPOINT_W;
736 bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk);
738 bp = t->ptrace_bps[nr];
739 t->ptrace_bps[nr] = NULL;
743 bp = modify_user_hw_breakpoint(bp, &attr);
746 * CHECKME: the previous code returned -EIO if the addr wasn't a
747 * valid task virtual addr. The new one will return -EINVAL in this case.
749 * -EINVAL may be what we want for in-kernel breakpoint users, but
750 * -EIO looks better for ptrace, since we refuse to write the register
751 * for the user. And anyway this is the previous behaviour.
756 t->ptrace_bps[nr] = bp;
762 * Handle PTRACE_POKEUSR calls for the debug register area.
764 int ptrace_set_debugreg(struct task_struct *tsk, int n, unsigned long val)
766 struct thread_struct *thread = &(tsk->thread);
769 /* There are no DR4 or DR5 registers */
770 if (n == 4 || n == 5)
774 thread->debugreg6 = val;
778 rc = ptrace_set_breakpoint_addr(tsk, n, val);
782 /* All that's left is DR7 */
784 rc = ptrace_write_dr7(tsk, val);
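/*
 * Illustrative sketch of the user-space side (not part of this file): a
 * tracer arming a one-byte write watchpoint on a stopped tracee through the
 * debug-register area of struct user, then reading DR6 after the resulting
 * SIGTRAP. "pid" and "watch_addr" are assumed to be set up by the caller;
 * the dr7 value 0x1 | (0x1 << 16) sets the local-enable bit for DR0 and
 * selects a one-byte data-write breakpoint.
 *
 *	ptrace(PTRACE_POKEUSER, pid,
 *	       offsetof(struct user, u_debugreg[0]), watch_addr);
 *	ptrace(PTRACE_POKEUSER, pid,
 *	       offsetof(struct user, u_debugreg[7]),
 *	       0x1UL | (0x1UL << 16));
 *	...
 *	long dr6 = ptrace(PTRACE_PEEKUSER, pid,
 *			  offsetof(struct user, u_debugreg[6]), 0);
 */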
791 * These access the current or another (stopped) task's io permission
792 * bitmap for debugging or core dump.
794 static int ioperm_active(struct task_struct *target,
795 const struct user_regset *regset)
797 return target->thread.io_bitmap_max / regset->size;
800 static int ioperm_get(struct task_struct *target,
801 const struct user_regset *regset,
802 unsigned int pos, unsigned int count,
803 void *kbuf, void __user *ubuf)
805 if (!target->thread.io_bitmap_ptr)
808 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
809 target->thread.io_bitmap_ptr,
813 #ifdef CONFIG_X86_PTRACE_BTS
815 * A branch trace store context.
817 * Contexts may only be installed by ptrace_bts_config() and only for ptraced tasks.
820 * Contexts are destroyed when the tracee is detached from the tracer.
821 * The actual destruction work requires interrupts enabled, so the
822 * work is deferred and will be scheduled during __ptrace_unlink().
824 * Contexts hold an additional task_struct reference on the traced
825 * task, as well as a reference on the tracer's mm.
827 * Ptrace already holds a task_struct reference for the duration of ptrace
828 * operations, but since destruction is deferred, it may run after both
829 * tracer and tracee have exited.
832 /* The branch trace handle. */
833 struct bts_tracer *tracer;
835 /* The buffer used to store the branch trace and its size. */
839 /* The mm that paid for the above buffer. */
840 struct mm_struct *mm;
842 /* The task this context belongs to. */
843 struct task_struct *task;
845 /* The signal to send on a bts buffer overflow. */
846 unsigned int bts_ovfl_signal;
848 /* The work struct to destroy a context. */
849 struct work_struct work;
852 static int alloc_bts_buffer(struct bts_context *context, unsigned int size)
857 err = account_locked_memory(current->mm, current->signal->rlim, size);
861 buffer = kzalloc(size, GFP_KERNEL);
865 context->buffer = buffer;
866 context->size = size;
867 context->mm = get_task_mm(current);
872 refund_locked_memory(current->mm, size);
876 static inline void free_bts_buffer(struct bts_context *context)
878 if (!context->buffer)
881 kfree(context->buffer);
882 context->buffer = NULL;
884 refund_locked_memory(context->mm, context->size);
891 static void free_bts_context_work(struct work_struct *w)
893 struct bts_context *context;
895 context = container_of(w, struct bts_context, work);
897 ds_release_bts(context->tracer);
898 put_task_struct(context->task);
899 free_bts_buffer(context);
903 static inline void free_bts_context(struct bts_context *context)
905 INIT_WORK(&context->work, free_bts_context_work);
906 schedule_work(&context->work);
909 static inline struct bts_context *alloc_bts_context(struct task_struct *task)
911 struct bts_context *context = kzalloc(sizeof(*context), GFP_KERNEL);
913 context->task = task;
916 get_task_struct(task);
922 static int ptrace_bts_read_record(struct task_struct *child, size_t index,
923 struct bts_struct __user *out)
925 struct bts_context *context;
926 const struct bts_trace *trace;
927 struct bts_struct bts;
928 const unsigned char *at;
931 context = child->bts;
935 trace = ds_read_bts(context->tracer);
939 at = trace->ds.top - ((index + 1) * trace->ds.size);
940 if ((void *)at < trace->ds.begin)
941 at += (trace->ds.n * trace->ds.size);
946 error = trace->read(context->tracer, at, &bts);
950 if (copy_to_user(out, &bts, sizeof(bts)))
956 static int ptrace_bts_drain(struct task_struct *child,
958 struct bts_struct __user *out)
960 struct bts_context *context;
961 const struct bts_trace *trace;
962 const unsigned char *at;
963 int error, drained = 0;
965 context = child->bts;
969 trace = ds_read_bts(context->tracer);
976 if (size < (trace->ds.top - trace->ds.begin))
979 for (at = trace->ds.begin; (void *)at < trace->ds.top;
980 out++, drained++, at += trace->ds.size) {
981 struct bts_struct bts;
983 error = trace->read(context->tracer, at, &bts);
987 if (copy_to_user(out, &bts, sizeof(bts)))
991 memset(trace->ds.begin, 0, trace->ds.n * trace->ds.size);
993 error = ds_reset_bts(context->tracer);
1000 static int ptrace_bts_config(struct task_struct *child,
1002 const struct ptrace_bts_config __user *ucfg)
1004 struct bts_context *context;
1005 struct ptrace_bts_config cfg;
1006 unsigned int flags = 0;
1008 if (cfg_size < sizeof(cfg))
1011 if (copy_from_user(&cfg, ucfg, sizeof(cfg)))
1014 context = child->bts;
1016 context = alloc_bts_context(child);
1020 if (cfg.flags & PTRACE_BTS_O_SIGNAL) {
1025 context->bts_ovfl_signal = cfg.signal;
1028 ds_release_bts(context->tracer);
1029 context->tracer = NULL;
1031 if ((cfg.flags & PTRACE_BTS_O_ALLOC) && (cfg.size != context->size)) {
1034 free_bts_buffer(context);
1038 err = alloc_bts_buffer(context, cfg.size);
1043 if (cfg.flags & PTRACE_BTS_O_TRACE)
1046 if (cfg.flags & PTRACE_BTS_O_SCHED)
1047 flags |= BTS_TIMESTAMPS;
1050 ds_request_bts_task(child, context->buffer, context->size,
1051 NULL, (size_t)-1, flags);
1052 if (unlikely(IS_ERR(context->tracer))) {
1053 int error = PTR_ERR(context->tracer);
1055 free_bts_buffer(context);
1056 context->tracer = NULL;
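/*
 * Illustrative sketch of the user-space side (not part of this file): a
 * tracer requesting a branch trace buffer for an attached, stopped child.
 * Per the arch_ptrace() dispatch below, addr carries the config pointer and
 * data its size.
 *
 *	struct ptrace_bts_config cfg = {
 *		.size  = 4096,
 *		.flags = PTRACE_BTS_O_ALLOC | PTRACE_BTS_O_TRACE,
 *	};
 *
 *	ptrace(PTRACE_BTS_CONFIG, pid, &cfg, sizeof(cfg));
 */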
1063 static int ptrace_bts_status(struct task_struct *child,
1065 struct ptrace_bts_config __user *ucfg)
1067 struct bts_context *context;
1068 const struct bts_trace *trace;
1069 struct ptrace_bts_config cfg;
1071 context = child->bts;
1075 if (cfg_size < sizeof(cfg))
1078 trace = ds_read_bts(context->tracer);
1082 memset(&cfg, 0, sizeof(cfg));
1083 cfg.size = trace->ds.end - trace->ds.begin;
1084 cfg.signal = context->bts_ovfl_signal;
1085 cfg.bts_size = sizeof(struct bts_struct);
1088 cfg.flags |= PTRACE_BTS_O_SIGNAL;
1090 if (trace->ds.flags & BTS_USER)
1091 cfg.flags |= PTRACE_BTS_O_TRACE;
1093 if (trace->ds.flags & BTS_TIMESTAMPS)
1094 cfg.flags |= PTRACE_BTS_O_SCHED;
1096 if (copy_to_user(ucfg, &cfg, sizeof(cfg)))
1102 static int ptrace_bts_clear(struct task_struct *child)
1104 struct bts_context *context;
1105 const struct bts_trace *trace;
1107 context = child->bts;
1111 trace = ds_read_bts(context->tracer);
1115 memset(trace->ds.begin, 0, trace->ds.n * trace->ds.size);
1117 return ds_reset_bts(context->tracer);
1120 static int ptrace_bts_size(struct task_struct *child)
1122 struct bts_context *context;
1123 const struct bts_trace *trace;
1125 context = child->bts;
1129 trace = ds_read_bts(context->tracer);
1133 return (trace->ds.top - trace->ds.begin) / trace->ds.size;
1137 * Called from __ptrace_unlink() after the child has been moved back
1138 * to its original parent.
1140 void ptrace_bts_untrace(struct task_struct *child)
1142 if (unlikely(child->bts)) {
1143 free_bts_context(child->bts);
1147 #endif /* CONFIG_X86_PTRACE_BTS */
1150 * Called by kernel/ptrace.c when detaching.
1152 * Make sure the single step bit is not set.
1154 void ptrace_disable(struct task_struct *child)
1156 user_disable_single_step(child);
1157 #ifdef TIF_SYSCALL_EMU
1158 clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
1162 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
1163 static const struct user_regset_view user_x86_32_view; /* Initialized below. */
1166 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
1169 unsigned long __user *datap = (unsigned long __user *)data;
1172 /* read the word at location addr in the USER area. */
1173 case PTRACE_PEEKUSR: {
1177 if ((addr & (sizeof(data) - 1)) || addr < 0 ||
1178 addr >= sizeof(struct user))
1181 tmp = 0; /* Default return condition */
1182 if (addr < sizeof(struct user_regs_struct))
1183 tmp = getreg(child, addr);
1184 else if (addr >= offsetof(struct user, u_debugreg[0]) &&
1185 addr <= offsetof(struct user, u_debugreg[7])) {
1186 addr -= offsetof(struct user, u_debugreg[0]);
1187 tmp = ptrace_get_debugreg(child, addr / sizeof(data));
1189 ret = put_user(tmp, datap);
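/*
 * Illustrative sketch of the user-space side (not part of this file):
 * reading one word from the USER area of a stopped 64-bit tracee, here the
 * saved instruction pointer via glibc's struct user from <sys/user.h>.
 *
 *	errno = 0;
 *	long ip = ptrace(PTRACE_PEEKUSER, pid,
 *			 offsetof(struct user, regs.rip), 0);
 *	if (ip == -1 && errno)
 *		perror("PTRACE_PEEKUSER");
 */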
1193 case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
1195 if ((addr & (sizeof(data) - 1)) || addr < 0 ||
1196 addr >= sizeof(struct user))
1199 if (addr < sizeof(struct user_regs_struct))
1200 ret = putreg(child, addr, data);
1201 else if (addr >= offsetof(struct user, u_debugreg[0]) &&
1202 addr <= offsetof(struct user, u_debugreg[7])) {
1203 addr -= offsetof(struct user, u_debugreg[0]);
1204 ret = ptrace_set_debugreg(child,
1205 addr / sizeof(data), data);
1209 case PTRACE_GETREGS: /* Get all gp regs from the child. */
1210 return copy_regset_to_user(child,
1211 task_user_regset_view(current),
1213 0, sizeof(struct user_regs_struct),
1216 case PTRACE_SETREGS: /* Set all gp regs in the child. */
1217 return copy_regset_from_user(child,
1218 task_user_regset_view(current),
1220 0, sizeof(struct user_regs_struct),
1223 case PTRACE_GETFPREGS: /* Get the child FPU state. */
1224 return copy_regset_to_user(child,
1225 task_user_regset_view(current),
1227 0, sizeof(struct user_i387_struct),
1230 case PTRACE_SETFPREGS: /* Set the child FPU state. */
1231 return copy_regset_from_user(child,
1232 task_user_regset_view(current),
1234 0, sizeof(struct user_i387_struct),
1237 #ifdef CONFIG_X86_32
1238 case PTRACE_GETFPXREGS: /* Get the child extended FPU state. */
1239 return copy_regset_to_user(child, &user_x86_32_view,
1241 0, sizeof(struct user_fxsr_struct),
1244 case PTRACE_SETFPXREGS: /* Set the child extended FPU state. */
1245 return copy_regset_from_user(child, &user_x86_32_view,
1247 0, sizeof(struct user_fxsr_struct),
1251 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
1252 case PTRACE_GET_THREAD_AREA:
1255 ret = do_get_thread_area(child, addr,
1256 (struct user_desc __user *) data);
1259 case PTRACE_SET_THREAD_AREA:
1262 ret = do_set_thread_area(child, addr,
1263 (struct user_desc __user *) data, 0);
1267 #ifdef CONFIG_X86_64
1268 /* Normal 64-bit interface to access TLS data.
1269 Works just like arch_prctl, except that the arguments are reversed. */
1271 case PTRACE_ARCH_PRCTL:
1272 ret = do_arch_prctl(child, data, addr);
1277 * These bits need more cooking - not enabled yet:
1279 #ifdef CONFIG_X86_PTRACE_BTS
1280 case PTRACE_BTS_CONFIG:
1281 ret = ptrace_bts_config
1282 (child, data, (struct ptrace_bts_config __user *)addr);
1285 case PTRACE_BTS_STATUS:
1286 ret = ptrace_bts_status
1287 (child, data, (struct ptrace_bts_config __user *)addr);
1290 case PTRACE_BTS_SIZE:
1291 ret = ptrace_bts_size(child);
1294 case PTRACE_BTS_GET:
1295 ret = ptrace_bts_read_record
1296 (child, data, (struct bts_struct __user *) addr);
1299 case PTRACE_BTS_CLEAR:
1300 ret = ptrace_bts_clear(child);
1303 case PTRACE_BTS_DRAIN:
1304 ret = ptrace_bts_drain
1305 (child, data, (struct bts_struct __user *) addr);
1307 #endif /* CONFIG_X86_PTRACE_BTS */
1310 ret = ptrace_request(child, request, addr, data);
1317 #ifdef CONFIG_IA32_EMULATION
1319 #include <linux/compat.h>
1320 #include <linux/syscalls.h>
1321 #include <asm/ia32.h>
1322 #include <asm/user32.h>
1325 case offsetof(struct user32, regs.l): \
1326 regs->q = value; break
1329 case offsetof(struct user32, regs.rs): \
1330 return set_segment_reg(child, \
1331 offsetof(struct user_regs_struct, rs), \
1335 static int putreg32(struct task_struct *child, unsigned regno, u32 value)
1337 struct pt_regs *regs = task_pt_regs(child);
1358 case offsetof(struct user32, regs.orig_eax):
1360 * A 32-bit debugger setting orig_eax means to restore
1361 * the state of the task restarting a 32-bit syscall.
1362 * Make sure we interpret the -ERESTART* codes correctly
1363 * in case the task is not actually still sitting at the
1364 * exit from a 32-bit syscall with TS_COMPAT still set.
1366 regs->orig_ax = value;
1367 if (syscall_get_nr(child, regs) >= 0)
1368 task_thread_info(child)->status |= TS_COMPAT;
1371 case offsetof(struct user32, regs.eflags):
1372 return set_flags(child, value);
1374 case offsetof(struct user32, u_debugreg[0]) ...
1375 offsetof(struct user32, u_debugreg[7]):
1376 regno -= offsetof(struct user32, u_debugreg[0]);
1377 return ptrace_set_debugreg(child, regno / 4, value);
1380 if (regno > sizeof(struct user32) || (regno & 3))
1384 * Other dummy fields in the virtual user structure are ignored.
1396 case offsetof(struct user32, regs.l): \
1397 *val = regs->q; break
1400 case offsetof(struct user32, regs.rs): \
1401 *val = get_segment_reg(child, \
1402 offsetof(struct user_regs_struct, rs)); \
1405 static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
1407 struct pt_regs *regs = task_pt_regs(child);
1425 R32(orig_eax, orig_ax);
1429 case offsetof(struct user32, regs.eflags):
1430 *val = get_flags(child);
1433 case offsetof(struct user32, u_debugreg[0]) ...
1434 offsetof(struct user32, u_debugreg[7]):
1435 regno -= offsetof(struct user32, u_debugreg[0]);
1436 *val = ptrace_get_debugreg(child, regno / 4);
1440 if (regno > sizeof(struct user32) || (regno & 3))
1444 * Other dummy fields in the virtual user structure are ignored.
1456 static int genregs32_get(struct task_struct *target,
1457 const struct user_regset *regset,
1458 unsigned int pos, unsigned int count,
1459 void *kbuf, void __user *ubuf)
1462 compat_ulong_t *k = kbuf;
1464 getreg32(target, pos, k++);
1465 count -= sizeof(*k);
1469 compat_ulong_t __user *u = ubuf;
1471 compat_ulong_t word;
1472 getreg32(target, pos, &word);
1473 if (__put_user(word, u++))
1475 count -= sizeof(*u);
1483 static int genregs32_set(struct task_struct *target,
1484 const struct user_regset *regset,
1485 unsigned int pos, unsigned int count,
1486 const void *kbuf, const void __user *ubuf)
1490 const compat_ulong_t *k = kbuf;
1491 while (count > 0 && !ret) {
1492 ret = putreg32(target, pos, *k++);
1493 count -= sizeof(*k);
1497 const compat_ulong_t __user *u = ubuf;
1498 while (count > 0 && !ret) {
1499 compat_ulong_t word;
1500 ret = __get_user(word, u++);
1503 ret = putreg32(target, pos, word);
1504 count -= sizeof(*u);
1511 long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
1512 compat_ulong_t caddr, compat_ulong_t cdata)
1514 unsigned long addr = caddr;
1515 unsigned long data = cdata;
1516 void __user *datap = compat_ptr(data);
1521 case PTRACE_PEEKUSR:
1522 ret = getreg32(child, addr, &val);
1524 ret = put_user(val, (__u32 __user *)datap);
1527 case PTRACE_POKEUSR:
1528 ret = putreg32(child, addr, data);
1531 case PTRACE_GETREGS: /* Get all gp regs from the child. */
1532 return copy_regset_to_user(child, &user_x86_32_view,
1534 0, sizeof(struct user_regs_struct32),
1537 case PTRACE_SETREGS: /* Set all gp regs in the child. */
1538 return copy_regset_from_user(child, &user_x86_32_view,
1540 sizeof(struct user_regs_struct32),
1543 case PTRACE_GETFPREGS: /* Get the child FPU state. */
1544 return copy_regset_to_user(child, &user_x86_32_view,
1546 sizeof(struct user_i387_ia32_struct),
1549 case PTRACE_SETFPREGS: /* Set the child FPU state. */
1550 return copy_regset_from_user(
1551 child, &user_x86_32_view, REGSET_FP,
1552 0, sizeof(struct user_i387_ia32_struct), datap);
1554 case PTRACE_GETFPXREGS: /* Get the child extended FPU state. */
1555 return copy_regset_to_user(child, &user_x86_32_view,
1557 sizeof(struct user32_fxsr_struct),
1560 case PTRACE_SETFPXREGS: /* Set the child extended FPU state. */
1561 return copy_regset_from_user(child, &user_x86_32_view,
1563 sizeof(struct user32_fxsr_struct),
1566 case PTRACE_GET_THREAD_AREA:
1567 case PTRACE_SET_THREAD_AREA:
1568 #ifdef CONFIG_X86_PTRACE_BTS
1569 case PTRACE_BTS_CONFIG:
1570 case PTRACE_BTS_STATUS:
1571 case PTRACE_BTS_SIZE:
1572 case PTRACE_BTS_GET:
1573 case PTRACE_BTS_CLEAR:
1574 case PTRACE_BTS_DRAIN:
1575 #endif /* CONFIG_X86_PTRACE_BTS */
1576 return arch_ptrace(child, request, addr, data);
1579 return compat_ptrace_request(child, request, addr, data);
1585 #endif /* CONFIG_IA32_EMULATION */
1587 #ifdef CONFIG_X86_64
1589 static const struct user_regset x86_64_regsets[] = {
1590 [REGSET_GENERAL] = {
1591 .core_note_type = NT_PRSTATUS,
1592 .n = sizeof(struct user_regs_struct) / sizeof(long),
1593 .size = sizeof(long), .align = sizeof(long),
1594 .get = genregs_get, .set = genregs_set
1597 .core_note_type = NT_PRFPREG,
1598 .n = sizeof(struct user_i387_struct) / sizeof(long),
1599 .size = sizeof(long), .align = sizeof(long),
1600 .active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
1602 [REGSET_IOPERM64] = {
1603 .core_note_type = NT_386_IOPERM,
1604 .n = IO_BITMAP_LONGS,
1605 .size = sizeof(long), .align = sizeof(long),
1606 .active = ioperm_active, .get = ioperm_get
1610 static const struct user_regset_view user_x86_64_view = {
1611 .name = "x86_64", .e_machine = EM_X86_64,
1612 .regsets = x86_64_regsets, .n = ARRAY_SIZE(x86_64_regsets)
1615 #else /* CONFIG_X86_32 */
1617 #define user_regs_struct32 user_regs_struct
1618 #define genregs32_get genregs_get
1619 #define genregs32_set genregs_set
1621 #define user_i387_ia32_struct user_i387_struct
1622 #define user32_fxsr_struct user_fxsr_struct
1624 #endif /* CONFIG_X86_64 */
1626 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
1627 static const struct user_regset x86_32_regsets[] = {
1628 [REGSET_GENERAL] = {
1629 .core_note_type = NT_PRSTATUS,
1630 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
1631 .size = sizeof(u32), .align = sizeof(u32),
1632 .get = genregs32_get, .set = genregs32_set
1635 .core_note_type = NT_PRFPREG,
1636 .n = sizeof(struct user_i387_ia32_struct) / sizeof(u32),
1637 .size = sizeof(u32), .align = sizeof(u32),
1638 .active = fpregs_active, .get = fpregs_get, .set = fpregs_set
1641 .core_note_type = NT_PRXFPREG,
1642 .n = sizeof(struct user32_fxsr_struct) / sizeof(u32),
1643 .size = sizeof(u32), .align = sizeof(u32),
1644 .active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
1647 .core_note_type = NT_386_TLS,
1648 .n = GDT_ENTRY_TLS_ENTRIES, .bias = GDT_ENTRY_TLS_MIN,
1649 .size = sizeof(struct user_desc),
1650 .align = sizeof(struct user_desc),
1651 .active = regset_tls_active,
1652 .get = regset_tls_get, .set = regset_tls_set
1654 [REGSET_IOPERM32] = {
1655 .core_note_type = NT_386_IOPERM,
1656 .n = IO_BITMAP_BYTES / sizeof(u32),
1657 .size = sizeof(u32), .align = sizeof(u32),
1658 .active = ioperm_active, .get = ioperm_get
1662 static const struct user_regset_view user_x86_32_view = {
1663 .name = "i386", .e_machine = EM_386,
1664 .regsets = x86_32_regsets, .n = ARRAY_SIZE(x86_32_regsets)
1668 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
1670 #ifdef CONFIG_IA32_EMULATION
1671 if (test_tsk_thread_flag(task, TIF_IA32))
1673 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
1674 return &user_x86_32_view;
1676 #ifdef CONFIG_X86_64
1677 return &user_x86_64_view;
1681 void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
1682 int error_code, int si_code)
1684 struct siginfo info;
1686 tsk->thread.trap_no = 1;
1687 tsk->thread.error_code = error_code;
1689 memset(&info, 0, sizeof(info));
1690 info.si_signo = SIGTRAP;
1691 info.si_code = si_code;
1694 info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
1696 /* Send us the fake SIGTRAP */
1697 force_sig_info(SIGTRAP, &info, tsk);
1701 #ifdef CONFIG_X86_32
1703 #elif defined CONFIG_IA32_EMULATION
1704 # define IS_IA32 is_compat_task()
1710 * We must return the syscall number to actually look up in the table.
1711 * This can be -1L to skip running any syscall at all.
1713 asmregparm long syscall_trace_enter(struct pt_regs *regs)
1718 * If we stepped into a sysenter/syscall insn, it trapped in
1719 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
1720 * If user-mode had set TF itself, then it's still clear from
1721 * do_debug() and we need to set it again to restore the user
1722 * state. If we entered on the slow path, TF was already set.
1724 if (test_thread_flag(TIF_SINGLESTEP))
1725 regs->flags |= X86_EFLAGS_TF;
1727 /* do the secure computing check first */
1728 secure_computing(regs->orig_ax);
1730 if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
1733 if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) &&
1734 tracehook_report_syscall_entry(regs))
1737 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
1738 trace_sys_enter(regs, regs->orig_ax);
1740 if (unlikely(current->audit_context)) {
1742 audit_syscall_entry(AUDIT_ARCH_I386,
1745 regs->dx, regs->si);
1746 #ifdef CONFIG_X86_64
1748 audit_syscall_entry(AUDIT_ARCH_X86_64,
1751 regs->dx, regs->r10);
1755 return ret ?: regs->orig_ax;
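/*
 * Illustrative sketch of the user-space side (not part of this file): a
 * tracer using PTRACE_SYSEMU. That request sets TIF_SYSCALL_EMU in the
 * child, so syscall_trace_enter() can return -1L and the syscall itself is
 * skipped while the tracer still gets the syscall-entry stop.
 *
 *	ptrace(PTRACE_SYSEMU, pid, 0, 0);
 *	waitpid(pid, &status, 0);
 *	(inspect or adjust registers, emulate the call, then resume)
 */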
1758 asmregparm void syscall_trace_leave(struct pt_regs *regs)
1760 if (unlikely(current->audit_context))
1761 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
1763 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
1764 trace_sys_exit(regs, regs->ax);
1766 if (test_thread_flag(TIF_SYSCALL_TRACE))
1767 tracehook_report_syscall_exit(regs, 0);
1770 * If TIF_SYSCALL_EMU is set, we only get here because of
1771 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
1772 * We already reported this syscall instruction in
1773 * syscall_trace_enter(), so don't do any more now.
1775 if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
1779 * If we are single-stepping, synthesize a trap to follow the
1780 * system call instruction.
1782 if (test_thread_flag(TIF_SINGLESTEP) &&
1783 tracehook_consider_fatal_signal(current, SIGTRAP))
1784 send_sigtrap(current, regs, 0, TRAP_BRKPT);