ptrace: Clean transitions between TASK_STOPPED and TRACED
/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/regset.h>


/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
        BUG_ON(!list_empty(&child->ptrace_entry));
        list_add(&child->ptrace_entry, &new_parent->ptraced);
        child->parent = new_parent;
}

/*
 * Turn a tracing stop into a normal stop now, since with no tracer there
 * would be no way to wake it up with SIGCONT or SIGKILL.  If there was a
 * signal sent that would resume the child, but didn't because it was in
 * TASK_TRACED, resume it now.
 * Requires that irqs be disabled.
 */
static void ptrace_untrace(struct task_struct *child)
{
        spin_lock(&child->sighand->siglock);
        if (task_is_traced(child)) {
                /*
                 * If group stop is completed or in progress, it should
                 * participate in the group stop.  Set GROUP_STOP_PENDING
                 * before kicking it.
                 *
                 * This involves TRACED -> RUNNING -> STOPPED transition
                 * which is similar to but in the opposite direction of
                 * what happens while attaching to a stopped task.
                 * However, in this direction, the intermediate RUNNING
                 * state is not hidden even from the current ptracer and if
                 * it immediately re-attaches and performs a WNOHANG
                 * wait(2), it may fail.
                 */
                if (child->signal->flags & SIGNAL_STOP_STOPPED ||
                    child->signal->group_stop_count)
                        child->group_stop |= GROUP_STOP_PENDING;
                signal_wake_up(child, 1);
        }
        spin_unlock(&child->sighand->siglock);
}
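
/*
 * Illustrative userspace sketch (not part of the original file): the
 * transient RUNNING state described above means a detach followed
 * immediately by a re-attach and a WNOHANG wait(2) can miss the stop.
 * A hedged example under assumed semantics; "pid" is a tracee that was
 * in group stop while traced.
 */
#if 0
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>

static int reattach_sees_stop(pid_t pid)
{
        int status;

        ptrace(PTRACE_DETACH, pid, NULL, NULL);   /* tracee: TRACED -> RUNNING -> STOPPED */
        ptrace(PTRACE_ATTACH, pid, NULL, NULL);
        /*
         * WNOHANG may return 0 here if the tracee is still in the
         * intermediate RUNNING state; a blocking waitpid() would not race.
         */
        return waitpid(pid, &status, WNOHANG) == pid;
}
#endif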

/*
 * unptrace a task: move it back to its original parent and
 * remove it from the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_unlink(struct task_struct *child)
{
        BUG_ON(!child->ptrace);

        child->ptrace = 0;
        child->parent = child->real_parent;
        list_del_init(&child->ptrace_entry);

        if (task_is_traced(child))
                ptrace_untrace(child);
}

/*
 * Check that we have indeed attached to the thing..
 */
int ptrace_check_attach(struct task_struct *child, int kill)
{
        int ret = -ESRCH;

        /*
         * We take the read lock around doing both checks to close a
         * possible race where someone else was tracing our child and
         * detached between these two checks.  After this locked check,
         * we are sure that this is our traced child and that can only
         * be changed by us so it's not changing right after this.
         */
        read_lock(&tasklist_lock);
        if ((child->ptrace & PT_PTRACED) && child->parent == current) {
                ret = 0;
                /*
                 * child->sighand can't be NULL, release_task()
                 * does ptrace_unlink() before __exit_signal().
                 */
                spin_lock_irq(&child->sighand->siglock);
                if (task_is_stopped(child))
                        child->state = TASK_TRACED;
                else if (!task_is_traced(child) && !kill)
                        ret = -ESRCH;
                spin_unlock_irq(&child->sighand->siglock);
        }
        read_unlock(&tasklist_lock);

        if (!ret && !kill)
                ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;

        /* All systems go.. */
        return ret;
}
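
/*
 * Illustrative userspace sketch (not part of the original file): most
 * requests are routed through ptrace_check_attach() above, so they fail
 * with ESRCH unless the tracee is already in a ptrace stop.  Hedged
 * example; assumes "pid" is an attached tracee that is currently running.
 */
#if 0
#include <sys/types.h>
#include <sys/ptrace.h>
#include <errno.h>

static int peek_needs_stopped_tracee(pid_t pid, void *addr)
{
        errno = 0;
        ptrace(PTRACE_PEEKDATA, pid, addr, NULL);
        /* A running (not stopped) tracee yields ESRCH, not data. */
        return errno == ESRCH;
}
#endif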

int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
        const struct cred *cred = current_cred(), *tcred;

        /* May we inspect the given task?
         * This check is used both for attaching with ptrace
         * and for allowing access to sensitive information in /proc.
         *
         * ptrace_attach denies several cases that /proc allows
         * because setting up the necessary parent/child relationship
         * or halting the specified task is impossible.
         */
        int dumpable = 0;
        /* Don't let security modules deny introspection */
        if (task == current)
                return 0;
        rcu_read_lock();
        tcred = __task_cred(task);
        if ((cred->uid != tcred->euid ||
             cred->uid != tcred->suid ||
             cred->uid != tcred->uid  ||
             cred->gid != tcred->egid ||
             cred->gid != tcred->sgid ||
             cred->gid != tcred->gid) &&
            !capable(CAP_SYS_PTRACE)) {
                rcu_read_unlock();
                return -EPERM;
        }
        rcu_read_unlock();
        smp_rmb();
        if (task->mm)
                dumpable = get_dumpable(task->mm);
        if (!dumpable && !capable(CAP_SYS_PTRACE))
                return -EPERM;

        return security_ptrace_access_check(task, mode);
}

bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
        int err;
        task_lock(task);
        err = __ptrace_may_access(task, mode);
        task_unlock(task);
        return !err;
}
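
/*
 * Illustrative userspace sketch (not part of the original file): the
 * credential checks in __ptrace_may_access() mean an unprivileged
 * process can only attach to tasks whose uids and gids all match its
 * own.  Hedged example; "victim_pid" is assumed to belong to a
 * different user and the caller lacks CAP_SYS_PTRACE.
 */
#if 0
#include <sys/types.h>
#include <sys/ptrace.h>
#include <errno.h>

static int attach_denied_across_uids(pid_t victim_pid)
{
        /* Without CAP_SYS_PTRACE this fails with EPERM. */
        return ptrace(PTRACE_ATTACH, victim_pid, NULL, NULL) == -1 &&
               errno == EPERM;
}
#endif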

static int ptrace_attach(struct task_struct *task)
{
        bool wait_trap = false;
        int retval;

        audit_ptrace(task);

        retval = -EPERM;
        if (unlikely(task->flags & PF_KTHREAD))
                goto out;
        if (same_thread_group(task, current))
                goto out;

        /*
         * Protect exec's credential calculations against our interference;
         * SUID, SGID and LSM creds get determined differently under
         * ptrace.
         */
        retval = -ERESTARTNOINTR;
        if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
                goto out;

        task_lock(task);
        retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
        task_unlock(task);
        if (retval)
                goto unlock_creds;

        write_lock_irq(&tasklist_lock);
        retval = -EPERM;
        if (unlikely(task->exit_state))
                goto unlock_tasklist;
        if (task->ptrace)
                goto unlock_tasklist;

        task->ptrace = PT_PTRACED;
        if (capable(CAP_SYS_PTRACE))
                task->ptrace |= PT_PTRACE_CAP;

        __ptrace_link(task, current);
        send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);

        spin_lock(&task->sighand->siglock);

        /*
         * If the task is already STOPPED, set GROUP_STOP_PENDING and
         * TRAPPING, and kick it so that it transits to TRACED.  TRAPPING
         * will be cleared if the child completes the transition or any
         * event which clears the group stop states happens.  We'll wait
         * for the transition to complete before returning from this
         * function.
         *
         * This hides STOPPED -> RUNNING -> TRACED transition from the
         * attaching thread but a different thread in the same group can
         * still observe the transient RUNNING state.  IOW, if another
         * thread's WNOHANG wait(2) on the stopped tracee races against
         * ATTACH, the wait(2) may fail due to the transient RUNNING.
         *
         * The following task_is_stopped() test is safe as both transitions
         * in and out of STOPPED are protected by siglock.
         */
        if (task_is_stopped(task)) {
                task->group_stop |= GROUP_STOP_PENDING | GROUP_STOP_TRAPPING;
                signal_wake_up(task, 1);
                wait_trap = true;
        }

        spin_unlock(&task->sighand->siglock);

        retval = 0;
unlock_tasklist:
        write_unlock_irq(&tasklist_lock);
unlock_creds:
        mutex_unlock(&task->signal->cred_guard_mutex);
out:
        if (wait_trap)
                wait_event(current->signal->wait_chldexit,
                           !(task->group_stop & GROUP_STOP_TRAPPING));
        return retval;
}
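
/*
 * Illustrative userspace sketch (not part of the original file):
 * attaching to an already group-stopped process.  Because
 * ptrace_attach() waits for GROUP_STOP_TRAPPING to clear, the attaching
 * thread never sees the transient RUNNING state and its waitpid()
 * succeeds.  Hedged example; "pid" is assumed to be a child of the
 * caller so WUNTRACED applies.
 */
#if 0
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <signal.h>

static int attach_to_stopped(pid_t pid)
{
        int status;

        kill(pid, SIGSTOP);                       /* put the target in group stop */
        waitpid(pid, &status, WUNTRACED);         /* observe the group stop */
        if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) < 0)
                return -1;
        /* Reports the tracee in TASK_TRACED; the attach hid the transition. */
        return waitpid(pid, &status, 0) == pid && WIFSTOPPED(status);
}
#endif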

/**
 * ptrace_traceme  --  helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
static int ptrace_traceme(void)
{
        int ret = -EPERM;

        write_lock_irq(&tasklist_lock);
        /* Are we already being traced? */
        if (!current->ptrace) {
                ret = security_ptrace_traceme(current->parent);
                /*
                 * Check PF_EXITING to ensure ->real_parent has not passed
                 * exit_ptrace(). Otherwise we don't report the error but
                 * pretend ->real_parent untraces us right after return.
                 */
                if (!ret && !(current->real_parent->flags & PF_EXITING)) {
                        current->ptrace = PT_PTRACED;
                        __ptrace_link(current, current->real_parent);
                }
        }
        write_unlock_irq(&tasklist_lock);

        return ret;
}
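
/*
 * Illustrative userspace sketch (not part of the original file): the
 * canonical PTRACE_TRACEME pattern.  The child volunteers to be traced
 * by its parent, then exec stops it with SIGTRAP so the parent can take
 * over.  Hedged example; error handling is abbreviated.
 */
#if 0
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>

static pid_t spawn_traced(const char *path)
{
        int status;
        pid_t pid = fork();

        if (pid == 0) {
                ptrace(PTRACE_TRACEME, 0, NULL, NULL);
                execl(path, path, (char *)NULL);
                _exit(127);                       /* exec failed */
        }
        waitpid(pid, &status, 0);                 /* child stops at exec */
        return pid;                               /* tracee is now stopped */
}
#endif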

/*
 * Called with irqs disabled, returns true if children should reap themselves.
 */
static int ignoring_children(struct sighand_struct *sigh)
{
        int ret;
        spin_lock(&sigh->siglock);
        ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
              (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
        spin_unlock(&sigh->siglock);
        return ret;
}
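
/*
 * Illustrative userspace sketch (not part of the original file): the two
 * conditions ignoring_children() tests correspond to a parent that asked
 * for automatic reaping, e.g. by ignoring SIGCHLD.  Hedged example of
 * the userspace side of that contract.
 */
#if 0
#include <signal.h>

static void reap_children_automatically(void)
{
        struct sigaction sa = { 0 };

        sa.sa_handler = SIG_IGN;                  /* or set SA_NOCLDWAIT with a handler */
        sigaction(SIGCHLD, &sa, NULL);
        /* From now on, dead children are reaped without wait(); wait()
         * blocks until all children are gone and then fails with ECHILD. */
}
#endif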

/*
 * Called with tasklist_lock held for writing.
 * Unlink a traced task, and clean it up if it was a traced zombie.
 * Return true if it needs to be reaped with release_task().
 * (We can't call release_task() here because we already hold tasklist_lock.)
 *
 * If it's a zombie, our attachedness prevented normal parent notification
 * or self-reaping.  Do notification now if it would have happened earlier.
 * If it should reap itself, return true.
 *
 * If it's our own child, there is no notification to do. But if our normal
 * children self-reap, then this child was prevented by ptrace and we must
 * reap it now, in that case we must also wake up sub-threads sleeping in
 * do_wait().
 */
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
        __ptrace_unlink(p);

        if (p->exit_state == EXIT_ZOMBIE) {
                if (!task_detached(p) && thread_group_empty(p)) {
                        if (!same_thread_group(p->real_parent, tracer))
                                do_notify_parent(p, p->exit_signal);
                        else if (ignoring_children(tracer->sighand)) {
                                __wake_up_parent(p, tracer);
                                p->exit_signal = -1;
                        }
                }
                if (task_detached(p)) {
                        /* Mark it as in the process of being reaped. */
                        p->exit_state = EXIT_DEAD;
                        return true;
                }
        }

        return false;
}

static int ptrace_detach(struct task_struct *child, unsigned int data)
{
        bool dead = false;

        if (!valid_signal(data))
                return -EIO;

        /* Architecture-specific hardware disable .. */
        ptrace_disable(child);
        clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

        write_lock_irq(&tasklist_lock);
        /*
         * This child can be already killed. Make sure de_thread() or
         * our sub-thread doing do_wait() didn't do release_task() yet.
         */
        if (child->ptrace) {
                child->exit_code = data;
                dead = __ptrace_detach(current, child);
        }
        write_unlock_irq(&tasklist_lock);

        if (unlikely(dead))
                release_task(child);

        return 0;
}
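
/*
 * Illustrative userspace sketch (not part of the original file):
 * PTRACE_DETACH takes a signal number as data; valid_signal() above
 * rejects anything else with EIO.  The signal (0 for none) is delivered
 * to the tracee as it resumes.  Hedged example; "pid" is a tracee in
 * ptrace stop.
 */
#if 0
#include <sys/types.h>
#include <sys/ptrace.h>

static int detach_with_signal(pid_t pid, int sig)
{
        /* e.g. detach_with_signal(pid, SIGCONT) or detach_with_signal(pid, 0) */
        return ptrace(PTRACE_DETACH, pid, NULL, (void *)(long)sig);
}
#endif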

/*
 * Detach all tasks we were using ptrace on. Called with tasklist held
 * for writing, and returns with it held too. But note it can release
 * and reacquire the lock.
 */
void exit_ptrace(struct task_struct *tracer)
        __releases(&tasklist_lock)
        __acquires(&tasklist_lock)
{
        struct task_struct *p, *n;
        LIST_HEAD(ptrace_dead);

        if (likely(list_empty(&tracer->ptraced)))
                return;

        list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
                if (__ptrace_detach(tracer, p))
                        list_add(&p->ptrace_entry, &ptrace_dead);
        }

        write_unlock_irq(&tasklist_lock);
        BUG_ON(!list_empty(&tracer->ptraced));

        list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
                list_del_init(&p->ptrace_entry);
                release_task(p);
        }

        write_lock_irq(&tasklist_lock);
}

int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
        int copied = 0;

        while (len > 0) {
                char buf[128];
                int this_len, retval;

                this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
                retval = access_process_vm(tsk, src, buf, this_len, 0);
                if (!retval) {
                        if (copied)
                                break;
                        return -EIO;
                }
                if (copy_to_user(dst, buf, retval))
                        return -EFAULT;
                copied += retval;
                src += retval;
                dst += retval;
                len -= retval;
        }
        return copied;
}

int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
        int copied = 0;

        while (len > 0) {
                char buf[128];
                int this_len, retval;

                this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
                if (copy_from_user(buf, src, this_len))
                        return -EFAULT;
                retval = access_process_vm(tsk, dst, buf, this_len, 1);
                if (!retval) {
                        if (copied)
                                break;
                        return -EIO;
                }
                copied += retval;
                src += retval;
                dst += retval;
                len -= retval;
        }
        return copied;
}
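
/*
 * Illustrative userspace sketch (not part of the original file):
 * ptrace_readdata() above serves bulk transfers on the architectures
 * that use it; the portable userspace equivalent reads one word at a
 * time and must use errno to distinguish a -1 data word from an error.
 * Hedged example.
 */
#if 0
#include <sys/types.h>
#include <sys/ptrace.h>
#include <errno.h>
#include <string.h>

static int peek_buf(pid_t pid, unsigned long src, void *dst, size_t len)
{
        size_t off;

        for (off = 0; off < len; off += sizeof(long)) {
                long word;

                errno = 0;
                word = ptrace(PTRACE_PEEKDATA, pid, src + off, NULL);
                if (word == -1 && errno)
                        return -1;
                /* copy a full word, or the short tail on the last round */
                memcpy((char *)dst + off, &word,
                       len - off < sizeof(long) ? len - off : sizeof(long));
        }
        return 0;
}
#endif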

static int ptrace_setoptions(struct task_struct *child, unsigned long data)
{
        child->ptrace &= ~PT_TRACE_MASK;

        if (data & PTRACE_O_TRACESYSGOOD)
                child->ptrace |= PT_TRACESYSGOOD;

        if (data & PTRACE_O_TRACEFORK)
                child->ptrace |= PT_TRACE_FORK;

        if (data & PTRACE_O_TRACEVFORK)
                child->ptrace |= PT_TRACE_VFORK;

        if (data & PTRACE_O_TRACECLONE)
                child->ptrace |= PT_TRACE_CLONE;

        if (data & PTRACE_O_TRACEEXEC)
                child->ptrace |= PT_TRACE_EXEC;

        if (data & PTRACE_O_TRACEVFORKDONE)
                child->ptrace |= PT_TRACE_VFORK_DONE;

        if (data & PTRACE_O_TRACEEXIT)
                child->ptrace |= PT_TRACE_EXIT;

        return (data & ~PTRACE_O_MASK) ? -EINVAL : 0;
}
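
/*
 * Illustrative userspace sketch (not part of the original file): setting
 * options on a stopped tracee.  With PTRACE_O_TRACESYSGOOD, syscall
 * stops report the stop signal with bit 0x80 set so the tracer can tell
 * them apart from genuine SIGTRAPs.  Hedged example.
 */
#if 0
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <signal.h>

static int enable_sysgood(pid_t pid)
{
        return ptrace(PTRACE_SETOPTIONS, pid, NULL,
                      (void *)PTRACE_O_TRACESYSGOOD);
}

static int is_syscall_stop(int status)
{
        return WIFSTOPPED(status) && WSTOPSIG(status) == (SIGTRAP | 0x80);
}
#endif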

static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
{
        unsigned long flags;
        int error = -ESRCH;

        if (lock_task_sighand(child, &flags)) {
                error = -EINVAL;
                if (likely(child->last_siginfo != NULL)) {
                        *info = *child->last_siginfo;
                        error = 0;
                }
                unlock_task_sighand(child, &flags);
        }
        return error;
}

static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
{
        unsigned long flags;
        int error = -ESRCH;

        if (lock_task_sighand(child, &flags)) {
                error = -EINVAL;
                if (likely(child->last_siginfo != NULL)) {
                        *child->last_siginfo = *info;
                        error = 0;
                }
                unlock_task_sighand(child, &flags);
        }
        return error;
}
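
/*
 * Illustrative userspace sketch (not part of the original file): reading
 * and rewriting the siginfo of a signal-delivery stop.  last_siginfo is
 * only valid while the tracee sits in such a stop, which is why the code
 * above falls back to EINVAL.  Note the signal actually delivered on
 * resume is still the one passed as data to PTRACE_CONT.  Hedged example.
 */
#if 0
#include <sys/types.h>
#include <sys/ptrace.h>
#include <signal.h>

static int edit_stop_siginfo(pid_t pid)
{
        siginfo_t si;

        if (ptrace(PTRACE_GETSIGINFO, pid, NULL, &si) < 0)
                return -1;                        /* EINVAL if not in a signal stop */
        si.si_code = SI_USER;                     /* example edit: forge the origin */
        return ptrace(PTRACE_SETSIGINFO, pid, NULL, &si);
}
#endif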


#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request)          ((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request)          0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)         ((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)         0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)   ((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)   0
#endif

static int ptrace_resume(struct task_struct *child, long request,
                         unsigned long data)
{
        if (!valid_signal(data))
                return -EIO;

        if (request == PTRACE_SYSCALL)
                set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
        else
                clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

#ifdef TIF_SYSCALL_EMU
        if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
                set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
        else
                clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif

        if (is_singleblock(request)) {
                if (unlikely(!arch_has_block_step()))
                        return -EIO;
                user_enable_block_step(child);
        } else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
                if (unlikely(!arch_has_single_step()))
                        return -EIO;
                user_enable_single_step(child);
        } else {
                user_disable_single_step(child);
        }

        child->exit_code = data;
        wake_up_process(child);

        return 0;
}
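
/*
 * Illustrative userspace sketch (not part of the original file): a
 * minimal syscall-tracing loop built on PTRACE_SYSCALL, which sets
 * TIF_SYSCALL_TRACE as above so the tracee stops on syscall entry and
 * exit.  Hedged example; assumes "pid" is a stopped tracee and
 * PTRACE_O_TRACESYSGOOD was set earlier.
 */
#if 0
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <signal.h>

static void trace_syscalls(pid_t pid)
{
        int status;

        for (;;) {
                if (ptrace(PTRACE_SYSCALL, pid, NULL, 0) < 0)
                        break;
                if (waitpid(pid, &status, 0) < 0 || WIFEXITED(status))
                        break;
                if (WIFSTOPPED(status) &&
                    WSTOPSIG(status) == (SIGTRAP | 0x80)) {
                        /* syscall-entry or syscall-exit stop: examine regs here */
                }
        }
}
#endif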

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK

static const struct user_regset *
find_regset(const struct user_regset_view *view, unsigned int type)
{
        const struct user_regset *regset;
        int n;

        for (n = 0; n < view->n; ++n) {
                regset = view->regsets + n;
                if (regset->core_note_type == type)
                        return regset;
        }

        return NULL;
}

static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
                         struct iovec *kiov)
{
        const struct user_regset_view *view = task_user_regset_view(task);
        const struct user_regset *regset = find_regset(view, type);
        int regset_no;

        if (!regset || (kiov->iov_len % regset->size) != 0)
                return -EINVAL;

        regset_no = regset - view->regsets;
        kiov->iov_len = min(kiov->iov_len,
                            (__kernel_size_t) (regset->n * regset->size));

        if (req == PTRACE_GETREGSET)
                return copy_regset_to_user(task, view, regset_no, 0,
                                           kiov->iov_len, kiov->iov_base);
        else
                return copy_regset_from_user(task, view, regset_no, 0,
                                             kiov->iov_len, kiov->iov_base);
}

#endif
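
/*
 * Illustrative userspace sketch (not part of the original file):
 * PTRACE_GETREGSET selects the regset by ELF note type via addr and
 * passes a struct iovec as data; the kernel truncates iov_len and
 * writes back the size it actually copied.  Hedged example; NT_PRSTATUS
 * and user_regs_struct are the usual general-register pairing on Linux.
 */
#if 0
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <sys/user.h>
#include <elf.h>

static int get_gp_regs(pid_t pid, struct user_regs_struct *regs)
{
        struct iovec iov = {
                .iov_base = regs,
                .iov_len  = sizeof(*regs),
        };

        /* iov.iov_len is updated to the number of bytes filled in. */
        return ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
}
#endif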

int ptrace_request(struct task_struct *child, long request,
                   unsigned long addr, unsigned long data)
{
        int ret = -EIO;
        siginfo_t siginfo;
        void __user *datavp = (void __user *) data;
        unsigned long __user *datalp = datavp;

        switch (request) {
        case PTRACE_PEEKTEXT:
        case PTRACE_PEEKDATA:
                return generic_ptrace_peekdata(child, addr, data);
        case PTRACE_POKETEXT:
        case PTRACE_POKEDATA:
                return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
        case PTRACE_OLDSETOPTIONS:
#endif
        case PTRACE_SETOPTIONS:
                ret = ptrace_setoptions(child, data);
                break;
        case PTRACE_GETEVENTMSG:
                ret = put_user(child->ptrace_message, datalp);
                break;

        case PTRACE_GETSIGINFO:
                ret = ptrace_getsiginfo(child, &siginfo);
                if (!ret)
                        ret = copy_siginfo_to_user(datavp, &siginfo);
                break;

        case PTRACE_SETSIGINFO:
                if (copy_from_user(&siginfo, datavp, sizeof siginfo))
                        ret = -EFAULT;
                else
                        ret = ptrace_setsiginfo(child, &siginfo);
                break;

        case PTRACE_DETACH:      /* detach a process that was attached. */
                ret = ptrace_detach(child, data);
                break;

#ifdef CONFIG_BINFMT_ELF_FDPIC
        case PTRACE_GETFDPIC: {
                struct mm_struct *mm = get_task_mm(child);
                unsigned long tmp = 0;

                ret = -ESRCH;
                if (!mm)
                        break;

                switch (addr) {
                case PTRACE_GETFDPIC_EXEC:
                        tmp = mm->context.exec_fdpic_loadmap;
                        break;
                case PTRACE_GETFDPIC_INTERP:
                        tmp = mm->context.interp_fdpic_loadmap;
                        break;
                default:
                        break;
                }
                mmput(mm);

                ret = put_user(tmp, datalp);
                break;
        }
#endif

#ifdef PTRACE_SINGLESTEP
        case PTRACE_SINGLESTEP:
#endif
#ifdef PTRACE_SINGLEBLOCK
        case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
        case PTRACE_SYSEMU:
        case PTRACE_SYSEMU_SINGLESTEP:
#endif
        case PTRACE_SYSCALL:
        case PTRACE_CONT:
                return ptrace_resume(child, request, data);

        case PTRACE_KILL:
                if (child->exit_state)  /* already dead */
                        return 0;
                return ptrace_resume(child, request, SIGKILL);

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
        case PTRACE_GETREGSET:
        case PTRACE_SETREGSET:
        {
                struct iovec kiov;
                struct iovec __user *uiov = datavp;

                if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
                        return -EFAULT;

                if (__get_user(kiov.iov_base, &uiov->iov_base) ||
                    __get_user(kiov.iov_len, &uiov->iov_len))
                        return -EFAULT;

                ret = ptrace_regset(child, request, addr, &kiov);
                if (!ret)
                        ret = __put_user(kiov.iov_len, &uiov->iov_len);
                break;
        }
#endif
        default:
                break;
        }

        return ret;
}

static struct task_struct *ptrace_get_task_struct(pid_t pid)
{
        struct task_struct *child;

        rcu_read_lock();
        child = find_task_by_vpid(pid);
        if (child)
                get_task_struct(child);
        rcu_read_unlock();

        if (!child)
                return ERR_PTR(-ESRCH);
        return child;
}

#ifndef arch_ptrace_attach
#define arch_ptrace_attach(child)       do { } while (0)
#endif

SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
                unsigned long, data)
{
        struct task_struct *child;
        long ret;

        if (request == PTRACE_TRACEME) {
                ret = ptrace_traceme();
                if (!ret)
                        arch_ptrace_attach(current);
                goto out;
        }

        child = ptrace_get_task_struct(pid);
        if (IS_ERR(child)) {
                ret = PTR_ERR(child);
                goto out;
        }

        if (request == PTRACE_ATTACH) {
                ret = ptrace_attach(child);
                /*
                 * Some architectures need to do book-keeping after
                 * a ptrace attach.
                 */
                if (!ret)
                        arch_ptrace_attach(child);
                goto out_put_task_struct;
        }

        ret = ptrace_check_attach(child, request == PTRACE_KILL);
        if (ret < 0)
                goto out_put_task_struct;

        ret = arch_ptrace(child, request, addr, data);

 out_put_task_struct:
        put_task_struct(child);
 out:
        return ret;
}

int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
                            unsigned long data)
{
        unsigned long tmp;
        int copied;

        copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
        if (copied != sizeof(tmp))
                return -EIO;
        return put_user(tmp, (unsigned long __user *)data);
}

int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
                            unsigned long data)
{
        int copied;

        copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
        return (copied == sizeof(data)) ? 0 : -EIO;
}
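
/*
 * Illustrative userspace sketch (not part of the original file): peeks
 * and pokes move exactly one word, so patching fewer bytes than
 * sizeof(long) requires a read-modify-write.  The classic case is
 * planting an x86 int3 (0xCC) breakpoint.  Hedged, x86-specific example
 * with error handling abbreviated.
 */
#if 0
#include <sys/types.h>
#include <sys/ptrace.h>

static long plant_breakpoint(pid_t pid, unsigned long addr)
{
        long orig = ptrace(PTRACE_PEEKTEXT, pid, addr, NULL);
        long patched = (orig & ~0xffL) | 0xcc;    /* replace low byte with int3 */

        ptrace(PTRACE_POKETEXT, pid, addr, (void *)patched);
        return orig;                              /* keep, to restore later */
}
#endif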

#if defined CONFIG_COMPAT
#include <linux/compat.h>

int compat_ptrace_request(struct task_struct *child, compat_long_t request,
                          compat_ulong_t addr, compat_ulong_t data)
{
        compat_ulong_t __user *datap = compat_ptr(data);
        compat_ulong_t word;
        siginfo_t siginfo;
        int ret;

        switch (request) {
        case PTRACE_PEEKTEXT:
        case PTRACE_PEEKDATA:
                ret = access_process_vm(child, addr, &word, sizeof(word), 0);
                if (ret != sizeof(word))
                        ret = -EIO;
                else
                        ret = put_user(word, datap);
                break;

        case PTRACE_POKETEXT:
        case PTRACE_POKEDATA:
                ret = access_process_vm(child, addr, &data, sizeof(data), 1);
                ret = (ret != sizeof(data) ? -EIO : 0);
                break;

        case PTRACE_GETEVENTMSG:
                ret = put_user((compat_ulong_t) child->ptrace_message, datap);
                break;

        case PTRACE_GETSIGINFO:
                ret = ptrace_getsiginfo(child, &siginfo);
                if (!ret)
                        ret = copy_siginfo_to_user32(
                                (struct compat_siginfo __user *) datap,
                                &siginfo);
                break;

        case PTRACE_SETSIGINFO:
                memset(&siginfo, 0, sizeof siginfo);
                if (copy_siginfo_from_user32(
                            &siginfo, (struct compat_siginfo __user *) datap))
                        ret = -EFAULT;
                else
                        ret = ptrace_setsiginfo(child, &siginfo);
                break;
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
        case PTRACE_GETREGSET:
        case PTRACE_SETREGSET:
        {
                struct iovec kiov;
                struct compat_iovec __user *uiov =
                        (struct compat_iovec __user *) datap;
                compat_uptr_t ptr;
                compat_size_t len;

                if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
                        return -EFAULT;

                if (__get_user(ptr, &uiov->iov_base) ||
                    __get_user(len, &uiov->iov_len))
                        return -EFAULT;

                kiov.iov_base = compat_ptr(ptr);
                kiov.iov_len = len;

                ret = ptrace_regset(child, request, addr, &kiov);
                if (!ret)
                        ret = __put_user(kiov.iov_len, &uiov->iov_len);
                break;
        }
#endif

        default:
                ret = ptrace_request(child, request, addr, data);
        }

        return ret;
}

asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
                                  compat_long_t addr, compat_long_t data)
{
        struct task_struct *child;
        long ret;

        if (request == PTRACE_TRACEME) {
                ret = ptrace_traceme();
                goto out;
        }

        child = ptrace_get_task_struct(pid);
        if (IS_ERR(child)) {
                ret = PTR_ERR(child);
                goto out;
        }

        if (request == PTRACE_ATTACH) {
                ret = ptrace_attach(child);
                /*
                 * Some architectures need to do book-keeping after
                 * a ptrace attach.
                 */
                if (!ret)
                        arch_ptrace_attach(child);
                goto out_put_task_struct;
        }

        ret = ptrace_check_attach(child, request == PTRACE_KILL);
        if (!ret)
                ret = compat_arch_ptrace(child, request, addr, data);

 out_put_task_struct:
        put_task_struct(child);
 out:
        return ret;
}
#endif  /* CONFIG_COMPAT */