e6098434b5335090f91e831f97bd8de42d9d0fea
[linux-3.10.git] / kernel / ptrace.c
1 /*
2  * linux/kernel/ptrace.c
3  *
4  * (C) Copyright 1999 Linus Torvalds
5  *
6  * Common interfaces for "ptrace()" which we do not want
7  * to continually duplicate across every architecture.
8  */
9
10 #include <linux/capability.h>
11 #include <linux/module.h>
12 #include <linux/sched.h>
13 #include <linux/errno.h>
14 #include <linux/mm.h>
15 #include <linux/highmem.h>
16 #include <linux/pagemap.h>
17 #include <linux/ptrace.h>
18 #include <linux/security.h>
19 #include <linux/signal.h>
20 #include <linux/audit.h>
21 #include <linux/pid_namespace.h>
22 #include <linux/syscalls.h>
23 #include <linux/uaccess.h>
24 #include <linux/regset.h>
25
26
27 /*
28  * ptrace a task: make the debugger its new parent and
29  * move it to the ptrace list.
30  *
31  * Must be called with the tasklist lock write-held.
32  */
33 void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
34 {
35         BUG_ON(!list_empty(&child->ptrace_entry));
36         list_add(&child->ptrace_entry, &new_parent->ptraced);
37         child->parent = new_parent;
38 }
39
/**
 * __ptrace_unlink - unlink ptracee and restore its execution state
 * @child: ptracee to be unlinked
 *
 * Remove @child from the ptrace list, move it back to the original parent.
 *
 * CONTEXT:
 * write_lock_irq(tasklist_lock)
 */
void __ptrace_unlink(struct task_struct *child)
{
	BUG_ON(!child->ptrace);

	child->ptrace = 0;
	/* hand the child back to its real parent */
	child->parent = child->real_parent;
	list_del_init(&child->ptrace_entry);

	/* siglock guards the group-stop state examined below */
	spin_lock(&child->sighand->siglock);
	if (task_is_traced(child)) {
		/*
		 * If group stop is completed or in progress, it should
		 * participate in the group stop.  Set GROUP_STOP_PENDING
		 * before kicking it.
		 *
		 * This involves TRACED -> RUNNING -> STOPPED transition
		 * which is similar to but in the opposite direction of
		 * what happens while attaching to a stopped task.
		 * However, in this direction, the intermediate RUNNING
		 * state is not hidden even from the current ptracer and if
		 * it immediately re-attaches and performs a WNOHANG
		 * wait(2), it may fail.
		 */
		if (child->signal->flags & SIGNAL_STOP_STOPPED ||
		    child->signal->group_stop_count)
			child->group_stop |= GROUP_STOP_PENDING;
		signal_wake_up(child, 1);
	}
	spin_unlock(&child->sighand->siglock);
}
79
/*
 * Check that we have indeed attached to the thing..
 *
 * @kill: nonzero for PTRACE_KILL -- skip the requirement that the
 *        tracee be in TASK_TRACED and don't wait for it to settle.
 *
 * Returns 0 when @child is our tracee (and, unless @kill, has been
 * brought into TASK_TRACED and is off the runqueue); -ESRCH otherwise.
 */
int ptrace_check_attach(struct task_struct *child, int kill)
{
	int ret = -ESRCH;

	/*
	 * We take the read lock around doing both checks to close a
	 * possible race where someone else was tracing our child and
	 * detached between these two checks.  After this locked check,
	 * we are sure that this is our traced child and that can only
	 * be changed by us so it's not changing right after this.
	 */
	read_lock(&tasklist_lock);
	if ((child->ptrace & PT_PTRACED) && child->parent == current) {
		ret = 0;
		/*
		 * child->sighand can't be NULL, release_task()
		 * does ptrace_unlink() before __exit_signal().
		 */
		spin_lock_irq(&child->sighand->siglock);
		/* promote a group-stopped tracee to TRACED for our use */
		if (task_is_stopped(child))
			child->state = TASK_TRACED;
		else if (!task_is_traced(child) && !kill)
			ret = -ESRCH;
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);

	/* wait until the tracee has really been descheduled */
	if (!ret && !kill)
		ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;

	/* All systems go.. */
	return ret;
}
116
/*
 * __ptrace_may_access - may current access/trace @task?
 * @task: target task
 * @mode: PTRACE_MODE_* flag describing the kind of access requested
 *
 * Returns 0 when access is permitted, -EPERM otherwise (or whatever
 * the LSM hook returns).  Callers in this file wrap the call in
 * task_lock(task).
 */
int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	const struct cred *cred = current_cred(), *tcred;

	/* May we inspect the given task?
	 * This check is used both for attaching with ptrace
	 * and for allowing access to sensitive information in /proc.
	 *
	 * ptrace_attach denies several cases that /proc allows
	 * because setting up the necessary parent/child relationship
	 * or halting the specified task is impossible.
	 */
	int dumpable = 0;
	/* Don't let security modules deny introspection */
	if (task == current)
		return 0;
	rcu_read_lock();
	tcred = __task_cred(task);
	/*
	 * All of the target's real, effective and saved ids must match
	 * our real ids, unless we hold CAP_SYS_PTRACE.
	 */
	if ((cred->uid != tcred->euid ||
	     cred->uid != tcred->suid ||
	     cred->uid != tcred->uid  ||
	     cred->gid != tcred->egid ||
	     cred->gid != tcred->sgid ||
	     cred->gid != tcred->gid) &&
	    !capable(CAP_SYS_PTRACE)) {
		rcu_read_unlock();
		return -EPERM;
	}
	rcu_read_unlock();
	/*
	 * NOTE(review): smp_rmb() presumably pairs with a barrier on the
	 * path that updates ->mm dumpability -- confirm against the
	 * set_dumpable() side.
	 */
	smp_rmb();
	if (task->mm)
		dumpable = get_dumpable(task->mm);
	/* non-dumpable targets additionally require CAP_SYS_PTRACE */
	if (!dumpable && !capable(CAP_SYS_PTRACE))
		return -EPERM;

	return security_ptrace_access_check(task, mode);
}
154
155 bool ptrace_may_access(struct task_struct *task, unsigned int mode)
156 {
157         int err;
158         task_lock(task);
159         err = __ptrace_may_access(task, mode);
160         task_unlock(task);
161         return !err;
162 }
163
/*
 * Attach current to @task as its ptracer (PTRACE_ATTACH).  On success
 * the task has been sent a forced SIGSTOP and linked as our tracee;
 * if it was already group-stopped we also wait (outside all locks)
 * for it to complete the transition into TASK_TRACED.
 */
static int ptrace_attach(struct task_struct *task)
{
	bool wait_trap = false;
	int retval;

	audit_ptrace(task);

	retval = -EPERM;
	/* kernel threads and members of our own thread group can't be traced */
	if (unlikely(task->flags & PF_KTHREAD))
		goto out;
	if (same_thread_group(task, current))
		goto out;

	/*
	 * Protect exec's credential calculations against our
	 * interference; SUID, SGID and LSM creds get determined
	 * differently under ptrace.
	 */
	retval = -ERESTARTNOINTR;
	if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
		goto out;

	task_lock(task);
	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
	task_unlock(task);
	if (retval)
		goto unlock_creds;

	write_lock_irq(&tasklist_lock);
	retval = -EPERM;
	/* refuse exiting tasks and tasks that already have a tracer */
	if (unlikely(task->exit_state))
		goto unlock_tasklist;
	if (task->ptrace)
		goto unlock_tasklist;

	task->ptrace = PT_PTRACED;
	if (capable(CAP_SYS_PTRACE))
		task->ptrace |= PT_PTRACE_CAP;

	__ptrace_link(task, current);
	send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);

	spin_lock(&task->sighand->siglock);

	/*
	 * If the task is already STOPPED, set GROUP_STOP_PENDING and
	 * TRAPPING, and kick it so that it transits to TRACED.  TRAPPING
	 * will be cleared if the child completes the transition or any
	 * event which clears the group stop states happens.  We'll wait
	 * for the transition to complete before returning from this
	 * function.
	 *
	 * This hides STOPPED -> RUNNING -> TRACED transition from the
	 * attaching thread but a different thread in the same group can
	 * still observe the transient RUNNING state.  IOW, if another
	 * thread's WNOHANG wait(2) on the stopped tracee races against
	 * ATTACH, the wait(2) may fail due to the transient RUNNING.
	 *
	 * The following task_is_stopped() test is safe as both transitions
	 * in and out of STOPPED are protected by siglock.
	 */
	if (task_is_stopped(task)) {
		task->group_stop |= GROUP_STOP_PENDING | GROUP_STOP_TRAPPING;
		signal_wake_up(task, 1);
		wait_trap = true;
	}

	spin_unlock(&task->sighand->siglock);

	retval = 0;
unlock_tasklist:
	write_unlock_irq(&tasklist_lock);
unlock_creds:
	mutex_unlock(&task->signal->cred_guard_mutex);
out:
	/* sleep only after every lock has been dropped */
	if (wait_trap)
		wait_event(current->signal->wait_chldexit,
			   !(task->group_stop & GROUP_STOP_TRAPPING));
	return retval;
}
244
/**
 * ptrace_traceme  --  helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 *
 * Returns 0 on success, -EPERM if we are already being traced, or the
 * error from the security hook.
 */
static int ptrace_traceme(void)
{
	int ret = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Are we already being traced? */
	if (!current->ptrace) {
		ret = security_ptrace_traceme(current->parent);
		/*
		 * Check PF_EXITING to ensure ->real_parent has not passed
		 * exit_ptrace(). Otherwise we don't report the error but
		 * pretend ->real_parent untraces us right after return.
		 */
		if (!ret && !(current->real_parent->flags & PF_EXITING)) {
			current->ptrace = PT_PTRACED;
			__ptrace_link(current, current->real_parent);
		}
	}
	write_unlock_irq(&tasklist_lock);

	return ret;
}
273
274 /*
275  * Called with irqs disabled, returns true if childs should reap themselves.
276  */
277 static int ignoring_children(struct sighand_struct *sigh)
278 {
279         int ret;
280         spin_lock(&sigh->siglock);
281         ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
282               (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
283         spin_unlock(&sigh->siglock);
284         return ret;
285 }
286
/*
 * Called with tasklist_lock held for writing.
 * Unlink a traced task, and clean it up if it was a traced zombie.
 * Return true if it needs to be reaped with release_task().
 * (We can't call release_task() here because we already hold tasklist_lock.)
 *
 * If it's a zombie, our attachedness prevented normal parent notification
 * or self-reaping.  Do notification now if it would have happened earlier.
 * If it should reap itself, return true.
 *
 * If it's our own child, there is no notification to do. But if our normal
 * children self-reap, then this child was prevented by ptrace and we must
 * reap it now, in that case we must also wake up sub-threads sleeping in
 * do_wait().
 */
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
	__ptrace_unlink(p);

	if (p->exit_state == EXIT_ZOMBIE) {
		if (!task_detached(p) && thread_group_empty(p)) {
			if (!same_thread_group(p->real_parent, tracer))
				/* not our own child: deliver the exit
				 * notification we had been blocking */
				do_notify_parent(p, p->exit_signal);
			else if (ignoring_children(tracer->sighand)) {
				__wake_up_parent(p, tracer);
				/* mark it self-reaping */
				p->exit_signal = -1;
			}
		}
		if (task_detached(p)) {
			/* Mark it as in the process of being reaped. */
			p->exit_state = EXIT_DEAD;
			return true;
		}
	}

	return false;
}
324
/*
 * PTRACE_DETACH: disconnect from @child, optionally delivering signal
 * @data on resume.  Always returns 0 once past signal validation.
 */
static int ptrace_detach(struct task_struct *child, unsigned int data)
{
	bool dead = false;

	/* @data is the signal number the resumed child will see */
	if (!valid_signal(data))
		return -EIO;

	/* Architecture-specific hardware disable .. */
	ptrace_disable(child);
	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

	write_lock_irq(&tasklist_lock);
	/*
	 * This child can be already killed. Make sure de_thread() or
	 * our sub-thread doing do_wait() didn't do release_task() yet.
	 */
	if (child->ptrace) {
		child->exit_code = data;
		dead = __ptrace_detach(current, child);
	}
	write_unlock_irq(&tasklist_lock);

	/* reaping must happen outside tasklist_lock, see __ptrace_detach() */
	if (unlikely(dead))
		release_task(child);

	return 0;
}
352
/*
 * Detach all tasks we were using ptrace on. Called with tasklist held
 * for writing, and returns with it held too. But note it can release
 * and reacquire the lock.
 */
void exit_ptrace(struct task_struct *tracer)
	__releases(&tasklist_lock)
	__acquires(&tasklist_lock)
{
	struct task_struct *p, *n;
	LIST_HEAD(ptrace_dead);

	if (likely(list_empty(&tracer->ptraced)))
		return;

	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
		if (__ptrace_detach(tracer, p))
			/* p was just unlinked, so ptrace_entry is free
			 * to queue it for release below */
			list_add(&p->ptrace_entry, &ptrace_dead);
	}

	write_unlock_irq(&tasklist_lock);
	BUG_ON(!list_empty(&tracer->ptraced));

	/* release_task() can't be called under tasklist_lock */
	list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}

	write_lock_irq(&tasklist_lock);
}
383
384 int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
385 {
386         int copied = 0;
387
388         while (len > 0) {
389                 char buf[128];
390                 int this_len, retval;
391
392                 this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
393                 retval = access_process_vm(tsk, src, buf, this_len, 0);
394                 if (!retval) {
395                         if (copied)
396                                 break;
397                         return -EIO;
398                 }
399                 if (copy_to_user(dst, buf, retval))
400                         return -EFAULT;
401                 copied += retval;
402                 src += retval;
403                 dst += retval;
404                 len -= retval;
405         }
406         return copied;
407 }
408
409 int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
410 {
411         int copied = 0;
412
413         while (len > 0) {
414                 char buf[128];
415                 int this_len, retval;
416
417                 this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
418                 if (copy_from_user(buf, src, this_len))
419                         return -EFAULT;
420                 retval = access_process_vm(tsk, dst, buf, this_len, 1);
421                 if (!retval) {
422                         if (copied)
423                                 break;
424                         return -EIO;
425                 }
426                 copied += retval;
427                 src += retval;
428                 dst += retval;
429                 len -= retval;
430         }
431         return copied;
432 }
433
434 static int ptrace_setoptions(struct task_struct *child, unsigned long data)
435 {
436         child->ptrace &= ~PT_TRACE_MASK;
437
438         if (data & PTRACE_O_TRACESYSGOOD)
439                 child->ptrace |= PT_TRACESYSGOOD;
440
441         if (data & PTRACE_O_TRACEFORK)
442                 child->ptrace |= PT_TRACE_FORK;
443
444         if (data & PTRACE_O_TRACEVFORK)
445                 child->ptrace |= PT_TRACE_VFORK;
446
447         if (data & PTRACE_O_TRACECLONE)
448                 child->ptrace |= PT_TRACE_CLONE;
449
450         if (data & PTRACE_O_TRACEEXEC)
451                 child->ptrace |= PT_TRACE_EXEC;
452
453         if (data & PTRACE_O_TRACEVFORKDONE)
454                 child->ptrace |= PT_TRACE_VFORK_DONE;
455
456         if (data & PTRACE_O_TRACEEXIT)
457                 child->ptrace |= PT_TRACE_EXIT;
458
459         return (data & ~PTRACE_O_MASK) ? -EINVAL : 0;
460 }
461
/*
 * Copy out the siginfo of the signal the tracee last stopped for.
 * Returns -ESRCH when the child's sighand can no longer be locked,
 * -EINVAL when the child has no last_siginfo (it is not stopped in
 * signal delivery), and 0 on success.
 */
static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			*info = *child->last_siginfo;
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}
477
/*
 * Overwrite the siginfo of the signal the tracee last stopped for.
 * Mirror of ptrace_getsiginfo(): -ESRCH when the sighand can't be
 * locked, -EINVAL when there is no last_siginfo to replace, 0 on
 * success.
 */
static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			*child->last_siginfo = *info;
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}
493
494
/*
 * Resume-request classification helpers.  An architecture may or may
 * not define the corresponding PTRACE_* request; when it is absent the
 * predicate is the constant 0, letting the compiler drop the branch in
 * ptrace_resume().
 */
#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request)		0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)		((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)		0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)	0
#endif
512
/*
 * ptrace_resume - restart the stopped tracee
 * @child:   tracee to resume
 * @request: resume variant (CONT, SYSCALL, single-step/block-step, sysemu)
 * @data:    signal number to deliver on resume, or 0 for none
 *
 * Configures syscall-trace/emulation thread flags and hardware stepping
 * according to @request, stores @data as the child's exit_code and wakes
 * it.  Returns -EIO for an invalid signal number or a stepping mode the
 * architecture does not support.
 */
static int ptrace_resume(struct task_struct *child, long request,
			 unsigned long data)
{
	if (!valid_signal(data))
		return -EIO;

	if (request == PTRACE_SYSCALL)
		set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

#ifdef TIF_SYSCALL_EMU
	if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
		set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif

	if (is_singleblock(request)) {
		if (unlikely(!arch_has_block_step()))
			return -EIO;
		user_enable_block_step(child);
	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
		if (unlikely(!arch_has_single_step()))
			return -EIO;
		user_enable_single_step(child);
	} else {
		user_disable_single_step(child);
	}

	child->exit_code = data;
	wake_up_process(child);

	return 0;
}
548
549 #ifdef CONFIG_HAVE_ARCH_TRACEHOOK
550
551 static const struct user_regset *
552 find_regset(const struct user_regset_view *view, unsigned int type)
553 {
554         const struct user_regset *regset;
555         int n;
556
557         for (n = 0; n < view->n; ++n) {
558                 regset = view->regsets + n;
559                 if (regset->core_note_type == type)
560                         return regset;
561         }
562
563         return NULL;
564 }
565
/*
 * Common handler for PTRACE_GETREGSET / PTRACE_SETREGSET.
 * @kiov->iov_len is clamped to the regset's total size and updated in
 * place so the caller can report how many bytes were transferred.
 * Returns -EINVAL when @type names no regset in the task's view or the
 * length is not a multiple of the regset's unit size.
 */
static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
			 struct iovec *kiov)
{
	const struct user_regset_view *view = task_user_regset_view(task);
	const struct user_regset *regset = find_regset(view, type);
	int regset_no;

	if (!regset || (kiov->iov_len % regset->size) != 0)
		return -EINVAL;

	regset_no = regset - view->regsets;
	kiov->iov_len = min(kiov->iov_len,
			    (__kernel_size_t) (regset->n * regset->size));

	if (req == PTRACE_GETREGSET)
		return copy_regset_to_user(task, view, regset_no, 0,
					   kiov->iov_len, kiov->iov_base);
	else
		return copy_regset_from_user(task, view, regset_no, 0,
					     kiov->iov_len, kiov->iov_base);
}
587
588 #endif
589
/*
 * ptrace_request - handle the architecture-independent ptrace requests
 *
 * Returns the request-specific result, or -EIO for a request not
 * handled here.
 */
int ptrace_request(struct task_struct *child, long request,
		   unsigned long addr, unsigned long data)
{
	int ret = -EIO;
	siginfo_t siginfo;
	/* two views of @data: generic pointer and pointer-to-long */
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = datavp;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		return generic_ptrace_peekdata(child, addr, data);
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
	case PTRACE_OLDSETOPTIONS:
#endif
	case PTRACE_SETOPTIONS:
		ret = ptrace_setoptions(child, data);
		break;
	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, datalp);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user(datavp, &siginfo);
		break;

	case PTRACE_SETSIGINFO:
		if (copy_from_user(&siginfo, datavp, sizeof siginfo))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;

	case PTRACE_DETACH:	 /* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		break;

#ifdef CONFIG_BINFMT_ELF_FDPIC
	case PTRACE_GETFDPIC: {
		struct mm_struct *mm = get_task_mm(child);
		unsigned long tmp = 0;

		ret = -ESRCH;
		if (!mm)
			break;

		/* @addr selects which FDPIC loadmap to report */
		switch (addr) {
		case PTRACE_GETFDPIC_EXEC:
			tmp = mm->context.exec_fdpic_loadmap;
			break;
		case PTRACE_GETFDPIC_INTERP:
			tmp = mm->context.interp_fdpic_loadmap;
			break;
		default:
			break;
		}
		mmput(mm);

		ret = put_user(tmp, datalp);
		break;
	}
#endif

#ifdef PTRACE_SINGLESTEP
	case PTRACE_SINGLESTEP:
#endif
#ifdef PTRACE_SINGLEBLOCK
	case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
	case PTRACE_SYSEMU:
	case PTRACE_SYSEMU_SINGLESTEP:
#endif
	case PTRACE_SYSCALL:
	case PTRACE_CONT:
		return ptrace_resume(child, request, data);

	case PTRACE_KILL:
		if (child->exit_state)	/* already dead */
			return 0;
		return ptrace_resume(child, request, SIGKILL);

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET:
	{
		struct iovec kiov;
		struct iovec __user *uiov = datavp;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(kiov.iov_base, &uiov->iov_base) ||
		    __get_user(kiov.iov_len, &uiov->iov_len))
			return -EFAULT;

		ret = ptrace_regset(child, request, addr, &kiov);
		/* report the transferred length back to userspace */
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif
	default:
		break;
	}

	return ret;
}
704
705 static struct task_struct *ptrace_get_task_struct(pid_t pid)
706 {
707         struct task_struct *child;
708
709         rcu_read_lock();
710         child = find_task_by_vpid(pid);
711         if (child)
712                 get_task_struct(child);
713         rcu_read_unlock();
714
715         if (!child)
716                 return ERR_PTR(-ESRCH);
717         return child;
718 }
719
#ifndef arch_ptrace_attach
/*
 * Optional arch hook run after a successful attach/traceme; default to
 * a no-op for architectures that define nothing.
 */
#define arch_ptrace_attach(child)	do { } while (0)
#endif
723
/*
 * The ptrace(2) system call.  TRACEME and ATTACH are handled here;
 * everything else is validated with ptrace_check_attach() and handed
 * to the architecture's arch_ptrace().
 */
SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
		unsigned long, data)
{
	struct task_struct *child;
	long ret;

	/* PTRACE_TRACEME acts on current; no target lookup needed */
	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		if (!ret)
			arch_ptrace_attach(current);
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	/*
	 * All remaining requests need an attached, traced child;
	 * PTRACE_KILL alone skips the stopped-state requirement.
	 */
	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (ret < 0)
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
765
766 int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
767                             unsigned long data)
768 {
769         unsigned long tmp;
770         int copied;
771
772         copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
773         if (copied != sizeof(tmp))
774                 return -EIO;
775         return put_user(tmp, (unsigned long __user *)data);
776 }
777
778 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
779                             unsigned long data)
780 {
781         int copied;
782
783         copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
784         return (copied == sizeof(data)) ? 0 : -EIO;
785 }
786
787 #if defined CONFIG_COMPAT
788 #include <linux/compat.h>
789
/*
 * Generic compat handler: performs the 32-bit pointer/length
 * translations for requests that need them and falls through to
 * ptrace_request() for everything else.
 */
int compat_ptrace_request(struct task_struct *child, compat_long_t request,
			  compat_ulong_t addr, compat_ulong_t data)
{
	compat_ulong_t __user *datap = compat_ptr(data);
	compat_ulong_t word;
	siginfo_t siginfo;
	int ret;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		/* transfer one 32-bit word, not a native long */
		ret = access_process_vm(child, addr, &word, sizeof(word), 0);
		if (ret != sizeof(word))
			ret = -EIO;
		else
			ret = put_user(word, datap);
		break;

	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		ret = access_process_vm(child, addr, &data, sizeof(data), 1);
		ret = (ret != sizeof(data) ? -EIO : 0);
		break;

	case PTRACE_GETEVENTMSG:
		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user32(
				(struct compat_siginfo __user *) datap,
				&siginfo);
		break;

	case PTRACE_SETSIGINFO:
		memset(&siginfo, 0, sizeof siginfo);
		if (copy_siginfo_from_user32(
			    &siginfo, (struct compat_siginfo __user *) datap))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET:
	{
		struct iovec kiov;
		struct compat_iovec __user *uiov =
			(struct compat_iovec __user *) datap;
		compat_uptr_t ptr;
		compat_size_t len;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(ptr, &uiov->iov_base) ||
		    __get_user(len, &uiov->iov_len))
			return -EFAULT;

		/* widen the compat iovec before calling ptrace_regset() */
		kiov.iov_base = compat_ptr(ptr);
		kiov.iov_len = len;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
	}

	return ret;
}
867
/*
 * Compat entry point for ptrace(2); mirrors the native syscall above
 * but dispatches to compat_arch_ptrace() for the generic requests.
 */
asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
				  compat_long_t addr, compat_long_t data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		/*
		 * NOTE(review): unlike native sys_ptrace above, no
		 * arch_ptrace_attach() is done after a successful
		 * TRACEME here -- confirm whether that is intended.
		 */
		ret = ptrace_traceme();
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (!ret)
		ret = compat_arch_ptrace(child, request, addr, data);

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
905 #endif  /* CONFIG_COMPAT */