Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/trivial
diff --git a/kernel/exit.c b/kernel/exit.c
index e47ee8a..9ee58bb 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
 #include <linux/init_task.h>
 #include <linux/perf_event.h>
 #include <trace/events/sched.h>
+#include <linux/hw_breakpoint.h>
+#include <linux/oom.h>
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
 #include <asm/pgtable.h>
 #include <asm/mmu_context.h>
-#include "cred-internals.h"
 
 static void exit_mm(struct task_struct * tsk);
 
-static void __unhash_process(struct task_struct *p)
+static void __unhash_process(struct task_struct *p, bool group_dead)
 {
        nr_threads--;
        detach_pid(p, PIDTYPE_PID);
-       if (thread_group_leader(p)) {
+       if (group_dead) {
                detach_pid(p, PIDTYPE_PGID);
                detach_pid(p, PIDTYPE_SID);
 
                list_del_rcu(&p->tasks);
-               __get_cpu_var(process_counts)--;
+               list_del_init(&p->sibling);
+               __this_cpu_dec(process_counts);
        }
        list_del_rcu(&p->thread_group);
-       list_del_init(&p->sibling);
 }
 
 /*
@@ -79,23 +80,33 @@ static void __unhash_process(struct task_struct *p)
 static void __exit_signal(struct task_struct *tsk)
 {
        struct signal_struct *sig = tsk->signal;
+       bool group_dead = thread_group_leader(tsk);
        struct sighand_struct *sighand;
+       struct tty_struct *uninitialized_var(tty);
 
-       BUG_ON(!sig);
-       BUG_ON(!atomic_read(&sig->count));
-
-       sighand = rcu_dereference(tsk->sighand);
+       sighand = rcu_dereference_check(tsk->sighand,
+                                       lockdep_tasklist_lock_is_held());
        spin_lock(&sighand->siglock);
 
        posix_cpu_timers_exit(tsk);
-       if (atomic_dec_and_test(&sig->count))
+       if (group_dead) {
                posix_cpu_timers_exit_group(tsk);
-       else {
+               tty = sig->tty;
+               sig->tty = NULL;
+       } else {
+               /*
+                * This can only happen if the caller is de_thread().
+                * FIXME: this is a temporary hack; we should teach
+                * posix-cpu-timers to handle this case correctly.
+                */
+               if (unlikely(has_group_leader_pid(tsk)))
+                       posix_cpu_timers_exit_group(tsk);
+
                /*
                 * If there is any task waiting for the group exit
                 * then notify it:
                 */
-               if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count)
+               if (sig->notify_count > 0 && !--sig->notify_count)
                        wake_up_process(sig->group_exit_task);
 
                if (tsk == sig->curr_target)
@@ -110,9 +121,9 @@ static void __exit_signal(struct task_struct *tsk)
                 * We won't ever get here for the group leader, since it
                 * will have been the last reference on the signal_struct.
                 */
-               sig->utime = cputime_add(sig->utime, task_utime(tsk));
-               sig->stime = cputime_add(sig->stime, task_stime(tsk));
-               sig->gtime = cputime_add(sig->gtime, task_gtime(tsk));
+               sig->utime = cputime_add(sig->utime, tsk->utime);
+               sig->stime = cputime_add(sig->stime, tsk->stime);
+               sig->gtime = cputime_add(sig->gtime, tsk->gtime);
                sig->min_flt += tsk->min_flt;
                sig->maj_flt += tsk->maj_flt;
                sig->nvcsw += tsk->nvcsw;
@@ -121,32 +132,24 @@ static void __exit_signal(struct task_struct *tsk)
                sig->oublock += task_io_get_oublock(tsk);
                task_io_accounting_add(&sig->ioac, &tsk->ioac);
                sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
-               sig = NULL; /* Marker for below. */
        }
 
-       __unhash_process(tsk);
+       sig->nr_threads--;
+       __unhash_process(tsk, group_dead);
 
        /*
         * Do this under ->siglock, we can race with another thread
         * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
         */
        flush_sigqueue(&tsk->pending);
-
-       tsk->signal = NULL;
        tsk->sighand = NULL;
        spin_unlock(&sighand->siglock);
 
        __cleanup_sighand(sighand);
        clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
-       if (sig) {
+       if (group_dead) {
                flush_sigqueue(&sig->shared_pending);
-               taskstats_tgid_free(sig);
-               /*
-                * Make sure ->signal can't go away under rq->lock,
-                * see account_group_exec_runtime().
-                */
-               task_rq_unlock_wait(tsk);
-               __cleanup_signal(sig);
+               tty_kref_put(tty);
        }
 }
 
@@ -154,9 +157,7 @@ static void delayed_put_task_struct(struct rcu_head *rhp)
 {
        struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
 
-#ifdef CONFIG_PERF_EVENTS
-       WARN_ON_ONCE(tsk->perf_event_ctxp);
-#endif
+       perf_event_delayed_put(tsk);
        trace_sched_process_free(tsk);
        put_task_struct(tsk);
 }
@@ -167,15 +168,16 @@ void release_task(struct task_struct * p)
        struct task_struct *leader;
        int zap_leader;
 repeat:
-       tracehook_prepare_release_task(p);
        /* don't need to get the RCU readlock here - the process is dead and
-        * can't be modifying its own credentials */
+        * can't be modifying its own credentials. But shut RCU-lockdep up */
+       rcu_read_lock();
        atomic_dec(&__task_cred(p)->user->processes);
+       rcu_read_unlock();
 
        proc_flush_task(p);
 
        write_lock_irq(&tasklist_lock);
-       tracehook_finish_release_task(p);
+       ptrace_release_task(p);
        __exit_signal(p);
 
        /*
@@ -186,22 +188,12 @@ repeat:
        zap_leader = 0;
        leader = p->group_leader;
        if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
-               BUG_ON(task_detached(leader));
-               do_notify_parent(leader, leader->exit_signal);
                /*
                 * If we were the last child thread and the leader has
                 * exited already, and the leader's parent ignores SIGCHLD,
                 * then we are the one who should release the leader.
-                *
-                * do_notify_parent() will have marked it self-reaping in
-                * that case.
-                */
-               zap_leader = task_detached(leader);
-
-               /*
-                * This maintains the invariant that release_task()
-                * only runs on a task in EXIT_DEAD, just for sanity.
                 */
+               zap_leader = do_notify_parent(leader, leader->exit_signal);
                if (zap_leader)
                        leader->exit_state = EXIT_DEAD;
        }
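
With this change, do_notify_parent() itself reports whether the leader is
self-reaping, which happens when the real parent ignores SIGCHLD. The
user-visible side of that rule can be sketched with standard POSIX calls
(illustrative only, not part of the patch):

/* With SIGCHLD ignored, the kernel auto-reaps children: wait() finds
 * no zombie and fails with ECHILD. */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        signal(SIGCHLD, SIG_IGN);       /* parent ignores SIGCHLD */
        if (fork() == 0)
                _exit(0);               /* child exits immediately */
        if (wait(NULL) < 0 && errno == ECHILD)
                printf("child was auto-reaped, no zombie left\n");
        return 0;
}
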
@@ -273,18 +265,16 @@ int is_current_pgrp_orphaned(void)
        return retval;
 }
 
-static int has_stopped_jobs(struct pid *pgrp)
+static bool has_stopped_jobs(struct pid *pgrp)
 {
-       int retval = 0;
        struct task_struct *p;
 
        do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
-               if (!task_is_stopped(p))
-                       continue;
-               retval = 1;
-               break;
+               if (p->signal->flags & SIGNAL_STOP_STOPPED)
+                       return true;
        } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
-       return retval;
+
+       return false;
 }
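
has_stopped_jobs() now tests the group-wide SIGNAL_STOP_STOPPED flag rather
than scanning for individually stopped tasks. That group-stop state is the
one waitpid(WUNTRACED) reports to the parent; a small illustrative program
using only standard calls:

#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        pid_t pid = fork();
        if (pid == 0) {
                pause();                /* child just waits */
                _exit(0);
        }
        kill(pid, SIGSTOP);             /* put the child into group stop */
        int status;
        waitpid(pid, &status, WUNTRACED);
        if (WIFSTOPPED(status))
                printf("child %d stopped by signal %d\n",
                       (int)pid, WSTOPSIG(status));
        kill(pid, SIGKILL);             /* clean up */
        return 0;
}
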
 
 /*
@@ -472,9 +462,11 @@ static void close_files(struct files_struct * files)
        /*
         * It is safe to dereference the fd table without RCU or
         * ->file_lock because this is the last reference to the
-        * files structure.
+        * files structure.  But use RCU to shut RCU-lockdep up.
         */
+       rcu_read_lock();
        fdt = files_fdtable(files);
+       rcu_read_unlock();
        for (;;) {
                unsigned long set;
                i = j * __NFDBITS;
@@ -520,10 +512,12 @@ void put_files_struct(struct files_struct *files)
                 * at the end of the RCU grace period. Otherwise,
                 * you can free files immediately.
                 */
+               rcu_read_lock();
                fdt = files_fdtable(files);
                if (fdt != &files->fdtab)
                        kmem_cache_free(files_cachep, files);
                free_fdtable(fdt);
+               rcu_read_unlock();
        }
 }
 
@@ -553,29 +547,28 @@ void exit_files(struct task_struct *tsk)
 
 #ifdef CONFIG_MM_OWNER
 /*
- * Task p is exiting and it owned mm, lets find a new owner for it
+ * A task is exiting.   If it owned this mm, find a new owner for the mm.
  */
-static inline int
-mm_need_new_owner(struct mm_struct *mm, struct task_struct *p)
-{
-       /*
-        * If there are other users of the mm and the owner (us) is exiting
-        * we need to find a new owner to take on the responsibility.
-        */
-       if (atomic_read(&mm->mm_users) <= 1)
-               return 0;
-       if (mm->owner != p)
-               return 0;
-       return 1;
-}
-
 void mm_update_next_owner(struct mm_struct *mm)
 {
        struct task_struct *c, *g, *p = current;
 
 retry:
-       if (!mm_need_new_owner(mm, p))
+       /*
+        * If the exiting or execing task is not the owner, it's
+        * someone else's problem.
+        */
+       if (mm->owner != p)
+               return;
+       /*
+        * The current owner is exiting/execing and there are no other
+        * candidates.  Do not leave the mm pointing to a possibly
+        * freed task structure.
+        */
+       if (atomic_read(&mm->mm_users) <= 1) {
+               mm->owner = NULL;
                return;
+       }
 
        read_lock(&tasklist_lock);
        /*
@@ -688,6 +681,8 @@ static void exit_mm(struct task_struct * tsk)
        enter_lazy_tlb(mm, current);
        /* We don't want this task to be frozen prematurely */
        clear_freeze_flag(tsk);
+       if (tsk->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
+               atomic_dec(&mm->oom_disable_count);
        task_unlock(tsk);
        mm_update_next_owner(mm);
        mmput(mm);
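
The new oom_disable_count tracks how many tasks sharing this mm run with
oom_score_adj at OOM_SCORE_ADJ_MIN (-1000), the setting that exempts the mm
from the OOM killer; the exiting task drops its contribution here. The knob
itself is visible from user space (illustrative read, not part of the
patch):

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/self/oom_score_adj", "r");
        int adj;

        if (f && fscanf(f, "%d", &adj) == 1)
                printf("oom_score_adj = %d (OOM_SCORE_ADJ_MIN is -1000)\n",
                       adj);
        if (f)
                fclose(f);
        return 0;
}
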
@@ -701,6 +696,8 @@ static void exit_mm(struct task_struct * tsk)
  * space.
  */
 static struct task_struct *find_new_reaper(struct task_struct *father)
+       __releases(&tasklist_lock)
+       __acquires(&tasklist_lock)
 {
        struct pid_namespace *pid_ns = task_active_pid_ns(father);
        struct task_struct *thread;
@@ -735,15 +732,12 @@ static struct task_struct *find_new_reaper(struct task_struct *father)
 /*
  * Any that need to be release_task'd are put on the @dead list.
  */
-static void reparent_thread(struct task_struct *father, struct task_struct *p,
+static void reparent_leader(struct task_struct *father, struct task_struct *p,
                                struct list_head *dead)
 {
-       if (p->pdeath_signal)
-               group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);
-
        list_move_tail(&p->sibling, &p->real_parent->children);
 
-       if (task_detached(p))
+       if (p->exit_state == EXIT_DEAD)
                return;
        /*
         * If this is a threaded reparent there is no need to
@@ -756,10 +750,9 @@ static void reparent_thread(struct task_struct *father, struct task_struct *p,
        p->exit_signal = SIGCHLD;
 
        /* If it has exited notify the new parent about this child's death. */
-       if (!task_ptrace(p) &&
+       if (!p->ptrace &&
            p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
-               do_notify_parent(p, p->exit_signal);
-               if (task_detached(p)) {
+               if (do_notify_parent(p, p->exit_signal)) {
                        p->exit_state = EXIT_DEAD;
                        list_move_tail(&p->sibling, dead);
                }
@@ -773,18 +766,27 @@ static void forget_original_parent(struct task_struct *father)
        struct task_struct *p, *n, *reaper;
        LIST_HEAD(dead_children);
 
-       exit_ptrace(father);
-
        write_lock_irq(&tasklist_lock);
+       /*
+        * Note that exit_ptrace() and find_new_reaper() might
+        * drop tasklist_lock and reacquire it.
+        */
+       exit_ptrace(father);
        reaper = find_new_reaper(father);
 
        list_for_each_entry_safe(p, n, &father->children, sibling) {
-               p->real_parent = reaper;
-               if (p->parent == father) {
-                       BUG_ON(task_ptrace(p));
-                       p->parent = p->real_parent;
-               }
-               reparent_thread(father, p, &dead_children);
+               struct task_struct *t = p;
+               do {
+                       t->real_parent = reaper;
+                       if (t->parent == father) {
+                               BUG_ON(t->ptrace);
+                               t->parent = t->real_parent;
+                       }
+                       if (t->pdeath_signal)
+                               group_send_sig_info(t->pdeath_signal,
+                                                   SEND_SIG_NOINFO, t);
+               } while_each_thread(p, t);
+               reparent_leader(father, p, &dead_children);
        }
        write_unlock_irq(&tasklist_lock);
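
The loop above now walks every thread of each child with
while_each_thread(), so the parent-death signal reaches all threads instead
of only the group leader as reparent_thread() used to deliver it. That
signal is the one a task requests with prctl(PR_SET_PDEATHSIG); a hedged
user-space sketch (the getppid() check papers over the usual setup race):

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/prctl.h>
#include <unistd.h>

static void on_term(int sig)
{
        _exit(42);                      /* parent died, leave quietly */
}

int main(void)
{
        if (fork() == 0) {
                signal(SIGTERM, on_term);
                prctl(PR_SET_PDEATHSIG, SIGTERM);
                if (getppid() == 1)     /* parent already gone? */
                        _exit(1);
                pause();                /* wait for the death signal */
        }
        return 0;                       /* parent exits at once */
}
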
 
@@ -802,8 +804,7 @@ static void forget_original_parent(struct task_struct *father)
  */
 static void exit_notify(struct task_struct *tsk, int group_dead)
 {
-       int signal;
-       void *cookie;
+       bool autoreap;
 
        /*
         * This does two things:
@@ -823,7 +824,7 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
        /* Let father know we died
         *
         * Thread signals are configurable, but you aren't going to use
-        * that to send signals to arbitary processes.
+        * that to send signals to arbitrary processes.
         * That stops right now.
         *
         * If the parent exec id doesn't match the exec id we saved
@@ -834,29 +835,33 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
         * we have changed execution domain as these two values started
         * the same after a fork.
         */
-       if (tsk->exit_signal != SIGCHLD && !task_detached(tsk) &&
+       if (thread_group_leader(tsk) && tsk->exit_signal != SIGCHLD &&
            (tsk->parent_exec_id != tsk->real_parent->self_exec_id ||
             tsk->self_exec_id != tsk->parent_exec_id))
                tsk->exit_signal = SIGCHLD;
 
-       signal = tracehook_notify_death(tsk, &cookie, group_dead);
-       if (signal >= 0)
-               signal = do_notify_parent(tsk, signal);
+       if (unlikely(tsk->ptrace)) {
+               int sig = thread_group_leader(tsk) &&
+                               thread_group_empty(tsk) &&
+                               !ptrace_reparented(tsk) ?
+                       tsk->exit_signal : SIGCHLD;
+               autoreap = do_notify_parent(tsk, sig);
+       } else if (thread_group_leader(tsk)) {
+               autoreap = thread_group_empty(tsk) &&
+                       do_notify_parent(tsk, tsk->exit_signal);
+       } else {
+               autoreap = true;
+       }
 
-       tsk->exit_state = signal == DEATH_REAP ? EXIT_DEAD : EXIT_ZOMBIE;
+       tsk->exit_state = autoreap ? EXIT_DEAD : EXIT_ZOMBIE;
 
-       /* mt-exec, de_thread() is waiting for us */
-       if (thread_group_leader(tsk) &&
-           tsk->signal->group_exit_task &&
-           tsk->signal->notify_count < 0)
+       /* mt-exec, de_thread() is waiting for group leader */
+       if (unlikely(tsk->signal->notify_count < 0))
                wake_up_process(tsk->signal->group_exit_task);
-
        write_unlock_irq(&tasklist_lock);
 
-       tracehook_report_death(tsk, signal, cookie, group_dead);
-
        /* If the process is dead, release it - nobody will wait for it */
-       if (signal == DEATH_REAP)
+       if (autoreap)
                release_task(tsk);
 }
 
@@ -892,14 +897,23 @@ NORET_TYPE void do_exit(long code)
 
        profile_task_exit(tsk);
 
-       WARN_ON(atomic_read(&tsk->fs_excl));
+       WARN_ON(blk_needs_flush_plug(tsk));
 
        if (unlikely(in_interrupt()))
                panic("Aiee, killing interrupt handler!");
        if (unlikely(!tsk->pid))
                panic("Attempted to kill the idle task!");
 
-       tracehook_report_exit(&code);
+       /*
+        * If do_exit is called because this process oopsed, it's possible
+        * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
+        * continuing. Amongst other possible reasons, this is to prevent
+        * mm_release()->clear_child_tid() from writing to a user-controlled
+        * kernel address.
+        */
+       set_fs(USER_DS);
+
+       ptrace_event(PTRACE_EVENT_EXIT, code);
 
        validate_creds_for_do_exit(tsk);
 
@@ -932,7 +946,7 @@ NORET_TYPE void do_exit(long code)
         * an exiting task cleaning up the robust pi futexes.
         */
        smp_mb();
-       spin_unlock_wait(&tsk->pi_lock);
+       raw_spin_unlock_wait(&tsk->pi_lock);
 
        if (unlikely(in_atomic()))
                printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
@@ -940,11 +954,15 @@ NORET_TYPE void do_exit(long code)
                                preempt_count());
 
        acct_update_integrals(tsk);
-
+       /* sync mm's RSS info before statistics gathering */
+       if (tsk->mm)
+               sync_mm_rss(tsk, tsk->mm);
        group_dead = atomic_dec_and_test(&tsk->signal->live);
        if (group_dead) {
                hrtimer_cancel(&tsk->signal->real_timer);
                exit_itimers(tsk->signal);
+               if (tsk->mm)
+                       setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
        }
        acct_collect(code, group_dead);
        if (group_dead)
@@ -966,31 +984,37 @@ NORET_TYPE void do_exit(long code)
        exit_fs(tsk);
        check_stack_usage();
        exit_thread();
+
+       /*
+        * Flush inherited counters to the parent - before the parent
+        * gets woken up by child-exit notifications.
+        *
+        * Because of cgroup mode, this must be called before cgroup_exit().
+        */
+       perf_event_exit_task(tsk);
+
        cgroup_exit(tsk, 1);
 
-       if (group_dead && tsk->signal->leader)
+       if (group_dead)
                disassociate_ctty(1);
 
        module_put(task_thread_info(tsk)->exec_domain->module);
-       if (tsk->binfmt)
-               module_put(tsk->binfmt->module);
 
        proc_exit_connector(tsk);
 
        /*
-        * Flush inherited counters to the parent - before the parent
-        * gets woken up by child-exit notifications.
+        * FIXME: do that only when needed, using sched_exit tracepoint
         */
-       perf_event_exit_task(tsk);
+       ptrace_put_breakpoints(tsk);
 
        exit_notify(tsk, group_dead);
 #ifdef CONFIG_NUMA
+       task_lock(tsk);
        mpol_put(tsk->mempolicy);
        tsk->mempolicy = NULL;
+       task_unlock(tsk);
 #endif
 #ifdef CONFIG_FUTEX
-       if (unlikely(!list_empty(&tsk->pi_state_list)))
-               exit_pi_state_list(tsk);
        if (unlikely(current->pi_state_cache))
                kfree(current->pi_state_cache);
 #endif
@@ -1006,7 +1030,7 @@ NORET_TYPE void do_exit(long code)
        tsk->flags |= PF_EXITPIDONE;
 
        if (tsk->io_context)
-               exit_io_context();
+               exit_io_context(tsk);
 
        if (tsk->splice_pipe)
                __free_pipe_info(tsk->splice_pipe);
@@ -1093,28 +1117,28 @@ struct wait_opts {
        int __user              *wo_stat;
        struct rusage __user    *wo_rusage;
 
+       wait_queue_t            child_wait;
        int                     notask_error;
 };
 
-static struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
+static inline
+struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
 {
-       struct pid *pid = NULL;
-       if (type == PIDTYPE_PID)
-               pid = task->pids[type].pid;
-       else if (type < PIDTYPE_MAX)
-               pid = task->group_leader->pids[type].pid;
-       return pid;
+       if (type != PIDTYPE_PID)
+               task = task->group_leader;
+       return task->pids[type].pid;
 }
 
-static int eligible_child(struct wait_opts *wo, struct task_struct *p)
+static int eligible_pid(struct wait_opts *wo, struct task_struct *p)
 {
-       int err;
-
-       if (wo->wo_type < PIDTYPE_MAX) {
-               if (task_pid_type(p, wo->wo_type) != wo->wo_pid)
-                       return 0;
-       }
+       return  wo->wo_type == PIDTYPE_MAX ||
+               task_pid_type(p, wo->wo_type) == wo->wo_pid;
+}
 
+static int eligible_child(struct wait_opts *wo, struct task_struct *p)
+{
+       if (!eligible_pid(wo, p))
+               return 0;
        /* Wait for all children (clone and not) if __WALL is set;
         * otherwise, wait for clone children *only* if __WCLONE is
         * set; otherwise, wait for non-clone children *only*.  (Note:
@@ -1124,10 +1148,6 @@ static int eligible_child(struct wait_opts *wo, struct task_struct *p)
            && !(wo->wo_flags & __WALL))
                return 0;
 
-       err = security_task_wait(p);
-       if (err)
-               return err;
-
        return 1;
 }
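
The __WALL/__WCLONE filtering kept here is visible from user space: a child
created with clone() and no exit signal is skipped by a plain waitpid()
until the caller passes __WCLONE or __WALL. An illustrative sketch (glibc
exposes clone() and __WALL under _GNU_SOURCE):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

static int child_fn(void *arg)
{
        return 0;                       /* exit immediately */
}

int main(void)
{
        char *stack = malloc(64 * 1024);
        pid_t pid = clone(child_fn, stack + 64 * 1024, 0 /* no SIGCHLD */,
                          NULL);

        if (waitpid(pid, NULL, 0) < 0)
                perror("plain waitpid skips clone children");
        if (waitpid(pid, NULL, __WALL) == pid)
                printf("reaped clone child %d with __WALL\n", (int)pid);

        free(stack);
        return 0;
}
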
 
@@ -1140,18 +1160,20 @@ static int wait_noreap_copyout(struct wait_opts *wo, struct task_struct *p,
 
        put_task_struct(p);
        infop = wo->wo_info;
-       if (!retval)
-               retval = put_user(SIGCHLD, &infop->si_signo);
-       if (!retval)
-               retval = put_user(0, &infop->si_errno);
-       if (!retval)
-               retval = put_user((short)why, &infop->si_code);
-       if (!retval)
-               retval = put_user(pid, &infop->si_pid);
-       if (!retval)
-               retval = put_user(uid, &infop->si_uid);
-       if (!retval)
-               retval = put_user(status, &infop->si_status);
+       if (infop) {
+               if (!retval)
+                       retval = put_user(SIGCHLD, &infop->si_signo);
+               if (!retval)
+                       retval = put_user(0, &infop->si_errno);
+               if (!retval)
+                       retval = put_user((short)why, &infop->si_code);
+               if (!retval)
+                       retval = put_user(pid, &infop->si_pid);
+               if (!retval)
+                       retval = put_user(uid, &infop->si_uid);
+               if (!retval)
+                       retval = put_user(status, &infop->si_status);
+       }
        if (!retval)
                retval = pid;
        return retval;
@@ -1176,7 +1198,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
 
        if (unlikely(wo->wo_flags & WNOWAIT)) {
                int exit_code = p->exit_code;
-               int why, status;
+               int why;
 
                get_task_struct(p);
                read_unlock(&tasklist_lock);
@@ -1203,11 +1225,13 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
        traced = ptrace_reparented(p);
        /*
         * It can be ptraced but not reparented, check
-        * !task_detached() to filter out sub-threads.
+        * thread_group_leader() to filter out sub-threads.
         */
-       if (likely(!traced) && likely(!task_detached(p))) {
+       if (likely(!traced) && thread_group_leader(p)) {
                struct signal_struct *psig;
                struct signal_struct *sig;
+               unsigned long maxrss;
+               cputime_t tgutime, tgstime;
 
                /*
                 * The resource counters for the group leader are in its
@@ -1223,20 +1247,23 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
                 * need to protect the access to parent->signal fields,
                 * as other threads in the parent group can be right
                 * here reaping other children at the same time.
+                *
+                * We use thread_group_times() to get times for the thread
+                * group, which consolidates times for all threads in the
+                * group including the group leader.
                 */
+               thread_group_times(p, &tgutime, &tgstime);
                spin_lock_irq(&p->real_parent->sighand->siglock);
                psig = p->real_parent->signal;
                sig = p->signal;
                psig->cutime =
                        cputime_add(psig->cutime,
-                       cputime_add(p->utime,
-                       cputime_add(sig->utime,
-                                   sig->cutime)));
+                       cputime_add(tgutime,
+                                   sig->cutime));
                psig->cstime =
                        cputime_add(psig->cstime,
-                       cputime_add(p->stime,
-                       cputime_add(sig->stime,
-                                   sig->cstime)));
+                       cputime_add(tgstime,
+                                   sig->cstime));
                psig->cgtime =
                        cputime_add(psig->cgtime,
                        cputime_add(p->gtime,
@@ -1256,6 +1283,9 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
                psig->coublock +=
                        task_io_get_oublock(p) +
                        sig->oublock + sig->coublock;
+               maxrss = max(sig->maxrss, sig->cmaxrss);
+               if (psig->cmaxrss < maxrss)
+                       psig->cmaxrss = maxrss;
                task_io_accounting_add(&psig->ioac, &p->ioac);
                task_io_accounting_add(&psig->ioac, &sig->ioac);
                spin_unlock_irq(&p->real_parent->sighand->siglock);
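
The cmaxrss maximum taken here is what the parent later sees as ru_maxrss
from getrusage(RUSAGE_CHILDREN) once the child is reaped. A small
illustrative program (ru_maxrss is in kilobytes on Linux):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/resource.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        if (fork() == 0) {
                size_t sz = 32 << 20;   /* grow the child's RSS by 32 MB */
                char *buf = malloc(sz);
                if (buf)
                        memset(buf, 1, sz);
                _exit(0);
        }
        wait(NULL);                     /* reap: cmaxrss gets updated */

        struct rusage ru;
        getrusage(RUSAGE_CHILDREN, &ru);
        printf("children peak RSS: %ld kB\n", ru.ru_maxrss);
        return 0;
}
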
@@ -1305,16 +1335,13 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
                /* We dropped tasklist, ptracer could die and untrace */
                ptrace_unlink(p);
                /*
-                * If this is not a detached task, notify the parent.
-                * If it's still not detached after that, don't release
-                * it now.
+                * If this is not a sub-thread, notify the parent.
+                * If parent wants a zombie, don't release it now.
                 */
-               if (!task_detached(p)) {
-                       do_notify_parent(p, p->exit_signal);
-                       if (!task_detached(p)) {
-                               p->exit_state = EXIT_ZOMBIE;
-                               p = NULL;
-                       }
+               if (thread_group_leader(p) &&
+                   !do_notify_parent(p, p->exit_signal)) {
+                       p->exit_state = EXIT_ZOMBIE;
+                       p = NULL;
                }
                write_unlock_irq(&tasklist_lock);
        }
@@ -1327,7 +1354,8 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
 static int *task_stopped_code(struct task_struct *p, bool ptrace)
 {
        if (ptrace) {
-               if (task_is_stopped_or_traced(p))
+               if (task_is_stopped_or_traced(p) &&
+                   !(p->jobctl & JOBCTL_LISTENING))
                        return &p->exit_code;
        } else {
                if (p->signal->flags & SIGNAL_STOP_STOPPED)
@@ -1336,11 +1364,23 @@ static int *task_stopped_code(struct task_struct *p, bool ptrace)
        return NULL;
 }
 
-/*
- * Handle sys_wait4 work for one task in state TASK_STOPPED.  We hold
- * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
- * the lock and this task is uninteresting.  If we return nonzero, we have
- * released the lock and the system call should return.
+/**
+ * wait_task_stopped - Wait for %TASK_STOPPED or %TASK_TRACED
+ * @wo: wait options
+ * @ptrace: is the wait for ptrace
+ * @p: task to wait for
+ *
+ * Handle sys_wait4() work for @p in state %TASK_STOPPED or %TASK_TRACED.
+ *
+ * CONTEXT:
+ * read_lock(&tasklist_lock), which is released if return value is
+ * non-zero.  Also, grabs and releases @p->sighand->siglock.
+ *
+ * RETURNS:
+ * 0 if wait condition didn't exist and search for other wait conditions
+ * should continue.  Non-zero return, -errno on failure and @p's pid on
+ * success, implies that tasklist_lock is released and wait condition
+ * search should terminate.
  */
 static int wait_task_stopped(struct wait_opts *wo,
                                int ptrace, struct task_struct *p)
@@ -1356,6 +1396,9 @@ static int wait_task_stopped(struct wait_opts *wo,
        if (!ptrace && !(wo->wo_flags & WUNTRACED))
                return 0;
 
+       if (!task_stopped_code(p, ptrace))
+               return 0;
+
        exit_code = 0;
        spin_lock_irq(&p->sighand->siglock);
 
@@ -1370,8 +1413,7 @@ static int wait_task_stopped(struct wait_opts *wo,
        if (!unlikely(wo->wo_flags & WNOWAIT))
                *p_code = 0;
 
-       /* don't need the RCU readlock here as we're holding a spinlock */
-       uid = __task_cred(p)->uid;
+       uid = task_uid(p);
 unlock_sig:
        spin_unlock_irq(&p->sighand->siglock);
        if (!exit_code)
@@ -1444,7 +1486,7 @@ static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
        }
        if (!unlikely(wo->wo_flags & WNOWAIT))
                p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
-       uid = __task_cred(p)->uid;
+       uid = task_uid(p);
        spin_unlock_irq(&p->sighand->siglock);
 
        pid = task_pid_vnr(p);
@@ -1477,13 +1519,14 @@ static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
  * then ->notask_error is 0 if @p is an eligible child,
  * or another error from security_task_wait(), or still -ECHILD.
  */
-static int wait_consider_task(struct wait_opts *wo, struct task_struct *parent,
-                               int ptrace, struct task_struct *p)
+static int wait_consider_task(struct wait_opts *wo, int ptrace,
+                               struct task_struct *p)
 {
        int ret = eligible_child(wo, p);
        if (!ret)
                return ret;
 
+       ret = security_task_wait(p);
        if (unlikely(ret < 0)) {
                /*
                 * If we have not yet seen any eligible child,
@@ -1497,33 +1540,83 @@ static int wait_consider_task(struct wait_opts *wo, struct task_struct *parent,
                return 0;
        }
 
-       if (likely(!ptrace) && unlikely(task_ptrace(p))) {
+       /* dead body doesn't have much to contribute */
+       if (p->exit_state == EXIT_DEAD)
+               return 0;
+
+       /* slay zombie? */
+       if (p->exit_state == EXIT_ZOMBIE) {
+               /*
+                * A zombie ptracee is only visible to its ptracer.
+                * Notification and reaping will be cascaded to the real
+                * parent when the ptracer detaches.
+                */
+               if (likely(!ptrace) && unlikely(p->ptrace)) {
+                       /* it will become visible, clear notask_error */
+                       wo->notask_error = 0;
+                       return 0;
+               }
+
+               /* we don't reap group leaders with subthreads */
+               if (!delay_group_leader(p))
+                       return wait_task_zombie(wo, p);
+
+               /*
+                * Allow access to stopped/continued state via zombie by
+                * falling through.  Clearing of notask_error is complex.
+                *
+                * When !@ptrace:
+                *
+                * If WEXITED is set, notask_error should naturally be
+                * cleared.  If not, subset of WSTOPPED|WCONTINUED is set,
+                * so, if there are live subthreads, there are events to
+                * wait for.  If all subthreads are dead, it's still safe
+                * to clear - this function will be called again in a finite
+                * amount of time once all the subthreads are released and
+                * will then return without clearing.
+                *
+                * When @ptrace:
+                *
+                * Stopped state is per-task and thus can't change once the
+                * target task dies.  Only continued and exited can happen.
+                * Clear notask_error if WCONTINUED | WEXITED.
+                */
+               if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED)))
+                       wo->notask_error = 0;
+       } else {
+               /*
+                * If @p is ptraced by a task in its real parent's group,
+                * hide group stop/continued state when looking at @p as
+                * the real parent; otherwise, a single stop can be
+                * reported twice as group and ptrace stops.
+                *
+                * If a ptracer wants to distinguish the two events for its
+                * own children, it should create a separate process which
+                * takes the role of real parent.
+                */
+               if (likely(!ptrace) && p->ptrace && !ptrace_reparented(p))
+                       return 0;
+
                /*
-                * This child is hidden by ptrace.
-                * We aren't allowed to see it now, but eventually we will.
+                * @p is alive and it's gonna stop, continue or exit, so
+                * there always is something to wait for.
                 */
                wo->notask_error = 0;
-               return 0;
        }
 
-       if (p->exit_state == EXIT_DEAD)
-               return 0;
-
        /*
-        * We don't reap group leaders with subthreads.
+        * Wait for stopped.  Depending on @ptrace, different stopped state
+        * is used and the two don't interact with each other.
         */
-       if (p->exit_state == EXIT_ZOMBIE && !delay_group_leader(p))
-               return wait_task_zombie(wo, p);
+       ret = wait_task_stopped(wo, ptrace, p);
+       if (ret)
+               return ret;
 
        /*
-        * It's stopped or running now, so it might
-        * later continue, exit, or stop again.
+        * Wait for continued.  There's only one continued state and the
+        * ptracer can consume it which can confuse the real parent.  Don't
+        * use WCONTINUED from ptracer.  You don't need or want it.
         */
-       wo->notask_error = 0;
-
-       if (task_stopped_code(p, ptrace))
-               return wait_task_stopped(wo, ptrace, p);
-
        return wait_task_continued(wo, p);
 }
 
@@ -1541,14 +1634,9 @@ static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
        struct task_struct *p;
 
        list_for_each_entry(p, &tsk->children, sibling) {
-               /*
-                * Do not consider detached threads.
-                */
-               if (!task_detached(p)) {
-                       int ret = wait_consider_task(wo, tsk, 0, p);
-                       if (ret)
-                               return ret;
-               }
+               int ret = wait_consider_task(wo, 0, p);
+               if (ret)
+                       return ret;
        }
 
        return 0;
@@ -1559,7 +1647,7 @@ static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
        struct task_struct *p;
 
        list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
-               int ret = wait_consider_task(wo, tsk, 1, p);
+               int ret = wait_consider_task(wo, 1, p);
                if (ret)
                        return ret;
        }
@@ -1567,15 +1655,38 @@ static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
        return 0;
 }
 
+static int child_wait_callback(wait_queue_t *wait, unsigned mode,
+                               int sync, void *key)
+{
+       struct wait_opts *wo = container_of(wait, struct wait_opts,
+                                               child_wait);
+       struct task_struct *p = key;
+
+       if (!eligible_pid(wo, p))
+               return 0;
+
+       if ((wo->wo_flags & __WNOTHREAD) && wait->private != p->parent)
+               return 0;
+
+       return default_wake_function(wait, mode, sync, key);
+}
+
+void __wake_up_parent(struct task_struct *p, struct task_struct *parent)
+{
+       __wake_up_sync_key(&parent->signal->wait_chldexit,
+                               TASK_INTERRUPTIBLE, 1, p);
+}
+
 static long do_wait(struct wait_opts *wo)
 {
-       DECLARE_WAITQUEUE(wait, current);
        struct task_struct *tsk;
        int retval;
 
        trace_sched_process_wait(wo->wo_pid);
 
-       add_wait_queue(&current->signal->wait_chldexit,&wait);
+       init_waitqueue_func_entry(&wo->child_wait, child_wait_callback);
+       wo->child_wait.private = current;
+       add_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
 repeat:
        /*
         * If there is nothing that can match our criteria, just get out.
@@ -1616,32 +1727,7 @@ notask:
        }
 end:
        __set_current_state(TASK_RUNNING);
-       remove_wait_queue(&current->signal->wait_chldexit,&wait);
-       if (wo->wo_info) {
-               struct siginfo __user *infop = wo->wo_info;
-
-               if (retval > 0)
-                       retval = 0;
-               else {
-                       /*
-                        * For a WNOHANG return, clear out all the fields
-                        * we would set so the user can easily tell the
-                        * difference.
-                        */
-                       if (!retval)
-                               retval = put_user(0, &infop->si_signo);
-                       if (!retval)
-                               retval = put_user(0, &infop->si_errno);
-                       if (!retval)
-                               retval = put_user(0, &infop->si_code);
-                       if (!retval)
-                               retval = put_user(0, &infop->si_pid);
-                       if (!retval)
-                               retval = put_user(0, &infop->si_uid);
-                       if (!retval)
-                               retval = put_user(0, &infop->si_status);
-               }
-       }
+       remove_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
        return retval;
 }
 
@@ -1686,6 +1772,29 @@ SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
        wo.wo_stat      = NULL;
        wo.wo_rusage    = ru;
        ret = do_wait(&wo);
+
+       if (ret > 0) {
+               ret = 0;
+       } else if (infop) {
+               /*
+                * For a WNOHANG return, clear out all the fields
+                * we would set so the user can easily tell the
+                * difference.
+                */
+               if (!ret)
+                       ret = put_user(0, &infop->si_signo);
+               if (!ret)
+                       ret = put_user(0, &infop->si_errno);
+               if (!ret)
+                       ret = put_user(0, &infop->si_code);
+               if (!ret)
+                       ret = put_user(0, &infop->si_pid);
+               if (!ret)
+                       ret = put_user(0, &infop->si_uid);
+               if (!ret)
+                       ret = put_user(0, &infop->si_status);
+       }
+
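
This moves the WNOHANG field-clearing out of do_wait() and into
sys_waitid(), the only wait flavour that passes infop. The user-visible
contract is unchanged: a WNOHANG waitid() that finds no child state
succeeds but leaves si_pid zero, which is how callers tell the two outcomes
apart. An illustrative sketch:

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        pid_t pid = fork();
        if (pid == 0) {
                sleep(5);               /* child stays alive for a while */
                _exit(0);
        }

        siginfo_t info;
        memset(&info, 0, sizeof(info));
        if (waitid(P_ALL, 0, &info, WEXITED | WNOHANG) == 0 &&
            info.si_pid == 0)
                printf("no child ready yet - si_* fields were cleared\n");

        kill(pid, SIGKILL);             /* clean up */
        return 0;
}
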
        put_pid(pid);
 
        /* avoid REGPARM breakage on x86: */