Merge branch 'ptrace' of git://git.kernel.org/pub/scm/linux/kernel/git/oleg/misc
Linus Torvalds [Fri, 22 Jul 2011 22:06:50 +0000 (15:06 -0700)]
* 'ptrace' of git://git.kernel.org/pub/scm/linux/kernel/git/oleg/misc: (39 commits)
  ptrace: do_wait(traced_leader_killed_by_mt_exec) can block forever
  ptrace: fix ptrace_signal() && STOP_DEQUEUED interaction
  connector: add an event for monitoring process tracers
  ptrace: dont send SIGSTOP on auto-attach if PT_SEIZED
  ptrace: mv send-SIGSTOP from do_fork() to ptrace_init_task()
  ptrace_init_task: initialize child->jobctl explicitly
  has_stopped_jobs: s/task_is_stopped/SIGNAL_STOP_STOPPED/
  ptrace: make former thread ID available via PTRACE_GETEVENTMSG after PTRACE_EVENT_EXEC stop
  ptrace: wait_consider_task: s/same_thread_group/ptrace_reparented/
  ptrace: kill real_parent_is_ptracer() in favor of ptrace_reparented()
  ptrace: ptrace_reparented() should check same_thread_group()
  redefine thread_group_leader() as exit_signal >= 0
  do not change dead_task->exit_signal
  kill task_detached()
  reparent_leader: check EXIT_DEAD instead of task_detached()
  make do_notify_parent() __must_check, update the callers
  __ptrace_detach: avoid task_detached(), check do_notify_parent()
  kill tracehook_notify_death()
  make do_notify_parent() return bool
  ptrace: s/tracehook_tracer_task()/ptrace_parent()/
  ...
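
A user-visible piece of this series is the PTRACE_GETEVENTMSG change listed
above: after a PTRACE_EVENT_EXEC stop, the event message carries the thread
ID the tracee had before the exec.  A hedged, minimal tracer sketch (the
tracee here is single-threaded, so the former TID simply equals the PID;
the interesting case is a non-leader thread calling exec):

#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        int status;
        unsigned long former_tid;
        pid_t pid = fork();

        if (pid == 0) {
                ptrace(PTRACE_TRACEME, 0, 0, 0);
                raise(SIGSTOP);                 /* let the tracer set options */
                execl("/bin/true", "true", (char *)NULL);
                _exit(127);
        }

        waitpid(pid, &status, 0);               /* signal-delivery-stop */
        ptrace(PTRACE_SETOPTIONS, pid, 0, (void *)PTRACE_O_TRACEEXEC);
        ptrace(PTRACE_CONT, pid, 0, 0);

        waitpid(pid, &status, 0);               /* PTRACE_EVENT_EXEC stop */
        if (status >> 8 == (SIGTRAP | (PTRACE_EVENT_EXEC << 8))) {
                ptrace(PTRACE_GETEVENTMSG, pid, 0, &former_tid);
                printf("exec'ed, former tid was %lu\n", former_tid);
        }

        ptrace(PTRACE_CONT, pid, 0, 0);
        waitpid(pid, &status, 0);               /* reap /bin/true */
        return 0;
}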

fs/exec.c
fs/proc/base.c
include/linux/sched.h
kernel/exit.c
kernel/signal.c
mm/nommu.c
security/selinux/hooks.c

diff --combined fs/exec.c
+++ b/fs/exec.c
@@@ -963,9 -963,18 +963,18 @@@ static int de_thread(struct task_struc
                leader->group_leader = tsk;
  
                tsk->exit_signal = SIGCHLD;
+               leader->exit_signal = -1;
  
                BUG_ON(leader->exit_state != EXIT_ZOMBIE);
                leader->exit_state = EXIT_DEAD;
+               /*
+                * We are going to release_task()->ptrace_unlink() silently,
+                * the tracer can sleep in do_wait(). EXIT_DEAD guarantees
+                * the tracer won't block again waiting for this thread.
+                */
+               if (unlikely(leader->ptrace))
+                       __wake_up_parent(leader, leader->parent);
                write_unlock_irq(&tasklist_lock);
  
                release_task(leader);
@@@ -1093,7 -1102,6 +1102,7 @@@ int flush_old_exec(struct linux_binprm 
  
        bprm->mm = NULL;                /* We're using it now */
  
 +      set_fs(USER_DS);
        current->flags &= ~(PF_RANDOMIZE | PF_KTHREAD);
        flush_thread();
        current->personality &= ~bprm->per_clear;
@@@ -1225,7 -1233,12 +1234,12 @@@ int check_unsafe_exec(struct linux_binp
        unsigned n_fs;
        int res = 0;
  
-       bprm->unsafe = tracehook_unsafe_exec(p);
+       if (p->ptrace) {
+               if (p->ptrace & PT_PTRACE_CAP)
+                       bprm->unsafe |= LSM_UNSAFE_PTRACE_CAP;
+               else
+                       bprm->unsafe |= LSM_UNSAFE_PTRACE;
+       }
  
        n_fs = 1;
        spin_lock(&p->fs->lock);
@@@ -1353,15 -1366,25 +1367,21 @@@ int search_binary_handler(struct linux_
        unsigned int depth = bprm->recursion_depth;
        int try,retval;
        struct linux_binfmt *fmt;
+       pid_t old_pid;
  
        retval = security_bprm_check(bprm);
        if (retval)
                return retval;
  
 -      /* kernel module loader fixup */
 -      /* so we don't try to load run modprobe in kernel space. */
 -      set_fs(USER_DS);
 -
        retval = audit_bprm(bprm);
        if (retval)
                return retval;
  
+       /* Need to fetch pid before load_binary changes it */
+       rcu_read_lock();
+       old_pid = task_pid_nr_ns(current, task_active_pid_ns(current->parent));
+       rcu_read_unlock();
        retval = -ENOENT;
        for (try=0; try<2; try++) {
                read_lock(&binfmt_lock);
                        bprm->recursion_depth = depth;
                        if (retval >= 0) {
                                if (depth == 0)
-                                       tracehook_report_exec(fmt, bprm, regs);
+                                       ptrace_event(PTRACE_EVENT_EXEC,
+                                                       old_pid);
                                put_binfmt(fmt);
                                allow_write_access(bprm->file);
                                if (bprm->file)
@@@ -1769,7 -1793,7 +1790,7 @@@ static int zap_process(struct task_stru
  
        t = start;
        do {
-               task_clear_group_stop_pending(t);
+               task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
                if (t != current && t->mm) {
                        sigaddset(&t->pending.signal, SIGKILL);
                        signal_wake_up(t, 1);
@@@ -1996,7 -2020,7 +2017,7 @@@ static void wait_for_dump_helpers(struc
   * is a special value that we use to trap recursive
   * core dumps
   */
 -static int umh_pipe_setup(struct subprocess_info *info)
 +static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
  {
        struct file *rp, *wp;
        struct fdtable *fdt;
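
The de_thread() hunk above pairs with the shortlog entry
"do_wait(traced_leader_killed_by_mt_exec) can block forever": when a
non-leader thread execs, the traced leader is released silently, so the
tracer must be woken and EXIT_DEAD keeps do_wait() from blocking on it
again.  A rough userland sketch of the scenario, hedged - this shows the
shape of the race, not a guaranteed reproducer, and /bin/true is an
arbitrary exec target (build with gcc -pthread):

#include <pthread.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static void *exec_thread(void *arg)
{
        sleep(1);                       /* give the tracer time to attach */
        execl("/bin/true", "true", (char *)NULL);
        _exit(127);
        return NULL;
}

int main(void)
{
        int status;
        pid_t leader = fork();

        if (leader == 0) {              /* tracee: leader plus exec'ing thread */
                pthread_t t;
                pthread_create(&t, NULL, exec_thread, NULL);
                pause();                /* leader idles; the mt-exec kills it */
        }

        ptrace(PTRACE_ATTACH, leader, 0, 0);
        waitpid(leader, &status, 0);    /* attach stop */
        ptrace(PTRACE_CONT, leader, 0, 0);

        /*
         * The exec'ing thread kills the traced leader inside de_thread().
         * Without the __wake_up_parent() added above, a tracer sleeping
         * in do_wait() here was never woken for the silently released
         * leader.
         */
        waitpid(leader, &status, 0);
        printf("wait returned, status=0x%x\n", status);
        return 0;
}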
diff --combined fs/proc/base.c
@@@ -216,7 -216,7 +216,7 @@@ static struct mm_struct *__check_mem_pe
        if (task_is_stopped_or_traced(task)) {
                int match;
                rcu_read_lock();
-               match = (tracehook_tracer_task(task) == current);
+               match = (ptrace_parent(task) == current);
                rcu_read_unlock();
                if (match && ptrace_may_access(task, PTRACE_MODE_ATTACH))
                        return mm;
@@@ -2169,7 -2169,11 +2169,7 @@@ static const struct file_operations pro
   */
  static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags)
  {
 -      int rv;
 -
 -      if (flags & IPERM_FLAG_RCU)
 -              return -ECHILD;
 -      rv = generic_permission(inode, mask, flags, NULL);
 +      int rv = generic_permission(inode, mask, flags, NULL);
        if (rv == 0)
                return 0;
        if (task_pid(current) == proc_pid(inode))
@@@ -2708,9 -2712,6 +2708,9 @@@ static int do_io_accounting(struct task
        struct task_io_accounting acct = task->ioac;
        unsigned long flags;
  
 +      if (!ptrace_may_access(task, PTRACE_MODE_READ))
 +              return -EACCES;
 +
        if (whole && lock_task_sighand(task, &flags)) {
                struct task_struct *t = task;
  
@@@ -2842,7 -2843,7 +2842,7 @@@ static const struct pid_entry tgid_base
        REG("coredump_filter", S_IRUGO|S_IWUSR, proc_coredump_filter_operations),
  #endif
  #ifdef CONFIG_TASK_IO_ACCOUNTING
 -      INF("io",       S_IRUGO, proc_tgid_io_accounting),
 +      INF("io",       S_IRUSR, proc_tgid_io_accounting),
  #endif
  #ifdef CONFIG_HARDWALL
        INF("hardwall",   S_IRUGO, proc_pid_hardwall),
@@@ -3184,7 -3185,7 +3184,7 @@@ static const struct pid_entry tid_base_
        REG("make-it-fail", S_IRUGO|S_IWUSR, proc_fault_inject_operations),
  #endif
  #ifdef CONFIG_TASK_IO_ACCOUNTING
 -      INF("io",       S_IRUGO, proc_tid_io_accounting),
 +      INF("io",       S_IRUSR, proc_tid_io_accounting),
  #endif
  #ifdef CONFIG_HARDWALL
        INF("hardwall",   S_IRUGO, proc_pid_hardwall),
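
The two "io" entries above drop from S_IRUGO to S_IRUSR, and
do_io_accounting() now also requires ptrace access, so another user's
/proc/<pid>/io is no longer readable.  A hedged sketch of what a reader
sees after this change (path handling and output are illustrative):

#include <stdio.h>

int main(int argc, char **argv)
{
        char path[64];
        char line[128];
        FILE *f;

        snprintf(path, sizeof(path), "/proc/%s/io",
                 argc > 1 ? argv[1] : "self");
        f = fopen(path, "r");
        if (!f) {
                perror(path);           /* EACCES without ptrace access */
                return 1;
        }
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);
        fclose(f);
        return 0;
}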
diff --combined include/linux/sched.h
@@@ -808,7 -808,7 +808,7 @@@ enum cpu_idle_type 
   * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
   * increased costs.
   */
 -#if BITS_PER_LONG > 32
 +#if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load  */
  # define SCHED_LOAD_RESOLUTION        10
  # define scale_load(w)                ((w) << SCHED_LOAD_RESOLUTION)
  # define scale_load_down(w)   ((w) >> SCHED_LOAD_RESOLUTION)
  #define SD_SERIALIZE          0x0400  /* Only a single load balancing instance */
  #define SD_ASYM_PACKING               0x0800  /* Place busy groups earlier in the domain */
  #define SD_PREFER_SIBLING     0x1000  /* Prefer to place tasks in a sibling domain */
 +#define SD_OVERLAP            0x2000  /* sched_domains of this level overlap */
  
  enum powersavings_balance_level {
        POWERSAVINGS_BALANCE_NONE = 0,  /* No power saving load balance */
@@@ -894,21 -893,16 +894,21 @@@ static inline int sd_power_saving_flags
        return 0;
  }
  
 -struct sched_group {
 -      struct sched_group *next;       /* Must be a circular list */
 +struct sched_group_power {
        atomic_t ref;
 -
        /*
         * CPU power of this group, SCHED_LOAD_SCALE being max power for a
         * single CPU.
         */
 -      unsigned int cpu_power, cpu_power_orig;
 +      unsigned int power, power_orig;
 +};
 +
 +struct sched_group {
 +      struct sched_group *next;       /* Must be a circular list */
 +      atomic_t ref;
 +
        unsigned int group_weight;
 +      struct sched_group_power *sgp;
  
        /*
         * The CPUs this group covers.
@@@ -1069,7 -1063,6 +1069,7 @@@ struct sched_domain
   */
  #define WF_SYNC               0x01            /* waker goes to sleep after wakeup */
  #define WF_FORK               0x02            /* child wakeup after fork */
 +#define WF_MIGRATED   0x04            /* internal use, task got migrated */
  
  #define ENQUEUE_WAKEUP                1
  #define ENQUEUE_HEAD          2
@@@ -1260,9 -1253,6 +1260,9 @@@ struct task_struct 
  #ifdef CONFIG_PREEMPT_RCU
        int rcu_read_lock_nesting;
        char rcu_read_unlock_special;
 +#if defined(CONFIG_RCU_BOOST) && defined(CONFIG_TREE_PREEMPT_RCU)
 +      int rcu_boosted;
 +#endif /* #if defined(CONFIG_RCU_BOOST) && defined(CONFIG_TREE_PREEMPT_RCU) */
        struct list_head rcu_node_entry;
  #endif /* #ifdef CONFIG_PREEMPT_RCU */
  #ifdef CONFIG_TREE_PREEMPT_RCU
        int exit_state;
        int exit_code, exit_signal;
        int pdeath_signal;  /*  The signal sent when the parent dies  */
-       unsigned int group_stop;        /* GROUP_STOP_*, siglock protected */
+       unsigned int jobctl;    /* JOBCTL_*, siglock protected */
        /* ??? */
        unsigned int personality;
        unsigned did_exec:1;
@@@ -1813,15 -1803,34 +1813,34 @@@ extern void thread_group_times(struct t
  #define used_math() tsk_used_math(current)
  
  /*
-  * task->group_stop flags
+  * task->jobctl flags
   */
- #define GROUP_STOP_SIGMASK    0xffff    /* signr of the last group stop */
- #define GROUP_STOP_PENDING    (1 << 16) /* task should stop for group stop */
- #define GROUP_STOP_CONSUME    (1 << 17) /* consume group stop count */
- #define GROUP_STOP_TRAPPING   (1 << 18) /* switching from STOPPED to TRACED */
- #define GROUP_STOP_DEQUEUED   (1 << 19) /* stop signal dequeued */
- extern void task_clear_group_stop_pending(struct task_struct *task);
+ #define JOBCTL_STOP_SIGMASK   0xffff  /* signr of the last group stop */
+ #define JOBCTL_STOP_DEQUEUED_BIT 16   /* stop signal dequeued */
+ #define JOBCTL_STOP_PENDING_BIT       17      /* task should stop for group stop */
+ #define JOBCTL_STOP_CONSUME_BIT       18      /* consume group stop count */
+ #define JOBCTL_TRAP_STOP_BIT  19      /* trap for STOP */
+ #define JOBCTL_TRAP_NOTIFY_BIT        20      /* trap for NOTIFY */
+ #define JOBCTL_TRAPPING_BIT   21      /* switching to TRACED */
+ #define JOBCTL_LISTENING_BIT  22      /* ptracer is listening for events */
+ #define JOBCTL_STOP_DEQUEUED  (1 << JOBCTL_STOP_DEQUEUED_BIT)
+ #define JOBCTL_STOP_PENDING   (1 << JOBCTL_STOP_PENDING_BIT)
+ #define JOBCTL_STOP_CONSUME   (1 << JOBCTL_STOP_CONSUME_BIT)
+ #define JOBCTL_TRAP_STOP      (1 << JOBCTL_TRAP_STOP_BIT)
+ #define JOBCTL_TRAP_NOTIFY    (1 << JOBCTL_TRAP_NOTIFY_BIT)
+ #define JOBCTL_TRAPPING               (1 << JOBCTL_TRAPPING_BIT)
+ #define JOBCTL_LISTENING      (1 << JOBCTL_LISTENING_BIT)
+ #define JOBCTL_TRAP_MASK      (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
+ #define JOBCTL_PENDING_MASK   (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
+ extern bool task_set_jobctl_pending(struct task_struct *task,
+                                   unsigned int mask);
+ extern void task_clear_jobctl_trapping(struct task_struct *task);
+ extern void task_clear_jobctl_pending(struct task_struct *task,
+                                     unsigned int mask);
  
  #ifdef CONFIG_PREEMPT_RCU
  
@@@ -2136,7 -2145,7 +2155,7 @@@ static inline int dequeue_signal_lock(s
        spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
  
        return ret;
- }     
+ }
  
  extern void block_all_signals(int (*notifier)(void *priv), void *priv,
                              sigset_t *mask);
@@@ -2151,7 -2160,7 +2170,7 @@@ extern int kill_pid_info_as_uid(int, st
  extern int kill_pgrp(struct pid *pid, int sig, int priv);
  extern int kill_pid(struct pid *pid, int sig, int priv);
  extern int kill_proc_info(int, struct siginfo *, pid_t);
- extern int do_notify_parent(struct task_struct *, int);
+ extern __must_check bool do_notify_parent(struct task_struct *, int);
  extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
  extern void force_sig(int, struct task_struct *);
  extern int send_sig(int, struct task_struct *, int);
@@@ -2275,8 -2284,10 +2294,10 @@@ static inline int get_nr_threads(struc
        return tsk->signal->nr_threads;
  }
  
- /* de_thread depends on thread_group_leader not being a pid based check */
- #define thread_group_leader(p)        (p == p->group_leader)
+ static inline bool thread_group_leader(struct task_struct *p)
+ {
+       return p->exit_signal >= 0;
+ }
  
  /* Due to the insanities of de_thread it is possible for a process
   * to have the pid of the thread group leader without actually being
@@@ -2309,11 -2320,6 +2330,6 @@@ static inline int thread_group_empty(st
  #define delay_group_leader(p) \
                (thread_group_leader(p) && !thread_group_empty(p))
  
- static inline int task_detached(struct task_struct *p)
- {
-       return p->exit_signal == -1;
- }
  /*
   * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
   * subscriptions and synchronises with wait4().  Also used in procfs.  Also
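
For reference, a self-contained illustration of the JOBCTL_* encoding
introduced above: the low 16 bits carry the number of the signal that
initiated the stop, and the bits above them carry state flags.  The
constants are copied out of the header only so the example stands alone;
the values match the definitions above:

#include <signal.h>
#include <stdio.h>

#define JOBCTL_STOP_SIGMASK     0xffff
#define JOBCTL_STOP_PENDING     (1 << 17)
#define JOBCTL_STOP_CONSUME     (1 << 18)

int main(void)
{
        unsigned int jobctl = 0;

        /* record a SIGTSTP-initiated group stop, as
         * task_set_jobctl_pending() would */
        jobctl = (jobctl & ~JOBCTL_STOP_SIGMASK) |
                 SIGTSTP | JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;

        printf("signr=%d, stop pending=%d\n",
               jobctl & JOBCTL_STOP_SIGMASK,
               !!(jobctl & JOBCTL_STOP_PENDING));
        return 0;
}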
diff --combined kernel/exit.c
@@@ -169,7 -169,6 +169,6 @@@ void release_task(struct task_struct * 
        struct task_struct *leader;
        int zap_leader;
  repeat:
-       tracehook_prepare_release_task(p);
        /* don't need to get the RCU readlock here - the process is dead and
         * can't be modifying its own credentials. But shut RCU-lockdep up */
        rcu_read_lock();
        proc_flush_task(p);
  
        write_lock_irq(&tasklist_lock);
-       tracehook_finish_release_task(p);
+       ptrace_release_task(p);
        __exit_signal(p);
  
        /*
        zap_leader = 0;
        leader = p->group_leader;
        if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
-               BUG_ON(task_detached(leader));
-               do_notify_parent(leader, leader->exit_signal);
                /*
                 * If we were the last child thread and the leader has
                 * exited already, and the leader's parent ignores SIGCHLD,
                 * then we are the one who should release the leader.
-                *
-                * do_notify_parent() will have marked it self-reaping in
-                * that case.
-                */
-               zap_leader = task_detached(leader);
-               /*
-                * This maintains the invariant that release_task()
-                * only runs on a task in EXIT_DEAD, just for sanity.
                 */
+               zap_leader = do_notify_parent(leader, leader->exit_signal);
                if (zap_leader)
                        leader->exit_state = EXIT_DEAD;
        }
@@@ -277,18 -266,16 +266,16 @@@ int is_current_pgrp_orphaned(void
        return retval;
  }
  
- static int has_stopped_jobs(struct pid *pgrp)
+ static bool has_stopped_jobs(struct pid *pgrp)
  {
        struct task_struct *p;
  
        do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
-               if (!task_is_stopped(p))
-                       continue;
-               retval = 1;
-               break;
+               if (p->signal->flags & SIGNAL_STOP_STOPPED)
+                       return true;
        } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
-       return retval;
+       return false;
  }
  
  /*
@@@ -561,28 -548,29 +548,28 @@@ void exit_files(struct task_struct *tsk
  
  #ifdef CONFIG_MM_OWNER
  /*
 - * Task p is exiting and it owned mm, lets find a new owner for it
 + * A task is exiting.   If it owned this mm, find a new owner for the mm.
   */
 -static inline int
 -mm_need_new_owner(struct mm_struct *mm, struct task_struct *p)
 -{
 -      /*
 -       * If there are other users of the mm and the owner (us) is exiting
 -       * we need to find a new owner to take on the responsibility.
 -       */
 -      if (atomic_read(&mm->mm_users) <= 1)
 -              return 0;
 -      if (mm->owner != p)
 -              return 0;
 -      return 1;
 -}
 -
  void mm_update_next_owner(struct mm_struct *mm)
  {
        struct task_struct *c, *g, *p = current;
  
  retry:
 -      if (!mm_need_new_owner(mm, p))
 +      /*
 +       * If the exiting or execing task is not the owner, it's
 +       * someone else's problem.
 +       */
 +      if (mm->owner != p)
                return;
 +      /*
 +       * The current owner is exiting/execing and there are no other
 +       * candidates.  Do not leave the mm pointing to a possibly
 +       * freed task structure.
 +       */
 +      if (atomic_read(&mm->mm_users) <= 1) {
 +              mm->owner = NULL;
 +              return;
 +      }
  
        read_lock(&tasklist_lock);
        /*
@@@ -751,7 -739,7 +738,7 @@@ static void reparent_leader(struct task
  {
        list_move_tail(&p->sibling, &p->real_parent->children);
  
-       if (task_detached(p))
+       if (p->exit_state == EXIT_DEAD)
                return;
        /*
         * If this is a threaded reparent there is no need to
        p->exit_signal = SIGCHLD;
  
        /* If it has exited notify the new parent about this child's death. */
-       if (!task_ptrace(p) &&
+       if (!p->ptrace &&
            p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
-               do_notify_parent(p, p->exit_signal);
-               if (task_detached(p)) {
+               if (do_notify_parent(p, p->exit_signal)) {
                        p->exit_state = EXIT_DEAD;
                        list_move_tail(&p->sibling, dead);
                }
@@@ -794,7 -781,7 +780,7 @@@ static void forget_original_parent(stru
                do {
                        t->real_parent = reaper;
                        if (t->parent == father) {
-                               BUG_ON(task_ptrace(t));
+                               BUG_ON(t->ptrace);
                                t->parent = t->real_parent;
                        }
                        if (t->pdeath_signal)
   */
  static void exit_notify(struct task_struct *tsk, int group_dead)
  {
-       int signal;
-       void *cookie;
+       bool autoreap;
  
        /*
         * This does two things:
         * we have changed execution domain as these two values started
         * the same after a fork.
         */
-       if (tsk->exit_signal != SIGCHLD && !task_detached(tsk) &&
+       if (thread_group_leader(tsk) && tsk->exit_signal != SIGCHLD &&
            (tsk->parent_exec_id != tsk->real_parent->self_exec_id ||
             tsk->self_exec_id != tsk->parent_exec_id))
                tsk->exit_signal = SIGCHLD;
  
-       signal = tracehook_notify_death(tsk, &cookie, group_dead);
-       if (signal >= 0)
-               signal = do_notify_parent(tsk, signal);
+       if (unlikely(tsk->ptrace)) {
+               int sig = thread_group_leader(tsk) &&
+                               thread_group_empty(tsk) &&
+                               !ptrace_reparented(tsk) ?
+                       tsk->exit_signal : SIGCHLD;
+               autoreap = do_notify_parent(tsk, sig);
+       } else if (thread_group_leader(tsk)) {
+               autoreap = thread_group_empty(tsk) &&
+                       do_notify_parent(tsk, tsk->exit_signal);
+       } else {
+               autoreap = true;
+       }
  
-       tsk->exit_state = signal == DEATH_REAP ? EXIT_DEAD : EXIT_ZOMBIE;
+       tsk->exit_state = autoreap ? EXIT_DEAD : EXIT_ZOMBIE;
  
        /* mt-exec, de_thread() is waiting for group leader */
        if (unlikely(tsk->signal->notify_count < 0))
                wake_up_process(tsk->signal->group_exit_task);
        write_unlock_irq(&tasklist_lock);
  
-       tracehook_report_death(tsk, signal, cookie, group_dead);
        /* If the process is dead, release it - nobody will wait for it */
-       if (signal == DEATH_REAP)
+       if (autoreap)
                release_task(tsk);
  }
  
@@@ -923,7 -916,7 +915,7 @@@ NORET_TYPE void do_exit(long code
         */
        set_fs(USER_DS);
  
-       tracehook_report_exit(&code);
+       ptrace_event(PTRACE_EVENT_EXIT, code);
  
        validate_creds_for_do_exit(tsk);
  
@@@ -1235,9 -1228,9 +1227,9 @@@ static int wait_task_zombie(struct wait
        traced = ptrace_reparented(p);
        /*
         * It can be ptraced but not reparented, check
-        * !task_detached() to filter out sub-threads.
+        * thread_group_leader() to filter out sub-threads.
         */
-       if (likely(!traced) && likely(!task_detached(p))) {
+       if (likely(!traced) && thread_group_leader(p)) {
                struct signal_struct *psig;
                struct signal_struct *sig;
                unsigned long maxrss;
                /* We dropped tasklist, ptracer could die and untrace */
                ptrace_unlink(p);
                /*
-                * If this is not a detached task, notify the parent.
-                * If it's still not detached after that, don't release
-                * it now.
+                * If this is not a sub-thread, notify the parent.
+                * If parent wants a zombie, don't release it now.
                 */
-               if (!task_detached(p)) {
-                       do_notify_parent(p, p->exit_signal);
-                       if (!task_detached(p)) {
-                               p->exit_state = EXIT_ZOMBIE;
-                               p = NULL;
-                       }
+               if (thread_group_leader(p) &&
+                   !do_notify_parent(p, p->exit_signal)) {
+                       p->exit_state = EXIT_ZOMBIE;
+                       p = NULL;
                }
                write_unlock_irq(&tasklist_lock);
        }
  static int *task_stopped_code(struct task_struct *p, bool ptrace)
  {
        if (ptrace) {
-               if (task_is_stopped_or_traced(p))
+               if (task_is_stopped_or_traced(p) &&
+                   !(p->jobctl & JOBCTL_LISTENING))
                        return &p->exit_code;
        } else {
                if (p->signal->flags & SIGNAL_STOP_STOPPED)
@@@ -1563,7 -1554,7 +1553,7 @@@ static int wait_consider_task(struct wa
                 * Notification and reaping will be cascaded to the real
                 * parent when the ptracer detaches.
                 */
-               if (likely(!ptrace) && unlikely(task_ptrace(p))) {
+               if (likely(!ptrace) && unlikely(p->ptrace)) {
                        /* it will become visible, clear notask_error */
                        wo->notask_error = 0;
                        return 0;
                 * own children, it should create a separate process which
                 * takes the role of real parent.
                 */
-               if (likely(!ptrace) && task_ptrace(p) &&
-                   same_thread_group(p->parent, p->real_parent))
+               if (likely(!ptrace) && p->ptrace && !ptrace_reparented(p))
                        return 0;
  
                /*
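
The do_notify_parent() rework above turns the "parent ignores SIGCHLD, so
the child self-reaps" decision into a bool return consumed by
exit_notify() and release_task().  The userland-visible behaviour it
implements is the classic SIG_IGN autoreap; a minimal demo:

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        signal(SIGCHLD, SIG_IGN);       /* ask the kernel to autoreap */

        if (fork() == 0)
                _exit(0);               /* child exits immediately */

        /* wait() blocks until all children are gone, then fails:
         * the child was autoreaped and left no zombie behind. */
        if (wait(NULL) < 0 && errno == ECHILD)
                printf("child was autoreaped, nothing to wait for\n");
        return 0;
}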
diff --combined kernel/signal.c
@@@ -87,7 -87,7 +87,7 @@@ static int sig_ignored(struct task_stru
        /*
         * Tracers may want to know about even ignored signals.
         */
-       return !tracehook_consider_ignored_signal(t, sig);
+       return !t->ptrace;
  }
  
  /*
@@@ -124,7 -124,7 +124,7 @@@ static inline int has_pending_signals(s
  
  static int recalc_sigpending_tsk(struct task_struct *t)
  {
-       if ((t->group_stop & GROUP_STOP_PENDING) ||
+       if ((t->jobctl & JOBCTL_PENDING_MASK) ||
            PENDING(&t->pending, &t->blocked) ||
            PENDING(&t->signal->shared_pending, &t->blocked)) {
                set_tsk_thread_flag(t, TIF_SIGPENDING);
@@@ -150,9 -150,7 +150,7 @@@ void recalc_sigpending_and_wake(struct 
  
  void recalc_sigpending(void)
  {
-       if (unlikely(tracehook_force_sigpending()))
-               set_thread_flag(TIF_SIGPENDING);
-       else if (!recalc_sigpending_tsk(current) && !freezing(current))
+       if (!recalc_sigpending_tsk(current) && !freezing(current))
                clear_thread_flag(TIF_SIGPENDING);
  
  }
@@@ -224,47 -222,93 +222,93 @@@ static inline void print_dropped_signal
  }
  
  /**
-  * task_clear_group_stop_trapping - clear group stop trapping bit
+  * task_set_jobctl_pending - set jobctl pending bits
   * @task: target task
+  * @mask: pending bits to set
   *
-  * If GROUP_STOP_TRAPPING is set, a ptracer is waiting for us.  Clear it
-  * and wake up the ptracer.  Note that we don't need any further locking.
-  * @task->siglock guarantees that @task->parent points to the ptracer.
+  * Set @mask on @task->jobctl.  @mask must be a subset of
+  * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
+  * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
+  * cleared.  If @task is already being killed or exiting, this function
+  * becomes a no-op.
+  *
+  * CONTEXT:
+  * Must be called with @task->sighand->siglock held.
+  *
+  * RETURNS:
+  * %true if @mask is set, %false if it was a no-op because @task was dying.
+  */
+ bool task_set_jobctl_pending(struct task_struct *task, unsigned int mask)
+ {
+       BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
+                       JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
+       BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
+       if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
+               return false;
+       if (mask & JOBCTL_STOP_SIGMASK)
+               task->jobctl &= ~JOBCTL_STOP_SIGMASK;
+       task->jobctl |= mask;
+       return true;
+ }
+
+ /**
+  * task_clear_jobctl_trapping - clear jobctl trapping bit
+  * @task: target task
+  *
+  * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
+  * Clear it and wake up the ptracer.  Note that we don't need any further
+  * locking.  @task->siglock guarantees that @task->parent points to the
+  * ptracer.
   *
   * CONTEXT:
   * Must be called with @task->sighand->siglock held.
   */
- static void task_clear_group_stop_trapping(struct task_struct *task)
+ void task_clear_jobctl_trapping(struct task_struct *task)
  {
-       if (unlikely(task->group_stop & GROUP_STOP_TRAPPING)) {
-               task->group_stop &= ~GROUP_STOP_TRAPPING;
-               __wake_up_sync_key(&task->parent->signal->wait_chldexit,
-                                  TASK_UNINTERRUPTIBLE, 1, task);
+       if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
+               task->jobctl &= ~JOBCTL_TRAPPING;
+               wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
        }
  }
  
  /**
-  * task_clear_group_stop_pending - clear pending group stop
+  * task_clear_jobctl_pending - clear jobctl pending bits
   * @task: target task
+  * @mask: pending bits to clear
+  *
+  * Clear @mask from @task->jobctl.  @mask must be subset of
+  * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
+  * STOP bits are cleared together.
   *
-  * Clear group stop states for @task.
+  * If clearing of @mask leaves no stop or trap pending, this function calls
+  * task_clear_jobctl_trapping().
   *
   * CONTEXT:
   * Must be called with @task->sighand->siglock held.
   */
- void task_clear_group_stop_pending(struct task_struct *task)
+ void task_clear_jobctl_pending(struct task_struct *task, unsigned int mask)
  {
-       task->group_stop &= ~(GROUP_STOP_PENDING | GROUP_STOP_CONSUME |
-                             GROUP_STOP_DEQUEUED);
+       BUG_ON(mask & ~JOBCTL_PENDING_MASK);
+       if (mask & JOBCTL_STOP_PENDING)
+               mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
+       task->jobctl &= ~mask;
+       if (!(task->jobctl & JOBCTL_PENDING_MASK))
+               task_clear_jobctl_trapping(task);
  }
  
  /**
   * task_participate_group_stop - participate in a group stop
   * @task: task participating in a group stop
   *
-  * @task has GROUP_STOP_PENDING set and is participating in a group stop.
+  * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
   * Group stop states are cleared and the group stop count is consumed if
-  * %GROUP_STOP_CONSUME was set.  If the consumption completes the group
+  * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
   * stop, the appropriate %SIGNAL_* flags are set.
   *
   * CONTEXT:
  static bool task_participate_group_stop(struct task_struct *task)
  {
        struct signal_struct *sig = task->signal;
-       bool consume = task->group_stop & GROUP_STOP_CONSUME;
+       bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
  
-       WARN_ON_ONCE(!(task->group_stop & GROUP_STOP_PENDING));
+       WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
  
-       task_clear_group_stop_pending(task);
+       task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
  
        if (!consume)
                return false;
@@@ -449,7 -493,8 +493,8 @@@ int unhandled_signal(struct task_struc
                return 1;
        if (handler != SIG_IGN && handler != SIG_DFL)
                return 0;
-       return !tracehook_consider_fatal_signal(tsk, sig);
+       /* if ptraced, let the tracer determine */
+       return !tsk->ptrace;
  }
  
  /*
@@@ -604,7 -649,7 +649,7 @@@ int dequeue_signal(struct task_struct *
                 * is to alert stop-signal processing code when another
                 * processor has come along and cleared the flag.
                 */
-               current->group_stop |= GROUP_STOP_DEQUEUED;
+               current->jobctl |= JOBCTL_STOP_DEQUEUED;
        }
        if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
                /*
@@@ -773,6 -818,32 +818,32 @@@ static int check_kill_permission(int si
        return security_task_kill(t, info, sig, 0);
  }
  
+ /**
+  * ptrace_trap_notify - schedule trap to notify ptracer
+  * @t: tracee wanting to notify tracer
+  *
+  * This function schedules sticky ptrace trap which is cleared on the next
+  * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
+  * ptracer.
+  *
+  * If @t is running, STOP trap will be taken.  If trapped for STOP and
+  * ptracer is listening for events, tracee is woken up so that it can
+  * re-trap for the new event.  If trapped otherwise, STOP trap will be
+  * eventually taken without returning to userland after the existing traps
+  * are finished by PTRACE_CONT.
+  *
+  * CONTEXT:
+  * Must be called with @task->sighand->siglock held.
+  */
+ static void ptrace_trap_notify(struct task_struct *t)
+ {
+       WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
+       assert_spin_locked(&t->sighand->siglock);
+       task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
+       signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
+ }
+
  /*
   * Handle magic process-wide effects of stop/continue signals. Unlike
   * the signal actions, these happen immediately at signal-generation
@@@ -809,9 -880,12 +880,12 @@@ static int prepare_signal(int sig, stru
                rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
                t = p;
                do {
-                       task_clear_group_stop_pending(t);
+                       task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
                        rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
-                       wake_up_state(t, __TASK_STOPPED);
+                       if (likely(!(t->ptrace & PT_SEIZED)))
+                               wake_up_state(t, __TASK_STOPPED);
+                       else
+                               ptrace_trap_notify(t);
                } while_each_thread(p, t);
  
                /*
@@@ -908,8 -982,7 +982,7 @@@ static void complete_signal(int sig, st
        if (sig_fatal(p, sig) &&
            !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
            !sigismember(&t->real_blocked, sig) &&
-           (sig == SIGKILL ||
-            !tracehook_consider_fatal_signal(t, sig))) {
+           (sig == SIGKILL || !t->ptrace)) {
                /*
                 * This signal will be fatal to the whole group.
                 */
                        signal->group_stop_count = 0;
                        t = p;
                        do {
-                               task_clear_group_stop_pending(t);
+                               task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
                                sigaddset(&t->pending.signal, SIGKILL);
                                signal_wake_up(t, 1);
                        } while_each_thread(p, t);
@@@ -1160,7 -1233,7 +1233,7 @@@ int zap_other_threads(struct task_struc
        p->signal->group_stop_count = 0;
  
        while_each_thread(p, t) {
-               task_clear_group_stop_pending(t);
+               task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
                count++;
  
                /* Don't bother with already dead threads */
@@@ -1178,25 -1251,18 +1251,25 @@@ struct sighand_struct *__lock_task_sigh
  {
        struct sighand_struct *sighand;
  
 -      rcu_read_lock();
        for (;;) {
 +              local_irq_save(*flags);
 +              rcu_read_lock();
                sighand = rcu_dereference(tsk->sighand);
 -              if (unlikely(sighand == NULL))
 +              if (unlikely(sighand == NULL)) {
 +                      rcu_read_unlock();
 +                      local_irq_restore(*flags);
                        break;
 +              }
  
 -              spin_lock_irqsave(&sighand->siglock, *flags);
 -              if (likely(sighand == tsk->sighand))
 +              spin_lock(&sighand->siglock);
 +              if (likely(sighand == tsk->sighand)) {
 +                      rcu_read_unlock();
                        break;
 -              spin_unlock_irqrestore(&sighand->siglock, *flags);
 +              }
 +              spin_unlock(&sighand->siglock);
 +              rcu_read_unlock();
 +              local_irq_restore(*flags);
        }
 -      rcu_read_unlock();
  
        return sighand;
  }
   * Let a parent know about the death of a child.
   * For a stopped/continued status change, use do_notify_parent_cldstop instead.
   *
-  * Returns -1 if our parent ignored us and so we've switched to
-  * self-reaping, or else @sig.
+  * Returns true if our parent ignored us and so we've switched to
+  * self-reaping.
   */
- int do_notify_parent(struct task_struct *tsk, int sig)
+ bool do_notify_parent(struct task_struct *tsk, int sig)
  {
        struct siginfo info;
        unsigned long flags;
        struct sighand_struct *psig;
-       int ret = sig;
+       bool autoreap = false;
  
        BUG_ON(sig == -1);
  
        /* do_notify_parent_cldstop should have been called instead.  */
        BUG_ON(task_is_stopped_or_traced(tsk));
  
-       BUG_ON(!task_ptrace(tsk) &&
+       BUG_ON(!tsk->ptrace &&
               (tsk->group_leader != tsk || !thread_group_empty(tsk)));
  
        info.si_signo = sig;
  
        psig = tsk->parent->sighand;
        spin_lock_irqsave(&psig->siglock, flags);
-       if (!task_ptrace(tsk) && sig == SIGCHLD &&
+       if (!tsk->ptrace && sig == SIGCHLD &&
            (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
             (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
                /*
                 * is implementation-defined: we do (if you don't want
                 * it, just use SIG_IGN instead).
                 */
-               ret = tsk->exit_signal = -1;
+               autoreap = true;
                if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
-                       sig = -1;
+                       sig = 0;
        }
-       if (valid_signal(sig) && sig > 0)
+       if (valid_signal(sig) && sig)
                __group_send_sig_info(sig, &info, tsk->parent);
        __wake_up_parent(tsk, tsk->parent);
        spin_unlock_irqrestore(&psig->siglock, flags);
  
-       return ret;
+       return autoreap;
  }
  
  /**
@@@ -1665,7 -1731,7 +1738,7 @@@ static void do_notify_parent_cldstop(st
  
  static inline int may_ptrace_stop(void)
  {
-       if (!likely(task_ptrace(current)))
+       if (!likely(current->ptrace))
                return 0;
        /*
         * Are we in the middle of do_coredump?
@@@ -1694,15 -1760,6 +1767,6 @@@ static int sigkill_pending(struct task_
  }
  
  /*
-  * Test whether the target task of the usual cldstop notification - the
-  * real_parent of @child - is in the same group as the ptracer.
-  */
- static bool real_parent_is_ptracer(struct task_struct *child)
- {
-       return same_thread_group(child->parent, child->real_parent);
- }
- /*
   * This must be called with current->sighand->siglock held.
   *
   * This should be the path for all ptrace stops.
@@@ -1739,31 -1796,34 +1803,34 @@@ static void ptrace_stop(int exit_code, 
        }
  
        /*
-        * If @why is CLD_STOPPED, we're trapping to participate in a group
-        * stop.  Do the bookkeeping.  Note that if SIGCONT was delievered
-        * while siglock was released for the arch hook, PENDING could be
-        * clear now.  We act as if SIGCONT is received after TASK_TRACED
-        * is entered - ignore it.
+        * We're committing to trapping.  TRACED should be visible before
+        * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
+        * Also, transition to TRACED and updates to ->jobctl should be
+        * atomic with respect to siglock and should be done after the arch
+        * hook as siglock is released and regrabbed across it.
         */
-       if (why == CLD_STOPPED && (current->group_stop & GROUP_STOP_PENDING))
-               gstop_done = task_participate_group_stop(current);
+       set_current_state(TASK_TRACED);
  
        current->last_siginfo = info;
        current->exit_code = exit_code;
  
        /*
-        * TRACED should be visible before TRAPPING is cleared; otherwise,
-        * the tracer might fail do_wait().
+        * If @why is CLD_STOPPED, we're trapping to participate in a group
+        * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
+        * across siglock relocks since INTERRUPT was scheduled, PENDING
+        * could be clear now.  We act as if SIGCONT is received after
+        * TASK_TRACED is entered - ignore it.
         */
-       set_current_state(TASK_TRACED);
+       if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
+               gstop_done = task_participate_group_stop(current);
  
-       /*
-        * We're committing to trapping.  Clearing GROUP_STOP_TRAPPING and
-        * transition to TASK_TRACED should be atomic with respect to
-        * siglock.  This hsould be done after the arch hook as siglock is
-        * released and regrabbed across it.
-        */
-       task_clear_group_stop_trapping(current);
+       /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
+       task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
+       if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
+               task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
+       /* entering a trap, clear TRAPPING */
+       task_clear_jobctl_trapping(current);
  
        spin_unlock_irq(&current->sighand->siglock);
        read_lock(&tasklist_lock);
                 * separately unless they're gonna be duplicates.
                 */
                do_notify_parent_cldstop(current, true, why);
-               if (gstop_done && !real_parent_is_ptracer(current))
+               if (gstop_done && ptrace_reparented(current))
                        do_notify_parent_cldstop(current, false, why);
  
                /*
                 *
                 * If @gstop_done, the ptracer went away between group stop
                 * completion and here.  During detach, it would have set
-                * GROUP_STOP_PENDING on us and we'll re-enter TASK_STOPPED
-                * in do_signal_stop() on return, so notifying the real
-                * parent of the group stop completion is enough.
+                * JOBCTL_STOP_PENDING on us and we'll re-enter
+                * TASK_STOPPED in do_signal_stop() on return, so notifying
+                * the real parent of the group stop completion is enough.
                 */
                if (gstop_done)
                        do_notify_parent_cldstop(current, false, why);
        spin_lock_irq(&current->sighand->siglock);
        current->last_siginfo = NULL;
  
+       /* LISTENING can be set only during STOP traps, clear it */
+       current->jobctl &= ~JOBCTL_LISTENING;
+
        /*
         * Queued signals ignored us while we were stopped for tracing.
         * So check for any that we should take before resuming user mode.
        recalc_sigpending_tsk(current);
  }
  
- void ptrace_notify(int exit_code)
+ static void ptrace_do_notify(int signr, int exit_code, int why)
  {
        siginfo_t info;
  
-       BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
        memset(&info, 0, sizeof info);
-       info.si_signo = SIGTRAP;
+       info.si_signo = signr;
        info.si_code = exit_code;
        info.si_pid = task_pid_vnr(current);
        info.si_uid = current_uid();
  
        /* Let the debugger run.  */
+       ptrace_stop(exit_code, why, 1, &info);
+ }
+
+ void ptrace_notify(int exit_code)
+ {
+       BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
        spin_lock_irq(&current->sighand->siglock);
-       ptrace_stop(exit_code, CLD_TRAPPED, 1, &info);
+       ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
        spin_unlock_irq(&current->sighand->siglock);
  }
  
- /*
-  * This performs the stopping for SIGSTOP and other stop signals.
-  * We have to stop all threads in the thread group.
-  * Returns non-zero if we've actually stopped and released the siglock.
-  * Returns zero if we didn't stop and still hold the siglock.
+ /**
+  * do_signal_stop - handle group stop for SIGSTOP and other stop signals
+  * @signr: signr causing group stop if initiating
+  *
+  * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
+  * and participate in it.  If already set, participate in the existing
+  * group stop.  If participated in a group stop (and thus slept), %true is
+  * returned with siglock released.
+  *
+  * If ptraced, this function doesn't handle stop itself.  Instead,
+  * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
+  * untouched.  The caller must ensure that INTERRUPT trap handling takes
+  * place afterwards.
+  *
+  * CONTEXT:
+  * Must be called with @current->sighand->siglock held, which is released
+  * on %true return.
+  *
+  * RETURNS:
+  * %false if group stop is already cancelled or ptrace trap is scheduled.
+  * %true if participated in group stop.
   */
- static int do_signal_stop(int signr)
+ static bool do_signal_stop(int signr)
+       __releases(&current->sighand->siglock)
  {
        struct signal_struct *sig = current->signal;
  
-       if (!(current->group_stop & GROUP_STOP_PENDING)) {
-               unsigned int gstop = GROUP_STOP_PENDING | GROUP_STOP_CONSUME;
+       if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
+               unsigned int gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
                struct task_struct *t;
  
-               /* signr will be recorded in task->group_stop for retries */
-               WARN_ON_ONCE(signr & ~GROUP_STOP_SIGMASK);
+               /* signr will be recorded in task->jobctl for retries */
+               WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
  
-               if (!likely(current->group_stop & GROUP_STOP_DEQUEUED) ||
+               if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
                    unlikely(signal_group_exit(sig)))
-                       return 0;
+                       return false;
                /*
                 * There is no group stop already in progress.  We must
                 * initiate one now.
                if (!(sig->flags & SIGNAL_STOP_STOPPED))
                        sig->group_exit_code = signr;
                else
-                       WARN_ON_ONCE(!task_ptrace(current));
+                       WARN_ON_ONCE(!current->ptrace);
+               sig->group_stop_count = 0;
+               if (task_set_jobctl_pending(current, signr | gstop))
+                       sig->group_stop_count++;
  
-               current->group_stop &= ~GROUP_STOP_SIGMASK;
-               current->group_stop |= signr | gstop;
-               sig->group_stop_count = 1;
                for (t = next_thread(current); t != current;
                     t = next_thread(t)) {
-                       t->group_stop &= ~GROUP_STOP_SIGMASK;
                        /*
                         * Setting state to TASK_STOPPED for a group
                         * stop is always done with the siglock held,
                         * so this check has no races.
                         */
-                       if (!(t->flags & PF_EXITING) && !task_is_stopped(t)) {
-                               t->group_stop |= signr | gstop;
+                       if (!task_is_stopped(t) &&
+                           task_set_jobctl_pending(t, signr | gstop)) {
                                sig->group_stop_count++;
-                               signal_wake_up(t, 0);
+                               if (likely(!(t->ptrace & PT_SEIZED)))
+                                       signal_wake_up(t, 0);
+                               else
+                                       ptrace_trap_notify(t);
                        }
                }
        }
- retry:
-       if (likely(!task_ptrace(current))) {
+       if (likely(!current->ptrace)) {
                int notify = 0;
  
                /*
  
                /* Now we don't run again until woken by SIGCONT or SIGKILL */
                schedule();
-               spin_lock_irq(&current->sighand->siglock);
+               return true;
        } else {
-               ptrace_stop(current->group_stop & GROUP_STOP_SIGMASK,
-                           CLD_STOPPED, 0, NULL);
-               current->exit_code = 0;
+               /*
+                * While ptraced, group stop is handled by STOP trap.
+                * Schedule it and let the caller deal with it.
+                */
+               task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
+               return false;
        }
+ }
  
-       /*
-        * GROUP_STOP_PENDING could be set if another group stop has
-        * started since being woken up or ptrace wants us to transit
-        * between TASK_STOPPED and TRACED.  Retry group stop.
-        */
-       if (current->group_stop & GROUP_STOP_PENDING) {
-               WARN_ON_ONCE(!(current->group_stop & GROUP_STOP_SIGMASK));
-               goto retry;
+ /**
+  * do_jobctl_trap - take care of ptrace jobctl traps
+  *
+  * When PT_SEIZED, it's used for both group stop and explicit
+  * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
+  * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
+  * the stop signal; otherwise, %SIGTRAP.
+  *
+  * When !PT_SEIZED, it's used only for group stop trap with stop signal
+  * number as exit_code and no siginfo.
+  *
+  * CONTEXT:
+  * Must be called with @current->sighand->siglock held, which may be
+  * released and re-acquired before returning with intervening sleep.
+  */
+ static void do_jobctl_trap(void)
+ {
+       struct signal_struct *signal = current->signal;
+       int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
+
+       if (current->ptrace & PT_SEIZED) {
+               if (!signal->group_stop_count &&
+                   !(signal->flags & SIGNAL_STOP_STOPPED))
+                       signr = SIGTRAP;
+               WARN_ON_ONCE(!signr);
+               ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
+                                CLD_STOPPED);
+       } else {
+               WARN_ON_ONCE(!signr);
+               ptrace_stop(signr, CLD_STOPPED, 0, NULL);
+               current->exit_code = 0;
        }
-       /* PTRACE_ATTACH might have raced with task killing, clear trapping */
-       task_clear_group_stop_trapping(current);
-       spin_unlock_irq(&current->sighand->siglock);
-       tracehook_finish_jctl();
-       return 1;
  }
  
  static int ptrace_signal(int signr, siginfo_t *info,
                         struct pt_regs *regs, void *cookie)
  {
-       if (!task_ptrace(current))
-               return signr;
        ptrace_signal_deliver(regs, cookie);
-       /* Let the debugger run.  */
+       /*
+        * We do not check sig_kernel_stop(signr) but set this marker
+        * unconditionally because we do not know whether debugger will
+        * change signr. This flag has no meaning unless we are going
+        * to stop after return from ptrace_stop(). In this case it will
+        * be checked in do_signal_stop(), we should only stop if it was
+        * not cleared by SIGCONT while we were sleeping. See also the
+        * comment in dequeue_signal().
+        */
+       current->jobctl |= JOBCTL_STOP_DEQUEUED;
        ptrace_stop(signr, CLD_TRAPPED, 0, info);
  
        /* We're back.  Did the debugger cancel the sig?  */
@@@ -2039,7 -2150,6 +2157,6 @@@ relock
         * the CLD_ si_code into SIGNAL_CLD_MASK bits.
         */
        if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
-               struct task_struct *leader;
                int why;
  
                if (signal->flags & SIGNAL_CLD_CONTINUED)
                 * a duplicate.
                 */
                read_lock(&tasklist_lock);
                do_notify_parent_cldstop(current, false, why);
  
-               leader = current->group_leader;
-               if (task_ptrace(leader) && !real_parent_is_ptracer(leader))
-                       do_notify_parent_cldstop(leader, true, why);
+               if (ptrace_reparented(current->group_leader))
+                       do_notify_parent_cldstop(current->group_leader,
+                                               true, why);
                read_unlock(&tasklist_lock);
  
                goto relock;
  
        for (;;) {
                struct k_sigaction *ka;
-               /*
-                * Tracing can induce an artificial signal and choose sigaction.
-                * The return value in @signr determines the default action,
-                * but @info->si_signo is the signal number we will report.
-                */
-               signr = tracehook_get_signal(current, regs, info, return_ka);
-               if (unlikely(signr < 0))
+               if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
+                   do_signal_stop(0))
                        goto relock;
-               if (unlikely(signr != 0))
-                       ka = return_ka;
-               else {
-                       if (unlikely(current->group_stop &
-                                    GROUP_STOP_PENDING) && do_signal_stop(0))
-                               goto relock;
  
-                       signr = dequeue_signal(current, &current->blocked,
-                                              info);
+               if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
+                       do_jobctl_trap();
+                       spin_unlock_irq(&sighand->siglock);
+                       goto relock;
+               }
  
-                       if (!signr)
-                               break; /* will return 0 */
+               signr = dequeue_signal(current, &current->blocked, info);
  
-                       if (signr != SIGKILL) {
-                               signr = ptrace_signal(signr, info,
-                                                     regs, cookie);
-                               if (!signr)
-                                       continue;
-                       }
+               if (!signr)
+                       break; /* will return 0 */
  
-                       ka = &sighand->action[signr-1];
+               if (unlikely(current->ptrace) && signr != SIGKILL) {
+                       signr = ptrace_signal(signr, info,
+                                             regs, cookie);
+                       if (!signr)
+                               continue;
                }
  
+               ka = &sighand->action[signr-1];
                /* Trace actually delivered signals. */
                trace_signal_deliver(signr, info, ka);
  
@@@ -2260,7 -2362,7 +2369,7 @@@ void exit_signals(struct task_struct *t
        signotset(&unblocked);
        retarget_shared_pending(tsk, &unblocked);
  
-       if (unlikely(tsk->group_stop & GROUP_STOP_PENDING) &&
+       if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
            task_participate_group_stop(tsk))
                group_stop = CLD_STOPPED;
  out:
@@@ -2372,7 -2474,7 +2481,7 @@@ int sigprocmask(int how, sigset_t *set
  /**
   *  sys_rt_sigprocmask - change the list of currently blocked signals
   *  @how: whether to add, remove, or set signals
 - *  @set: stores pending signals
 + *  @nset: stores pending signals
   *  @oset: previous value of signal mask if non-null
   *  @sigsetsize: size of sigset_t type
   */
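
do_signal_stop() and do_jobctl_trap() above determine what a tracer sees
when a tracee enters group stop.  ptrace(2) documents one way for a tracer
to tell a group-stop from a signal-delivery-stop: for a group-stop,
PTRACE_GETSIGINFO fails with EINVAL.  A hedged demo of that distinction:

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static int is_group_stop(pid_t pid)
{
        siginfo_t si;

        return ptrace(PTRACE_GETSIGINFO, pid, 0, &si) < 0 && errno == EINVAL;
}

int main(void)
{
        int status;
        pid_t pid = fork();

        if (pid == 0) {
                ptrace(PTRACE_TRACEME, 0, 0, 0);
                raise(SIGSTOP);         /* signal-delivery-stop */
                pause();
        }

        waitpid(pid, &status, 0);       /* delivery stop: GETSIGINFO works */
        printf("first stop: group-stop? %d\n", is_group_stop(pid));

        ptrace(PTRACE_CONT, pid, 0, SIGSTOP);   /* deliver it: group-stop */
        waitpid(pid, &status, 0);
        printf("second stop: group-stop? %d\n", is_group_stop(pid));

        kill(pid, SIGKILL);
        waitpid(pid, &status, 0);
        return 0;
}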
diff --combined mm/nommu.c
@@@ -22,7 -22,6 +22,6 @@@
  #include <linux/pagemap.h>
  #include <linux/slab.h>
  #include <linux/vmalloc.h>
- #include <linux/tracehook.h>
  #include <linux/blkdev.h>
  #include <linux/backing-dev.h>
  #include <linux/mount.h>
@@@ -1087,7 -1086,7 +1086,7 @@@ static unsigned long determine_vm_flags
         * it's being traced - otherwise breakpoints set in it may interfere
         * with another untraced process
         */
-       if ((flags & MAP_PRIVATE) && tracehook_expect_breakpoints(current))
+       if ((flags & MAP_PRIVATE) && current->ptrace)
                vm_flags &= ~VM_MAYSHARE;
  
        return vm_flags;
@@@ -1813,13 -1812,10 +1812,13 @@@ struct page *follow_page(struct vm_area
        return NULL;
  }
  
 -int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
 -              unsigned long to, unsigned long size, pgprot_t prot)
 +int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 +              unsigned long pfn, unsigned long size, pgprot_t prot)
  {
 -      vma->vm_start = vma->vm_pgoff << PAGE_SHIFT;
 +      if (addr != (pfn << PAGE_SHIFT))
 +              return -EINVAL;
 +
 +      vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
        return 0;
  }
  EXPORT_SYMBOL(remap_pfn_range);
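
For context, a hedged sketch of the typical remap_pfn_range() caller the
stricter nommu stub above now checks - a character driver's ->mmap method
exposing a fixed physical window.  MYDEV_PHYS_BASE is a made-up address
used purely for illustration:

#include <linux/fs.h>
#include <linux/mm.h>

#define MYDEV_PHYS_BASE 0x40000000UL    /* hypothetical device memory */

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
        unsigned long size = vma->vm_end - vma->vm_start;
        unsigned long pfn = (MYDEV_PHYS_BASE >> PAGE_SHIFT) + vma->vm_pgoff;

        /*
         * On nommu the stub above insists that vma->vm_start already
         * equals pfn << PAGE_SHIFT (the mapping must be a direct window)
         * and marks the VMA VM_IO | VM_RESERVED | VM_PFNMAP.
         */
        return remap_pfn_range(vma, vma->vm_start, pfn, size,
                               vma->vm_page_prot);
}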
diff --combined security/selinux/hooks.c
@@@ -1476,6 -1476,7 +1476,6 @@@ static int inode_has_perm(const struct 
                          unsigned flags)
  {
        struct inode_security_struct *isec;
 -      struct common_audit_data ad;
        u32 sid;
  
        validate_creds(cred);
        sid = cred_sid(cred);
        isec = inode->i_security;
  
        return avc_has_perm_flags(sid, isec->sid, isec->sclass, perms, adp, flags);
  }
  
 +static int inode_has_perm_noadp(const struct cred *cred,
 +                              struct inode *inode,
 +                              u32 perms,
 +                              unsigned flags)
 +{
 +      struct common_audit_data ad;
 +
 +      COMMON_AUDIT_DATA_INIT(&ad, INODE);
 +      ad.u.inode = inode;
 +      return inode_has_perm(cred, inode, perms, &ad, flags);
 +}
 +
  /* Same as inode_has_perm, but pass explicit audit data containing
     the dentry to help the auditing code to more easily generate the
     pathname if needed. */
@@@ -2053,7 -2048,7 +2053,7 @@@ static int selinux_bprm_set_creds(struc
                        u32 ptsid = 0;
  
                        rcu_read_lock();
-                       tracer = tracehook_tracer_task(current);
+                       tracer = ptrace_parent(current);
                        if (likely(tracer != NULL)) {
                                sec = __task_cred(tracer)->security;
                                ptsid = sec->sid;
@@@ -2127,8 -2122,8 +2127,8 @@@ static inline void flush_unauthorized_f
                                                struct tty_file_private, list);
                        file = file_priv->file;
                        inode = file->f_path.dentry->d_inode;
 -                      if (inode_has_perm(cred, inode,
 -                                         FILE__READ | FILE__WRITE, NULL, 0)) {
 +                      if (inode_has_perm_noadp(cred, inode,
 +                                         FILE__READ | FILE__WRITE, 0)) {
                                drop_tty = 1;
                        }
                }
@@@ -3233,7 -3228,7 +3233,7 @@@ static int selinux_dentry_open(struct f
         * new inode label or new policy.
         * This check is not redundant - do not remove.
         */
 -      return inode_has_perm(cred, inode, open_file_to_av(file), NULL, 0);
 +      return inode_has_perm_noadp(cred, inode, open_file_to_av(file), 0);
  }
  
  /* task security operations */
@@@ -5319,7 -5314,7 +5319,7 @@@ static int selinux_setprocattr(struct t
                   Otherwise, leave SID unchanged and fail. */
                ptsid = 0;
                task_lock(p);
-               tracer = tracehook_tracer_task(p);
+               tracer = ptrace_parent(p);
                if (tracer)
                        ptsid = task_sid(tracer);
                task_unlock(p);