coredump: move mm->core_waiters into struct core_state
Oleg Nesterov [Fri, 25 Jul 2008 08:47:41 +0000 (01:47 -0700)]
Move mm->core_waiters into "struct core_state", which is allocated on the
dumping thread's stack.  This shrinks mm_struct a little and allows further
changes.

This patch is mostly a mechanical s/core_waiters/core_state/ rename.  The
only essential change is that coredump_wait() must clear mm->core_state
before returning.

coredump_wait()'s path becomes a little uglier and .text grows by 30 bytes;
both are fixed by the next patch.

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Roland McGrath <roland@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
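
The net shape of the change, pieced together from the hunks below (a sketch
for orientation, not a literal excerpt from the tree):

	struct core_state {
		int nr_threads;			/* was mm->core_waiters */
		struct completion startup;
	};

	struct mm_struct {
		...
		struct core_state *core_state;	/* NULL, or points into the
						   dumping thread's stack for
						   the duration of the dump */
		...
	};

Every former test of mm->core_waiters becomes a pointer test of
mm->core_state, and the counter itself is reached through that pointer.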

fs/exec.c
include/linux/mm_types.h
kernel/exit.c
kernel/fork.c
kernel/signal.c

index 7173456..50de3aa 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -722,12 +722,10 @@ static int exec_mmap(struct mm_struct *mm)
                 * Make sure that if there is a core dump in progress
                 * for the old mm, we get out and die instead of going
                 * through with the exec.  We must hold mmap_sem around
-                * checking core_waiters and changing tsk->mm.  The
-                * core-inducing thread will increment core_waiters for
-                * each thread whose ->mm == old_mm.
+                * checking core_state and changing tsk->mm.
                 */
                down_read(&old_mm->mmap_sem);
-               if (unlikely(old_mm->core_waiters)) {
+               if (unlikely(old_mm->core_state)) {
                        up_read(&old_mm->mmap_sem);
                        return -EINTR;
                }
@@ -1514,7 +1512,7 @@ static void zap_process(struct task_struct *start)
        t = start;
        do {
                if (t != current && t->mm) {
-                       t->mm->core_waiters++;
+                       t->mm->core_state->nr_threads++;
                        sigaddset(&t->pending.signal, SIGKILL);
                        signal_wake_up(t, 1);
                }
@@ -1538,11 +1536,11 @@ static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
        if (err)
                return err;
 
-       if (atomic_read(&mm->mm_users) == mm->core_waiters + 1)
+       if (atomic_read(&mm->mm_users) == mm->core_state->nr_threads + 1)
                goto done;
        /*
         * We should find and kill all tasks which use this mm, and we should
-        * count them correctly into mm->core_waiters. We don't take tasklist
+        * count them correctly into ->nr_threads. We don't take tasklist
         * lock, but this is safe wrt:
         *
         * fork:
@@ -1590,7 +1588,7 @@ static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
        }
        rcu_read_unlock();
 done:
-       return mm->core_waiters;
+       return mm->core_state->nr_threads;
 }
 
 static int coredump_wait(int exit_code)
@@ -1603,9 +1601,12 @@ static int coredump_wait(int exit_code)
 
        init_completion(&mm->core_done);
        init_completion(&core_state.startup);
+       core_state.nr_threads = 0;
        mm->core_state = &core_state;
 
        core_waiters = zap_threads(tsk, mm, exit_code);
+       if (core_waiters < 0)
+               mm->core_state = NULL;
        up_write(&mm->mmap_sem);
 
        if (unlikely(core_waiters < 0))
@@ -1623,8 +1624,8 @@ static int coredump_wait(int exit_code)
 
        if (core_waiters)
                wait_for_completion(&core_state.startup);
+       mm->core_state = NULL;
 fail:
-       BUG_ON(mm->core_waiters);
        return core_waiters;
 }
 
@@ -1702,7 +1703,7 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs)
        /*
         * If another thread got here first, or we are not dumpable, bail out.
         */
-       if (mm->core_waiters || !get_dumpable(mm)) {
+       if (mm->core_state || !get_dumpable(mm)) {
                up_write(&mm->mmap_sem);
                goto fail;
        }
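
For reference, the coredump_wait() hunks above combine into roughly the
following (a simplified sketch; unrelated bookkeeping between the unlock
and the wait is elided):

	static int coredump_wait(int exit_code)
	{
		struct task_struct *tsk = current;
		struct mm_struct *mm = tsk->mm;
		struct core_state core_state;	/* on the dumper's stack */
		int core_waiters;

		/* mmap_sem is held for writing, taken by do_coredump() */
		init_completion(&mm->core_done);
		init_completion(&core_state.startup);
		core_state.nr_threads = 0;
		mm->core_state = &core_state;	/* publish */

		core_waiters = zap_threads(tsk, mm, exit_code);
		if (core_waiters < 0)
			mm->core_state = NULL;	/* failed: unpublish before unlocking */
		up_write(&mm->mmap_sem);

		if (unlikely(core_waiters < 0))
			goto fail;

		if (core_waiters)
			wait_for_completion(&core_state.startup);
		mm->core_state = NULL;		/* all sub-threads checked in: unpublish */
	fail:
		return core_waiters;
	}

This is the uglification the changelog refers to: mm->core_state must now be
cleared on the failure path as well as the success path, where the old code
only had to BUG_ON() a stale counter.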
index 97819ef..c0b1747 100644 (file)
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -160,6 +160,7 @@ struct vm_area_struct {
 };
 
 struct core_state {
+       int nr_threads;
        struct completion startup;
 };
 
@@ -179,7 +180,6 @@ struct mm_struct {
        atomic_t mm_users;                      /* How many users with user space? */
        atomic_t mm_count;                      /* How many references to "struct mm_struct" (users count as 1) */
        int map_count;                          /* number of VMAs */
-       int core_waiters;
        struct rw_semaphore mmap_sem;
        spinlock_t page_table_lock;             /* Protects page tables and some counters */
 
index f7fa21d..988e232 100644 (file)
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -670,16 +670,16 @@ static void exit_mm(struct task_struct * tsk)
                return;
        /*
         * Serialize with any possible pending coredump.
-        * We must hold mmap_sem around checking core_waiters
+        * We must hold mmap_sem around checking core_state
         * and clearing tsk->mm.  The core-inducing thread
-        * will increment core_waiters for each thread in the
+        * will increment ->nr_threads for each thread in the
         * group with ->mm != NULL.
         */
        down_read(&mm->mmap_sem);
-       if (mm->core_waiters) {
+       if (mm->core_state) {
                up_read(&mm->mmap_sem);
                down_write(&mm->mmap_sem);
-               if (!--mm->core_waiters)
+               if (!--mm->core_state->nr_threads)
                        complete(&mm->core_state->startup);
                up_write(&mm->mmap_sem);
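
The exiting thread's half of the handshake then reads roughly as below; the
tail, where the thread blocks on mm->core_done until the dump is written
out, is from the surrounding unchanged code (a sketch, not a literal
excerpt):

	/* in exit_mm(), as each thread gives up its ->mm */
	down_read(&mm->mmap_sem);
	if (mm->core_state) {
		/*
		 * A core dump is in progress.  Drop the read lock and
		 * take the write lock so the decrement is serialized
		 * against the dumper publishing and clearing
		 * mm->core_state.
		 */
		up_read(&mm->mmap_sem);
		down_write(&mm->mmap_sem);
		if (!--mm->core_state->nr_threads)
			complete(&mm->core_state->startup);	/* last one wakes the dumper */
		up_write(&mm->mmap_sem);

		wait_for_completion(&mm->core_done);	/* wait for the dump to finish */
		down_read(&mm->mmap_sem);
	}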
 
index eeaec68..813d5c8 100644 (file)
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -400,7 +400,7 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
        INIT_LIST_HEAD(&mm->mmlist);
        mm->flags = (current->mm) ? current->mm->flags
                                  : MMF_DUMP_FILTER_DEFAULT;
-       mm->core_waiters = 0;
+       mm->core_state = NULL;
        mm->nr_ptes = 0;
        set_mm_counter(mm, file_rss, 0);
        set_mm_counter(mm, anon_rss, 0);
index 39c1706..5c7b7ea 100644 (file)
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1480,10 +1480,10 @@ static inline int may_ptrace_stop(void)
         * is a deadlock situation, and pointless because our tracer
         * is dead so don't allow us to stop.
         * If SIGKILL was already sent before the caller unlocked
-        * ->siglock we must see ->core_waiters != 0. Otherwise it
+        * ->siglock we must see ->core_state != NULL. Otherwise it
         * is safe to enter schedule().
         */
-       if (unlikely(current->mm->core_waiters) &&
+       if (unlikely(current->mm->core_state) &&
            unlikely(current->mm == current->parent->mm))
                return 0;
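
The ordering the comment relies on can be spelled out as a timeline
(illustrative only; the zap_process() side is the hunk earlier in this
patch):

	/*
	 * dumper (the tracee's parent)     traced thread sharing ->mm
	 *
	 * mm->core_state = &core_state;
	 * zap_process(), under ->siglock:
	 *   queue SIGKILL for the tracee
	 *   signal_wake_up(tracee)
	 * unlock ->siglock                  lock/unlock ->siglock, see
	 *                                   SIGKILL pending; ->siglock
	 *                                   orders the two writes, so
	 *                                   may_ptrace_stop() must also
	 *                                   see mm->core_state != NULL
	 *                                   and returns 0: the tracee
	 *                                   exits instead of blocking on
	 *                                   a tracer that is itself
	 *                                   waiting in coredump_wait().
	 */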