diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index bd08332079cf89b460d5e62314760af704c7b8f8..3aac8e9edac32e41795484d890f81e114ca3e26e 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/fs.h>
-#include <linux/stat.h>
-#include <linux/time.h>
 #include <linux/mm.h>
 #include <linux/mman.h>
-#include <linux/a.out.h>
 #include <linux/errno.h>
 #include <linux/signal.h>
 #include <linux/binfmts.h>
 #include <linux/string.h>
 #include <linux/file.h>
-#include <linux/fcntl.h>
-#include <linux/ptrace.h>
 #include <linux/slab.h>
-#include <linux/shm.h>
 #include <linux/personality.h>
 #include <linux/elfcore.h>
 #include <linux/init.h>
 #include <linux/highuid.h>
-#include <linux/smp.h>
 #include <linux/compiler.h>
 #include <linux/highmem.h>
 #include <linux/pagemap.h>
+#include <linux/vmalloc.h>
 #include <linux/security.h>
-#include <linux/syscalls.h>
 #include <linux/random.h>
 #include <linux/elf.h>
 #include <linux/utsname.h>
+#include <linux/coredump.h>
+#include <linux/sched.h>
 #include <asm/uaccess.h>
 #include <asm/param.h>
 #include <asm/page.h>
 
-static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs);
+#ifndef user_long_t
+#define user_long_t long
+#endif
+#ifndef user_siginfo_t
+#define user_siginfo_t siginfo_t
+#endif
+
+static int load_elf_binary(struct linux_binprm *bprm);
 static int load_elf_library(struct file *);
 static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
                                int, int, unsigned long);
@@ -52,8 +54,8 @@ static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
  * If we don't support core dumping, then supply a NULL so we
  * don't even try.
  */
-#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
-static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
+#ifdef CONFIG_ELF_CORE
+static int elf_core_dump(struct coredump_params *cprm);
 #else
 #define elf_core_dump  NULL
 #endif
@@ -73,12 +75,11 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
 #define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
 
 static struct linux_binfmt elf_format = {
-               .module         = THIS_MODULE,
-               .load_binary    = load_elf_binary,
-               .load_shlib     = load_elf_library,
-               .core_dump      = elf_core_dump,
-               .min_coredump   = ELF_EXEC_PAGESIZE,
-               .hasvdso        = 1
+       .module         = THIS_MODULE,
+       .load_binary    = load_elf_binary,
+       .load_shlib     = load_elf_library,
+       .core_dump      = elf_core_dump,
+       .min_coredump   = ELF_EXEC_PAGESIZE,
 };
 
 #define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)
@@ -89,9 +90,7 @@ static int set_brk(unsigned long start, unsigned long end)
        end = ELF_PAGEALIGN(end);
        if (end > start) {
                unsigned long addr;
-               down_write(&current->mm->mmap_sem);
-               addr = do_brk(start, end - start);
-               up_write(&current->mm->mmap_sem);
+               addr = vm_brk(start, end - start);
                if (BAD_ADDR(addr))
                        return addr;
        }
@@ -132,6 +131,15 @@ static int padzero(unsigned long elf_bss)
 #define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
 #endif
 
+#ifndef ELF_BASE_PLATFORM
+/*
+ * AT_BASE_PLATFORM indicates the "real" hardware/microarchitecture.
+ * If the arch defines ELF_BASE_PLATFORM (in asm/elf.h), the value
+ * will be copied to the user stack in the same manner as AT_PLATFORM.
+ */
+#define ELF_BASE_PLATFORM NULL
+#endif
+
 static int
 create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
                unsigned long load_addr, unsigned long interp_load_addr)
@@ -143,11 +151,15 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
        elf_addr_t __user *envp;
        elf_addr_t __user *sp;
        elf_addr_t __user *u_platform;
+       elf_addr_t __user *u_base_platform;
+       elf_addr_t __user *u_rand_bytes;
        const char *k_platform = ELF_PLATFORM;
+       const char *k_base_platform = ELF_BASE_PLATFORM;
+       unsigned char k_rand_bytes[16];
        int items;
        elf_addr_t *elf_info;
        int ei_index = 0;
-       struct task_struct *tsk = current;
+       const struct cred *cred = current_cred();
        struct vm_area_struct *vma;
 
        /*
@@ -173,6 +185,28 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
                        return -EFAULT;
        }
 
+       /*
+        * If this architecture has a "base" platform capability
+        * string, copy it to userspace.
+        */
+       u_base_platform = NULL;
+       if (k_base_platform) {
+               size_t len = strlen(k_base_platform) + 1;
+
+               u_base_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
+               if (__copy_to_user(u_base_platform, k_base_platform, len))
+                       return -EFAULT;
+       }
+
+       /*
+        * Generate 16 random bytes for userspace PRNG seeding.
+        */
+       get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
+       u_rand_bytes = (elf_addr_t __user *)
+                      STACK_ALLOC(p, sizeof(k_rand_bytes));
+       if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
+               return -EFAULT;
+
        /* Create the ELF interpreter info */
        elf_info = (elf_addr_t *)current->mm->saved_auxv;
        /* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */
@@ -200,15 +234,24 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
        NEW_AUX_ENT(AT_BASE, interp_load_addr);
        NEW_AUX_ENT(AT_FLAGS, 0);
        NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
-       NEW_AUX_ENT(AT_UID, tsk->uid);
-       NEW_AUX_ENT(AT_EUID, tsk->euid);
-       NEW_AUX_ENT(AT_GID, tsk->gid);
-       NEW_AUX_ENT(AT_EGID, tsk->egid);
+       NEW_AUX_ENT(AT_UID, from_kuid_munged(cred->user_ns, cred->uid));
+       NEW_AUX_ENT(AT_EUID, from_kuid_munged(cred->user_ns, cred->euid));
+       NEW_AUX_ENT(AT_GID, from_kgid_munged(cred->user_ns, cred->gid));
+       NEW_AUX_ENT(AT_EGID, from_kgid_munged(cred->user_ns, cred->egid));
        NEW_AUX_ENT(AT_SECURE, security_bprm_secureexec(bprm));
+       NEW_AUX_ENT(AT_RANDOM, (elf_addr_t)(unsigned long)u_rand_bytes);
+#ifdef ELF_HWCAP2
+       NEW_AUX_ENT(AT_HWCAP2, ELF_HWCAP2);
+#endif
+       NEW_AUX_ENT(AT_EXECFN, bprm->exec);
        if (k_platform) {
                NEW_AUX_ENT(AT_PLATFORM,
                            (elf_addr_t)(unsigned long)u_platform);
        }
+       if (k_base_platform) {
+               NEW_AUX_ENT(AT_BASE_PLATFORM,
+                           (elf_addr_t)(unsigned long)u_base_platform);
+       }
        if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
                NEW_AUX_ENT(AT_EXECFD, bprm->interp_data);
        }
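/*
 * Illustrative userspace sketch, not part of this file: the NEW_AUX_ENT()
 * values set up above land in the new program's auxiliary vector, which a
 * process can query with getauxval(3).  Assumes a glibc that provides
 * <sys/auxv.h> and defines AT_BASE_PLATFORM/AT_RANDOM/AT_EXECFN; entries
 * the kernel did not supply simply come back as 0.
 */
#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	const char *platform = (const char *)getauxval(AT_PLATFORM);
	const char *base_platform = (const char *)getauxval(AT_BASE_PLATFORM);
	const unsigned char *rnd = (const unsigned char *)getauxval(AT_RANDOM);
	const char *execfn = (const char *)getauxval(AT_EXECFN);

	if (platform)
		printf("AT_PLATFORM:      %s\n", platform);
	if (base_platform)
		printf("AT_BASE_PLATFORM: %s\n", base_platform);
	if (execfn)
		printf("AT_EXECFN:        %s\n", execfn);
	if (rnd)	/* the 16 bytes filled by get_random_bytes() above */
		printf("AT_RANDOM[0..3]:  %02x %02x %02x %02x\n",
		       rnd[0], rnd[1], rnd[2], rnd[3]);
	return 0;
}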
@@ -256,7 +299,7 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
                        return -EFAULT;
                len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
                if (!len || len > MAX_ARG_STRLEN)
-                       return 0;
+                       return -EINVAL;
                p += len;
        }
        if (__put_user(0, argv))
@@ -268,7 +311,7 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
                        return -EFAULT;
                len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
                if (!len || len > MAX_ARG_STRLEN)
-                       return 0;
+                       return -EINVAL;
                p += len;
        }
        if (__put_user(0, envp))
@@ -299,7 +342,6 @@ static unsigned long elf_map(struct file *filep, unsigned long addr,
        if (!size)
                return addr;
 
-       down_write(&current->mm->mmap_sem);
        /*
        * total_size is the size of the ELF (interpreter) image.
        * The _first_ mmap needs to know the full size, otherwise
@@ -310,13 +352,12 @@ static unsigned long elf_map(struct file *filep, unsigned long addr,
        */
        if (total_size) {
                total_size = ELF_PAGEALIGN(total_size);
-               map_addr = do_mmap(filep, addr, total_size, prot, type, off);
+               map_addr = vm_mmap(filep, addr, total_size, prot, type, off);
                if (!BAD_ADDR(map_addr))
-                       do_munmap(current->mm, map_addr+size, total_size-size);
+                       vm_munmap(map_addr+size, total_size-size);
        } else
-               map_addr = do_mmap(filep, addr, size, prot, type, off);
+               map_addr = vm_mmap(filep, addr, size, prot, type, off);
 
-       up_write(&current->mm->mmap_sem);
        return(map_addr);
 }
 
@@ -387,7 +428,7 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
                goto out;
 
        retval = kernel_read(interpreter, interp_elf_ex->e_phoff,
-                            (char *)elf_phdata,size);
+                            (char *)elf_phdata, size);
        error = -EIO;
        if (retval != size) {
                if (retval < 0)
@@ -468,25 +509,23 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
                }
        }
 
-       /*
-        * Now fill out the bss section.  First pad the last page up
-        * to the page boundary, and then perform a mmap to make sure
-        * that there are zero-mapped pages up to and including the 
-        * last bss page.
-        */
-       if (padzero(elf_bss)) {
-               error = -EFAULT;
-               goto out_close;
-       }
+       if (last_bss > elf_bss) {
+               /*
+                * Now fill out the bss section.  First pad the last page up
+                * to the page boundary, and then perform a mmap to make sure
+                * that there are zero-mapped pages up to and including the
+                * last bss page.
+                */
+               if (padzero(elf_bss)) {
+                       error = -EFAULT;
+                       goto out_close;
+               }
 
-       /* What we have mapped so far */
-       elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
+               /* What we have mapped so far */
+               elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
 
-       /* Map the last of the bss segment */
-       if (last_bss > elf_bss) {
-               down_write(&current->mm->mmap_sem);
-               error = do_brk(elf_bss, last_bss - elf_bss);
-               up_write(&current->mm->mmap_sem);
+               /* Map the last of the bss segment */
+               error = vm_brk(elf_bss, last_bss - elf_bss);
                if (BAD_ADDR(error))
                        goto out_close;
        }
@@ -527,7 +566,7 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
 #endif
 }
 
-static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+static int load_elf_binary(struct linux_binprm *bprm)
 {
        struct file *interpreter = NULL; /* to shut gcc up */
        unsigned long load_addr = 0, load_bias = 0;
@@ -536,19 +575,18 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
        unsigned long error;
        struct elf_phdr *elf_ppnt, *elf_phdata;
        unsigned long elf_bss, elf_brk;
-       int elf_exec_fileno;
        int retval, i;
        unsigned int size;
        unsigned long elf_entry;
        unsigned long interp_load_addr = 0;
        unsigned long start_code, end_code, start_data, end_data;
-       unsigned long reloc_func_desc = 0;
+       unsigned long reloc_func_desc __maybe_unused = 0;
        int executable_stack = EXSTACK_DEFAULT;
        unsigned long def_flags = 0;
+       struct pt_regs *regs = current_pt_regs();
        struct {
                struct elfhdr elf_ex;
                struct elfhdr interp_elf_ex;
-               struct exec interp_ex;
        } *loc;
 
        loc = kmalloc(sizeof(*loc), GFP_KERNEL);
@@ -569,7 +607,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
                goto out;
        if (!elf_check_arch(&loc->elf_ex))
                goto out;
-       if (!bprm->file->f_op||!bprm->file->f_op->mmap)
+       if (!bprm->file->f_op || !bprm->file->f_op->mmap)
                goto out;
 
        /* Now read in all of the header information */
@@ -592,12 +630,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
                goto out_free_ph;
        }
 
-       retval = get_unused_fd();
-       if (retval < 0)
-               goto out_free_ph;
-       get_file(bprm->file);
-       fd_install(elf_exec_fileno = retval, bprm->file);
-
        elf_ppnt = elf_phdata;
        elf_bss = 0;
        elf_brk = 0;
@@ -616,13 +648,13 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
                        retval = -ENOEXEC;
                        if (elf_ppnt->p_filesz > PATH_MAX || 
                            elf_ppnt->p_filesz < 2)
-                               goto out_free_file;
+                               goto out_free_ph;
 
                        retval = -ENOMEM;
                        elf_interpreter = kmalloc(elf_ppnt->p_filesz,
                                                  GFP_KERNEL);
                        if (!elf_interpreter)
-                               goto out_free_file;
+                               goto out_free_ph;
 
                        retval = kernel_read(bprm->file, elf_ppnt->p_offset,
                                             elf_interpreter,
@@ -637,27 +669,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
                        if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
                                goto out_free_interp;
 
-                       /*
-                        * The early SET_PERSONALITY here is so that the lookup
-                        * for the interpreter happens in the namespace of the 
-                        * to-be-execed image.  SET_PERSONALITY can select an
-                        * alternate root.
-                        *
-                        * However, SET_PERSONALITY is NOT allowed to switch
-                        * this task into the new images's memory mapping
-                        * policy - that is, TASK_SIZE must still evaluate to
-                        * that which is appropriate to the execing application.
-                        * This is because exit_mmap() needs to have TASK_SIZE
-                        * evaluate to the size of the old image.
-                        *
-                        * So if (say) a 64-bit application is execing a 32-bit
-                        * application it is the architecture's responsibility
-                        * to defer changing the value of TASK_SIZE until the
-                        * switch really is going to happen - do this in
-                        * flush_thread().      - akpm
-                        */
-                       SET_PERSONALITY(loc->elf_ex, 0);
-
                        interpreter = open_exec(elf_interpreter);
                        retval = PTR_ERR(interpreter);
                        if (IS_ERR(interpreter))
@@ -668,8 +679,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
                         * mm->dumpable = 0 regardless of the interpreter's
                         * permissions.
                         */
-                       if (file_permission(interpreter, MAY_READ) < 0)
-                               bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;
+                       would_dump(bprm, interpreter);
 
                        retval = kernel_read(interpreter, 0, bprm->buf,
                                             BINPRM_BUF_SIZE);
@@ -680,7 +690,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
                        }
 
                        /* Get the exec headers */
-                       loc->interp_ex = *((struct exec *)bprm->buf);
                        loc->interp_elf_ex = *((struct elfhdr *)bprm->buf);
                        break;
                }
@@ -706,9 +715,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
                /* Verify the interpreter has a valid arch */
                if (!elf_check_arch(&loc->interp_elf_ex))
                        goto out_free_dentry;
-       } else {
-               /* Executables without an interpreter also need a personality  */
-               SET_PERSONALITY(loc->elf_ex, 0);
        }
 
        /* Flush all traces of the currently running executable */
@@ -717,18 +723,18 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
                goto out_free_dentry;
 
        /* OK, This is the point of no return */
-       current->flags &= ~PF_FORKNOEXEC;
        current->mm->def_flags = def_flags;
 
        /* Do this immediately, since STACK_TOP as used in setup_arg_pages
           may depend on the personality.  */
-       SET_PERSONALITY(loc->elf_ex, 0);
+       SET_PERSONALITY(loc->elf_ex);
        if (elf_read_implies_exec(loc->elf_ex, executable_stack))
                current->personality |= READ_IMPLIES_EXEC;
 
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                current->flags |= PF_RANDOMIZE;
-       arch_pick_mmap_layout(current->mm);
+
+       setup_new_exec(bprm);
 
        /* Do this so that we can load the interpreter, if need be.  We will
           change some of these later */
@@ -743,7 +749,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
        
        current->mm->start_stack = bprm->p;
 
-       /* Now we do a little grungy work by mmaping the ELF image into
+       /* Now we do a little grungy work by mmapping the ELF image into
           the correct location in memory. */
        for(i = 0, elf_ppnt = elf_phdata;
            i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
@@ -759,8 +765,8 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
                        /* There was a PT_LOAD segment with p_memsz > p_filesz
                           before this one. Map anonymous pages, if needed,
                           and clear the area.  */
-                       retval = set_brk (elf_bss + load_bias,
-                                         elf_brk + load_bias);
+                       retval = set_brk(elf_bss + load_bias,
+                                        elf_brk + load_bias);
                        if (retval) {
                                send_sig(SIGKILL, current, 0);
                                goto out_free_dentry;
@@ -798,8 +804,18 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
                         * default mmap base, as well as whatever program they
                         * might try to exec.  This is because the brk will
                         * follow the loader, and is not movable.  */
-#ifdef CONFIG_X86
-                       load_bias = 0;
+#ifdef CONFIG_ARCH_BINFMT_ELF_RANDOMIZE_PIE
+                       /* Memory randomization might have been switched off
+                        * in runtime via sysctl or explicit setting of
+                        * personality flags.
+                        * If that is the case, retain the original non-zero
+                        * load_bias value in order to establish proper
+                        * non-randomized mappings.
+                        */
+                       if (current->flags & PF_RANDOMIZE)
+                               load_bias = 0;
+                       else
+                               load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
 #else
                        load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
 #endif
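/*
 * Illustrative userspace sketch, not kernel code: PF_RANDOMIZE is only set
 * earlier in load_elf_binary() when the personality lacks ADDR_NO_RANDOMIZE
 * and randomize_va_space is non-zero, which is what the branch above keys
 * off.  A process can request the non-randomized ET_DYN placement for a
 * program it execs via personality(2):
 */
#include <stdio.h>
#include <sys/personality.h>
#include <unistd.h>

int main(int argc, char *argv[])
{
	if (personality(ADDR_NO_RANDOMIZE) == -1) {
		perror("personality");
		return 1;
	}
	if (argc > 1)	/* exec the target so load_elf_binary() sees the flag */
		execvp(argv[1], &argv[1]);
	return 0;
}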
@@ -882,7 +898,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
        }
 
        if (elf_interpreter) {
-               unsigned long uninitialized_var(interp_map_addr);
+               unsigned long interp_map_addr = 0;
 
                elf_entry = load_elf_interp(&loc->interp_elf_ex,
                                            interpreter,
@@ -918,20 +934,17 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
 
        kfree(elf_phdata);
 
-       sys_close(elf_exec_fileno);
-
        set_binfmt(&elf_format);
 
 #ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
-       retval = arch_setup_additional_pages(bprm, executable_stack);
+       retval = arch_setup_additional_pages(bprm, !!elf_interpreter);
        if (retval < 0) {
                send_sig(SIGKILL, current, 0);
                goto out;
        }
 #endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */
 
-       compute_creds(bprm);
-       current->flags &= ~PF_FORKNOEXEC;
+       install_exec_creds(bprm);
        retval = create_elf_tables(bprm, &loc->elf_ex,
                          load_addr, interp_load_addr);
        if (retval < 0) {
@@ -946,9 +959,13 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
        current->mm->start_stack = bprm->p;
 
 #ifdef arch_randomize_brk
-       if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1))
+       if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
                current->mm->brk = current->mm->start_brk =
                        arch_randomize_brk(current->mm);
+#ifdef CONFIG_COMPAT_BRK
+               current->brk_randomized = 1;
+#endif
+       }
 #endif
 
        if (current->personality & MMAP_PAGE_ZERO) {
@@ -956,10 +973,8 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
                   and some applications "depend" upon this behavior.
                   Since we do not have the power to recompile these, we
                   emulate the SVr4 behavior. Sigh. */
-               down_write(&current->mm->mmap_sem);
-               error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
+               error = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
                                MAP_FIXED | MAP_PRIVATE, 0);
-               up_write(&current->mm->mmap_sem);
        }
 
 #ifdef ELF_PLAT_INIT
@@ -977,12 +992,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
 #endif
 
        start_thread(regs, elf_entry, bprm->p);
-       if (unlikely(current->ptrace & PT_PTRACED)) {
-               if (current->ptrace & PT_TRACE_EXEC)
-                       ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
-               else
-                       send_sig(SIGTRAP, current, 0);
-       }
        retval = 0;
 out:
        kfree(loc);
@@ -996,8 +1005,6 @@ out_free_dentry:
                fput(interpreter);
 out_free_interp:
        kfree(elf_interpreter);
-out_free_file:
-       sys_close(elf_exec_fileno);
 out_free_ph:
        kfree(elf_phdata);
        goto out;
@@ -1052,8 +1059,7 @@ static int load_elf_library(struct file *file)
                eppnt++;
 
        /* Now use mmap to map the library into memory. */
-       down_write(&current->mm->mmap_sem);
-       error = do_mmap(file,
+       error = vm_mmap(file,
                        ELF_PAGESTART(eppnt->p_vaddr),
                        (eppnt->p_filesz +
                         ELF_PAGEOFFSET(eppnt->p_vaddr)),
@@ -1061,7 +1067,6 @@ static int load_elf_library(struct file *file)
                        MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
                        (eppnt->p_offset -
                         ELF_PAGEOFFSET(eppnt->p_vaddr)));
-       up_write(&current->mm->mmap_sem);
        if (error != ELF_PAGESTART(eppnt->p_vaddr))
                goto out_free_ph;
 
@@ -1074,11 +1079,8 @@ static int load_elf_library(struct file *file)
        len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr +
                            ELF_MIN_ALIGN - 1);
        bss = eppnt->p_memsz + eppnt->p_vaddr;
-       if (bss > len) {
-               down_write(&current->mm->mmap_sem);
-               do_brk(len, bss - len);
-               up_write(&current->mm->mmap_sem);
-       }
+       if (bss > len)
+               vm_brk(len, bss - len);
        error = 0;
 
 out_free_ph:
@@ -1087,47 +1089,35 @@ out:
        return error;
 }
 
-/*
- * Note that some platforms still use traditional core dumps and not
- * the ELF core dump.  Each platform can select it as appropriate.
- */
-#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
-
+#ifdef CONFIG_ELF_CORE
 /*
  * ELF core dumper
  *
  * Modelled on fs/exec.c:aout_core_dump()
  * Jeremy Fitzhardinge <jeremy@sw.oz.au>
  */
+
 /*
- * These are the only things you should do on a core-file: use only these
- * functions to write out all the necessary info.
+ * The purpose of always_dump_vma() is to make sure that special kernel mappings
+ * that are useful for post-mortem analysis are included in every core dump.
+ * In that way we ensure that the core dump is fully interpretable later
+ * without matching up the same kernel and hardware config to see what PC values
+ * meant. These special mappings include the vDSO, vsyscall, and other
+ * architecture-specific mappings.
  */
-static int dump_write(struct file *file, const void *addr, int nr)
+static bool always_dump_vma(struct vm_area_struct *vma)
 {
-       return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
-}
+       /* Any vsyscall mappings? */
+       if (vma == get_gate_vma(vma->vm_mm))
+               return true;
+       /*
+        * arch_vma_name() returns non-NULL for special architecture mappings,
+        * such as vDSO sections.
+        */
+       if (arch_vma_name(vma))
+               return true;
 
-static int dump_seek(struct file *file, loff_t off)
-{
-       if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
-               if (file->f_op->llseek(file, off, SEEK_CUR) < 0)
-                       return 0;
-       } else {
-               char *buf = (char *)get_zeroed_page(GFP_KERNEL);
-               if (!buf)
-                       return 0;
-               while (off > 0) {
-                       unsigned long n = off;
-                       if (n > PAGE_SIZE)
-                               n = PAGE_SIZE;
-                       if (!dump_write(file, buf, n))
-                               return 0;
-                       off -= n;
-               }
-               free_page((unsigned long)buf);
-       }
-       return 1;
+       return false;
 }
 
 /*
@@ -1136,19 +1126,31 @@ static int dump_seek(struct file *file, loff_t off)
 static unsigned long vma_dump_size(struct vm_area_struct *vma,
                                   unsigned long mm_flags)
 {
-       /* The vma can be set up to tell us the answer directly.  */
-       if (vma->vm_flags & VM_ALWAYSDUMP)
+#define FILTER(type)   (mm_flags & (1UL << MMF_DUMP_##type))
+
+       /* always dump the vdso and vsyscall sections */
+       if (always_dump_vma(vma))
                goto whole;
 
-       /* Do not dump I/O mapped devices or special mappings */
-       if (vma->vm_flags & (VM_IO | VM_RESERVED))
+       if (vma->vm_flags & VM_DONTDUMP)
                return 0;
 
-#define FILTER(type)   (mm_flags & (1UL << MMF_DUMP_##type))
+       /* Hugetlb memory check */
+       if (vma->vm_flags & VM_HUGETLB) {
+               if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED))
+                       goto whole;
+               if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
+                       goto whole;
+               return 0;
+       }
+
+       /* Do not dump I/O mapped devices or special mappings */
+       if (vma->vm_flags & VM_IO)
+               return 0;
 
        /* By default, dump shared memory if mapped from an anonymous file. */
        if (vma->vm_flags & VM_SHARED) {
-               if (vma->vm_file->f_path.dentry->d_inode->i_nlink == 0 ?
+               if (file_inode(vma->vm_file)->i_nlink == 0 ?
                    FILTER(ANON_SHARED) : FILTER(MAPPED_SHARED))
                        goto whole;
                return 0;
@@ -1168,9 +1170,11 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
         * check for an ELF header.  If we find one, dump the first page to
         * aid in determining what was mapped here.
         */
-       if (FILTER(ELF_HEADERS) && vma->vm_file != NULL && vma->vm_pgoff == 0) {
+       if (FILTER(ELF_HEADERS) &&
+           vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ)) {
                u32 __user *header = (u32 __user *) vma->vm_start;
                u32 word;
+               mm_segment_t fs = get_fs();
                /*
                 * Doing it this way gets the constant folded by GCC.
                 */
@@ -1183,7 +1187,15 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
                magic.elfmag[EI_MAG1] = ELFMAG1;
                magic.elfmag[EI_MAG2] = ELFMAG2;
                magic.elfmag[EI_MAG3] = ELFMAG3;
-               if (get_user(word, header) == 0 && word == magic.cmp)
+               /*
+                * Switch to the user "segment" for get_user(),
+                * then put back what elf_core_dump() had in place.
+                */
+               set_fs(USER_DS);
+               if (unlikely(get_user(word, header)))
+                       word = 0;
+               set_fs(fs);
+               if (word == magic.cmp)
                        return PAGE_SIZE;
        }
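/*
 * Illustrative userspace sketch, not kernel code: the FILTER() tests above
 * read the MMF_DUMP_* bits of mm->flags, which are exposed as a bitmask in
 * /proc/<pid>/coredump_filter (see core(5)).  Writing 0x33, for example,
 * keeps anonymous mappings, ELF headers and private hugetlb pages in the
 * dump; the bit assignments are the documented procfs interface, not
 * something defined in this file.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/self/coredump_filter", "w");

	if (!f) {
		perror("/proc/self/coredump_filter");
		return 1;
	}
	fprintf(f, "0x33\n");
	return fclose(f) ? 1 : 0;
}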
 
@@ -1245,15 +1257,8 @@ static int writenote(struct memelfnote *men, struct file *file,
 }
 #undef DUMP_WRITE
 
-#define DUMP_WRITE(addr, nr)   \
-       if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
-               goto end_coredump;
-#define DUMP_SEEK(off) \
-       if (!dump_seek(file, (off))) \
-               goto end_coredump;
-
 static void fill_elf_header(struct elfhdr *elf, int segs,
-                           u16 machine, u32 flags, u8 osabi)
+                           u16 machine, u32 flags)
 {
        memset(elf, 0, sizeof(*elf));
 
@@ -1308,28 +1313,28 @@ static void fill_prstatus(struct elf_prstatus *prstatus,
        prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
        prstatus->pr_sigpend = p->pending.signal.sig[0];
        prstatus->pr_sighold = p->blocked.sig[0];
+       rcu_read_lock();
+       prstatus->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
+       rcu_read_unlock();
        prstatus->pr_pid = task_pid_vnr(p);
-       prstatus->pr_ppid = task_pid_vnr(p->real_parent);
        prstatus->pr_pgrp = task_pgrp_vnr(p);
        prstatus->pr_sid = task_session_vnr(p);
        if (thread_group_leader(p)) {
+               struct task_cputime cputime;
+
                /*
-                * This is the record for the group leader.  Add in the
-                * cumulative times of previous dead threads.  This total
-                * won't include the time of each live thread whose state
-                * is included in the core dump.  The final total reported
-                * to our parent process when it calls wait4 will include
-                * those sums as well as the little bit more time it takes
-                * this and each other thread to finish dying after the
-                * core dump synchronization phase.
+                * This is the record for the group leader.  It shows the
+                * group-wide total, not its individual thread total.
                 */
-               cputime_to_timeval(cputime_add(p->utime, p->signal->utime),
-                                  &prstatus->pr_utime);
-               cputime_to_timeval(cputime_add(p->stime, p->signal->stime),
-                                  &prstatus->pr_stime);
+               thread_group_cputime(p, &cputime);
+               cputime_to_timeval(cputime.utime, &prstatus->pr_utime);
+               cputime_to_timeval(cputime.stime, &prstatus->pr_stime);
        } else {
-               cputime_to_timeval(p->utime, &prstatus->pr_utime);
-               cputime_to_timeval(p->stime, &prstatus->pr_stime);
+               cputime_t utime, stime;
+
+               task_cputime(p, &utime, &stime);
+               cputime_to_timeval(utime, &prstatus->pr_utime);
+               cputime_to_timeval(stime, &prstatus->pr_stime);
        }
        cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
        cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
@@ -1338,6 +1343,7 @@ static void fill_prstatus(struct elf_prstatus *prstatus,
 static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
                       struct mm_struct *mm)
 {
+       const struct cred *cred;
        unsigned int i, len;
        
        /* first copy the parameters from user space */
@@ -1354,8 +1360,10 @@ static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
                        psinfo->pr_psargs[i] = ' ';
        psinfo->pr_psargs[len] = 0;
 
+       rcu_read_lock();
+       psinfo->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
+       rcu_read_unlock();
        psinfo->pr_pid = task_pid_vnr(p);
-       psinfo->pr_ppid = task_pid_vnr(p->real_parent);
        psinfo->pr_pgrp = task_pgrp_vnr(p);
        psinfo->pr_sid = task_session_vnr(p);
 
@@ -1365,8 +1373,11 @@ static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
        psinfo->pr_zomb = psinfo->pr_sname == 'Z';
        psinfo->pr_nice = task_nice(p);
        psinfo->pr_flag = p->flags;
-       SET_UID(psinfo->pr_uid, p->uid);
-       SET_GID(psinfo->pr_gid, p->gid);
+       rcu_read_lock();
+       cred = __task_cred(p);
+       SET_UID(psinfo->pr_uid, from_kuid_munged(cred->user_ns, cred->uid));
+       SET_GID(psinfo->pr_gid, from_kgid_munged(cred->user_ns, cred->gid));
+       rcu_read_unlock();
        strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));
        
        return 0;
@@ -1382,6 +1393,103 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
        fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
 }
 
+static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
+               siginfo_t *siginfo)
+{
+       mm_segment_t old_fs = get_fs();
+       set_fs(KERNEL_DS);
+       copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
+       set_fs(old_fs);
+       fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
+}
+
+#define MAX_FILE_NOTE_SIZE (4*1024*1024)
+/*
+ * Format of NT_FILE note:
+ *
+ * long count     -- how many files are mapped
+ * long page_size -- units for file_ofs
+ * array of [COUNT] elements of
+ *   long start
+ *   long end
+ *   long file_ofs
+ * followed by COUNT filenames in ASCII: "FILE1" NUL "FILE2" NUL...
+ */
+static int fill_files_note(struct memelfnote *note)
+{
+       struct vm_area_struct *vma;
+       unsigned count, size, names_ofs, remaining, n;
+       user_long_t *data;
+       user_long_t *start_end_ofs;
+       char *name_base, *name_curpos;
+
+       /* *Estimated* file count and total data size needed */
+       count = current->mm->map_count;
+       size = count * 64;
+
+       names_ofs = (2 + 3 * count) * sizeof(data[0]);
+ alloc:
+       if (size >= MAX_FILE_NOTE_SIZE) /* paranoia check */
+               return -EINVAL;
+       size = round_up(size, PAGE_SIZE);
+       data = vmalloc(size);
+       if (!data)
+               return -ENOMEM;
+
+       start_end_ofs = data + 2;
+       name_base = name_curpos = ((char *)data) + names_ofs;
+       remaining = size - names_ofs;
+       count = 0;
+       for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
+               struct file *file;
+               const char *filename;
+
+               file = vma->vm_file;
+               if (!file)
+                       continue;
+               filename = d_path(&file->f_path, name_curpos, remaining);
+               if (IS_ERR(filename)) {
+                       if (PTR_ERR(filename) == -ENAMETOOLONG) {
+                               vfree(data);
+                               size = size * 5 / 4;
+                               goto alloc;
+                       }
+                       continue;
+               }
+
+               /* d_path() fills the buffer from the end; move the name down */
+               /* n = strlen(filename) + 1: */
+               n = (name_curpos + remaining) - filename;
+               remaining = filename - name_curpos;
+               memmove(name_curpos, filename, n);
+               name_curpos += n;
+
+               *start_end_ofs++ = vma->vm_start;
+               *start_end_ofs++ = vma->vm_end;
+               *start_end_ofs++ = vma->vm_pgoff;
+               count++;
+       }
+
+       /* Now we know exact count of files, can store it */
+       data[0] = count;
+       data[1] = PAGE_SIZE;
+       /*
+        * The count is usually less than current->mm->map_count,
+        * so we need to move the filenames down.
+        */
+       n = current->mm->map_count - count;
+       if (n != 0) {
+               unsigned shift_bytes = n * 3 * sizeof(data[0]);
+               memmove(name_base - shift_bytes, name_base,
+                       name_curpos - name_base);
+               name_curpos -= shift_bytes;
+       }
+
+       size = name_curpos - (char *)data;
+       fill_note(note, "CORE", NT_FILE, size, data);
+       return 0;
+}
+
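/*
 * Illustrative reader-side sketch, not kernel code: walking the NT_FILE
 * descriptor that fill_files_note() emits, following the layout described
 * in the comment above.  Assumes the note data has already been pulled out
 * of the core file and that its user_long_t width matches the host's long;
 * a real tool would also bounds-check against the note size.
 */
#include <stdio.h>
#include <string.h>

void print_nt_file(const long *data)
{
	long count = data[0];
	long page_size = data[1];
	const long *triple = data + 2;			/* start, end, file_ofs */
	const char *name = (const char *)(data + 2 + 3 * count);
	long i;

	for (i = 0; i < count; i++, triple += 3) {
		printf("%#lx-%#lx at file offset %#lx: %s\n",
		       (unsigned long)triple[0], (unsigned long)triple[1],
		       (unsigned long)(triple[2] * page_size), name);
		name += strlen(name) + 1;	/* names are NUL-separated */
	}
}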
 #ifdef CORE_DUMP_USE_REGSET
 #include <linux/regset.h>
 
@@ -1395,7 +1503,10 @@ struct elf_thread_core_info {
 struct elf_note_info {
        struct elf_thread_core_info *thread;
        struct memelfnote psinfo;
+       struct memelfnote signote;
        struct memelfnote auxv;
+       struct memelfnote files;
+       user_siginfo_t csigdata;
        size_t size;
        int thread_notes;
 };
@@ -1412,6 +1523,22 @@ static void do_thread_regset_writeback(struct task_struct *task,
                regset->writeback(task, regset, 1);
 }
 
+#ifndef PR_REG_SIZE
+#define PR_REG_SIZE(S) sizeof(S)
+#endif
+
+#ifndef PRSTATUS_SIZE
+#define PRSTATUS_SIZE(S) sizeof(S)
+#endif
+
+#ifndef PR_REG_PTR
+#define PR_REG_PTR(S) (&((S)->pr_reg))
+#endif
+
+#ifndef SET_PR_FPVALID
+#define SET_PR_FPVALID(S, V) ((S)->pr_fpvalid = (V))
+#endif
+
 static int fill_thread_core_info(struct elf_thread_core_info *t,
                                 const struct user_regset_view *view,
                                 long signr, size_t *total)
@@ -1426,11 +1553,11 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
         */
        fill_prstatus(&t->prstatus, t->task, signr);
        (void) view->regsets[0].get(t->task, &view->regsets[0],
-                                   0, sizeof(t->prstatus.pr_reg),
-                                   &t->prstatus.pr_reg, NULL);
+                                   0, PR_REG_SIZE(t->prstatus.pr_reg),
+                                   PR_REG_PTR(&t->prstatus), NULL);
 
        fill_note(&t->notes[0], "CORE", NT_PRSTATUS,
-                 sizeof(t->prstatus), &t->prstatus);
+                 PRSTATUS_SIZE(t->prstatus), &t->prstatus);
        *total += notesize(&t->notes[0]);
 
        do_thread_regset_writeback(t->task, &view->regsets[0]);
@@ -1443,7 +1570,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
        for (i = 1; i < view->n; ++i) {
                const struct user_regset *regset = &view->regsets[i];
                do_thread_regset_writeback(t->task, regset);
-               if (regset->core_note_type &&
+               if (regset->core_note_type && regset->get &&
                    (!regset->active || regset->active(t->task, regset))) {
                        int ret;
                        size_t size = regset->n * regset->size;
@@ -1460,7 +1587,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
                                                  regset->core_note_type,
                                                  size, data);
                                else {
-                                       t->prstatus.pr_fpvalid = 1;
+                                       SET_PR_FPVALID(&t->prstatus, 1);
                                        fill_note(&t->notes[i], "CORE",
                                                  NT_PRFPREG, size, data);
                                }
@@ -1474,23 +1601,25 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
 
 static int fill_note_info(struct elfhdr *elf, int phdrs,
                          struct elf_note_info *info,
-                         long signr, struct pt_regs *regs)
+                         siginfo_t *siginfo, struct pt_regs *regs)
 {
        struct task_struct *dump_task = current;
        const struct user_regset_view *view = task_user_regset_view(dump_task);
        struct elf_thread_core_info *t;
        struct elf_prpsinfo *psinfo;
-       struct task_struct *g, *p;
+       struct core_thread *ct;
        unsigned int i;
 
        info->size = 0;
        info->thread = NULL;
 
        psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
-       fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
-
-       if (psinfo == NULL)
+       if (psinfo == NULL) {
+               info->psinfo.data = NULL; /* So we don't free this wrongly */
                return 0;
+       }
+
+       fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
 
        /*
         * Figure out how many notes we're going to need for each thread.
@@ -1514,42 +1643,37 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
         * Initialize the ELF file header.
         */
        fill_elf_header(elf, phdrs,
-                       view->e_machine, view->e_flags, view->ei_osabi);
+                       view->e_machine, view->e_flags);
 
        /*
         * Allocate a structure for each thread.
         */
-       rcu_read_lock();
-       do_each_thread(g, p)
-               if (p->mm == dump_task->mm) {
-                       t = kzalloc(offsetof(struct elf_thread_core_info,
-                                            notes[info->thread_notes]),
-                                   GFP_ATOMIC);
-                       if (unlikely(!t)) {
-                               rcu_read_unlock();
-                               return 0;
-                       }
-                       t->task = p;
-                       if (p == dump_task || !info->thread) {
-                               t->next = info->thread;
-                               info->thread = t;
-                       } else {
-                               /*
-                                * Make sure to keep the original task at
-                                * the head of the list.
-                                */
-                               t->next = info->thread->next;
-                               info->thread->next = t;
-                       }
+       for (ct = &dump_task->mm->core_state->dumper; ct; ct = ct->next) {
+               t = kzalloc(offsetof(struct elf_thread_core_info,
+                                    notes[info->thread_notes]),
+                           GFP_KERNEL);
+               if (unlikely(!t))
+                       return 0;
+
+               t->task = ct->task;
+               if (ct->task == dump_task || !info->thread) {
+                       t->next = info->thread;
+                       info->thread = t;
+               } else {
+                       /*
+                        * Make sure to keep the original task at
+                        * the head of the list.
+                        */
+                       t->next = info->thread->next;
+                       info->thread->next = t;
                }
-       while_each_thread(g, p);
-       rcu_read_unlock();
+       }
 
        /*
         * Now fill in each thread's information.
         */
        for (t = info->thread; t != NULL; t = t->next)
-               if (!fill_thread_core_info(t, view, signr, &info->size))
+               if (!fill_thread_core_info(t, view, siginfo->si_signo, &info->size))
                        return 0;
 
        /*
@@ -1558,9 +1682,15 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
        fill_psinfo(psinfo, dump_task->group_leader, dump_task->mm);
        info->size += notesize(&info->psinfo);
 
+       fill_siginfo_note(&info->signote, &info->csigdata, siginfo);
+       info->size += notesize(&info->signote);
+
        fill_auxv_note(&info->auxv, current->mm);
        info->size += notesize(&info->auxv);
 
+       if (fill_files_note(&info->files) == 0)
+               info->size += notesize(&info->files);
+
        return 1;
 }
 
@@ -1587,8 +1717,13 @@ static int write_note_info(struct elf_note_info *info,
 
                if (first && !writenote(&info->psinfo, file, foffset))
                        return 0;
+               if (first && !writenote(&info->signote, file, foffset))
+                       return 0;
                if (first && !writenote(&info->auxv, file, foffset))
                        return 0;
+               if (first && info->files.data &&
+                               !writenote(&info->files, file, foffset))
+                       return 0;
 
                for (i = 1; i < info->thread_notes; ++i)
                        if (t->notes[i].data &&
@@ -1615,6 +1750,7 @@ static void free_note_info(struct elf_note_info *info)
                kfree(t);
        }
        kfree(info->psinfo.data);
+       vfree(info->files.data);
 }
 
 #else
@@ -1673,6 +1809,7 @@ static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
 
 struct elf_note_info {
        struct memelfnote *notes;
+       struct memelfnote *notes_files;
        struct elf_prstatus *prstatus;  /* NT_PRSTATUS */
        struct elf_prpsinfo *psinfo;    /* NT_PRPSINFO */
        struct list_head thread_list;
@@ -1680,29 +1817,18 @@ struct elf_note_info {
 #ifdef ELF_CORE_COPY_XFPREGS
        elf_fpxregset_t *xfpu;
 #endif
+       user_siginfo_t csigdata;
        int thread_status_size;
        int numnote;
 };
 
-static int fill_note_info(struct elfhdr *elf, int phdrs,
-                         struct elf_note_info *info,
-                         long signr, struct pt_regs *regs)
+static int elf_note_info_init(struct elf_note_info *info)
 {
-#define        NUM_NOTES       6
-       struct list_head *t;
-       struct task_struct *g, *p;
-
-       info->notes = NULL;
-       info->prstatus = NULL;
-       info->psinfo = NULL;
-       info->fpu = NULL;
-#ifdef ELF_CORE_COPY_XFPREGS
-       info->xfpu = NULL;
-#endif
+       memset(info, 0, sizeof(*info));
        INIT_LIST_HEAD(&info->thread_list);
 
-       info->notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote),
-                             GFP_KERNEL);
+       /* Allocate space for ELF notes */
+       info->notes = kmalloc(8 * sizeof(struct memelfnote), GFP_KERNEL);
        if (!info->notes)
                return 0;
        info->psinfo = kmalloc(sizeof(*info->psinfo), GFP_KERNEL);
@@ -1719,38 +1845,47 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
        if (!info->xfpu)
                return 0;
 #endif
+       return 1;
+}
+
+static int fill_note_info(struct elfhdr *elf, int phdrs,
+                         struct elf_note_info *info,
+                         siginfo_t *siginfo, struct pt_regs *regs)
+{
+       struct list_head *t;
 
-       info->thread_status_size = 0;
-       if (signr) {
+       if (!elf_note_info_init(info))
+               return 0;
+
+       if (siginfo->si_signo) {
+               struct core_thread *ct;
                struct elf_thread_status *ets;
-               rcu_read_lock();
-               do_each_thread(g, p)
-                       if (current->mm == p->mm && current != p) {
-                               ets = kzalloc(sizeof(*ets), GFP_ATOMIC);
-                               if (!ets) {
-                                       rcu_read_unlock();
-                                       return 0;
-                               }
-                               ets->thread = p;
-                               list_add(&ets->list, &info->thread_list);
-                       }
-               while_each_thread(g, p);
-               rcu_read_unlock();
+
+               for (ct = current->mm->core_state->dumper.next;
+                                               ct; ct = ct->next) {
+                       ets = kzalloc(sizeof(*ets), GFP_KERNEL);
+                       if (!ets)
+                               return 0;
+
+                       ets->thread = ct->task;
+                       list_add(&ets->list, &info->thread_list);
+               }
+
                list_for_each(t, &info->thread_list) {
                        int sz;
 
                        ets = list_entry(t, struct elf_thread_status, list);
-                       sz = elf_dump_thread_status(signr, ets);
+                       sz = elf_dump_thread_status(siginfo->si_signo, ets);
                        info->thread_status_size += sz;
                }
        }
        /* now collect the dump for the current */
        memset(info->prstatus, 0, sizeof(*info->prstatus));
-       fill_prstatus(info->prstatus, current, signr);
+       fill_prstatus(info->prstatus, current, siginfo->si_signo);
        elf_core_copy_regs(&info->prstatus->pr_reg, regs);
 
        /* Set up header */
-       fill_elf_header(elf, phdrs, ELF_ARCH, ELF_CORE_EFLAGS, ELF_OSABI);
+       fill_elf_header(elf, phdrs, ELF_ARCH, ELF_CORE_EFLAGS);
 
        /*
         * Set up the notes in similar form to SVR4 core dumps made
@@ -1763,9 +1898,14 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
        fill_note(info->notes + 1, "CORE", NT_PRPSINFO,
                  sizeof(*info->psinfo), info->psinfo);
 
-       info->numnote = 2;
+       fill_siginfo_note(info->notes + 2, &info->csigdata, siginfo);
+       fill_auxv_note(info->notes + 3, current->mm);
+       info->numnote = 4;
 
-       fill_auxv_note(&info->notes[info->numnote++], current->mm);
+       if (fill_files_note(info->notes + info->numnote) == 0) {
+               info->notes_files = info->notes + info->numnote;
+               info->numnote++;
+       }
 
        /* Try to dump the FPU. */
        info->prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs,
@@ -1781,8 +1921,6 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
 #endif
 
        return 1;
-
-#undef NUM_NOTES
 }
 
 static size_t get_note_info_size(struct elf_note_info *info)
@@ -1829,6 +1967,10 @@ static void free_note_info(struct elf_note_info *info)
                kfree(list_entry(tmp, struct elf_thread_status, list));
        }
 
+       /* Free data possibly allocated by fill_files_note(): */
+       if (info->notes_files)
+               vfree(info->notes_files->data);
+
        kfree(info->prstatus);
        kfree(info->psinfo);
        kfree(info->notes);
@@ -1866,6 +2008,34 @@ static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
        return gate_vma;
 }
 
+static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
+                            elf_addr_t e_shoff, int segs)
+{
+       elf->e_shoff = e_shoff;
+       elf->e_shentsize = sizeof(*shdr4extnum);
+       elf->e_shnum = 1;
+       elf->e_shstrndx = SHN_UNDEF;
+
+       memset(shdr4extnum, 0, sizeof(*shdr4extnum));
+
+       shdr4extnum->sh_type = SHT_NULL;
+       shdr4extnum->sh_size = elf->e_shnum;
+       shdr4extnum->sh_link = elf->e_shstrndx;
+       shdr4extnum->sh_info = segs;
+}
+
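/*
 * Illustrative reader-side sketch, not kernel code: when e_phnum reads
 * PN_XNUM, fill_extnum_info() above has parked the real segment count in
 * the sh_info field of the single section header, as the ELF gABI
 * specifies.  Assumes a 64-bit core image already mapped read-only at
 * "base".
 */
#include <elf.h>
#include <stdint.h>

uint32_t core_phnum(const void *base)
{
	const Elf64_Ehdr *ehdr = base;
	const Elf64_Shdr *shdr0;

	if (ehdr->e_phnum != PN_XNUM)
		return ehdr->e_phnum;
	/* extended numbering: section header 0 carries the true count */
	shdr0 = (const Elf64_Shdr *)((const char *)base + ehdr->e_shoff);
	return shdr0->sh_info;
}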
+static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
+                                    unsigned long mm_flags)
+{
+       struct vm_area_struct *vma;
+       size_t size = 0;
+
+       for (vma = first_vma(current, gate_vma); vma != NULL;
+            vma = next_vma(vma, gate_vma))
+               size += vma_dump_size(vma, mm_flags);
+       return size;
+}
+
 /*
  * Actual dumper
  *
@@ -1873,7 +2043,7 @@ static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
  * and then they are actually written out.  If we run out of core limit
  * we just truncate.
  */
-static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit)
+static int elf_core_dump(struct coredump_params *cprm)
 {
        int has_dumped = 0;
        mm_segment_t fs;
@@ -1882,8 +2052,11 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
        struct vm_area_struct *vma, *gate_vma;
        struct elfhdr *elf = NULL;
        loff_t offset = 0, dataoff, foffset;
-       unsigned long mm_flags;
-       struct elf_note_info info;
+       struct elf_note_info info = { };
+       struct elf_phdr *phdr4note = NULL;
+       struct elf_shdr *shdr4extnum = NULL;
+       Elf_Half e_phnum;
+       elf_addr_t e_shoff;
 
        /*
         * We no longer stop all VM operations.
@@ -1901,55 +2074,78 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
        elf = kmalloc(sizeof(*elf), GFP_KERNEL);
        if (!elf)
                goto out;
-       
+       /*
+        * The number of segs is recorded in the ELF header as a 16-bit value.
+        * Please check the DEFAULT_MAX_MAP_COUNT definition if you modify this.
+        */
        segs = current->mm->map_count;
-#ifdef ELF_CORE_EXTRA_PHDRS
-       segs += ELF_CORE_EXTRA_PHDRS;
-#endif
+       segs += elf_core_extra_phdrs();
 
-       gate_vma = get_gate_vma(current);
+       gate_vma = get_gate_vma(current->mm);
        if (gate_vma != NULL)
                segs++;
 
+       /* for notes section */
+       segs++;
+
+       /* If segs > PN_XNUM (0xffff), then e_phnum overflows. To avoid
+        * this, the kernel supports extended numbering. Have a look at
+        * include/linux/elf.h for further information. */
+       e_phnum = segs > PN_XNUM ? PN_XNUM : segs;
+
        /*
         * Collect all the non-memory information about the process for the
         * notes.  This also sets up the file header.
         */
-       if (!fill_note_info(elf, segs + 1, /* including notes section */
-                           &info, signr, regs))
+       if (!fill_note_info(elf, e_phnum, &info, cprm->siginfo, cprm->regs))
                goto cleanup;
 
        has_dumped = 1;
-       current->flags |= PF_DUMPCORE;
-  
+
        fs = get_fs();
        set_fs(KERNEL_DS);
 
-       DUMP_WRITE(elf, sizeof(*elf));
        offset += sizeof(*elf);                         /* Elf header */
-       offset += (segs + 1) * sizeof(struct elf_phdr); /* Program headers */
+       offset += segs * sizeof(struct elf_phdr);       /* Program headers */
        foffset = offset;
 
        /* Write notes phdr entry */
        {
-               struct elf_phdr phdr;
                size_t sz = get_note_info_size(&info);
 
                sz += elf_coredump_extra_notes_size();
 
-               fill_elf_note_phdr(&phdr, sz, offset);
+               phdr4note = kmalloc(sizeof(*phdr4note), GFP_KERNEL);
+               if (!phdr4note)
+                       goto end_coredump;
+
+               fill_elf_note_phdr(phdr4note, sz, offset);
                offset += sz;
-               DUMP_WRITE(&phdr, sizeof(phdr));
        }
 
        dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
 
-       /*
-        * We must use the same mm->flags while dumping core to avoid
-        * inconsistency between the program headers and bodies, otherwise an
-        * unusable core file can be generated.
-        */
-       mm_flags = current->mm->flags;
+       offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
+       offset += elf_core_extra_data_size();
+       e_shoff = offset;
+
+       if (e_phnum == PN_XNUM) {
+               shdr4extnum = kmalloc(sizeof(*shdr4extnum), GFP_KERNEL);
+               if (!shdr4extnum)
+                       goto end_coredump;
+               fill_extnum_info(elf, shdr4extnum, e_shoff, segs);
+       }
+
+       offset = dataoff;
+
+       size += sizeof(*elf);
+       if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
+               goto end_coredump;
+
+       size += sizeof(*phdr4note);
+       if (size > cprm->limit
+           || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
+               goto end_coredump;
 
        /* Write program headers for segments dump */
        for (vma = first_vma(current, gate_vma); vma != NULL;
@@ -1960,7 +2156,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
                phdr.p_offset = offset;
                phdr.p_vaddr = vma->vm_start;
                phdr.p_paddr = 0;
-               phdr.p_filesz = vma_dump_size(vma, mm_flags);
+               phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
                phdr.p_memsz = vma->vm_end - vma->vm_start;
                offset += phdr.p_filesz;
                phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
@@ -1970,81 +2166,81 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
                        phdr.p_flags |= PF_X;
                phdr.p_align = ELF_EXEC_PAGESIZE;
 
-               DUMP_WRITE(&phdr, sizeof(phdr));
+               size += sizeof(phdr);
+               if (size > cprm->limit
+                   || !dump_write(cprm->file, &phdr, sizeof(phdr)))
+                       goto end_coredump;
        }
 
-#ifdef ELF_CORE_WRITE_EXTRA_PHDRS
-       ELF_CORE_WRITE_EXTRA_PHDRS;
-#endif
+       if (!elf_core_write_extra_phdrs(cprm->file, offset, &size, cprm->limit))
+               goto end_coredump;
 
        /* write out the notes section */
-       if (!write_note_info(&info, file, &foffset))
+       if (!write_note_info(&info, cprm->file, &foffset))
                goto end_coredump;
 
-       if (elf_coredump_extra_notes_write(file, &foffset))
+       if (elf_coredump_extra_notes_write(cprm->file, &foffset))
                goto end_coredump;
 
        /* Align to page */
-       DUMP_SEEK(dataoff - foffset);
+       if (!dump_seek(cprm->file, dataoff - foffset))
+               goto end_coredump;
 
        for (vma = first_vma(current, gate_vma); vma != NULL;
                        vma = next_vma(vma, gate_vma)) {
                unsigned long addr;
                unsigned long end;
 
-               end = vma->vm_start + vma_dump_size(vma, mm_flags);
+               end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
 
                for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
                        struct page *page;
-                       struct vm_area_struct *tmp_vma;
-
-                       if (get_user_pages(current, current->mm, addr, 1, 0, 1,
-                                               &page, &tmp_vma) <= 0) {
-                               DUMP_SEEK(PAGE_SIZE);
-                       } else {
-                               if (page == ZERO_PAGE(0)) {
-                                       if (!dump_seek(file, PAGE_SIZE)) {
-                                               page_cache_release(page);
-                                               goto end_coredump;
-                                       }
-                               } else {
-                                       void *kaddr;
-                                       flush_cache_page(tmp_vma, addr,
-                                                        page_to_pfn(page));
-                                       kaddr = kmap(page);
-                                       if ((size += PAGE_SIZE) > limit ||
-                                           !dump_write(file, kaddr,
-                                           PAGE_SIZE)) {
-                                               kunmap(page);
-                                               page_cache_release(page);
-                                               goto end_coredump;
-                                       }
-                                       kunmap(page);
-                               }
+                       int stop;
+
+                       page = get_dump_page(addr);
+                       if (page) {
+                               void *kaddr = kmap(page);
+                               stop = ((size += PAGE_SIZE) > cprm->limit) ||
+                                       !dump_write(cprm->file, kaddr,
+                                                   PAGE_SIZE);
+                               kunmap(page);
                                page_cache_release(page);
-                       }
+                       } else
+                               stop = !dump_seek(cprm->file, PAGE_SIZE);
+                       if (stop)
+                               goto end_coredump;
                }
        }
 
-#ifdef ELF_CORE_WRITE_EXTRA_DATA
-       ELF_CORE_WRITE_EXTRA_DATA;
-#endif
+       if (!elf_core_write_extra_data(cprm->file, &size, cprm->limit))
+               goto end_coredump;
+
+       if (e_phnum == PN_XNUM) {
+               size += sizeof(*shdr4extnum);
+               if (size > cprm->limit
+                   || !dump_write(cprm->file, shdr4extnum,
+                                  sizeof(*shdr4extnum)))
+                       goto end_coredump;
+       }
 
 end_coredump:
        set_fs(fs);
 
 cleanup:
        free_note_info(&info);
+       kfree(shdr4extnum);
+       kfree(phdr4note);
        kfree(elf);
 out:
        return has_dumped;
 }
 
-#endif         /* USE_ELF_CORE_DUMP */
+#endif         /* CONFIG_ELF_CORE */
 
 static int __init init_elf_binfmt(void)
 {
-       return register_binfmt(&elf_format);
+       register_binfmt(&elf_format);
+       return 0;
 }
 
 static void __exit exit_elf_binfmt(void)