diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 786ee275ec0ad88711bdcc41666b4ea74c83c111..dd0fdfc56d38c4753091ff44da049158048a8b6d 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/fs.h>
-#include <linux/stat.h>
-#include <linux/time.h>
 #include <linux/mm.h>
 #include <linux/mman.h>
-#include <linux/a.out.h>
 #include <linux/errno.h>
 #include <linux/signal.h>
 #include <linux/binfmts.h>
 #include <linux/string.h>
 #include <linux/file.h>
-#include <linux/fcntl.h>
-#include <linux/ptrace.h>
 #include <linux/slab.h>
-#include <linux/shm.h>
 #include <linux/personality.h>
 #include <linux/elfcore.h>
 #include <linux/init.h>
 #include <linux/highuid.h>
-#include <linux/smp.h>
 #include <linux/compiler.h>
 #include <linux/highmem.h>
 #include <linux/pagemap.h>
 #include <linux/security.h>
-#include <linux/syscalls.h>
 #include <linux/random.h>
 #include <linux/elf.h>
 #include <linux/utsname.h>
+#include <linux/coredump.h>
 #include <asm/uaccess.h>
 #include <asm/param.h>
 #include <asm/page.h>
@@ -52,8 +45,8 @@ static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
  * If we don't support core dumping, then supply a NULL so we
  * don't even try.
  */
-#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
-static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
+#ifdef CONFIG_ELF_CORE
+static int elf_core_dump(struct coredump_params *cprm);
 #else
 #define elf_core_dump  NULL
 #endif
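
The old prototype passed the signal number, registers, output file and core limit as separate arguments; they now travel together in a single coredump_params argument. A rough sketch of that structure, inferred only from the cprm-> accesses made later in this patch (the authoritative definition lives in the newly included linux/coredump.h and may contain more fields):

struct coredump_params {
        long signr;             /* signal that triggered the dump        */
        struct pt_regs *regs;   /* register state at that point          */
        struct file *file;      /* core file being written               */
        unsigned long limit;    /* RLIMIT_CORE in effect at dump time    */
        unsigned long mm_flags; /* snapshot of the mm dump-filter bits   */
};
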
@@ -73,12 +66,11 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
 #define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
 
 static struct linux_binfmt elf_format = {
-               .module         = THIS_MODULE,
-               .load_binary    = load_elf_binary,
-               .load_shlib     = load_elf_library,
-               .core_dump      = elf_core_dump,
-               .min_coredump   = ELF_EXEC_PAGESIZE,
-               .hasvdso        = 1
+       .module         = THIS_MODULE,
+       .load_binary    = load_elf_binary,
+       .load_shlib     = load_elf_library,
+       .core_dump      = elf_core_dump,
+       .min_coredump   = ELF_EXEC_PAGESIZE,
 };
 
 #define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)
@@ -117,7 +109,7 @@ static int padzero(unsigned long elf_bss)
        return 0;
 }
 
-/* Let's use some macros to make this stack manipulation a litle clearer */
+/* Let's use some macros to make this stack manipulation a little clearer */
 #ifdef CONFIG_STACK_GROWSUP
 #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
 #define STACK_ROUND(sp, items) \
@@ -132,10 +124,18 @@ static int padzero(unsigned long elf_bss)
 #define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
 #endif
 
+#ifndef ELF_BASE_PLATFORM
+/*
+ * AT_BASE_PLATFORM indicates the "real" hardware/microarchitecture.
+ * If the arch defines ELF_BASE_PLATFORM (in asm/elf.h), the value
+ * will be copied to the user stack in the same manner as AT_PLATFORM.
+ */
+#define ELF_BASE_PLATFORM NULL
+#endif
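
An architecture opts in by defining ELF_BASE_PLATFORM in its asm/elf.h; any NUL-terminated string works, since create_elf_tables() only strlen()s it and copies it onto the user stack. A hypothetical fragment (the value is invented; powerpc, for instance, points the macro at its base platform string):

/* Hypothetical arch/<arch>/include/asm/elf.h fragment */
#define ELF_BASE_PLATFORM ("example-cpu")
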
+
 static int
 create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
-               int interp_aout, unsigned long load_addr,
-               unsigned long interp_load_addr)
+               unsigned long load_addr, unsigned long interp_load_addr)
 {
        unsigned long p = bprm->p;
        int argc = bprm->argc;
@@ -144,11 +144,15 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
        elf_addr_t __user *envp;
        elf_addr_t __user *sp;
        elf_addr_t __user *u_platform;
+       elf_addr_t __user *u_base_platform;
+       elf_addr_t __user *u_rand_bytes;
        const char *k_platform = ELF_PLATFORM;
+       const char *k_base_platform = ELF_BASE_PLATFORM;
+       unsigned char k_rand_bytes[16];
        int items;
        elf_addr_t *elf_info;
        int ei_index = 0;
-       struct task_struct *tsk = current;
+       const struct cred *cred = current_cred();
        struct vm_area_struct *vma;
 
        /*
@@ -174,6 +178,28 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
                        return -EFAULT;
        }
 
+       /*
+        * If this architecture has a "base" platform capability
+        * string, copy it to userspace.
+        */
+       u_base_platform = NULL;
+       if (k_base_platform) {
+               size_t len = strlen(k_base_platform) + 1;
+
+               u_base_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
+               if (__copy_to_user(u_base_platform, k_base_platform, len))
+                       return -EFAULT;
+       }
+
+       /*
+        * Generate 16 random bytes for userspace PRNG seeding.
+        */
+       get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
+       u_rand_bytes = (elf_addr_t __user *)
+                      STACK_ALLOC(p, sizeof(k_rand_bytes));
+       if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
+               return -EFAULT;
+
        /* Create the ELF interpreter info */
        elf_info = (elf_addr_t *)current->mm->saved_auxv;
        /* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */
@@ -201,15 +227,21 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
        NEW_AUX_ENT(AT_BASE, interp_load_addr);
        NEW_AUX_ENT(AT_FLAGS, 0);
        NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
-       NEW_AUX_ENT(AT_UID, tsk->uid);
-       NEW_AUX_ENT(AT_EUID, tsk->euid);
-       NEW_AUX_ENT(AT_GID, tsk->gid);
-       NEW_AUX_ENT(AT_EGID, tsk->egid);
+       NEW_AUX_ENT(AT_UID, cred->uid);
+       NEW_AUX_ENT(AT_EUID, cred->euid);
+       NEW_AUX_ENT(AT_GID, cred->gid);
+       NEW_AUX_ENT(AT_EGID, cred->egid);
        NEW_AUX_ENT(AT_SECURE, security_bprm_secureexec(bprm));
+       NEW_AUX_ENT(AT_RANDOM, (elf_addr_t)(unsigned long)u_rand_bytes);
+       NEW_AUX_ENT(AT_EXECFN, bprm->exec);
        if (k_platform) {
                NEW_AUX_ENT(AT_PLATFORM,
                            (elf_addr_t)(unsigned long)u_platform);
        }
+       if (k_base_platform) {
+               NEW_AUX_ENT(AT_BASE_PLATFORM,
+                           (elf_addr_t)(unsigned long)u_base_platform);
+       }
        if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
                NEW_AUX_ENT(AT_EXECFD, bprm->interp_data);
        }
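
From userspace these entries appear in the auxiliary vector; with glibc 2.16 or later they can be fetched with getauxval(3). A minimal, illustrative consumer (AT_RANDOM points at the 16 bytes written above, AT_EXECFN at the pathname passed to execve(), AT_BASE_PLATFORM is 0 unless the architecture defines ELF_BASE_PLATFORM):

#include <elf.h>
#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
        const unsigned char *seed = (const unsigned char *)getauxval(AT_RANDOM);
        const char *execfn = (const char *)getauxval(AT_EXECFN);
        const char *base = (const char *)getauxval(AT_BASE_PLATFORM);

        if (seed)
                printf("AT_RANDOM starts with %02x %02x %02x %02x\n",
                       seed[0], seed[1], seed[2], seed[3]);
        if (execfn)
                printf("AT_EXECFN = %s\n", execfn);
        if (base)
                printf("AT_BASE_PLATFORM = %s\n", base);
        return 0;
}
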
@@ -223,12 +255,7 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
 
        sp = STACK_ADD(p, ei_index);
 
-       items = (argc + 1) + (envc + 1);
-       if (interp_aout) {
-               items += 3; /* a.out interpreters require argv & envp too */
-       } else {
-               items += 1; /* ELF interpreters only put argc on the stack */
-       }
+       items = (argc + 1) + (envc + 1) + 1;
        bprm->p = STACK_ROUND(sp, items);
 
        /* Point sp at the lowest address on the stack */
@@ -251,16 +278,8 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
        /* Now, let's put argc (and argv, envp if appropriate) on the stack */
        if (__put_user(argc, sp++))
                return -EFAULT;
-       if (interp_aout) {
-               argv = sp + 2;
-               envp = argv + argc + 1;
-               if (__put_user((elf_addr_t)(unsigned long)argv, sp++) ||
-                   __put_user((elf_addr_t)(unsigned long)envp, sp++))
-                       return -EFAULT;
-       } else {
-               argv = sp;
-               envp = argv + argc + 1;
-       }
+       argv = sp;
+       envp = argv + argc + 1;
 
        /* Populate argv and envp */
        p = current->mm->arg_end = current->mm->arg_start;
@@ -270,7 +289,7 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
                        return -EFAULT;
                len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
                if (!len || len > MAX_ARG_STRLEN)
-                       return 0;
+                       return -EINVAL;
                p += len;
        }
        if (__put_user(0, argv))
@@ -282,7 +301,7 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
                        return -EFAULT;
                len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
                if (!len || len > MAX_ARG_STRLEN)
-                       return 0;
+                       return -EINVAL;
                p += len;
        }
        if (__put_user(0, envp))
@@ -296,8 +315,6 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
        return 0;
 }
 
-#ifndef elf_map
-
 static unsigned long elf_map(struct file *filep, unsigned long addr,
                struct elf_phdr *eppnt, int prot, int type,
                unsigned long total_size)
@@ -334,8 +351,6 @@ static unsigned long elf_map(struct file *filep, unsigned long addr,
        return(map_addr);
 }
 
-#endif /* !elf_map */
-
 static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
 {
        int i, first_idx = -1, last_idx = -1;
@@ -401,7 +416,7 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
                goto out;
 
        retval = kernel_read(interpreter, interp_elf_ex->e_phoff,
-                            (char *)elf_phdata,size);
+                            (char *)elf_phdata, size);
        error = -EIO;
        if (retval != size) {
                if (retval < 0)
@@ -482,22 +497,22 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
                }
        }
 
-       /*
-        * Now fill out the bss section.  First pad the last page up
-        * to the page boundary, and then perform a mmap to make sure
-        * that there are zero-mapped pages up to and including the 
-        * last bss page.
-        */
-       if (padzero(elf_bss)) {
-               error = -EFAULT;
-               goto out_close;
-       }
+       if (last_bss > elf_bss) {
+               /*
+                * Now fill out the bss section.  First pad the last page up
+                * to the page boundary, and then perform a mmap to make sure
+                * that there are zero-mapped pages up to and including the
+                * last bss page.
+                */
+               if (padzero(elf_bss)) {
+                       error = -EFAULT;
+                       goto out_close;
+               }
 
-       /* What we have mapped so far */
-       elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
+               /* What we have mapped so far */
+               elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
 
-       /* Map the last of the bss segment */
-       if (last_bss > elf_bss) {
+               /* Map the last of the bss segment */
                down_write(&current->mm->mmap_sem);
                error = do_brk(elf_bss, last_bss - elf_bss);
                up_write(&current->mm->mmap_sem);
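
The arithmetic here is easier to see with concrete numbers. A userspace sketch of the same rounding (values invented, 4 KiB pages assumed; in the kernel the rounding is ELF_PAGESTART() and the clearing is padzero()):

#include <stdio.h>

int main(void)
{
        unsigned long align    = 0x1000;        /* assumed ELF_MIN_ALIGN      */
        unsigned long elf_bss  = 0x0804a123;    /* end of file-backed data    */
        unsigned long last_bss = 0x08050000;    /* end of bss, from p_memsz   */

        /* padzero() clears the tail of the already-mapped page; the rest of
         * the bss is then mapped anonymously from the next page boundary. */
        unsigned long anon_start = (elf_bss + align - 1) & ~(align - 1);

        printf("zero-fill %#lx..%#lx, then map %#lx..%#lx anonymously\n",
               elf_bss, anon_start, anon_start, last_bss);
        return 0;
}
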
@@ -513,59 +528,12 @@ out:
        return error;
 }
 
-static unsigned long load_aout_interp(struct exec *interp_ex,
-               struct file *interpreter)
-{
-       unsigned long text_data, elf_entry = ~0UL;
-       char __user * addr;
-       loff_t offset;
-
-       current->mm->end_code = interp_ex->a_text;
-       text_data = interp_ex->a_text + interp_ex->a_data;
-       current->mm->end_data = text_data;
-       current->mm->brk = interp_ex->a_bss + text_data;
-
-       switch (N_MAGIC(*interp_ex)) {
-       case OMAGIC:
-               offset = 32;
-               addr = (char __user *)0;
-               break;
-       case ZMAGIC:
-       case QMAGIC:
-               offset = N_TXTOFF(*interp_ex);
-               addr = (char __user *)N_TXTADDR(*interp_ex);
-               break;
-       default:
-               goto out;
-       }
-
-       down_write(&current->mm->mmap_sem);     
-       do_brk(0, text_data);
-       up_write(&current->mm->mmap_sem);
-       if (!interpreter->f_op || !interpreter->f_op->read)
-               goto out;
-       if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
-               goto out;
-       flush_icache_range((unsigned long)addr,
-                          (unsigned long)addr + text_data);
-
-       down_write(&current->mm->mmap_sem);     
-       do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
-               interp_ex->a_bss);
-       up_write(&current->mm->mmap_sem);
-       elf_entry = interp_ex->a_entry;
-
-out:
-       return elf_entry;
-}
-
 /*
  * These are the functions used to load ELF style executables and shared
  * libraries.  There is no binary dependent code anywhere else.
  */
 
 #define INTERPRETER_NONE 0
-#define INTERPRETER_AOUT 1
 #define INTERPRETER_ELF 2
 
 #ifndef STACK_RND_MASK
@@ -594,26 +562,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
        unsigned long load_addr = 0, load_bias = 0;
        int load_addr_set = 0;
        char * elf_interpreter = NULL;
-       unsigned int interpreter_type = INTERPRETER_NONE;
-       unsigned char ibcs2_interpreter = 0;
        unsigned long error;
        struct elf_phdr *elf_ppnt, *elf_phdata;
        unsigned long elf_bss, elf_brk;
-       int elf_exec_fileno;
        int retval, i;
        unsigned int size;
        unsigned long elf_entry;
        unsigned long interp_load_addr = 0;
        unsigned long start_code, end_code, start_data, end_data;
-       unsigned long reloc_func_desc = 0;
-       char passed_fileno[6];
-       struct files_struct *files;
+       unsigned long reloc_func_desc __maybe_unused = 0;
        int executable_stack = EXSTACK_DEFAULT;
        unsigned long def_flags = 0;
        struct {
                struct elfhdr elf_ex;
                struct elfhdr interp_elf_ex;
-               struct exec interp_ex;
        } *loc;
 
        loc = kmalloc(sizeof(*loc), GFP_KERNEL);
@@ -634,7 +596,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
                goto out;
        if (!elf_check_arch(&loc->elf_ex))
                goto out;
-       if (!bprm->file->f_op||!bprm->file->f_op->mmap)
+       if (!bprm->file->f_op || !bprm->file->f_op->mmap)
                goto out;
 
        /* Now read in all of the header information */
@@ -657,23 +619,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
                goto out_free_ph;
        }
 
-       files = current->files; /* Refcounted so ok */
-       retval = unshare_files();
-       if (retval < 0)
-               goto out_free_ph;
-       if (files == current->files) {
-               put_files_struct(files);
-               files = NULL;
-       }
-
-       /* exec will make our files private anyway, but for the a.out
-          loader stuff we need to do it earlier */
-       retval = get_unused_fd();
-       if (retval < 0)
-               goto out_free_fh;
-       get_file(bprm->file);
-       fd_install(elf_exec_fileno = retval, bprm->file);
-
        elf_ppnt = elf_phdata;
        elf_bss = 0;
        elf_brk = 0;
@@ -692,13 +637,13 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
                        retval = -ENOEXEC;
                        if (elf_ppnt->p_filesz > PATH_MAX || 
                            elf_ppnt->p_filesz < 2)
-                               goto out_free_file;
+                               goto out_free_ph;
 
                        retval = -ENOMEM;
                        elf_interpreter = kmalloc(elf_ppnt->p_filesz,
                                                  GFP_KERNEL);
                        if (!elf_interpreter)
-                               goto out_free_file;
+                               goto out_free_ph;
 
                        retval = kernel_read(bprm->file, elf_ppnt->p_offset,
                                             elf_interpreter,
@@ -713,35 +658,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
                        if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
                                goto out_free_interp;
 
-                       /* If the program interpreter is one of these two,
-                        * then assume an iBCS2 image. Otherwise assume
-                        * a native linux image.
-                        */
-                       if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
-                           strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0)
-                               ibcs2_interpreter = 1;
-
-                       /*
-                        * The early SET_PERSONALITY here is so that the lookup
-                        * for the interpreter happens in the namespace of the 
-                        * to-be-execed image.  SET_PERSONALITY can select an
-                        * alternate root.
-                        *
-                        * However, SET_PERSONALITY is NOT allowed to switch
-                        * this task into the new images's memory mapping
-                        * policy - that is, TASK_SIZE must still evaluate to
-                        * that which is appropriate to the execing application.
-                        * This is because exit_mmap() needs to have TASK_SIZE
-                        * evaluate to the size of the old image.
-                        *
-                        * So if (say) a 64-bit application is execing a 32-bit
-                        * application it is the architecture's responsibility
-                        * to defer changing the value of TASK_SIZE until the
-                        * switch really is going to happen - do this in
-                        * flush_thread().      - akpm
-                        */
-                       SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
-
                        interpreter = open_exec(elf_interpreter);
                        retval = PTR_ERR(interpreter);
                        if (IS_ERR(interpreter))
@@ -752,8 +668,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
                         * mm->dumpable = 0 regardless of the interpreter's
                         * permissions.
                         */
-                       if (file_permission(interpreter, MAY_READ) < 0)
-                               bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;
+                       would_dump(bprm, interpreter);
 
                        retval = kernel_read(interpreter, 0, bprm->buf,
                                             BINPRM_BUF_SIZE);
@@ -764,7 +679,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
                        }
 
                        /* Get the exec headers */
-                       loc->interp_ex = *((struct exec *)bprm->buf);
                        loc->interp_elf_ex = *((struct elfhdr *)bprm->buf);
                        break;
                }
@@ -783,57 +697,13 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
 
        /* Some simple consistency checks for the interpreter */
        if (elf_interpreter) {
-               static int warn;
-               interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
-
-               /* Now figure out which format our binary is */
-               if ((N_MAGIC(loc->interp_ex) != OMAGIC) &&
-                   (N_MAGIC(loc->interp_ex) != ZMAGIC) &&
-                   (N_MAGIC(loc->interp_ex) != QMAGIC))
-                       interpreter_type = INTERPRETER_ELF;
-
-               if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
-                       interpreter_type &= ~INTERPRETER_ELF;
-
-               if (interpreter_type == INTERPRETER_AOUT && warn < 10) {
-                       printk(KERN_WARNING "a.out ELF interpreter %s is "
-                               "deprecated and will not be supported "
-                               "after Linux 2.6.25\n", elf_interpreter);
-                       warn++;
-               }
-
                retval = -ELIBBAD;
-               if (!interpreter_type)
+               /* Not an ELF interpreter */
+               if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
                        goto out_free_dentry;
-
-               /* Make sure only one type was selected */
-               if ((interpreter_type & INTERPRETER_ELF) &&
-                    interpreter_type != INTERPRETER_ELF) {
-                       // FIXME - ratelimit this before re-enabling
-                       // printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
-                       interpreter_type = INTERPRETER_ELF;
-               }
                /* Verify the interpreter has a valid arch */
-               if ((interpreter_type == INTERPRETER_ELF) &&
-                   !elf_check_arch(&loc->interp_elf_ex))
+               if (!elf_check_arch(&loc->interp_elf_ex))
                        goto out_free_dentry;
-       } else {
-               /* Executables without an interpreter also need a personality  */
-               SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
-       }
-
-       /* OK, we are done with that, now set up the arg stuff,
-          and then start this sucker up */
-       if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) {
-               char *passed_p = passed_fileno;
-               sprintf(passed_fileno, "%d", elf_exec_fileno);
-
-               if (elf_interpreter) {
-                       retval = copy_strings_kernel(1, &passed_p, bprm);
-                       if (retval)
-                               goto out_free_dentry; 
-                       bprm->argc++;
-               }
        }
 
        /* Flush all traces of the currently running executable */
@@ -841,25 +711,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
        if (retval)
                goto out_free_dentry;
 
-       /* Discard our unneeded old files struct */
-       if (files) {
-               put_files_struct(files);
-               files = NULL;
-       }
-
        /* OK, This is the point of no return */
        current->flags &= ~PF_FORKNOEXEC;
        current->mm->def_flags = def_flags;
 
        /* Do this immediately, since STACK_TOP as used in setup_arg_pages
           may depend on the personality.  */
-       SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
+       SET_PERSONALITY(loc->elf_ex);
        if (elf_read_implies_exec(loc->elf_ex, executable_stack))
                current->personality |= READ_IMPLIES_EXEC;
 
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                current->flags |= PF_RANDOMIZE;
-       arch_pick_mmap_layout(current->mm);
+
+       setup_new_exec(bprm);
 
        /* Do this so that we can load the interpreter, if need be.  We will
           change some of these later */
@@ -874,7 +739,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
        
        current->mm->start_stack = bprm->p;
 
-       /* Now we do a little grungy work by mmaping the ELF image into
+       /* Now we do a little grungy work by mmapping the ELF image into
           the correct location in memory. */
        for(i = 0, elf_ppnt = elf_phdata;
            i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
@@ -890,8 +755,8 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
                        /* There was a PT_LOAD segment with p_memsz > p_filesz
                           before this one. Map anonymous pages, if needed,
                           and clear the area.  */
-                       retval = set_brk (elf_bss + load_bias,
-                                         elf_brk + load_bias);
+                       retval = set_brk(elf_bss + load_bias,
+                                        elf_brk + load_bias);
                        if (retval) {
                                send_sig(SIGKILL, current, 0);
                                goto out_free_dentry;
@@ -929,7 +794,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
                         * default mmap base, as well as whatever program they
                         * might try to exec.  This is because the brk will
                         * follow the loader, and is not movable.  */
-#ifdef CONFIG_X86
+#if defined(CONFIG_X86) || defined(CONFIG_ARM)
                        load_bias = 0;
 #else
                        load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
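
The resulting layout is easy to observe from userspace: built as a PIE with an ELF interpreter, the executable lands at an mmap-chosen address when load_bias is 0 (x86, and ARM in this tree) or near ELF_ET_DYN_BASE elsewhere, and the heap typically starts just above it. An illustrative probe (addresses vary from run to run under ASLR):

#include <stdio.h>
#include <stdlib.h>

static int in_data = 1;

int main(void)
{
        int on_stack = 0;
        void *on_heap = malloc(16);

        /* text/data show where the image was mapped; a small malloc()
         * usually comes from the brk area that follows it. */
        printf("text  %p\ndata  %p\nheap  %p\nstack %p\n",
               (void *)main, (void *)&in_data, on_heap, (void *)&on_stack);
        free(on_heap);
        return 0;
}
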
@@ -1013,24 +878,19 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
        }
 
        if (elf_interpreter) {
-               if (interpreter_type == INTERPRETER_AOUT) {
-                       elf_entry = load_aout_interp(&loc->interp_ex,
-                                                    interpreter);
-               } else {
-                       unsigned long uninitialized_var(interp_map_addr);
-
-                       elf_entry = load_elf_interp(&loc->interp_elf_ex,
-                                                   interpreter,
-                                                   &interp_map_addr,
-                                                   load_bias);
-                       if (!IS_ERR((void *)elf_entry)) {
-                               /*
-                                * load_elf_interp() returns relocation
-                                * adjustment
-                                */
-                               interp_load_addr = elf_entry;
-                               elf_entry += loc->interp_elf_ex.e_entry;
-                       }
+               unsigned long uninitialized_var(interp_map_addr);
+
+               elf_entry = load_elf_interp(&loc->interp_elf_ex,
+                                           interpreter,
+                                           &interp_map_addr,
+                                           load_bias);
+               if (!IS_ERR((void *)elf_entry)) {
+                       /*
+                        * load_elf_interp() returns relocation
+                        * adjustment
+                        */
+                       interp_load_addr = elf_entry;
+                       elf_entry += loc->interp_elf_ex.e_entry;
                }
                if (BAD_ADDR(elf_entry)) {
                        force_sig(SIGSEGV, current);
@@ -1054,31 +914,25 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
 
        kfree(elf_phdata);
 
-       if (interpreter_type != INTERPRETER_AOUT)
-               sys_close(elf_exec_fileno);
-
        set_binfmt(&elf_format);
 
 #ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
-       retval = arch_setup_additional_pages(bprm, executable_stack);
+       retval = arch_setup_additional_pages(bprm, !!elf_interpreter);
        if (retval < 0) {
                send_sig(SIGKILL, current, 0);
                goto out;
        }
 #endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */
 
-       compute_creds(bprm);
+       install_exec_creds(bprm);
        current->flags &= ~PF_FORKNOEXEC;
        retval = create_elf_tables(bprm, &loc->elf_ex,
-                         (interpreter_type == INTERPRETER_AOUT),
                          load_addr, interp_load_addr);
        if (retval < 0) {
                send_sig(SIGKILL, current, 0);
                goto out;
        }
        /* N.B. passed_fileno might not be initialized? */
-       if (interpreter_type == INTERPRETER_AOUT)
-               current->mm->arg_start += strlen(passed_fileno) + 1;
        current->mm->end_code = end_code;
        current->mm->start_code = start_code;
        current->mm->start_data = start_data;
@@ -1086,9 +940,13 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
        current->mm->start_stack = bprm->p;
 
 #ifdef arch_randomize_brk
-       if (current->flags & PF_RANDOMIZE)
+       if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
                current->mm->brk = current->mm->start_brk =
                        arch_randomize_brk(current->mm);
+#ifdef CONFIG_COMPAT_BRK
+               current->brk_randomized = 1;
+#endif
+       }
 #endif
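
Whether the brk gets randomized now also depends on the kernel.randomize_va_space sysctl: 1 keeps the conservative behaviour (stack/mmap/vdso only), 2 additionally randomizes the heap start, and CONFIG_COMPAT_BRK kernels record that the brk was randomized in current->brk_randomized. A small, illustrative check from userspace:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
        FILE *f = fopen("/proc/sys/kernel/randomize_va_space", "r");
        int mode = -1;

        if (f) {
                if (fscanf(f, "%d", &mode) != 1)
                        mode = -1;
                fclose(f);
        }
        /* With mode 2 the initial break printed below should differ between
         * runs; with mode 0 or 1 it should be stable for a non-PIE build. */
        printf("randomize_va_space=%d  initial brk=%p\n", mode, sbrk(0));
        return 0;
}
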
 
        if (current->personality & MMAP_PAGE_ZERO) {
@@ -1117,12 +975,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
 #endif
 
        start_thread(regs, elf_entry, bprm->p);
-       if (unlikely(current->ptrace & PT_PTRACED)) {
-               if (current->ptrace & PT_TRACE_EXEC)
-                       ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
-               else
-                       send_sig(SIGTRAP, current, 0);
-       }
        retval = 0;
 out:
        kfree(loc);
@@ -1136,11 +988,6 @@ out_free_dentry:
                fput(interpreter);
 out_free_interp:
        kfree(elf_interpreter);
-out_free_file:
-       sys_close(elf_exec_fileno);
-out_free_fh:
-       if (files)
-               reset_files_struct(current, files);
 out_free_ph:
        kfree(elf_phdata);
        goto out;
@@ -1230,48 +1077,13 @@ out:
        return error;
 }
 
-/*
- * Note that some platforms still use traditional core dumps and not
- * the ELF core dump.  Each platform can select it as appropriate.
- */
-#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
-
+#ifdef CONFIG_ELF_CORE
 /*
  * ELF core dumper
  *
  * Modelled on fs/exec.c:aout_core_dump()
  * Jeremy Fitzhardinge <jeremy@sw.oz.au>
  */
-/*
- * These are the only things you should do on a core-file: use only these
- * functions to write out all the necessary info.
- */
-static int dump_write(struct file *file, const void *addr, int nr)
-{
-       return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
-}
-
-static int dump_seek(struct file *file, loff_t off)
-{
-       if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
-               if (file->f_op->llseek(file, off, SEEK_CUR) < 0)
-                       return 0;
-       } else {
-               char *buf = (char *)get_zeroed_page(GFP_KERNEL);
-               if (!buf)
-                       return 0;
-               while (off > 0) {
-                       unsigned long n = off;
-                       if (n > PAGE_SIZE)
-                               n = PAGE_SIZE;
-                       if (!dump_write(file, buf, n))
-                               return 0;
-                       off -= n;
-               }
-               free_page((unsigned long)buf);
-       }
-       return 1;
-}
 
 /*
  * Decide what to dump of a segment, part, all or none.
@@ -1279,16 +1091,24 @@ static int dump_seek(struct file *file, loff_t off)
 static unsigned long vma_dump_size(struct vm_area_struct *vma,
                                   unsigned long mm_flags)
 {
+#define FILTER(type)   (mm_flags & (1UL << MMF_DUMP_##type))
+
        /* The vma can be set up to tell us the answer directly.  */
        if (vma->vm_flags & VM_ALWAYSDUMP)
                goto whole;
 
+       /* Hugetlb memory check */
+       if (vma->vm_flags & VM_HUGETLB) {
+               if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED))
+                       goto whole;
+               if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
+                       goto whole;
+       }
+
        /* Do not dump I/O mapped devices or special mappings */
        if (vma->vm_flags & (VM_IO | VM_RESERVED))
                return 0;
 
-#define FILTER(type)   (mm_flags & (1UL << MMF_DUMP_##type))
-
        /* By default, dump shared memory if mapped from an anonymous file. */
        if (vma->vm_flags & VM_SHARED) {
                if (vma->vm_file->f_path.dentry->d_inode->i_nlink == 0 ?
@@ -1311,9 +1131,11 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
         * check for an ELF header.  If we find one, dump the first page to
         * aid in determining what was mapped here.
         */
-       if (FILTER(ELF_HEADERS) && vma->vm_file != NULL && vma->vm_pgoff == 0) {
+       if (FILTER(ELF_HEADERS) &&
+           vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ)) {
                u32 __user *header = (u32 __user *) vma->vm_start;
                u32 word;
+               mm_segment_t fs = get_fs();
                /*
                 * Doing it this way gets the constant folded by GCC.
                 */
@@ -1326,7 +1148,15 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
                magic.elfmag[EI_MAG1] = ELFMAG1;
                magic.elfmag[EI_MAG2] = ELFMAG2;
                magic.elfmag[EI_MAG3] = ELFMAG3;
-               if (get_user(word, header) == 0 && word == magic.cmp)
+               /*
+                * Switch to the user "segment" for get_user(),
+                * then put back what elf_core_dump() had in place.
+                */
+               set_fs(USER_DS);
+               if (unlikely(get_user(word, header)))
+                       word = 0;
+               set_fs(fs);
+               if (word == magic.cmp)
                        return PAGE_SIZE;
        }
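
The MMF_DUMP_* bits tested by FILTER() are the ones exposed through /proc/<pid>/coredump_filter (bit meanings per Documentation/filesystems/proc.txt); this diff adds the corresponding hugetlb checks here. A small, illustrative decoder for the current process:

#include <stdio.h>

static const char *const bit_names[] = {
        "anonymous private", "anonymous shared",
        "file-backed private", "file-backed shared",
        "ELF headers", "hugetlb private", "hugetlb shared",
};

int main(void)
{
        FILE *f = fopen("/proc/self/coredump_filter", "r");
        unsigned int mask = 0, i;

        if (!f)
                return 1;
        if (fscanf(f, "%x", &mask) != 1)
                mask = 0;
        fclose(f);

        for (i = 0; i < sizeof(bit_names) / sizeof(bit_names[0]); i++)
                printf("%-20s %s\n", bit_names[i],
                       (mask >> i) & 1 ? "dump" : "skip");
        return 0;
}
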
 
@@ -1388,36 +1218,26 @@ static int writenote(struct memelfnote *men, struct file *file,
 }
 #undef DUMP_WRITE
 
-#define DUMP_WRITE(addr, nr)   \
-       if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
-               goto end_coredump;
-#define DUMP_SEEK(off) \
-       if (!dump_seek(file, (off))) \
-               goto end_coredump;
-
 static void fill_elf_header(struct elfhdr *elf, int segs,
                            u16 machine, u32 flags, u8 osabi)
 {
+       memset(elf, 0, sizeof(*elf));
+
        memcpy(elf->e_ident, ELFMAG, SELFMAG);
        elf->e_ident[EI_CLASS] = ELF_CLASS;
        elf->e_ident[EI_DATA] = ELF_DATA;
        elf->e_ident[EI_VERSION] = EV_CURRENT;
        elf->e_ident[EI_OSABI] = ELF_OSABI;
-       memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
 
        elf->e_type = ET_CORE;
        elf->e_machine = machine;
        elf->e_version = EV_CURRENT;
-       elf->e_entry = 0;
        elf->e_phoff = sizeof(struct elfhdr);
-       elf->e_shoff = 0;
        elf->e_flags = flags;
        elf->e_ehsize = sizeof(struct elfhdr);
        elf->e_phentsize = sizeof(struct elf_phdr);
        elf->e_phnum = segs;
-       elf->e_shentsize = 0;
-       elf->e_shnum = 0;
-       elf->e_shstrndx = 0;
+
        return;
 }
 
@@ -1454,25 +1274,22 @@ static void fill_prstatus(struct elf_prstatus *prstatus,
        prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
        prstatus->pr_sigpend = p->pending.signal.sig[0];
        prstatus->pr_sighold = p->blocked.sig[0];
+       rcu_read_lock();
+       prstatus->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
+       rcu_read_unlock();
        prstatus->pr_pid = task_pid_vnr(p);
-       prstatus->pr_ppid = task_pid_vnr(p->real_parent);
        prstatus->pr_pgrp = task_pgrp_vnr(p);
        prstatus->pr_sid = task_session_vnr(p);
        if (thread_group_leader(p)) {
+               struct task_cputime cputime;
+
                /*
-                * This is the record for the group leader.  Add in the
-                * cumulative times of previous dead threads.  This total
-                * won't include the time of each live thread whose state
-                * is included in the core dump.  The final total reported
-                * to our parent process when it calls wait4 will include
-                * those sums as well as the little bit more time it takes
-                * this and each other thread to finish dying after the
-                * core dump synchronization phase.
+                * This is the record for the group leader.  It shows the
+                * group-wide total, not its individual thread total.
                 */
-               cputime_to_timeval(cputime_add(p->utime, p->signal->utime),
-                                  &prstatus->pr_utime);
-               cputime_to_timeval(cputime_add(p->stime, p->signal->stime),
-                                  &prstatus->pr_stime);
+               thread_group_cputime(p, &cputime);
+               cputime_to_timeval(cputime.utime, &prstatus->pr_utime);
+               cputime_to_timeval(cputime.stime, &prstatus->pr_stime);
        } else {
                cputime_to_timeval(p->utime, &prstatus->pr_utime);
                cputime_to_timeval(p->stime, &prstatus->pr_stime);
@@ -1484,6 +1301,7 @@ static void fill_prstatus(struct elf_prstatus *prstatus,
 static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
                       struct mm_struct *mm)
 {
+       const struct cred *cred;
        unsigned int i, len;
        
        /* first copy the parameters from user space */
@@ -1500,8 +1318,10 @@ static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
                        psinfo->pr_psargs[i] = ' ';
        psinfo->pr_psargs[len] = 0;
 
+       rcu_read_lock();
+       psinfo->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
+       rcu_read_unlock();
        psinfo->pr_pid = task_pid_vnr(p);
-       psinfo->pr_ppid = task_pid_vnr(p->real_parent);
        psinfo->pr_pgrp = task_pgrp_vnr(p);
        psinfo->pr_sid = task_session_vnr(p);
 
@@ -1511,8 +1331,11 @@ static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
        psinfo->pr_zomb = psinfo->pr_sname == 'Z';
        psinfo->pr_nice = task_nice(p);
        psinfo->pr_flag = p->flags;
-       SET_UID(psinfo->pr_uid, p->uid);
-       SET_GID(psinfo->pr_gid, p->gid);
+       rcu_read_lock();
+       cred = __task_cred(p);
+       SET_UID(psinfo->pr_uid, cred->uid);
+       SET_GID(psinfo->pr_gid, cred->gid);
+       rcu_read_unlock();
        strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));
        
        return 0;
@@ -1546,6 +1369,18 @@ struct elf_note_info {
        int thread_notes;
 };
 
+/*
+ * When a regset has a writeback hook, we call it on each thread before
+ * dumping user memory.  On register window machines, this makes sure the
+ * user memory backing the register data is up to date before we read it.
+ */
+static void do_thread_regset_writeback(struct task_struct *task,
+                                      const struct user_regset *regset)
+{
+       if (regset->writeback)
+               regset->writeback(task, regset, 1);
+}
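
For context, a hypothetical architecture regset that would take this path; the writeback signature is assumed from the regset->writeback(task, regset, 1) call above, and only fields used by this file are shown:

/* Illustrative only: flush cached register state (e.g. register windows
 * spilled to the user stack) before the dumper reads user memory. */
static int example_writeback(struct task_struct *target,
                             const struct user_regset *regset,
                             int immediate)
{
        return 0;
}

static const struct user_regset example_view_regsets[] = {
        {
                .core_note_type = NT_PRSTATUS,
                .writeback      = example_writeback,
                /* .n, .size, .align, .get, .set, ... */
        },
};
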
+
 static int fill_thread_core_info(struct elf_thread_core_info *t,
                                 const struct user_regset_view *view,
                                 long signr, size_t *total)
@@ -1567,6 +1402,8 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
                  sizeof(t->prstatus), &t->prstatus);
        *total += notesize(&t->notes[0]);
 
+       do_thread_regset_writeback(t->task, &view->regsets[0]);
+
        /*
         * Each other regset might generate a note too.  For each regset
         * that has no core_note_type or is inactive, we leave t->notes[i]
@@ -1574,6 +1411,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
         */
        for (i = 1; i < view->n; ++i) {
                const struct user_regset *regset = &view->regsets[i];
+               do_thread_regset_writeback(t->task, regset);
                if (regset->core_note_type &&
                    (!regset->active || regset->active(t->task, regset))) {
                        int ret;
@@ -1611,18 +1449,18 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
        const struct user_regset_view *view = task_user_regset_view(dump_task);
        struct elf_thread_core_info *t;
        struct elf_prpsinfo *psinfo;
-       struct task_struct *g, *p;
+       struct core_thread *ct;
        unsigned int i;
 
        info->size = 0;
        info->thread = NULL;
 
        psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
-       fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
-
        if (psinfo == NULL)
                return 0;
 
+       fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
+
        /*
         * Figure out how many notes we're going to need for each thread.
         */
@@ -1650,31 +1488,26 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
        /*
         * Allocate a structure for each thread.
         */
-       rcu_read_lock();
-       do_each_thread(g, p)
-               if (p->mm == dump_task->mm) {
-                       t = kzalloc(offsetof(struct elf_thread_core_info,
-                                            notes[info->thread_notes]),
-                                   GFP_ATOMIC);
-                       if (unlikely(!t)) {
-                               rcu_read_unlock();
-                               return 0;
-                       }
-                       t->task = p;
-                       if (p == dump_task || !info->thread) {
-                               t->next = info->thread;
-                               info->thread = t;
-                       } else {
-                               /*
-                                * Make sure to keep the original task at
-                                * the head of the list.
-                                */
-                               t->next = info->thread->next;
-                               info->thread->next = t;
-                       }
+       for (ct = &dump_task->mm->core_state->dumper; ct; ct = ct->next) {
+               t = kzalloc(offsetof(struct elf_thread_core_info,
+                                    notes[info->thread_notes]),
+                           GFP_KERNEL);
+               if (unlikely(!t))
+                       return 0;
+
+               t->task = ct->task;
+               if (ct->task == dump_task || !info->thread) {
+                       t->next = info->thread;
+                       info->thread = t;
+               } else {
+                       /*
+                        * Make sure to keep the original task at
+                        * the head of the list.
+                        */
+                       t->next = info->thread->next;
+                       info->thread->next = t;
                }
-       while_each_thread(g, p);
-       rcu_read_unlock();
+       }
 
        /*
         * Now fill in each thread's information.
@@ -1815,64 +1648,71 @@ struct elf_note_info {
        int numnote;
 };
 
-static int fill_note_info(struct elfhdr *elf, int phdrs,
-                         struct elf_note_info *info,
-                         long signr, struct pt_regs *regs)
+static int elf_note_info_init(struct elf_note_info *info)
 {
-#define        NUM_NOTES       6
-       struct list_head *t;
-       struct task_struct *g, *p;
-
-       info->notes = NULL;
-       info->prstatus = NULL;
-       info->psinfo = NULL;
-       info->fpu = NULL;
-#ifdef ELF_CORE_COPY_XFPREGS
-       info->xfpu = NULL;
-#endif
+       memset(info, 0, sizeof(*info));
        INIT_LIST_HEAD(&info->thread_list);
 
-       info->notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote),
-                             GFP_KERNEL);
+       /* Allocate space for six ELF notes */
+       info->notes = kmalloc(6 * sizeof(struct memelfnote), GFP_KERNEL);
        if (!info->notes)
                return 0;
        info->psinfo = kmalloc(sizeof(*info->psinfo), GFP_KERNEL);
        if (!info->psinfo)
-               return 0;
+               goto notes_free;
        info->prstatus = kmalloc(sizeof(*info->prstatus), GFP_KERNEL);
        if (!info->prstatus)
-               return 0;
+               goto psinfo_free;
        info->fpu = kmalloc(sizeof(*info->fpu), GFP_KERNEL);
        if (!info->fpu)
-               return 0;
+               goto prstatus_free;
 #ifdef ELF_CORE_COPY_XFPREGS
        info->xfpu = kmalloc(sizeof(*info->xfpu), GFP_KERNEL);
        if (!info->xfpu)
-               return 0;
+               goto fpu_free;
 #endif
+       return 1;
+#ifdef ELF_CORE_COPY_XFPREGS
+ fpu_free:
+       kfree(info->fpu);
+#endif
+ prstatus_free:
+       kfree(info->prstatus);
+ psinfo_free:
+       kfree(info->psinfo);
+ notes_free:
+       kfree(info->notes);
+       return 0;
+}
+
+static int fill_note_info(struct elfhdr *elf, int phdrs,
+                         struct elf_note_info *info,
+                         long signr, struct pt_regs *regs)
+{
+       struct list_head *t;
+
+       if (!elf_note_info_init(info))
+               return 0;
 
-       info->thread_status_size = 0;
        if (signr) {
-               struct elf_thread_status *tmp;
-               rcu_read_lock();
-               do_each_thread(g, p)
-                       if (current->mm == p->mm && current != p) {
-                               tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
-                               if (!tmp) {
-                                       rcu_read_unlock();
-                                       return 0;
-                               }
-                               tmp->thread = p;
-                               list_add(&tmp->list, &info->thread_list);
-                       }
-               while_each_thread(g, p);
-               rcu_read_unlock();
+               struct core_thread *ct;
+               struct elf_thread_status *ets;
+
+               for (ct = current->mm->core_state->dumper.next;
+                                               ct; ct = ct->next) {
+                       ets = kzalloc(sizeof(*ets), GFP_KERNEL);
+                       if (!ets)
+                               return 0;
+
+                       ets->thread = ct->task;
+                       list_add(&ets->list, &info->thread_list);
+               }
+
                list_for_each(t, &info->thread_list) {
-                       struct elf_thread_status *tmp;
                        int sz;
 
-                       tmp = list_entry(t, struct elf_thread_status, list);
-                       sz = elf_dump_thread_status(signr, tmp);
+                       ets = list_entry(t, struct elf_thread_status, list);
+                       sz = elf_dump_thread_status(signr, ets);
                        info->thread_status_size += sz;
                }
        }
@@ -1913,8 +1753,6 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
 #endif
 
        return 1;
-
-#undef NUM_NOTES
 }
 
 static size_t get_note_info_size(struct elf_note_info *info)
@@ -1998,6 +1836,34 @@ static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
        return gate_vma;
 }
 
+static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
+                            elf_addr_t e_shoff, int segs)
+{
+       elf->e_shoff = e_shoff;
+       elf->e_shentsize = sizeof(*shdr4extnum);
+       elf->e_shnum = 1;
+       elf->e_shstrndx = SHN_UNDEF;
+
+       memset(shdr4extnum, 0, sizeof(*shdr4extnum));
+
+       shdr4extnum->sh_type = SHT_NULL;
+       shdr4extnum->sh_size = elf->e_shnum;
+       shdr4extnum->sh_link = elf->e_shstrndx;
+       shdr4extnum->sh_info = segs;
+}
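
When e_phnum is clamped to PN_XNUM (0xffff), consumers recover the real segment count from the sh_info field of this placeholder section header, per the ELF extended-numbering convention. A sketch of the reader side (kernel-style types as used in this file, illustrative only):

static unsigned int core_real_phnum(const struct elfhdr *ehdr,
                                    const struct elf_shdr *shdr0)
{
        if (ehdr->e_phnum != PN_XNUM)
                return ehdr->e_phnum;
        /* shdr0 is the section header written at e_shoff;
         * fill_extnum_info() stored the segment count in sh_info. */
        return shdr0->sh_info;
}
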
+
+static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
+                                    unsigned long mm_flags)
+{
+       struct vm_area_struct *vma;
+       size_t size = 0;
+
+       for (vma = first_vma(current, gate_vma); vma != NULL;
+            vma = next_vma(vma, gate_vma))
+               size += vma_dump_size(vma, mm_flags);
+       return size;
+}
+
 /*
  * Actual dumper
  *
@@ -2005,7 +1871,7 @@ static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
  * and then they are actually written out.  If we run out of core limit
  * we just truncate.
  */
-static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit)
+static int elf_core_dump(struct coredump_params *cprm)
 {
        int has_dumped = 0;
        mm_segment_t fs;
@@ -2014,8 +1880,11 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
        struct vm_area_struct *vma, *gate_vma;
        struct elfhdr *elf = NULL;
        loff_t offset = 0, dataoff, foffset;
-       unsigned long mm_flags;
        struct elf_note_info info;
+       struct elf_phdr *phdr4note = NULL;
+       struct elf_shdr *shdr4extnum = NULL;
+       Elf_Half e_phnum;
+       elf_addr_t e_shoff;
 
        /*
         * We no longer stop all VM operations.
@@ -2032,23 +1901,31 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
        /* alloc memory for large data structures: too large to be on stack */
        elf = kmalloc(sizeof(*elf), GFP_KERNEL);
        if (!elf)
-               goto cleanup;
-       
+               goto out;
+       /*
+        * The number of segs is recorded in the ELF header as a 16-bit value.
+        * Please check the DEFAULT_MAX_MAP_COUNT definition when modifying this.
+        */
        segs = current->mm->map_count;
-#ifdef ELF_CORE_EXTRA_PHDRS
-       segs += ELF_CORE_EXTRA_PHDRS;
-#endif
+       segs += elf_core_extra_phdrs();
 
-       gate_vma = get_gate_vma(current);
+       gate_vma = get_gate_vma(current->mm);
        if (gate_vma != NULL)
                segs++;
 
+       /* for notes section */
+       segs++;
+
+       /* If segs > PN_XNUM(0xffff), then e_phnum overflows. To avoid
+        * this, the kernel supports extended numbering. Have a look at
+        * include/linux/elf.h for further information. */
+       e_phnum = segs > PN_XNUM ? PN_XNUM : segs;
+
        /*
         * Collect all the non-memory information about the process for the
         * notes.  This also sets up the file header.
         */
-       if (!fill_note_info(elf, segs + 1, /* including notes section */
-                           &info, signr, regs))
+       if (!fill_note_info(elf, e_phnum, &info, cprm->signr, cprm->regs))
                goto cleanup;
 
        has_dumped = 1;
@@ -2057,31 +1934,47 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
        fs = get_fs();
        set_fs(KERNEL_DS);
 
-       DUMP_WRITE(elf, sizeof(*elf));
        offset += sizeof(*elf);                         /* Elf header */
-       offset += (segs + 1) * sizeof(struct elf_phdr); /* Program headers */
+       offset += segs * sizeof(struct elf_phdr);       /* Program headers */
        foffset = offset;
 
        /* Write notes phdr entry */
        {
-               struct elf_phdr phdr;
                size_t sz = get_note_info_size(&info);
 
                sz += elf_coredump_extra_notes_size();
 
-               fill_elf_note_phdr(&phdr, sz, offset);
+               phdr4note = kmalloc(sizeof(*phdr4note), GFP_KERNEL);
+               if (!phdr4note)
+                       goto end_coredump;
+
+               fill_elf_note_phdr(phdr4note, sz, offset);
                offset += sz;
-               DUMP_WRITE(&phdr, sizeof(phdr));
        }
 
        dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
 
-       /*
-        * We must use the same mm->flags while dumping core to avoid
-        * inconsistency between the program headers and bodies, otherwise an
-        * unusable core file can be generated.
-        */
-       mm_flags = current->mm->flags;
+       offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
+       offset += elf_core_extra_data_size();
+       e_shoff = offset;
+
+       if (e_phnum == PN_XNUM) {
+               shdr4extnum = kmalloc(sizeof(*shdr4extnum), GFP_KERNEL);
+               if (!shdr4extnum)
+                       goto end_coredump;
+               fill_extnum_info(elf, shdr4extnum, e_shoff, segs);
+       }
+
+       offset = dataoff;
+
+       size += sizeof(*elf);
+       if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
+               goto end_coredump;
+
+       size += sizeof(*phdr4note);
+       if (size > cprm->limit
+           || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
+               goto end_coredump;
 
        /* Write program headers for segments dump */
        for (vma = first_vma(current, gate_vma); vma != NULL;
@@ -2092,7 +1985,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
                phdr.p_offset = offset;
                phdr.p_vaddr = vma->vm_start;
                phdr.p_paddr = 0;
-               phdr.p_filesz = vma_dump_size(vma, mm_flags);
+               phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
                phdr.p_memsz = vma->vm_end - vma->vm_start;
                offset += phdr.p_filesz;
                phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
@@ -2102,76 +1995,76 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
                        phdr.p_flags |= PF_X;
                phdr.p_align = ELF_EXEC_PAGESIZE;
 
-               DUMP_WRITE(&phdr, sizeof(phdr));
+               size += sizeof(phdr);
+               if (size > cprm->limit
+                   || !dump_write(cprm->file, &phdr, sizeof(phdr)))
+                       goto end_coredump;
        }
 
-#ifdef ELF_CORE_WRITE_EXTRA_PHDRS
-       ELF_CORE_WRITE_EXTRA_PHDRS;
-#endif
+       if (!elf_core_write_extra_phdrs(cprm->file, offset, &size, cprm->limit))
+               goto end_coredump;
 
        /* write out the notes section */
-       if (!write_note_info(&info, file, &foffset))
+       if (!write_note_info(&info, cprm->file, &foffset))
                goto end_coredump;
 
-       if (elf_coredump_extra_notes_write(file, &foffset))
+       if (elf_coredump_extra_notes_write(cprm->file, &foffset))
                goto end_coredump;
 
        /* Align to page */
-       DUMP_SEEK(dataoff - foffset);
+       if (!dump_seek(cprm->file, dataoff - foffset))
+               goto end_coredump;
 
        for (vma = first_vma(current, gate_vma); vma != NULL;
                        vma = next_vma(vma, gate_vma)) {
                unsigned long addr;
                unsigned long end;
 
-               end = vma->vm_start + vma_dump_size(vma, mm_flags);
+               end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
 
                for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
                        struct page *page;
-                       struct vm_area_struct *vma;
-
-                       if (get_user_pages(current, current->mm, addr, 1, 0, 1,
-                                               &page, &vma) <= 0) {
-                               DUMP_SEEK(PAGE_SIZE);
-                       } else {
-                               if (page == ZERO_PAGE(0)) {
-                                       if (!dump_seek(file, PAGE_SIZE)) {
-                                               page_cache_release(page);
-                                               goto end_coredump;
-                                       }
-                               } else {
-                                       void *kaddr;
-                                       flush_cache_page(vma, addr,
-                                                        page_to_pfn(page));
-                                       kaddr = kmap(page);
-                                       if ((size += PAGE_SIZE) > limit ||
-                                           !dump_write(file, kaddr,
-                                           PAGE_SIZE)) {
-                                               kunmap(page);
-                                               page_cache_release(page);
-                                               goto end_coredump;
-                                       }
-                                       kunmap(page);
-                               }
+                       int stop;
+
+                       page = get_dump_page(addr);
+                       if (page) {
+                               void *kaddr = kmap(page);
+                               stop = ((size += PAGE_SIZE) > cprm->limit) ||
+                                       !dump_write(cprm->file, kaddr,
+                                                   PAGE_SIZE);
+                               kunmap(page);
                                page_cache_release(page);
-                       }
+                       } else
+                               stop = !dump_seek(cprm->file, PAGE_SIZE);
+                       if (stop)
+                               goto end_coredump;
                }
        }
 
-#ifdef ELF_CORE_WRITE_EXTRA_DATA
-       ELF_CORE_WRITE_EXTRA_DATA;
-#endif
+       if (!elf_core_write_extra_data(cprm->file, &size, cprm->limit))
+               goto end_coredump;
+
+       if (e_phnum == PN_XNUM) {
+               size += sizeof(*shdr4extnum);
+               if (size > cprm->limit
+                   || !dump_write(cprm->file, shdr4extnum,
+                                  sizeof(*shdr4extnum)))
+                       goto end_coredump;
+       }
 
 end_coredump:
        set_fs(fs);
 
 cleanup:
-       kfree(elf);
        free_note_info(&info);
+       kfree(shdr4extnum);
+       kfree(phdr4note);
+       kfree(elf);
+out:
        return has_dumped;
 }
 
-#endif         /* USE_ELF_CORE_DUMP */
+#endif         /* CONFIG_ELF_CORE */
 
 static int __init init_elf_binfmt(void)
 {