Add #includes needed to permit the removal of asm/system.h
/*
 * linux/fs/binfmt_elf.c
 *
 * These are the functions used to load ELF format executables as used
 * on SVr4 machines.  Information on the format may be found in the book
 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
 * Tools".
 *
 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/binfmts.h>
#include <linux/string.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/personality.h>
#include <linux/elfcore.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/security.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/utsname.h>
#include <linux/coredump.h>
#include <asm/uaccess.h>
#include <asm/param.h>
#include <asm/page.h>
#include <asm/exec.h>

static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs);
static int load_elf_library(struct file *);
static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
                                int, int, unsigned long);

/*
 * If we don't support core dumping, then supply a NULL so we
 * don't even try.
 */
#ifdef CONFIG_ELF_CORE
static int elf_core_dump(struct coredump_params *cprm);
#else
#define elf_core_dump   NULL
#endif

#if ELF_EXEC_PAGESIZE > PAGE_SIZE
#define ELF_MIN_ALIGN   ELF_EXEC_PAGESIZE
#else
#define ELF_MIN_ALIGN   PAGE_SIZE
#endif

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS 0
#endif

#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))

static struct linux_binfmt elf_format = {
        .module         = THIS_MODULE,
        .load_binary    = load_elf_binary,
        .load_shlib     = load_elf_library,
        .core_dump      = elf_core_dump,
        .min_coredump   = ELF_EXEC_PAGESIZE,
};

#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)

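/*
 * Extend the brk region with anonymous pages over [start, end) and
 * record the new program break.  Both addresses are rounded up to
 * ELF_MIN_ALIGN first, so an empty region just updates start_brk/brk.
 */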
static int set_brk(unsigned long start, unsigned long end)
{
        start = ELF_PAGEALIGN(start);
        end = ELF_PAGEALIGN(end);
        if (end > start) {
                unsigned long addr;
                down_write(&current->mm->mmap_sem);
                addr = do_brk(start, end - start);
                up_write(&current->mm->mmap_sem);
                if (BAD_ADDR(addr))
                        return addr;
        }
        current->mm->start_brk = current->mm->brk = end;
        return 0;
}

/* We need to explicitly zero any fractional pages
   after the data section (i.e. bss).  They would
   otherwise contain junk from the file that should
   not be in memory.
 */
static int padzero(unsigned long elf_bss)
{
        unsigned long nbyte;

        nbyte = ELF_PAGEOFFSET(elf_bss);
        if (nbyte) {
                nbyte = ELF_MIN_ALIGN - nbyte;
                if (clear_user((void __user *) elf_bss, nbyte))
                        return -EFAULT;
        }
        return 0;
}

/* Let's use some macros to make this stack manipulation a little clearer */
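/*
 * On a downward-growing stack, for example, with sp at 0x1000,
 * STACK_ALLOC(sp, 16) moves sp down to 0xff0 and returns 0xff0 (the
 * base of the newly reserved bytes), STACK_ADD(sp, 2) yields a pointer
 * two elf_addr_t slots below sp, and STACK_ROUND() masks the result
 * down to a 16-byte boundary.
 */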
#ifdef CONFIG_STACK_GROWSUP
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
#define STACK_ROUND(sp, items) \
        ((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ \
        elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; \
        old_sp; })
#else
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
#define STACK_ROUND(sp, items) \
        (((unsigned long) (sp - items)) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
#endif

#ifndef ELF_BASE_PLATFORM
/*
 * AT_BASE_PLATFORM indicates the "real" hardware/microarchitecture.
 * If the arch defines ELF_BASE_PLATFORM (in asm/elf.h), the value
 * will be copied to the user stack in the same manner as AT_PLATFORM.
 */
#define ELF_BASE_PLATFORM NULL
#endif

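/*
 * Build the initial stack image seen by the new program.  For the
 * common downward-growing stack, reading upward from the final stack
 * pointer userspace finds:
 *
 *        argc
 *        argv[0] ... argv[argc - 1], NULL
 *        envp[0] ... envp[envc - 1], NULL
 *        auxv id/value pairs, terminated by AT_NULL
 *
 * with the platform strings, the AT_RANDOM seed bytes and the
 * argument/environment strings themselves stored above that.
 */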
static int
create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
                unsigned long load_addr, unsigned long interp_load_addr)
{
        unsigned long p = bprm->p;
        int argc = bprm->argc;
        int envc = bprm->envc;
        elf_addr_t __user *argv;
        elf_addr_t __user *envp;
        elf_addr_t __user *sp;
        elf_addr_t __user *u_platform;
        elf_addr_t __user *u_base_platform;
        elf_addr_t __user *u_rand_bytes;
        const char *k_platform = ELF_PLATFORM;
        const char *k_base_platform = ELF_BASE_PLATFORM;
        unsigned char k_rand_bytes[16];
        int items;
        elf_addr_t *elf_info;
        int ei_index = 0;
        const struct cred *cred = current_cred();
        struct vm_area_struct *vma;

        /*
         * In some cases (e.g. Hyper-Threading), we want to avoid L1
         * evictions by the processes running on the same package. One
         * thing we can do is to shuffle the initial stack for them.
         */

        p = arch_align_stack(p);

        /*
         * If this architecture has a platform capability string, copy it
         * to userspace.  In some cases (Sparc), this info is impossible
         * for userspace to get any other way, in others (i386) it is
         * merely difficult.
         */
        u_platform = NULL;
        if (k_platform) {
                size_t len = strlen(k_platform) + 1;

                u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
                if (__copy_to_user(u_platform, k_platform, len))
                        return -EFAULT;
        }

        /*
         * If this architecture has a "base" platform capability
         * string, copy it to userspace.
         */
        u_base_platform = NULL;
        if (k_base_platform) {
                size_t len = strlen(k_base_platform) + 1;

                u_base_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
                if (__copy_to_user(u_base_platform, k_base_platform, len))
                        return -EFAULT;
        }

        /*
         * Generate 16 random bytes for userspace PRNG seeding.
         */
        get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
        u_rand_bytes = (elf_addr_t __user *)
                       STACK_ALLOC(p, sizeof(k_rand_bytes));
        if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
                return -EFAULT;

        /* Create the ELF interpreter info */
        elf_info = (elf_addr_t *)current->mm->saved_auxv;
        /* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */
#define NEW_AUX_ENT(id, val) \
        do { \
                elf_info[ei_index++] = id; \
                elf_info[ei_index++] = val; \
        } while (0)

#ifdef ARCH_DLINFO
        /*
         * ARCH_DLINFO must come first so PPC can do its special alignment of
         * AUXV.
         * update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT() in
         * ARCH_DLINFO changes
         */
        ARCH_DLINFO;
#endif
        NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
        NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
        NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
        NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
        NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
        NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
        NEW_AUX_ENT(AT_BASE, interp_load_addr);
        NEW_AUX_ENT(AT_FLAGS, 0);
        NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
        NEW_AUX_ENT(AT_UID, cred->uid);
        NEW_AUX_ENT(AT_EUID, cred->euid);
        NEW_AUX_ENT(AT_GID, cred->gid);
        NEW_AUX_ENT(AT_EGID, cred->egid);
        NEW_AUX_ENT(AT_SECURE, security_bprm_secureexec(bprm));
        NEW_AUX_ENT(AT_RANDOM, (elf_addr_t)(unsigned long)u_rand_bytes);
        NEW_AUX_ENT(AT_EXECFN, bprm->exec);
        if (k_platform) {
                NEW_AUX_ENT(AT_PLATFORM,
                            (elf_addr_t)(unsigned long)u_platform);
        }
        if (k_base_platform) {
                NEW_AUX_ENT(AT_BASE_PLATFORM,
                            (elf_addr_t)(unsigned long)u_base_platform);
        }
        if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
                NEW_AUX_ENT(AT_EXECFD, bprm->interp_data);
        }
#undef NEW_AUX_ENT
        /* AT_NULL is zero; clear the rest too */
        memset(&elf_info[ei_index], 0,
               sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);

        /* And advance past the AT_NULL entry.  */
        ei_index += 2;

        sp = STACK_ADD(p, ei_index);

        items = (argc + 1) + (envc + 1) + 1;
        bprm->p = STACK_ROUND(sp, items);

        /* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
        sp = (elf_addr_t __user *)bprm->p - items - ei_index;
        bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
#else
        sp = (elf_addr_t __user *)bprm->p;
#endif


        /*
         * Grow the stack manually; some architectures have a limit on how
         * far ahead a user-space access may be in order to grow the stack.
         */
        vma = find_extend_vma(current->mm, bprm->p);
        if (!vma)
                return -EFAULT;

        /* Now, let's put argc (and argv, envp if appropriate) on the stack */
        if (__put_user(argc, sp++))
                return -EFAULT;
        argv = sp;
        envp = argv + argc + 1;

        /* Populate argv and envp */
        p = current->mm->arg_end = current->mm->arg_start;
        while (argc-- > 0) {
                size_t len;
                if (__put_user((elf_addr_t)p, argv++))
                        return -EFAULT;
                len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
                if (!len || len > MAX_ARG_STRLEN)
                        return -EINVAL;
                p += len;
        }
        if (__put_user(0, argv))
                return -EFAULT;
        current->mm->arg_end = current->mm->env_start = p;
        while (envc-- > 0) {
                size_t len;
                if (__put_user((elf_addr_t)p, envp++))
                        return -EFAULT;
                len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
                if (!len || len > MAX_ARG_STRLEN)
                        return -EINVAL;
                p += len;
        }
        if (__put_user(0, envp))
                return -EFAULT;
        current->mm->env_end = p;

        /* Put the elf_info on the stack in the right place.  */
        sp = (elf_addr_t __user *)envp + 1;
        if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
                return -EFAULT;
        return 0;
}

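/*
 * Map one PT_LOAD segment.  mmap() works in whole pages, so the
 * requested address, size and file offset are widened to the enclosing
 * ELF_MIN_ALIGN boundaries; this relies on p_vaddr and p_offset being
 * congruent modulo the page size, as required for loadable segments.
 */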
static unsigned long elf_map(struct file *filep, unsigned long addr,
                struct elf_phdr *eppnt, int prot, int type,
                unsigned long total_size)
{
        unsigned long map_addr;
        unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
        unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
        addr = ELF_PAGESTART(addr);
        size = ELF_PAGEALIGN(size);

        /* mmap() will return -EINVAL if given a zero size, but a
         * segment with zero filesize is perfectly valid */
        if (!size)
                return addr;

        down_write(&current->mm->mmap_sem);
        /*
         * total_size is the size of the ELF (interpreter) image.
         * The _first_ mmap needs to know the full size, otherwise
         * randomization might put this image into an overlapping
         * position with the ELF binary image (since size < total_size).
         * So we first map the 'big' image and then unmap the remainder
         * at the end; this unmapping is needed for ELF images with holes.
         */
        if (total_size) {
                total_size = ELF_PAGEALIGN(total_size);
                map_addr = do_mmap(filep, addr, total_size, prot, type, off);
                if (!BAD_ADDR(map_addr))
                        do_munmap(current->mm, map_addr+size, total_size-size);
        } else
                map_addr = do_mmap(filep, addr, size, prot, type, off);

        up_write(&current->mm->mmap_sem);
        return(map_addr);
}

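/*
 * Virtual-address span of all PT_LOAD segments: from the page
 * containing the first segment's p_vaddr to the end of the last
 * segment's memory image.  elf_map() uses this so the first mapping
 * of an interpreter can reserve the whole range at once.
 */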
static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
{
        int i, first_idx = -1, last_idx = -1;

        for (i = 0; i < nr; i++) {
                if (cmds[i].p_type == PT_LOAD) {
                        last_idx = i;
                        if (first_idx == -1)
                                first_idx = i;
                }
        }
        if (first_idx == -1)
                return 0;

        return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
                                ELF_PAGESTART(cmds[first_idx].p_vaddr);
}


/* This is much more generalized than the library routine read function,
   so we keep this separate.  Technically the library read function
   is only provided so that we can read a.out libraries that have
   an ELF header */

static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
                struct file *interpreter, unsigned long *interp_map_addr,
                unsigned long no_base)
{
        struct elf_phdr *elf_phdata;
        struct elf_phdr *eppnt;
        unsigned long load_addr = 0;
        int load_addr_set = 0;
        unsigned long last_bss = 0, elf_bss = 0;
        unsigned long error = ~0UL;
        unsigned long total_size;
        int retval, i, size;

        /* First of all, some simple consistency checks */
        if (interp_elf_ex->e_type != ET_EXEC &&
            interp_elf_ex->e_type != ET_DYN)
                goto out;
        if (!elf_check_arch(interp_elf_ex))
                goto out;
        if (!interpreter->f_op || !interpreter->f_op->mmap)
                goto out;

        /*
         * If the size of this structure has changed, then punt, since
         * we will be doing the wrong thing.
         */
        if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
                goto out;
        if (interp_elf_ex->e_phnum < 1 ||
                interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
                goto out;

        /* Now read in all of the header information */
        size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
        if (size > ELF_MIN_ALIGN)
                goto out;
        elf_phdata = kmalloc(size, GFP_KERNEL);
        if (!elf_phdata)
                goto out;

        retval = kernel_read(interpreter, interp_elf_ex->e_phoff,
                             (char *)elf_phdata, size);
        error = -EIO;
        if (retval != size) {
                if (retval < 0)
                        error = retval;
                goto out_close;
        }

        total_size = total_mapping_size(elf_phdata, interp_elf_ex->e_phnum);
        if (!total_size) {
                error = -EINVAL;
                goto out_close;
        }

        eppnt = elf_phdata;
        for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
                if (eppnt->p_type == PT_LOAD) {
                        int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
                        int elf_prot = 0;
                        unsigned long vaddr = 0;
                        unsigned long k, map_addr;

                        if (eppnt->p_flags & PF_R)
                                elf_prot = PROT_READ;
                        if (eppnt->p_flags & PF_W)
                                elf_prot |= PROT_WRITE;
                        if (eppnt->p_flags & PF_X)
                                elf_prot |= PROT_EXEC;
                        vaddr = eppnt->p_vaddr;
                        if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
                                elf_type |= MAP_FIXED;
                        else if (no_base && interp_elf_ex->e_type == ET_DYN)
                                load_addr = -vaddr;

                        map_addr = elf_map(interpreter, load_addr + vaddr,
                                        eppnt, elf_prot, elf_type, total_size);
                        total_size = 0;
                        if (!*interp_map_addr)
                                *interp_map_addr = map_addr;
                        error = map_addr;
                        if (BAD_ADDR(map_addr))
                                goto out_close;

                        if (!load_addr_set &&
                            interp_elf_ex->e_type == ET_DYN) {
                                load_addr = map_addr - ELF_PAGESTART(vaddr);
                                load_addr_set = 1;
                        }

                        /*
                         * Check to see if the segment's size will overflow the
                         * allowed task size. Note that p_filesz must always be
                         * <= p_memsz so it's only necessary to check p_memsz.
                         */
                        k = load_addr + eppnt->p_vaddr;
                        if (BAD_ADDR(k) ||
                            eppnt->p_filesz > eppnt->p_memsz ||
                            eppnt->p_memsz > TASK_SIZE ||
                            TASK_SIZE - eppnt->p_memsz < k) {
                                error = -ENOMEM;
                                goto out_close;
                        }

                        /*
                         * Find the end of the file mapping for this phdr, and
                         * keep track of the largest address we see for this.
                         */
                        k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
                        if (k > elf_bss)
                                elf_bss = k;

                        /*
                         * Do the same thing for the memory mapping - between
                         * elf_bss and last_bss is the bss section.
                         */
                        k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
                        if (k > last_bss)
                                last_bss = k;
                }
        }

        if (last_bss > elf_bss) {
                /*
                 * Now fill out the bss section.  First pad the last page up
                 * to the page boundary, and then perform a mmap to make sure
                 * that there are zero-mapped pages up to and including the
                 * last bss page.
                 */
                if (padzero(elf_bss)) {
                        error = -EFAULT;
                        goto out_close;
                }

                /* What we have mapped so far */
                elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);

                /* Map the last of the bss segment */
                down_write(&current->mm->mmap_sem);
                error = do_brk(elf_bss, last_bss - elf_bss);
                up_write(&current->mm->mmap_sem);
                if (BAD_ADDR(error))
                        goto out_close;
        }

        error = load_addr;

out_close:
        kfree(elf_phdata);
out:
        return error;
}

/*
 * These are the functions used to load ELF style executables and shared
 * libraries.  There is no binary dependent code anywhere else.
 */

#define INTERPRETER_NONE 0
#define INTERPRETER_ELF 2

#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))     /* 8MB of VA */
#endif

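/*
 * Choose a randomized stack top.  The random offset is shifted into
 * page units so the result stays page aligned, and it is subtracted
 * from (or, for upward-growing stacks, added to) the requested top so
 * the stack never crosses the original limit.
 */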
static unsigned long randomize_stack_top(unsigned long stack_top)
{
        unsigned int random_variable = 0;

        if ((current->flags & PF_RANDOMIZE) &&
                !(current->personality & ADDR_NO_RANDOMIZE)) {
                random_variable = get_random_int() & STACK_RND_MASK;
                random_variable <<= PAGE_SHIFT;
        }
#ifdef CONFIG_STACK_GROWSUP
        return PAGE_ALIGN(stack_top) + random_variable;
#else
        return PAGE_ALIGN(stack_top) - random_variable;
#endif
}

static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
{
        struct file *interpreter = NULL; /* to shut gcc up */
        unsigned long load_addr = 0, load_bias = 0;
        int load_addr_set = 0;
        char * elf_interpreter = NULL;
        unsigned long error;
        struct elf_phdr *elf_ppnt, *elf_phdata;
        unsigned long elf_bss, elf_brk;
        int retval, i;
        unsigned int size;
        unsigned long elf_entry;
        unsigned long interp_load_addr = 0;
        unsigned long start_code, end_code, start_data, end_data;
        unsigned long reloc_func_desc __maybe_unused = 0;
        int executable_stack = EXSTACK_DEFAULT;
        unsigned long def_flags = 0;
        struct {
                struct elfhdr elf_ex;
                struct elfhdr interp_elf_ex;
        } *loc;

        loc = kmalloc(sizeof(*loc), GFP_KERNEL);
        if (!loc) {
                retval = -ENOMEM;
                goto out_ret;
        }

        /* Get the exec-header */
        loc->elf_ex = *((struct elfhdr *)bprm->buf);

        retval = -ENOEXEC;
        /* First of all, some simple consistency checks */
        if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
                goto out;

        if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
                goto out;
        if (!elf_check_arch(&loc->elf_ex))
                goto out;
        if (!bprm->file->f_op || !bprm->file->f_op->mmap)
                goto out;

        /* Now read in all of the header information */
        if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr))
                goto out;
        if (loc->elf_ex.e_phnum < 1 ||
                loc->elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
                goto out;
        size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr);
        retval = -ENOMEM;
        elf_phdata = kmalloc(size, GFP_KERNEL);
        if (!elf_phdata)
                goto out;

        retval = kernel_read(bprm->file, loc->elf_ex.e_phoff,
                             (char *)elf_phdata, size);
        if (retval != size) {
                if (retval >= 0)
                        retval = -EIO;
                goto out_free_ph;
        }

        elf_ppnt = elf_phdata;
        elf_bss = 0;
        elf_brk = 0;

        start_code = ~0UL;
        end_code = 0;
        start_data = 0;
        end_data = 0;

        for (i = 0; i < loc->elf_ex.e_phnum; i++) {
                if (elf_ppnt->p_type == PT_INTERP) {
                        /* This is the program interpreter used for
                         * shared libraries - for now assume that this
                         * is an a.out format binary
                         */
                        retval = -ENOEXEC;
                        if (elf_ppnt->p_filesz > PATH_MAX ||
                            elf_ppnt->p_filesz < 2)
                                goto out_free_ph;

                        retval = -ENOMEM;
                        elf_interpreter = kmalloc(elf_ppnt->p_filesz,
                                                  GFP_KERNEL);
                        if (!elf_interpreter)
                                goto out_free_ph;

                        retval = kernel_read(bprm->file, elf_ppnt->p_offset,
                                             elf_interpreter,
                                             elf_ppnt->p_filesz);
                        if (retval != elf_ppnt->p_filesz) {
                                if (retval >= 0)
                                        retval = -EIO;
                                goto out_free_interp;
                        }
                        /* make sure path is NULL terminated */
                        retval = -ENOEXEC;
                        if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
                                goto out_free_interp;

                        interpreter = open_exec(elf_interpreter);
                        retval = PTR_ERR(interpreter);
                        if (IS_ERR(interpreter))
                                goto out_free_interp;

                        /*
                         * If the binary is not readable then enforce
                         * mm->dumpable = 0 regardless of the interpreter's
                         * permissions.
                         */
                        would_dump(bprm, interpreter);

                        retval = kernel_read(interpreter, 0, bprm->buf,
                                             BINPRM_BUF_SIZE);
                        if (retval != BINPRM_BUF_SIZE) {
                                if (retval >= 0)
                                        retval = -EIO;
                                goto out_free_dentry;
                        }

                        /* Get the exec headers */
                        loc->interp_elf_ex = *((struct elfhdr *)bprm->buf);
                        break;
                }
                elf_ppnt++;
        }

        elf_ppnt = elf_phdata;
        for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
                if (elf_ppnt->p_type == PT_GNU_STACK) {
                        if (elf_ppnt->p_flags & PF_X)
                                executable_stack = EXSTACK_ENABLE_X;
                        else
                                executable_stack = EXSTACK_DISABLE_X;
                        break;
                }

        /* Some simple consistency checks for the interpreter */
        if (elf_interpreter) {
                retval = -ELIBBAD;
                /* Not an ELF interpreter */
                if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
                        goto out_free_dentry;
                /* Verify the interpreter has a valid arch */
                if (!elf_check_arch(&loc->interp_elf_ex))
                        goto out_free_dentry;
        }

        /* Flush all traces of the currently running executable */
        retval = flush_old_exec(bprm);
        if (retval)
                goto out_free_dentry;

        /* OK, This is the point of no return */
        current->mm->def_flags = def_flags;

        /* Do this immediately, since STACK_TOP as used in setup_arg_pages
           may depend on the personality.  */
        SET_PERSONALITY(loc->elf_ex);
        if (elf_read_implies_exec(loc->elf_ex, executable_stack))
                current->personality |= READ_IMPLIES_EXEC;

        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                current->flags |= PF_RANDOMIZE;

        setup_new_exec(bprm);

        /* Do this so that we can load the interpreter, if need be.  We will
           change some of these later */
        current->mm->free_area_cache = current->mm->mmap_base;
        current->mm->cached_hole_size = 0;
        retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
                                 executable_stack);
        if (retval < 0) {
                send_sig(SIGKILL, current, 0);
                goto out_free_dentry;
        }

        current->mm->start_stack = bprm->p;

        /* Now we do a little grungy work by mmapping the ELF image into
           the correct location in memory. */
        for(i = 0, elf_ppnt = elf_phdata;
            i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
                int elf_prot = 0, elf_flags;
                unsigned long k, vaddr;

                if (elf_ppnt->p_type != PT_LOAD)
                        continue;

                if (unlikely (elf_brk > elf_bss)) {
                        unsigned long nbyte;

                        /* There was a PT_LOAD segment with p_memsz > p_filesz
                           before this one. Map anonymous pages, if needed,
                           and clear the area.  */
                        retval = set_brk(elf_bss + load_bias,
                                         elf_brk + load_bias);
                        if (retval) {
                                send_sig(SIGKILL, current, 0);
                                goto out_free_dentry;
                        }
                        nbyte = ELF_PAGEOFFSET(elf_bss);
                        if (nbyte) {
                                nbyte = ELF_MIN_ALIGN - nbyte;
                                if (nbyte > elf_brk - elf_bss)
                                        nbyte = elf_brk - elf_bss;
                                if (clear_user((void __user *)elf_bss +
                                                        load_bias, nbyte)) {
                                        /*
                                         * This bss-zeroing can fail if the ELF
                                         * file specifies odd protections. So
                                         * we don't check the return value
                                         */
                                }
                        }
                }

                if (elf_ppnt->p_flags & PF_R)
                        elf_prot |= PROT_READ;
                if (elf_ppnt->p_flags & PF_W)
                        elf_prot |= PROT_WRITE;
                if (elf_ppnt->p_flags & PF_X)
                        elf_prot |= PROT_EXEC;

                elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;

                vaddr = elf_ppnt->p_vaddr;
                if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
                        elf_flags |= MAP_FIXED;
                } else if (loc->elf_ex.e_type == ET_DYN) {
                        /* Try and get dynamic programs out of the way of the
                         * default mmap base, as well as whatever program they
                         * might try to exec.  This is because the brk will
                         * follow the loader, and is not movable.  */
#ifdef CONFIG_ARCH_BINFMT_ELF_RANDOMIZE_PIE
                        /* Memory randomization might have been switched off
                         * at runtime via sysctl.  If so, retain the original
                         * non-zero load_bias value in order to establish
                         * proper non-randomized mappings.
                         */
                        if (current->flags & PF_RANDOMIZE)
                                load_bias = 0;
                        else
                                load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
#else
                        load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
#endif
                }

                error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
                                elf_prot, elf_flags, 0);
                if (BAD_ADDR(error)) {
                        send_sig(SIGKILL, current, 0);
                        retval = IS_ERR((void *)error) ?
                                PTR_ERR((void*)error) : -EINVAL;
                        goto out_free_dentry;
                }

                if (!load_addr_set) {
                        load_addr_set = 1;
                        load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
                        if (loc->elf_ex.e_type == ET_DYN) {
                                load_bias += error -
                                             ELF_PAGESTART(load_bias + vaddr);
                                load_addr += load_bias;
                                reloc_func_desc = load_bias;
                        }
                }
                k = elf_ppnt->p_vaddr;
                if (k < start_code)
                        start_code = k;
                if (start_data < k)
                        start_data = k;

                /*
                 * Check to see if the segment's size will overflow the
                 * allowed task size. Note that p_filesz must always be
                 * <= p_memsz so it is only necessary to check p_memsz.
                 */
                if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
                    elf_ppnt->p_memsz > TASK_SIZE ||
                    TASK_SIZE - elf_ppnt->p_memsz < k) {
                        /* set_brk can never work. Avoid overflows. */
                        send_sig(SIGKILL, current, 0);
                        retval = -EINVAL;
                        goto out_free_dentry;
                }

                k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;

                if (k > elf_bss)
                        elf_bss = k;
                if ((elf_ppnt->p_flags & PF_X) && end_code < k)
                        end_code = k;
                if (end_data < k)
                        end_data = k;
                k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
                if (k > elf_brk)
                        elf_brk = k;
        }

        loc->elf_ex.e_entry += load_bias;
        elf_bss += load_bias;
        elf_brk += load_bias;
        start_code += load_bias;
        end_code += load_bias;
        start_data += load_bias;
        end_data += load_bias;

        /* Calling set_brk effectively mmaps the pages that we need
         * for the bss and break sections.  We must do this before
         * mapping in the interpreter, to make sure it doesn't wind
         * up getting placed where the bss needs to go.
         */
        retval = set_brk(elf_bss, elf_brk);
        if (retval) {
                send_sig(SIGKILL, current, 0);
                goto out_free_dentry;
        }
        if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
                send_sig(SIGSEGV, current, 0);
                retval = -EFAULT; /* Nobody gets to see this, but.. */
                goto out_free_dentry;
        }

        if (elf_interpreter) {
                unsigned long uninitialized_var(interp_map_addr);

                elf_entry = load_elf_interp(&loc->interp_elf_ex,
                                            interpreter,
                                            &interp_map_addr,
                                            load_bias);
                if (!IS_ERR((void *)elf_entry)) {
                        /*
                         * load_elf_interp() returns relocation
                         * adjustment
                         */
                        interp_load_addr = elf_entry;
                        elf_entry += loc->interp_elf_ex.e_entry;
                }
                if (BAD_ADDR(elf_entry)) {
                        force_sig(SIGSEGV, current);
                        retval = IS_ERR((void *)elf_entry) ?
                                        (int)elf_entry : -EINVAL;
                        goto out_free_dentry;
                }
                reloc_func_desc = interp_load_addr;

                allow_write_access(interpreter);
                fput(interpreter);
                kfree(elf_interpreter);
        } else {
                elf_entry = loc->elf_ex.e_entry;
                if (BAD_ADDR(elf_entry)) {
                        force_sig(SIGSEGV, current);
                        retval = -EINVAL;
                        goto out_free_dentry;
                }
        }

        kfree(elf_phdata);

        set_binfmt(&elf_format);

#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
        retval = arch_setup_additional_pages(bprm, !!elf_interpreter);
        if (retval < 0) {
                send_sig(SIGKILL, current, 0);
                goto out;
        }
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */

        install_exec_creds(bprm);
        retval = create_elf_tables(bprm, &loc->elf_ex,
                          load_addr, interp_load_addr);
        if (retval < 0) {
                send_sig(SIGKILL, current, 0);
                goto out;
        }
        /* N.B. passed_fileno might not be initialized? */
        current->mm->end_code = end_code;
        current->mm->start_code = start_code;
        current->mm->start_data = start_data;
        current->mm->end_data = end_data;
        current->mm->start_stack = bprm->p;

#ifdef arch_randomize_brk
        if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
                current->mm->brk = current->mm->start_brk =
                        arch_randomize_brk(current->mm);
#ifdef CONFIG_COMPAT_BRK
                current->brk_randomized = 1;
#endif
        }
#endif

        if (current->personality & MMAP_PAGE_ZERO) {
                /* Why this, you ask???  Well SVr4 maps page 0 as read-only,
                   and some applications "depend" upon this behavior.
                   Since we do not have the power to recompile these, we
                   emulate the SVr4 behavior. Sigh. */
                down_write(&current->mm->mmap_sem);
                error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
                                MAP_FIXED | MAP_PRIVATE, 0);
                up_write(&current->mm->mmap_sem);
        }

#ifdef ELF_PLAT_INIT
        /*
         * The ABI may specify that certain registers be set up in special
         * ways (on i386 %edx is the address of a DT_FINI function, for
         * example).  In addition, it may also specify (e.g., PowerPC64 ELF)
         * that the e_entry field is the address of the function descriptor
         * for the startup routine, rather than the address of the startup
         * routine itself.  This macro performs whatever initialization to
         * the regs structure is required as well as any relocations to the
         * function descriptor entries when executing dynamically linked apps.
         */
        ELF_PLAT_INIT(regs, reloc_func_desc);
#endif

        start_thread(regs, elf_entry, bprm->p);
        retval = 0;
out:
        kfree(loc);
out_ret:
        return retval;

        /* error cleanup */
out_free_dentry:
        allow_write_access(interpreter);
        if (interpreter)
                fput(interpreter);
out_free_interp:
        kfree(elf_interpreter);
out_free_ph:
        kfree(elf_phdata);
        goto out;
}

/* This is really simpleminded and specialized - we are loading an
   a.out library that is given an ELF header. */
static int load_elf_library(struct file *file)
{
        struct elf_phdr *elf_phdata;
        struct elf_phdr *eppnt;
        unsigned long elf_bss, bss, len;
        int retval, error, i, j;
        struct elfhdr elf_ex;

        error = -ENOEXEC;
        retval = kernel_read(file, 0, (char *)&elf_ex, sizeof(elf_ex));
        if (retval != sizeof(elf_ex))
                goto out;

        if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
                goto out;

        /* First of all, some simple consistency checks */
        if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
            !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
                goto out;

        /* Now read in all of the header information */

        j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
        /* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */

        error = -ENOMEM;
        elf_phdata = kmalloc(j, GFP_KERNEL);
        if (!elf_phdata)
                goto out;

        eppnt = elf_phdata;
        error = -ENOEXEC;
        retval = kernel_read(file, elf_ex.e_phoff, (char *)eppnt, j);
        if (retval != j)
                goto out_free_ph;

        for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
                if ((eppnt + i)->p_type == PT_LOAD)
                        j++;
        if (j != 1)
                goto out_free_ph;

        while (eppnt->p_type != PT_LOAD)
                eppnt++;

        /* Now use mmap to map the library into memory. */
        down_write(&current->mm->mmap_sem);
        error = do_mmap(file,
                        ELF_PAGESTART(eppnt->p_vaddr),
                        (eppnt->p_filesz +
                         ELF_PAGEOFFSET(eppnt->p_vaddr)),
                        PROT_READ | PROT_WRITE | PROT_EXEC,
                        MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
                        (eppnt->p_offset -
                         ELF_PAGEOFFSET(eppnt->p_vaddr)));
        up_write(&current->mm->mmap_sem);
        if (error != ELF_PAGESTART(eppnt->p_vaddr))
                goto out_free_ph;

        elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
        if (padzero(elf_bss)) {
                error = -EFAULT;
                goto out_free_ph;
        }

        len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr +
                            ELF_MIN_ALIGN - 1);
        bss = eppnt->p_memsz + eppnt->p_vaddr;
        if (bss > len) {
                down_write(&current->mm->mmap_sem);
                do_brk(len, bss - len);
                up_write(&current->mm->mmap_sem);
        }
        error = 0;

out_free_ph:
        kfree(elf_phdata);
out:
        return error;
}

#ifdef CONFIG_ELF_CORE
/*
 * ELF core dumper
 *
 * Modelled on fs/exec.c:aout_core_dump()
 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
 */

/*
 * Decide what to dump of a segment, part, all or none.
 */
static unsigned long vma_dump_size(struct vm_area_struct *vma,
                                   unsigned long mm_flags)
{
#define FILTER(type)    (mm_flags & (1UL << MMF_DUMP_##type))

        /* The vma can be set up to tell us the answer directly.  */
        if (vma->vm_flags & VM_ALWAYSDUMP)
                goto whole;

        /* Hugetlb memory check */
        if (vma->vm_flags & VM_HUGETLB) {
                if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED))
                        goto whole;
                if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
                        goto whole;
        }

        /* Do not dump I/O mapped devices or special mappings */
        if (vma->vm_flags & (VM_IO | VM_RESERVED))
                return 0;

        /* By default, dump shared memory if mapped from an anonymous file. */
        if (vma->vm_flags & VM_SHARED) {
                if (vma->vm_file->f_path.dentry->d_inode->i_nlink == 0 ?
                    FILTER(ANON_SHARED) : FILTER(MAPPED_SHARED))
                        goto whole;
                return 0;
        }

        /* Dump segments that have been written to.  */
        if (vma->anon_vma && FILTER(ANON_PRIVATE))
                goto whole;
        if (vma->vm_file == NULL)
                return 0;

        if (FILTER(MAPPED_PRIVATE))
                goto whole;

        /*
         * If this looks like the beginning of a DSO or executable mapping,
         * check for an ELF header.  If we find one, dump the first page to
         * aid in determining what was mapped here.
         */
        if (FILTER(ELF_HEADERS) &&
            vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ)) {
                u32 __user *header = (u32 __user *) vma->vm_start;
                u32 word;
                mm_segment_t fs = get_fs();
                /*
                 * Doing it this way gets the constant folded by GCC.
                 */
                union {
                        u32 cmp;
                        char elfmag[SELFMAG];
                } magic;
                BUILD_BUG_ON(SELFMAG != sizeof word);
                magic.elfmag[EI_MAG0] = ELFMAG0;
                magic.elfmag[EI_MAG1] = ELFMAG1;
                magic.elfmag[EI_MAG2] = ELFMAG2;
                magic.elfmag[EI_MAG3] = ELFMAG3;
                /*
                 * Switch to the user "segment" for get_user(),
                 * then put back what elf_core_dump() had in place.
                 */
                set_fs(USER_DS);
                if (unlikely(get_user(word, header)))
                        word = 0;
                set_fs(fs);
                if (word == magic.cmp)
                        return PAGE_SIZE;
        }

#undef  FILTER

        return 0;

whole:
        return vma->vm_end - vma->vm_start;
}

/* An ELF note in memory */
struct memelfnote
{
        const char *name;
        int type;
        unsigned int datasz;
        void *data;
};

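/*
 * On-file size of an ELF note: the fixed note header plus the name
 * and descriptor payloads, each padded out to a 4-byte boundary.
 */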
static int notesize(struct memelfnote *en)
{
        int sz;

        sz = sizeof(struct elf_note);
        sz += roundup(strlen(en->name) + 1, 4);
        sz += roundup(en->datasz, 4);

        return sz;
}

#define DUMP_WRITE(addr, nr, foffset)   \
        do { if (!dump_write(file, (addr), (nr))) return 0; *foffset += (nr); } while(0)

static int alignfile(struct file *file, loff_t *foffset)
{
        static const char buf[4] = { 0, };
        DUMP_WRITE(buf, roundup(*foffset, 4) - *foffset, foffset);
        return 1;
}

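/*
 * Emit one note in the layout notesize() accounts for: header, name,
 * pad, descriptor, pad.  Returns 0 if any dump_write() fails.
 */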
static int writenote(struct memelfnote *men, struct file *file,
                        loff_t *foffset)
{
        struct elf_note en;
        en.n_namesz = strlen(men->name) + 1;
        en.n_descsz = men->datasz;
        en.n_type = men->type;

        DUMP_WRITE(&en, sizeof(en), foffset);
        DUMP_WRITE(men->name, en.n_namesz, foffset);
        if (!alignfile(file, foffset))
                return 0;
        DUMP_WRITE(men->data, men->datasz, foffset);
        if (!alignfile(file, foffset))
                return 0;

        return 1;
}
#undef DUMP_WRITE

static void fill_elf_header(struct elfhdr *elf, int segs,
                            u16 machine, u32 flags, u8 osabi)
{
        memset(elf, 0, sizeof(*elf));

        memcpy(elf->e_ident, ELFMAG, SELFMAG);
        elf->e_ident[EI_CLASS] = ELF_CLASS;
        elf->e_ident[EI_DATA] = ELF_DATA;
        elf->e_ident[EI_VERSION] = EV_CURRENT;
        elf->e_ident[EI_OSABI] = ELF_OSABI;

        elf->e_type = ET_CORE;
        elf->e_machine = machine;
        elf->e_version = EV_CURRENT;
        elf->e_phoff = sizeof(struct elfhdr);
        elf->e_flags = flags;
        elf->e_ehsize = sizeof(struct elfhdr);
        elf->e_phentsize = sizeof(struct elf_phdr);
        elf->e_phnum = segs;

        return;
}

static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
{
        phdr->p_type = PT_NOTE;
        phdr->p_offset = offset;
        phdr->p_vaddr = 0;
        phdr->p_paddr = 0;
        phdr->p_filesz = sz;
        phdr->p_memsz = 0;
        phdr->p_flags = 0;
        phdr->p_align = 0;
        return;
}

static void fill_note(struct memelfnote *note, const char *name, int type,
                unsigned int sz, void *data)
{
        note->name = name;
        note->type = type;
        note->datasz = sz;
        note->data = data;
        return;
}

/*
 * fill up all the fields in prstatus from the given task struct, except
 * registers which need to be filled up separately.
 */
static void fill_prstatus(struct elf_prstatus *prstatus,
                struct task_struct *p, long signr)
{
        prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
        prstatus->pr_sigpend = p->pending.signal.sig[0];
        prstatus->pr_sighold = p->blocked.sig[0];
        rcu_read_lock();
        prstatus->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
        rcu_read_unlock();
        prstatus->pr_pid = task_pid_vnr(p);
        prstatus->pr_pgrp = task_pgrp_vnr(p);
        prstatus->pr_sid = task_session_vnr(p);
        if (thread_group_leader(p)) {
                struct task_cputime cputime;

                /*
                 * This is the record for the group leader.  It shows the
                 * group-wide total, not its individual thread total.
                 */
                thread_group_cputime(p, &cputime);
                cputime_to_timeval(cputime.utime, &prstatus->pr_utime);
                cputime_to_timeval(cputime.stime, &prstatus->pr_stime);
        } else {
                cputime_to_timeval(p->utime, &prstatus->pr_utime);
                cputime_to_timeval(p->stime, &prstatus->pr_stime);
        }
        cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
        cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
}

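/*
 * Fill the NT_PRPSINFO note: the command line is copied back from the
 * dumping process's own user memory (arg_start..arg_end) with NUL
 * separators rewritten as spaces; the IDs, run state and credentials
 * come from the task itself.
 */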
static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
                       struct mm_struct *mm)
{
        const struct cred *cred;
        unsigned int i, len;

        /* first copy the parameters from user space */
        memset(psinfo, 0, sizeof(struct elf_prpsinfo));

        len = mm->arg_end - mm->arg_start;
        if (len >= ELF_PRARGSZ)
                len = ELF_PRARGSZ-1;
        if (copy_from_user(&psinfo->pr_psargs,
                           (const char __user *)mm->arg_start, len))
                return -EFAULT;
        for(i = 0; i < len; i++)
                if (psinfo->pr_psargs[i] == 0)
                        psinfo->pr_psargs[i] = ' ';
        psinfo->pr_psargs[len] = 0;

        rcu_read_lock();
        psinfo->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
        rcu_read_unlock();
        psinfo->pr_pid = task_pid_vnr(p);
        psinfo->pr_pgrp = task_pgrp_vnr(p);
        psinfo->pr_sid = task_session_vnr(p);

        i = p->state ? ffz(~p->state) + 1 : 0;
        psinfo->pr_state = i;
        psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
        psinfo->pr_zomb = psinfo->pr_sname == 'Z';
        psinfo->pr_nice = task_nice(p);
        psinfo->pr_flag = p->flags;
        rcu_read_lock();
        cred = __task_cred(p);
        SET_UID(psinfo->pr_uid, cred->uid);
        SET_GID(psinfo->pr_gid, cred->gid);
        rcu_read_unlock();
        strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));

        return 0;
}

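/*
 * The auxv note is mm->saved_auxv taken verbatim; walk it two words at
 * a time so the terminating AT_NULL pair is counted into the note.
 */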
static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
{
        elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
        int i = 0;
        do
                i += 2;
        while (auxv[i - 2] != AT_NULL);
        fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
}

#ifdef CORE_DUMP_USE_REGSET
#include <linux/regset.h>

struct elf_thread_core_info {
        struct elf_thread_core_info *next;
        struct task_struct *task;
        struct elf_prstatus prstatus;
        struct memelfnote notes[0];
};

struct elf_note_info {
        struct elf_thread_core_info *thread;
        struct memelfnote psinfo;
        struct memelfnote auxv;
        size_t size;
        int thread_notes;
};

/*
 * When a regset has a writeback hook, we call it on each thread before
 * dumping user memory.  On register window machines, this makes sure the
 * user memory backing the register data is up to date before we read it.
 */
static void do_thread_regset_writeback(struct task_struct *task,
                                       const struct user_regset *regset)
{
        if (regset->writeback)
                regset->writeback(task, regset, 1);
}

static int fill_thread_core_info(struct elf_thread_core_info *t,
                                 const struct user_regset_view *view,
                                 long signr, size_t *total)
{
        unsigned int i;

        /*
         * NT_PRSTATUS is the one special case, because the regset data
         * goes into the pr_reg field inside the note contents, rather
         * than being the whole note contents.  We fill the rest in here.
         * We assume that regset 0 is NT_PRSTATUS.
         */
1404         fill_prstatus(&t->prstatus, t->task, signr);
1405         (void) view->regsets[0].get(t->task, &view->regsets[0],
1406                                     0, sizeof(t->prstatus.pr_reg),
1407                                     &t->prstatus.pr_reg, NULL);
1408
1409         fill_note(&t->notes[0], "CORE", NT_PRSTATUS,
1410                   sizeof(t->prstatus), &t->prstatus);
1411         *total += notesize(&t->notes[0]);
1412
1413         do_thread_regset_writeback(t->task, &view->regsets[0]);
1414
1415         /*
1416          * Each other regset might generate a note too.  For each regset
1417          * that has no core_note_type or is inactive, we leave t->notes[i]
1418          * all zero and we'll know to skip writing it later.
1419          */
1420         for (i = 1; i < view->n; ++i) {
1421                 const struct user_regset *regset = &view->regsets[i];
1422                 do_thread_regset_writeback(t->task, regset);
1423                 if (regset->core_note_type && regset->get &&
1424                     (!regset->active || regset->active(t->task, regset))) {
1425                         int ret;
1426                         size_t size = regset->n * regset->size;
1427                         void *data = kmalloc(size, GFP_KERNEL);
1428                         if (unlikely(!data))
1429                                 return 0;
1430                         ret = regset->get(t->task, regset,
1431                                           0, size, data, NULL);
1432                         if (unlikely(ret))
1433                                 kfree(data);
1434                         else {
1435                                 if (regset->core_note_type != NT_PRFPREG)
1436                                         fill_note(&t->notes[i], "LINUX",
1437                                                   regset->core_note_type,
1438                                                   size, data);
1439                                 else {
1440                                         t->prstatus.pr_fpvalid = 1;
1441                                         fill_note(&t->notes[i], "CORE",
1442                                                   NT_PRFPREG, size, data);
1443                                 }
1444                                 *total += notesize(&t->notes[i]);
1445                         }
1446                 }
1447         }
1448
1449         return 1;
1450 }
1451
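/*
 * Collect all the non-memory information for the dump in regset form:
 * the ELF header, the process-wide psinfo and auxv notes, and a set of
 * register notes for each thread, with the dumping task kept first.
 */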
static int fill_note_info(struct elfhdr *elf, int phdrs,
			  struct elf_note_info *info,
			  long signr, struct pt_regs *regs)
{
	struct task_struct *dump_task = current;
	const struct user_regset_view *view = task_user_regset_view(dump_task);
	struct elf_thread_core_info *t;
	struct elf_prpsinfo *psinfo;
	struct core_thread *ct;
	unsigned int i;

	info->size = 0;
	info->thread = NULL;

	psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
	if (psinfo == NULL)
		return 0;

	fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);

	/*
	 * Figure out how many notes we're going to need for each thread.
	 */
	info->thread_notes = 0;
	for (i = 0; i < view->n; ++i)
		if (view->regsets[i].core_note_type != 0)
			++info->thread_notes;

	/*
	 * Sanity check.  We rely on regset 0 being NT_PRSTATUS,
	 * since it is our one special case.
	 */
	if (unlikely(info->thread_notes == 0) ||
	    unlikely(view->regsets[0].core_note_type != NT_PRSTATUS)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * Initialize the ELF file header.
	 */
	fill_elf_header(elf, phdrs,
			view->e_machine, view->e_flags, view->ei_osabi);

	/*
	 * Allocate a structure for each thread.
	 */
	for (ct = &dump_task->mm->core_state->dumper; ct; ct = ct->next) {
		t = kzalloc(offsetof(struct elf_thread_core_info,
				     notes[info->thread_notes]),
			    GFP_KERNEL);
		if (unlikely(!t))
			return 0;

		t->task = ct->task;
		if (ct->task == dump_task || !info->thread) {
			t->next = info->thread;
			info->thread = t;
		} else {
			/*
			 * Make sure to keep the original task at
			 * the head of the list.
			 */
			t->next = info->thread->next;
			info->thread->next = t;
		}
	}

	/*
	 * Now fill in each thread's information.
	 */
	for (t = info->thread; t != NULL; t = t->next)
		if (!fill_thread_core_info(t, view, signr, &info->size))
			return 0;

	/*
	 * Fill in the two process-wide notes.
	 */
	fill_psinfo(psinfo, dump_task->group_leader, dump_task->mm);
	info->size += notesize(&info->psinfo);

	fill_auxv_note(&info->auxv, current->mm);
	info->size += notesize(&info->auxv);

	return 1;
}

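/* The note segment size was accumulated as the notes were filled in. */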
static size_t get_note_info_size(struct elf_note_info *info)
{
	return info->size;
}

/*
 * Write all the notes for each thread.  When writing the first thread, the
 * process-wide notes are interleaved after the first thread-specific note.
 */
static int write_note_info(struct elf_note_info *info,
			   struct file *file, loff_t *foffset)
{
	bool first = true;
	struct elf_thread_core_info *t = info->thread;

	do {
		int i;

		if (!writenote(&t->notes[0], file, foffset))
			return 0;

		if (first && !writenote(&info->psinfo, file, foffset))
			return 0;
		if (first && !writenote(&info->auxv, file, foffset))
			return 0;

		for (i = 1; i < info->thread_notes; ++i)
			if (t->notes[i].data &&
			    !writenote(&t->notes[i], file, foffset))
				return 0;

		first = false;
		t = t->next;
	} while (t);

	return 1;
}

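/*
 * Free every per-thread structure and all the regset note buffers.
 * Note 0 points into the elf_thread_core_info itself, so only the
 * notes from index 1 up carry separately kmalloc'd data.
 */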
static void free_note_info(struct elf_note_info *info)
{
	struct elf_thread_core_info *threads = info->thread;
	while (threads) {
		unsigned int i;
		struct elf_thread_core_info *t = threads;
		threads = t->next;
		WARN_ON(t->notes[0].data && t->notes[0].data != &t->prstatus);
		for (i = 1; i < info->thread_notes; ++i)
			kfree(t->notes[i].data);
		kfree(t);
	}
	kfree(info->psinfo.data);
}

#else

/* Here is the structure in which the status of each thread is captured. */
struct elf_thread_status
{
	struct list_head list;
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	elf_fpregset_t fpu;		/* NT_PRFPREG */
	struct task_struct *thread;
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t xfpu;		/* ELF_CORE_XFPREG_TYPE */
#endif
	struct memelfnote notes[3];
	int num_notes;
};

/*
 * In order to add the specific thread information for the elf file format,
 * we need to keep a linked list of every thread's pr_status and then create
 * a single section for them in the final core file.
 */
static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
{
	int sz = 0;
	struct task_struct *p = t->thread;
	t->num_notes = 0;

	fill_prstatus(&t->prstatus, p, signr);
	elf_core_copy_task_regs(p, &t->prstatus.pr_reg);

	fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
		  &(t->prstatus));
	t->num_notes++;
	sz += notesize(&t->notes[0]);

	if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL,
								&t->fpu))) {
		fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
			  &(t->fpu));
		t->num_notes++;
		sz += notesize(&t->notes[1]);
	}

#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
		fill_note(&t->notes[2], "LINUX", ELF_CORE_XFPREG_TYPE,
			  sizeof(t->xfpu), &t->xfpu);
		t->num_notes++;
		sz += notesize(&t->notes[2]);
	}
#endif
	return sz;
}

struct elf_note_info {
	struct memelfnote *notes;
	struct elf_prstatus *prstatus;	/* NT_PRSTATUS */
	struct elf_prpsinfo *psinfo;	/* NT_PRPSINFO */
	struct list_head thread_list;
	elf_fpregset_t *fpu;
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t *xfpu;
#endif
	int thread_status_size;
	int numnote;
};

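/*
 * Allocate the fixed per-dump buffers up front, unwinding in reverse
 * order if any allocation fails.  Returns 1 on success, 0 on failure.
 */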
static int elf_note_info_init(struct elf_note_info *info)
{
	memset(info, 0, sizeof(*info));
	INIT_LIST_HEAD(&info->thread_list);

	/* Allocate space for six ELF notes */
	info->notes = kmalloc(6 * sizeof(struct memelfnote), GFP_KERNEL);
	if (!info->notes)
		return 0;
	info->psinfo = kmalloc(sizeof(*info->psinfo), GFP_KERNEL);
	if (!info->psinfo)
		goto notes_free;
	info->prstatus = kmalloc(sizeof(*info->prstatus), GFP_KERNEL);
	if (!info->prstatus)
		goto psinfo_free;
	info->fpu = kmalloc(sizeof(*info->fpu), GFP_KERNEL);
	if (!info->fpu)
		goto prstatus_free;
#ifdef ELF_CORE_COPY_XFPREGS
	info->xfpu = kmalloc(sizeof(*info->xfpu), GFP_KERNEL);
	if (!info->xfpu)
		goto fpu_free;
#endif
	return 1;
#ifdef ELF_CORE_COPY_XFPREGS
 fpu_free:
	kfree(info->fpu);
#endif
 prstatus_free:
	kfree(info->prstatus);
 psinfo_free:
	kfree(info->psinfo);
 notes_free:
	kfree(info->notes);
	return 0;
}

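/*
 * Non-regset variant: build the note list by hand.  When the dump was
 * triggered by a signal, every other thread in the process gets its own
 * NT_PRSTATUS (and FPU) note; the current task's notes follow the SVR4
 * layout below.
 */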
static int fill_note_info(struct elfhdr *elf, int phdrs,
			  struct elf_note_info *info,
			  long signr, struct pt_regs *regs)
{
	struct list_head *t;

	if (!elf_note_info_init(info))
		return 0;

	if (signr) {
		struct core_thread *ct;
		struct elf_thread_status *ets;

		for (ct = current->mm->core_state->dumper.next;
						ct; ct = ct->next) {
			ets = kzalloc(sizeof(*ets), GFP_KERNEL);
			if (!ets)
				return 0;

			ets->thread = ct->task;
			list_add(&ets->list, &info->thread_list);
		}

		list_for_each(t, &info->thread_list) {
			int sz;

			ets = list_entry(t, struct elf_thread_status, list);
			sz = elf_dump_thread_status(signr, ets);
			info->thread_status_size += sz;
		}
	}
	/* now collect the dump for the current task */
	memset(info->prstatus, 0, sizeof(*info->prstatus));
	fill_prstatus(info->prstatus, current, signr);
	elf_core_copy_regs(&info->prstatus->pr_reg, regs);

	/* Set up header */
	fill_elf_header(elf, phdrs, ELF_ARCH, ELF_CORE_EFLAGS, ELF_OSABI);

	/*
	 * Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.
	 */

	fill_note(info->notes + 0, "CORE", NT_PRSTATUS,
		  sizeof(*info->prstatus), info->prstatus);
	fill_psinfo(info->psinfo, current->group_leader, current->mm);
	fill_note(info->notes + 1, "CORE", NT_PRPSINFO,
		  sizeof(*info->psinfo), info->psinfo);

	info->numnote = 2;

	fill_auxv_note(&info->notes[info->numnote++], current->mm);

	/* Try to dump the FPU. */
	info->prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs,
							       info->fpu);
	if (info->prstatus->pr_fpvalid)
		fill_note(info->notes + info->numnote++,
			  "CORE", NT_PRFPREG, sizeof(*info->fpu), info->fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(current, info->xfpu))
		fill_note(info->notes + info->numnote++,
			  "LINUX", ELF_CORE_XFPREG_TYPE,
			  sizeof(*info->xfpu), info->xfpu);
#endif

	return 1;
}

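/* Total the current task's notes plus the per-thread status notes. */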
static size_t get_note_info_size(struct elf_note_info *info)
{
	int sz = 0;
	int i;

	for (i = 0; i < info->numnote; i++)
		sz += notesize(info->notes + i);

	sz += info->thread_status_size;

	return sz;
}

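/*
 * Write the current task's notes first, then the status notes for
 * every other thread on the list.
 */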
static int write_note_info(struct elf_note_info *info,
			   struct file *file, loff_t *foffset)
{
	int i;
	struct list_head *t;

	for (i = 0; i < info->numnote; i++)
		if (!writenote(info->notes + i, file, foffset))
			return 0;

	/* write out the thread status notes section */
	list_for_each(t, &info->thread_list) {
		struct elf_thread_status *tmp =
				list_entry(t, struct elf_thread_status, list);

		for (i = 0; i < tmp->num_notes; i++)
			if (!writenote(&tmp->notes[i], file, foffset))
				return 0;
	}

	return 1;
}

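/* Release the thread list and all the buffers from elf_note_info_init(). */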
static void free_note_info(struct elf_note_info *info)
{
	while (!list_empty(&info->thread_list)) {
		struct list_head *tmp = info->thread_list.next;
		list_del(tmp);
		kfree(list_entry(tmp, struct elf_thread_status, list));
	}

	kfree(info->prstatus);
	kfree(info->psinfo);
	kfree(info->notes);
	kfree(info->fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	kfree(info->xfpu);
#endif
}

#endif

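/*
 * Helper function for iterating across a vma list.  It returns the first
 * vma to dump, falling back to the gate vma if the mm has no mappings.
 */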
static struct vm_area_struct *first_vma(struct task_struct *tsk,
					struct vm_area_struct *gate_vma)
{
	struct vm_area_struct *ret = tsk->mm->mmap;

	if (ret)
		return ret;
	return gate_vma;
}

/*
 * Helper function for iterating across a vma list.  It ensures that the caller
 * will visit `gate_vma' prior to terminating the search.
 */
static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
					struct vm_area_struct *gate_vma)
{
	struct vm_area_struct *ret;

	ret = this_vma->vm_next;
	if (ret)
		return ret;
	if (this_vma == gate_vma)
		return NULL;
	return gate_vma;
}

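/*
 * ELF extended numbering: when the segment count does not fit in
 * e_phnum, the real count is stored in the sh_info field of section
 * header 0 and e_phnum itself is set to PN_XNUM.
 */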
static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
			     elf_addr_t e_shoff, int segs)
{
	elf->e_shoff = e_shoff;
	elf->e_shentsize = sizeof(*shdr4extnum);
	elf->e_shnum = 1;
	elf->e_shstrndx = SHN_UNDEF;

	memset(shdr4extnum, 0, sizeof(*shdr4extnum));

	shdr4extnum->sh_type = SHT_NULL;
	shdr4extnum->sh_size = elf->e_shnum;
	shdr4extnum->sh_link = elf->e_shstrndx;
	shdr4extnum->sh_info = segs;
}

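/* Sum the file size that each vma will contribute to the dump. */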
static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
				     unsigned long mm_flags)
{
	struct vm_area_struct *vma;
	size_t size = 0;

	for (vma = first_vma(current, gate_vma); vma != NULL;
	     vma = next_vma(vma, gate_vma))
		size += vma_dump_size(vma, mm_flags);
	return size;
}

/*
 * Actual dumper
 *
 * This is a two-pass process; first we find the offsets of the bits,
 * and then they are actually written out.  If we run out of core limit
 * we just truncate.
 */
static int elf_core_dump(struct coredump_params *cprm)
{
	int has_dumped = 0;
	mm_segment_t fs;
	int segs;
	size_t size = 0;
	struct vm_area_struct *vma, *gate_vma;
	struct elfhdr *elf = NULL;
	loff_t offset = 0, dataoff, foffset;
	struct elf_note_info info;
	struct elf_phdr *phdr4note = NULL;
	struct elf_shdr *shdr4extnum = NULL;
	Elf_Half e_phnum;
	elf_addr_t e_shoff;

	/*
	 * We no longer stop all VM operations.
	 *
	 * This is because those processes that could possibly change map_count
	 * or the mmap / vma pages are now blocked in do_exit on current
	 * finishing this core dump.
	 *
	 * Only ptrace can touch these memory addresses, but it doesn't change
	 * the map_count or the pages allocated. So no possibility of crashing
	 * exists while dumping the mm->vm_next areas to the core file.
	 */

	/* alloc memory for large data structures: too large to be on stack */
	elf = kmalloc(sizeof(*elf), GFP_KERNEL);
	if (!elf)
		goto out;
	/*
	 * The number of segs is recorded in the ELF header as a 16-bit value.
	 * Please check DEFAULT_MAX_MAP_COUNT definition when you modify here.
	 */
	segs = current->mm->map_count;
	segs += elf_core_extra_phdrs();

	gate_vma = get_gate_vma(current->mm);
	if (gate_vma != NULL)
		segs++;

	/* for notes section */
	segs++;

	/* If segs > PN_XNUM(0xffff), then e_phnum overflows. To avoid
	 * this, the kernel supports extended numbering. Have a look at
	 * include/linux/elf.h for further information. */
	e_phnum = segs > PN_XNUM ? PN_XNUM : segs;

	/*
	 * Collect all the non-memory information about the process for the
	 * notes.  This also sets up the file header.
	 */
	if (!fill_note_info(elf, e_phnum, &info, cprm->signr, cprm->regs))
		goto cleanup;

	has_dumped = 1;
	current->flags |= PF_DUMPCORE;

	fs = get_fs();
	set_fs(KERNEL_DS);

	offset += sizeof(*elf);				/* Elf header */
	offset += segs * sizeof(struct elf_phdr);	/* Program headers */
	foffset = offset;

	/* Write notes phdr entry */
	{
		size_t sz = get_note_info_size(&info);

		sz += elf_coredump_extra_notes_size();

		phdr4note = kmalloc(sizeof(*phdr4note), GFP_KERNEL);
		if (!phdr4note)
			goto end_coredump;

		fill_elf_note_phdr(phdr4note, sz, offset);
		offset += sz;
	}

	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);

	offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
	offset += elf_core_extra_data_size();
	e_shoff = offset;

	if (e_phnum == PN_XNUM) {
		shdr4extnum = kmalloc(sizeof(*shdr4extnum), GFP_KERNEL);
		if (!shdr4extnum)
			goto end_coredump;
		fill_extnum_info(elf, shdr4extnum, e_shoff, segs);
	}

	offset = dataoff;

	size += sizeof(*elf);
	if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
		goto end_coredump;

	size += sizeof(*phdr4note);
	if (size > cprm->limit
	    || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
		goto end_coredump;

	/* Write program headers for segments dump */
	for (vma = first_vma(current, gate_vma); vma != NULL;
			vma = next_vma(vma, gate_vma)) {
		struct elf_phdr phdr;

		phdr.p_type = PT_LOAD;
		phdr.p_offset = offset;
		phdr.p_vaddr = vma->vm_start;
		phdr.p_paddr = 0;
		phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
		phdr.p_memsz = vma->vm_end - vma->vm_start;
		offset += phdr.p_filesz;
		phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
		if (vma->vm_flags & VM_WRITE)
			phdr.p_flags |= PF_W;
		if (vma->vm_flags & VM_EXEC)
			phdr.p_flags |= PF_X;
		phdr.p_align = ELF_EXEC_PAGESIZE;

		size += sizeof(phdr);
		if (size > cprm->limit
		    || !dump_write(cprm->file, &phdr, sizeof(phdr)))
			goto end_coredump;
	}

	if (!elf_core_write_extra_phdrs(cprm->file, offset, &size, cprm->limit))
		goto end_coredump;

	/* write out the notes section */
	if (!write_note_info(&info, cprm->file, &foffset))
		goto end_coredump;

	if (elf_coredump_extra_notes_write(cprm->file, &foffset))
		goto end_coredump;

	/* Align to page */
	if (!dump_seek(cprm->file, dataoff - foffset))
		goto end_coredump;

	for (vma = first_vma(current, gate_vma); vma != NULL;
			vma = next_vma(vma, gate_vma)) {
		unsigned long addr;
		unsigned long end;

		end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);

		for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
			struct page *page;
			int stop;

			page = get_dump_page(addr);
			if (page) {
				void *kaddr = kmap(page);
				stop = ((size += PAGE_SIZE) > cprm->limit) ||
					!dump_write(cprm->file, kaddr,
						    PAGE_SIZE);
				kunmap(page);
				page_cache_release(page);
			} else
				stop = !dump_seek(cprm->file, PAGE_SIZE);
			if (stop)
				goto end_coredump;
		}
	}

	if (!elf_core_write_extra_data(cprm->file, &size, cprm->limit))
		goto end_coredump;

	if (e_phnum == PN_XNUM) {
		size += sizeof(*shdr4extnum);
		if (size > cprm->limit
		    || !dump_write(cprm->file, shdr4extnum,
				   sizeof(*shdr4extnum)))
			goto end_coredump;
	}

end_coredump:
	set_fs(fs);

cleanup:
	free_note_info(&info);
	kfree(shdr4extnum);
	kfree(phdr4note);
	kfree(elf);
out:
	return has_dumped;
}

#endif		/* CONFIG_ELF_CORE */

static int __init init_elf_binfmt(void)
{
	register_binfmt(&elf_format);
	return 0;
}

static void __exit exit_elf_binfmt(void)
{
	/* Remove the ELF loader. */
	unregister_binfmt(&elf_format);
}

core_initcall(init_elf_binfmt);
module_exit(exit_elf_binfmt);
MODULE_LICENSE("GPL");