kcore: use registered physmem information
[linux-2.6.git] fs/proc/kcore.c
/*
 *	fs/proc/kcore.c kernel ELF core dumper
 *
 *	Modelled on fs/exec.c:aout_core_dump()
 *	Jeremy Fitzhardinge <jeremy@sw.oz.au>
 *	ELF version written by David Howells <David.Howells@nexor.co.uk>
 *	Modified and incorporated into 2.3.x by Tigran Aivazian <tigran@veritas.com>
 *	Support to dump vmalloc'd areas (ELF only), Tigran Aivazian <tigran@veritas.com>
 *	Safe accesses to vmalloc/direct-mapped discontiguous areas, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/user.h>
#include <linux/capability.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <linux/list.h>
#include <linux/ioport.h>
#include <linux/memory.h>
#include <asm/sections.h>

#define CORE_STR "CORE"

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS	0
#endif

static struct proc_dir_entry *proc_root_kcore;

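/*
 * These helpers translate a kernel virtual address to its offset in
 * the /proc/kcore image and back. Architectures with a non-linear
 * kernel mapping supply their own definitions, hence the #ifndef.
 */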
#ifndef kc_vaddr_to_offset
#define	kc_vaddr_to_offset(v) ((v) - PAGE_OFFSET)
#endif
#ifndef kc_offset_to_vaddr
#define	kc_offset_to_vaddr(o) ((o) + PAGE_OFFSET)
#endif

/* An ELF note in memory */
struct memelfnote
{
	const char *name;
	int type;
	unsigned int datasz;
	void *data;
};

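/*
 * List of memory ranges exported through /proc/kcore, protected by
 * kclist_lock. kcore_need_update flags that the KCORE_RAM entries are
 * stale and must be rebuilt on the next open().
 */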
static LIST_HEAD(kclist_head);
static DEFINE_RWLOCK(kclist_lock);
static int kcore_need_update = 1;

void
kclist_add(struct kcore_list *new, void *addr, size_t size, int type)
{
	new->addr = (unsigned long)addr;
	new->size = size;
	new->type = type;

	write_lock(&kclist_lock);
	list_add_tail(&new->list, &kclist_head);
	write_unlock(&kclist_lock);
}
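
/*
 * A sketch of a typical arch-side caller (hypothetical example; the
 * names MODULES_VADDR/MODULES_END are arch specific). The kcore_list
 * storage must stay valid forever, hence static:
 *
 *	static struct kcore_list kcore_modules;
 *	kclist_add(&kcore_modules, (void *)MODULES_VADDR,
 *		   MODULES_END - MODULES_VADDR, KCORE_VMALLOC);
 */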
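/*
 * Compute the apparent file size: the largest kc_vaddr_to_offset()
 * over all registered ranges, plus the size of the ELF header area,
 * which is page aligned and returned separately in *elf_buflen.
 */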
static size_t get_kcore_size(int *nphdr, size_t *elf_buflen)
{
	size_t try, size;
	struct kcore_list *m;

	*nphdr = 1; /* PT_NOTE */
	size = 0;

	list_for_each_entry(m, &kclist_head, list) {
		try = kc_vaddr_to_offset((size_t)m->addr + m->size);
		if (try > size)
			size = try;
		*nphdr = *nphdr + 1;
	}
	*elf_buflen =	sizeof(struct elfhdr) +
			(*nphdr + 2)*sizeof(struct elf_phdr) +
			3 * ((sizeof(struct elf_note)) +
			     roundup(sizeof(CORE_STR), 4)) +
			roundup(sizeof(struct elf_prstatus), 4) +
			roundup(sizeof(struct elf_prpsinfo), 4) +
			roundup(sizeof(struct task_struct), 4);
	*elf_buflen = PAGE_ALIGN(*elf_buflen);
	return size + *elf_buflen;
}

static void free_kclist_ents(struct list_head *head)
{
	struct kcore_list *tmp, *pos;

	list_for_each_entry_safe(pos, tmp, head, list) {
		list_del(&pos->list);
		kfree(pos);
	}
}
/*
 * Replace all KCORE_RAM entries with the passed-in list. If another
 * updater has already refreshed the list (kcore_need_update is clear),
 * the new entries are discarded instead.
 */
static void __kcore_update_ram(struct list_head *list)
{
	struct kcore_list *tmp, *pos;
	LIST_HEAD(garbage);

	write_lock(&kclist_lock);
	if (kcore_need_update) {
		list_for_each_entry_safe(pos, tmp, &kclist_head, list) {
			if (pos->type == KCORE_RAM)
				list_move(&pos->list, &garbage);
		}
		list_splice_tail(list, &kclist_head);
	} else
		list_splice(list, &garbage);
	kcore_need_update = 0;
	write_unlock(&kclist_lock);

	free_kclist_ents(&garbage);
}


#ifdef CONFIG_HIGHMEM
/*
 * With CONFIG_HIGHMEM part of memory is _invisible_ to the kernel, so
 * only the direct-mapped range is dumped: [0...max_low_pfn) can be
 * assumed to be one contiguous block, because memory holes there are
 * small compared with the !HIGHMEM case.
 */
static int kcore_update_ram(void)
{
	LIST_HEAD(head);
	struct kcore_list *ent;
	int ret = 0;

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return -ENOMEM;
	ent->addr = (unsigned long)__va(0);
	ent->size = max_low_pfn << PAGE_SHIFT;
	ent->type = KCORE_RAM;
	list_add(&ent->list, &head);
	__kcore_update_ram(&head);
	return ret;
}

#else /* !CONFIG_HIGHMEM */

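/*
 * Callback for walk_system_ram_range(): allocates one KCORE_RAM entry
 * per contiguous range of System RAM, clipped so that it never
 * extends past the direct-map area into vmalloc space.
 */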
static int
kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg)
{
	struct list_head *head = (struct list_head *)arg;
	struct kcore_list *ent;

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return -ENOMEM;
	ent->addr = (unsigned long)__va((pfn << PAGE_SHIFT));
	ent->size = nr_pages << PAGE_SHIFT;

	/* Sanity check: __va() may wrap below PAGE_OFFSET on 32-bit archs */
	if (ent->addr < (unsigned long) __va(0))
		goto free_out;

	/* Cut off the unmapped tail of the range (taken from ppc32 code). */
	if (ULONG_MAX - ent->addr < ent->size)
		ent->size = ULONG_MAX - ent->addr;

	/* Clip the entry when the vmalloc() area sits above the direct map. */
	if (VMALLOC_START > (unsigned long)__va(0)) {
		if (ent->addr > VMALLOC_START)
			goto free_out;
		if (VMALLOC_START - ent->addr < ent->size)
			ent->size = VMALLOC_START - ent->addr;
	}

	ent->type = KCORE_RAM;
	list_add_tail(&ent->list, head);
	return 0;
free_out:
	kfree(ent);
	return 1;
}

static int kcore_update_ram(void)
{
	int nid, ret;
	unsigned long end_pfn;
	LIST_HEAD(head);

	/* Not initialized yet: find the max pfn, then build the list. */
	end_pfn = 0;
	for_each_node_state(nid, N_HIGH_MEMORY) {
		unsigned long node_end;
		node_end = NODE_DATA(nid)->node_start_pfn +
			NODE_DATA(nid)->node_spanned_pages;
		if (end_pfn < node_end)
			end_pfn = node_end;
	}
	/* scan 0 to max_pfn */
	ret = walk_system_ram_range(0, end_pfn, &head, kclist_add_private);
	if (ret) {
		free_kclist_ents(&head);
		return -ENOMEM;
	}
	__kcore_update_ram(&head);
	return ret;
}
#endif /* CONFIG_HIGHMEM */

/*****************************************************************************/
/*
 * determine size of ELF note
 */
static int notesize(struct memelfnote *en)
{
	int sz;

	sz = sizeof(struct elf_note);
	sz += roundup((strlen(en->name) + 1), 4);
	sz += roundup(en->datasz, 4);

	return sz;
} /* end notesize() */

/*****************************************************************************/
/*
 * store a note in the header buffer
 */
static char *storenote(struct memelfnote *men, char *bufp)
{
	struct elf_note en;

#define DUMP_WRITE(addr, nr) do { memcpy(bufp, (addr), (nr)); bufp += (nr); } while(0)

	en.n_namesz = strlen(men->name) + 1;
	en.n_descsz = men->datasz;
	en.n_type = men->type;

	DUMP_WRITE(&en, sizeof(en));
	DUMP_WRITE(men->name, en.n_namesz);

	/* XXX - cast from long long to long to avoid need for libgcc.a */
	bufp = (char *) roundup((unsigned long)bufp, 4);
	DUMP_WRITE(men->data, men->datasz);
	bufp = (char *) roundup((unsigned long)bufp, 4);

#undef DUMP_WRITE

	return bufp;
} /* end storenote() */

/*
 * store an ELF coredump header in the supplied buffer
 * nphdr is the number of elf_phdr to insert
 */
static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
{
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	struct elf_prpsinfo prpsinfo;	/* NT_PRPSINFO */
	struct elf_phdr *nhdr, *phdr;
	struct elfhdr *elf;
	struct memelfnote notes[3];
	off_t offset = 0;
	struct kcore_list *m;

	/* setup ELF header */
	elf = (struct elfhdr *) bufp;
	bufp += sizeof(struct elfhdr);
	offset += sizeof(struct elfhdr);
	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS]	= ELF_CLASS;
	elf->e_ident[EI_DATA]	= ELF_DATA;
	elf->e_ident[EI_VERSION] = EV_CURRENT;
	elf->e_ident[EI_OSABI]	= ELF_OSABI;
	memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
	elf->e_type	= ET_CORE;
	elf->e_machine	= ELF_ARCH;
	elf->e_version	= EV_CURRENT;
	elf->e_entry	= 0;
	elf->e_phoff	= sizeof(struct elfhdr);
	elf->e_shoff	= 0;
	elf->e_flags	= ELF_CORE_EFLAGS;
	elf->e_ehsize	= sizeof(struct elfhdr);
	elf->e_phentsize = sizeof(struct elf_phdr);
	elf->e_phnum	= nphdr;
	elf->e_shentsize = 0;
	elf->e_shnum	= 0;
	elf->e_shstrndx	= 0;

	/* setup ELF PT_NOTE program header */
	nhdr = (struct elf_phdr *) bufp;
	bufp += sizeof(struct elf_phdr);
	offset += sizeof(struct elf_phdr);
	nhdr->p_type	= PT_NOTE;
	nhdr->p_offset	= 0;
	nhdr->p_vaddr	= 0;
	nhdr->p_paddr	= 0;
	nhdr->p_filesz	= 0;
	nhdr->p_memsz	= 0;
	nhdr->p_flags	= 0;
	nhdr->p_align	= 0;

	/* setup ELF PT_LOAD program header for every area */
	list_for_each_entry(m, &kclist_head, list) {
		phdr = (struct elf_phdr *) bufp;
		bufp += sizeof(struct elf_phdr);
		offset += sizeof(struct elf_phdr);

		phdr->p_type	= PT_LOAD;
		phdr->p_flags	= PF_R|PF_W|PF_X;
		phdr->p_offset	= kc_vaddr_to_offset(m->addr) + dataoff;
		phdr->p_vaddr	= (size_t)m->addr;
		phdr->p_paddr	= 0;
		phdr->p_filesz	= phdr->p_memsz = m->size;
		phdr->p_align	= PAGE_SIZE;
	}

	/*
	 * Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.
	 */
	nhdr->p_offset	= offset;

	/* set up the process status */
	notes[0].name = CORE_STR;
	notes[0].type = NT_PRSTATUS;
	notes[0].datasz = sizeof(struct elf_prstatus);
	notes[0].data = &prstatus;

	memset(&prstatus, 0, sizeof(struct elf_prstatus));

	nhdr->p_filesz	= notesize(&notes[0]);
	bufp = storenote(&notes[0], bufp);

	/* set up the process info */
	notes[1].name	= CORE_STR;
	notes[1].type	= NT_PRPSINFO;
	notes[1].datasz	= sizeof(struct elf_prpsinfo);
	notes[1].data	= &prpsinfo;

	memset(&prpsinfo, 0, sizeof(struct elf_prpsinfo));
	prpsinfo.pr_state	= 0;
	prpsinfo.pr_sname	= 'R';
	prpsinfo.pr_zomb	= 0;

	strcpy(prpsinfo.pr_fname, "vmlinux");
	strncpy(prpsinfo.pr_psargs, saved_command_line, ELF_PRARGSZ);

	nhdr->p_filesz	+= notesize(&notes[1]);
	bufp = storenote(&notes[1], bufp);

	/* set up the task structure */
	notes[2].name	= CORE_STR;
	notes[2].type	= NT_TASKSTRUCT;
	notes[2].datasz	= sizeof(struct task_struct);
	notes[2].data	= current;

	nhdr->p_filesz	+= notesize(&notes[2]);
	bufp = storenote(&notes[2], bufp);

} /* end elf_kcore_store_hdr() */

/*****************************************************************************/
/*
 * read from the ELF header and then kernel memory
 */
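/*
 * File layout: a page-aligned ELF header area of elf_buflen bytes,
 * followed by the memory image. Offsets past the header area map
 * linearly onto kernel virtual addresses via kc_offset_to_vaddr().
 */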
static ssize_t
read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
{
	ssize_t acc = 0;
	size_t size, tsz;
	size_t elf_buflen;
	int nphdr;
	unsigned long start;

	read_lock(&kclist_lock);
	proc_root_kcore->size = size = get_kcore_size(&nphdr, &elf_buflen);
	if (buflen == 0 || *fpos >= size) {
		read_unlock(&kclist_lock);
		return 0;
	}

	/* trim buflen to not go beyond EOF */
	if (buflen > size - *fpos)
		buflen = size - *fpos;

	/* construct an ELF core header if we'll need some of it */
	if (*fpos < elf_buflen) {
		char *elf_buf;

		tsz = elf_buflen - *fpos;
		if (buflen < tsz)
			tsz = buflen;
		elf_buf = kzalloc(elf_buflen, GFP_ATOMIC);
		if (!elf_buf) {
			read_unlock(&kclist_lock);
			return -ENOMEM;
		}
		elf_kcore_store_hdr(elf_buf, nphdr, elf_buflen);
		read_unlock(&kclist_lock);
		if (copy_to_user(buffer, elf_buf + *fpos, tsz)) {
			kfree(elf_buf);
			return -EFAULT;
		}
		kfree(elf_buf);
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	} else
		read_unlock(&kclist_lock);

	/*
	 * Check to see if our file offset matches with any of
	 * the addresses in the elf_phdr on our list.
	 */
	start = kc_offset_to_vaddr(*fpos - elf_buflen);
	if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
		tsz = buflen;

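	/*
	 * Copy out one page-bounded chunk per iteration; each chunk is
	 * vmalloc space, valid direct-mapped memory, or a hole that is
	 * returned zero-filled.
	 */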
	while (buflen) {
		struct kcore_list *m;

		read_lock(&kclist_lock);
		list_for_each_entry(m, &kclist_head, list) {
			if (start >= m->addr && start < (m->addr+m->size))
				break;
		}
		read_unlock(&kclist_lock);

		/*
		 * list_for_each_entry() never yields a NULL iterator;
		 * detect "no matching entry" by comparing against the
		 * list head instead.
		 */
		if (&m->list == &kclist_head) {
			if (clear_user(buffer, tsz))
				return -EFAULT;
		} else if (is_vmalloc_addr((void *)start)) {
			char *elf_buf;

			elf_buf = kzalloc(tsz, GFP_KERNEL);
			if (!elf_buf)
				return -ENOMEM;
			vread(elf_buf, (char *)start, tsz);
			/* we have to zero-fill user buffer even if no read */
			if (copy_to_user(buffer, elf_buf, tsz)) {
				kfree(elf_buf);
				return -EFAULT;
			}
			kfree(elf_buf);
		} else {
			if (kern_addr_valid(start)) {
				unsigned long n;

				n = copy_to_user(buffer, (char *)start, tsz);
				/*
				 * We cannot distinguish between a fault on the
				 * source and a fault on the destination. When
				 * this happens we clear the unwritten tail as
				 * well and hope a retry will trigger the
				 * EFAULT again.
				 */
				if (n) {
					if (clear_user(buffer + tsz - n, n))
						return -EFAULT;
				}
			} else {
				if (clear_user(buffer, tsz))
					return -EFAULT;
			}
		}
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;
		start += tsz;
		tsz = (buflen > PAGE_SIZE ? PAGE_SIZE : buflen);
	}

	return acc;
}
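/*
 * Opening /proc/kcore requires CAP_SYS_RAWIO. Stale RAM information
 * is refreshed here, on open, rather than on every read.
 */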
static int open_kcore(struct inode *inode, struct file *filp)
{
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	if (kcore_need_update)
		kcore_update_ram();
	return 0;
}

static const struct file_operations proc_kcore_operations = {
	.read		= read_kcore,
	.open		= open_kcore,
};

#ifdef CONFIG_MEMORY_HOTPLUG
/* just remember that we have to update kcore */
static int __meminit kcore_callback(struct notifier_block *self,
				    unsigned long action, void *arg)
{
	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		write_lock(&kclist_lock);
		kcore_need_update = 1;
		write_unlock(&kclist_lock);
	}
	return NOTIFY_OK;
}
#endif
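/*
 * When memory hotplug support is not configured,
 * hotplug_memory_notifier() is defined as a no-op, so the missing
 * kcore_callback() is never referenced.
 */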

static struct kcore_list kcore_vmalloc;

#ifdef CONFIG_ARCH_PROC_KCORE_TEXT
static struct kcore_list kcore_text;
/*
 * If CONFIG_ARCH_PROC_KCORE_TEXT is defined, the kernel text is mapped
 * through a special segment rather than the direct-map area, so a
 * dedicated TEXT entry has to be created for it.
 */
static void __init proc_kcore_text_init(void)
{
	kclist_add(&kcore_text, _stext, _end - _stext, KCORE_TEXT);
}
#else
static void __init proc_kcore_text_init(void)
{
}
#endif

static int __init proc_kcore_init(void)
{
	proc_root_kcore = proc_create("kcore", S_IRUSR, NULL,
				      &proc_kcore_operations);
	/* Store text area if it's special */
	proc_kcore_text_init();
	/* Store vmalloc area */
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		VMALLOC_END - VMALLOC_START, KCORE_VMALLOC);
	/* Store direct-map area from physical memory map */
	kcore_update_ram();
	hotplug_memory_notifier(kcore_callback, 0);
	/*
	 * Other special areas, such as the per-arch module area, are
	 * registered by architecture-specific code.
	 */

	return 0;
}
module_init(proc_kcore_init);
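
/*
 * The resulting /proc/kcore is a regular ELF core image; it can be
 * inspected with, for example:
 *
 *	gdb /path/to/vmlinux /proc/kcore
 */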