// SPDX-License-Identifier: GPL-2.0-only
/*
 *	fs/proc/vmcore.c Interface for accessing the crash
 *				 dump from the system's previous life.
 *	Heavily borrowed from fs/proc/kcore.c
 *	Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *	Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */

#include <linux/mm.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>
#include <linux/mem_encrypt.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include "internal.h"

/* List representing chunks of contiguous memory areas and their offsets in
 * the vmcore file.
 */
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing the kernel ELF core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;
static size_t elfcorebuf_sz_orig;

static char *elfnotes_buf;
static size_t elfnotes_sz;
/* Size of all notes minus the device dump notes */
static size_t elfnotes_orig_sz;

/* Total size of the vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore;

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/* Device dump list and mutex to synchronize access to the list */
static LIST_HEAD(vmcoredd_list);
static DEFINE_MUTEX(vmcoredd_mutex);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Total size of all device dumps */
static size_t vmcoredd_orig_sz;

/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error.
 * The called function has to take care of module refcounting.
 */
static int (*oldmem_pfn_is_ram)(unsigned long pfn);

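/*
 * Register a hook that reports whether a pfn of the crashed kernel was
 * backed by RAM. Only one hook can be registered at a time; a hypervisor
 * backend (the Xen balloon driver, for instance) uses this so that
 * ballooned-out pages are not read from the old memory image.
 */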
int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
	if (oldmem_pfn_is_ram)
		return -EBUSY;
	oldmem_pfn_is_ram = fn;
	return 0;
}
EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram);

void unregister_oldmem_pfn_is_ram(void)
{
	oldmem_pfn_is_ram = NULL;
	wmb();
}
EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram);

static int pfn_is_ram(unsigned long pfn)
{
	int (*fn)(unsigned long pfn);
	/* pfn is ram unless fn() checks pagetype */
	int ret = 1;

	/*
	 * Ask hypervisor if the pfn is really ram.
	 * A ballooned page contains no data and reading from such a page
	 * will cause high load in the hypervisor.
	 */
	fn = oldmem_pfn_is_ram;
	if (fn)
		ret = fn(pfn);

	return ret;
}

/* Reads from the oldmem device, a page at a time, from the given offset. */
static ssize_t read_from_oldmem(char *buf, size_t count,
				u64 *ppos, int userbuf,
				bool encrypted)
{
	unsigned long pfn, offset;
	size_t nr_bytes;
	ssize_t read = 0, tmp;

	if (!count)
		return 0;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);

	do {
		if (count > (PAGE_SIZE - offset))
			nr_bytes = PAGE_SIZE - offset;
		else
			nr_bytes = count;

		/* If pfn is not ram, return zeros for sparse dump files */
		if (pfn_is_ram(pfn) == 0)
			memset(buf, 0, nr_bytes);
		else {
			if (encrypted)
				tmp = copy_oldmem_page_encrypted(pfn, buf,
								 nr_bytes,
								 offset,
								 userbuf);
			else
				tmp = copy_oldmem_page(pfn, buf, nr_bytes,
						       offset, userbuf);

			if (tmp < 0)
				return tmp;
		}
		*ppos += nr_bytes;
		count -= nr_bytes;
		buf += nr_bytes;
		read += nr_bytes;
		++pfn;
		offset = 0;
	} while (count);

	return read;
}

/*
 * Architectures may override this function to allocate the ELF header
 * in the 2nd kernel.
 */
int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
	return 0;
}

/*
 * Architectures may override this function to free the ELF header.
 */
void __weak elfcorehdr_free(unsigned long long addr)
{}

/*
 * Architectures may override this function to read from the ELF header.
 */
ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
	return read_from_oldmem(buf, count, ppos, 0, false);
}

/*
 * Architectures may override this function to read from note segments.
 */
ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
{
	return read_from_oldmem(buf, count, ppos, 0, sme_active());
}

/*
 * Architectures may override this function to map oldmem.
 */
int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
				  unsigned long from, unsigned long pfn,
				  unsigned long size, pgprot_t prot)
{
	prot = pgprot_encrypted(prot);
	return remap_pfn_range(vma, from, pfn, size, prot);
}

/*
 * Architectures which support memory encryption override this.
 */
ssize_t __weak
copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
			   unsigned long offset, int userbuf)
{
	return copy_oldmem_page(pfn, buf, csize, offset, userbuf);
}

/*
 * Copy to either kernel or user space
 */
static int copy_to(void *target, void *src, size_t size, int userbuf)
{
	if (userbuf) {
		if (copy_to_user((char __user *) target, src, size))
			return -EFAULT;
	} else {
		memcpy(target, src, size);
	}
	return 0;
}

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
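/*
 * vmcoredd_copy_dumps - copy device dump data into a kernel or user buffer
 * @dst: destination buffer
 * @start: offset into the concatenated device dump region
 * @size: number of bytes to copy
 * @userbuf: whether @dst is a user-space address
 *
 * Walk vmcoredd_list and copy the pieces of the device dumps that fall
 * inside [@start, @start + @size) into @dst.
 */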
static int vmcoredd_copy_dumps(void *dst, u64 start, size_t size, int userbuf)
{
	struct vmcoredd_node *dump;
	u64 offset = 0;
	int ret = 0;
	size_t tsz;
	char *buf;

	mutex_lock(&vmcoredd_mutex);
	list_for_each_entry(dump, &vmcoredd_list, list) {
		if (start < offset + dump->size) {
			tsz = min(offset + (u64)dump->size - start, (u64)size);
			buf = dump->buf + start - offset;
			if (copy_to(dst, buf, tsz, userbuf)) {
				ret = -EFAULT;
				goto out_unlock;
			}

			size -= tsz;
			start += tsz;
			dst += tsz;

			/* Leave now if buffer filled already */
			if (!size)
				goto out_unlock;
		}
		offset += dump->size;
	}

out_unlock:
	mutex_unlock(&vmcoredd_mutex);
	return ret;
}

#ifdef CONFIG_MMU
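/*
 * vmcoredd_mmap_dumps - map device dump data into a user VMA
 * @vma: user mapping being populated
 * @dst: user virtual address to map at
 * @start: offset into the concatenated device dump region
 * @size: number of bytes to map
 *
 * Like vmcoredd_copy_dumps(), but remaps the vmalloc'ed dump buffers into
 * the user mapping instead of copying them.
 */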
static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
			       u64 start, size_t size)
{
	struct vmcoredd_node *dump;
	u64 offset = 0;
	int ret = 0;
	size_t tsz;
	char *buf;

	mutex_lock(&vmcoredd_mutex);
	list_for_each_entry(dump, &vmcoredd_list, list) {
		if (start < offset + dump->size) {
			tsz = min(offset + (u64)dump->size - start, (u64)size);
			buf = dump->buf + start - offset;
			if (remap_vmalloc_range_partial(vma, dst, buf, tsz)) {
				ret = -EFAULT;
				goto out_unlock;
			}

			size -= tsz;
			start += tsz;
			dst += tsz;

			/* Leave now if buffer filled already */
			if (!size)
				goto out_unlock;
		}
		offset += dump->size;
	}

out_unlock:
	mutex_unlock(&vmcoredd_mutex);
	return ret;
}
#endif /* CONFIG_MMU */
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Read from the ELF header and then the crash dump. On error, a negative
 * value is returned; otherwise, the number of bytes read is returned.
 */
static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
			     int userbuf)
{
	ssize_t acc = 0, tmp;
	size_t tsz;
	u64 start;
	struct vmcore *m = NULL;

	if (buflen == 0 || *fpos >= vmcore_size)
		return 0;

	/* trim buflen to not go beyond EOF */
	if (buflen > vmcore_size - *fpos)
		buflen = vmcore_size - *fpos;

	/* Read ELF core header */
	if (*fpos < elfcorebuf_sz) {
		tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen);
		if (copy_to(buffer, elfcorebuf + *fpos, tsz, userbuf))
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	}

	/* Read Elf note segment */
	if (*fpos < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		/* We add device dumps before other elf notes because the
		 * other elf notes may not fill the elf notes buffer
		 * completely and we will end up with zero-filled data
		 * between the elf notes and the device dumps. Tools will
		 * then try to decode this zero-filled data as valid notes
		 * and we don't want that. Hence, adding device dumps before
		 * the other elf notes ensures that zero-filled data can be
		 * avoided.
		 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
		/* Read device dumps */
		if (*fpos < elfcorebuf_sz + vmcoredd_orig_sz) {
			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
				  (size_t)*fpos, buflen);
			start = *fpos - elfcorebuf_sz;
			if (vmcoredd_copy_dumps(buffer, start, tsz, userbuf))
				return -EFAULT;

			buflen -= tsz;
			*fpos += tsz;
			buffer += tsz;
			acc += tsz;

			/* leave now if filled buffer already */
			if (!buflen)
				return acc;
		}
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

		/* Read remaining elf notes */
		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen);
		kaddr = elfnotes_buf + *fpos - elfcorebuf_sz - vmcoredd_orig_sz;
		if (copy_to(buffer, kaddr, tsz, userbuf))
			return -EFAULT;

		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (*fpos < m->offset + m->size) {
			tsz = (size_t)min_t(unsigned long long,
					    m->offset + m->size - *fpos,
					    buflen);
			start = m->paddr + *fpos - m->offset;
			tmp = read_from_oldmem(buffer, tsz, &start,
					       userbuf, sme_active());
			if (tmp < 0)
				return tmp;
			buflen -= tsz;
			*fpos += tsz;
			buffer += tsz;
			acc += tsz;

			/* leave now if filled buffer already */
			if (buflen == 0)
				return acc;
		}
	}

	return acc;
}

static ssize_t read_vmcore(struct file *file, char __user *buffer,
			   size_t buflen, loff_t *fpos)
{
	return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
}

/*
 * The vmcore fault handler uses the page cache and fills data using the
 * standard __read_vmcore() function.
 *
 * On s390 the fault handler is used for memory regions that can't be mapped
 * directly with remap_pfn_range().
 */
static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
{
#ifdef CONFIG_S390
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	pgoff_t index = vmf->pgoff;
	struct page *page;
	loff_t offset;
	char *buf;
	int rc;

	page = find_or_create_page(mapping, index, GFP_KERNEL);
	if (!page)
		return VM_FAULT_OOM;
	if (!PageUptodate(page)) {
		offset = (loff_t) index << PAGE_SHIFT;
		buf = __va((page_to_pfn(page) << PAGE_SHIFT));
		rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
		if (rc < 0) {
			unlock_page(page);
			put_page(page);
			return vmf_error(rc);
		}
		SetPageUptodate(page);
	}
	unlock_page(page);
	vmf->page = page;
	return 0;
#else
	return VM_FAULT_SIGBUS;
#endif
}

static const struct vm_operations_struct vmcore_mmap_ops = {
	.fault = mmap_vmcore_fault,
};

/**
 * vmcore_alloc_buf - allocate buffer in vmalloc memory
 * @size: size of buffer
 *
 * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
 * the buffer to user-space by means of remap_vmalloc_range().
 *
 * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
 * disabled and there's no need to allow users to mmap the buffer.
 */
static inline char *vmcore_alloc_buf(size_t size)
{
#ifdef CONFIG_MMU
	return vmalloc_user(size);
#else
	return vzalloc(size);
#endif
}

/*
 * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
 * essential for mmap_vmcore() in order to map physically
 * non-contiguous objects (ELF header, ELF note segment and memory
 * regions in the 1st kernel pointed to by PT_LOAD entries) into
 * virtually contiguous user-space in ELF layout.
 */
#ifdef CONFIG_MMU
/*
 * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range replacing all pages
 * reported as not being ram with the zero page.
 *
 * @vma: vm_area_struct describing requested mapping
 * @from: start remapping from
 * @pfn: page frame number to start remapping to
 * @size: remapping size
 * @prot: protection bits
 *
 * Returns zero on success, -EAGAIN on failure.
 */
static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
				    unsigned long from, unsigned long pfn,
				    unsigned long size, pgprot_t prot)
{
	unsigned long map_size;
	unsigned long pos_start, pos_end, pos;
	unsigned long zeropage_pfn = my_zero_pfn(0);
	size_t len = 0;

	pos_start = pfn;
	pos_end = pfn + (size >> PAGE_SHIFT);

	for (pos = pos_start; pos < pos_end; ++pos) {
		if (!pfn_is_ram(pos)) {
			/*
			 * We hit a page which is not ram. Remap the continuous
			 * region between pos_start and pos-1 and replace
			 * the non-ram page at pos with the zero page.
			 */
			if (pos > pos_start) {
				/* Remap continuous region */
				map_size = (pos - pos_start) << PAGE_SHIFT;
				if (remap_oldmem_pfn_range(vma, from + len,
							   pos_start, map_size,
							   prot))
					goto fail;
				len += map_size;
			}
			/* Remap the zero page */
			if (remap_oldmem_pfn_range(vma, from + len,
						   zeropage_pfn,
						   PAGE_SIZE, prot))
				goto fail;
			len += PAGE_SIZE;
			pos_start = pos + 1;
		}
	}
	if (pos > pos_start) {
		/* Remap the rest */
		map_size = (pos - pos_start) << PAGE_SHIFT;
		if (remap_oldmem_pfn_range(vma, from + len, pos_start,
					   map_size, prot))
			goto fail;
	}
	return 0;
fail:
	do_munmap(vma->vm_mm, from, len, NULL);
	return -EAGAIN;
}

static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
				   unsigned long from, unsigned long pfn,
				   unsigned long size, pgprot_t prot)
{
	/*
	 * Check if oldmem_pfn_is_ram was registered to avoid
	 * looping over all pages without a reason.
	 */
	if (oldmem_pfn_is_ram)
		return remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
	else
		return remap_oldmem_pfn_range(vma, from, pfn, size, prot);
}

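/*
 * mmap_vmcore - map the vmcore file into user-space
 *
 * The file offset selects, in order: the ELF headers in elfcorebuf, the
 * device dumps and merged note segment in elfnotes_buf, and finally the
 * old memory regions listed in vmcore_list. Each piece is remapped with
 * the mechanism appropriate to where it lives (page allocator pages,
 * vmalloc memory or old memory).
 */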
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;
	u64 start, end, len, tsz;
	struct vmcore *m;

	start = (u64)vma->vm_pgoff << PAGE_SHIFT;
	end = start + size;

	if (size > vmcore_size || end > vmcore_size)
		return -EINVAL;

	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
		return -EPERM;

	vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_ops = &vmcore_mmap_ops;

	len = 0;

	if (start < elfcorebuf_sz) {
		u64 pfn;

		tsz = min(elfcorebuf_sz - (size_t)start, size);
		pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
		if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
				    vma->vm_page_prot))
			return -EAGAIN;
		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	if (start < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		/* We add device dumps before other elf notes because the
		 * other elf notes may not fill the elf notes buffer
		 * completely and we will end up with zero-filled data
		 * between the elf notes and the device dumps. Tools will
		 * then try to decode this zero-filled data as valid notes
		 * and we don't want that. Hence, adding device dumps before
		 * the other elf notes ensures that zero-filled data can be
		 * avoided. This also ensures that the device dumps and
		 * other elf notes can be properly mmapped at a page-aligned
		 * address.
		 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
		/* Read device dumps */
		if (start < elfcorebuf_sz + vmcoredd_orig_sz) {
			u64 start_off;

			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
				  (size_t)start, size);
			start_off = start - elfcorebuf_sz;
			if (vmcoredd_mmap_dumps(vma, vma->vm_start + len,
						start_off, tsz))
				goto fail;

			size -= tsz;
			start += tsz;
			len += tsz;

			/* leave now if filled buffer already */
			if (!size)
				return 0;
		}
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

		/* Read remaining elf notes */
		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
		kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz;
		if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
						kaddr, tsz))
			goto fail;

		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (start < m->offset + m->size) {
			u64 paddr = 0;

			tsz = (size_t)min_t(unsigned long long,
					    m->offset + m->size - start, size);
			paddr = m->paddr + start - m->offset;
			if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len,
						    paddr >> PAGE_SHIFT, tsz,
						    vma->vm_page_prot))
				goto fail;
			size -= tsz;
			start += tsz;
			len += tsz;

			if (size == 0)
				return 0;
		}
	}

	return 0;
fail:
	do_munmap(vma->vm_mm, vma->vm_start, len, NULL);
	return -EAGAIN;
}
#else
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
#endif

static const struct file_operations proc_vmcore_operations = {
	.read		= read_vmcore,
	.llseek		= default_llseek,
	.mmap		= mmap_vmcore,
};

static struct vmcore* __init get_new_element(void)
{
	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}

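/*
 * get_vmcore_size - compute the total size of the vmcore file, i.e. the
 * ELF headers, the ELF note segment and all memory chunks on @vc_list.
 */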
static u64 get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
			   struct list_head *vc_list)
{
	u64 size;
	struct vmcore *m;

	size = elfsz + elfnotesegsz;
	list_for_each_entry(m, vc_list, list) {
		size += m->size;
	}
	return size;
}

/**
 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates the p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to the real size of the
 * ELF note segment.
 */
static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
{
	int i, rc=0;
	Elf64_Phdr *phdr_ptr;
	Elf64_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf64_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf64_Nhdr*)((char*)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0) {
			pr_warn("Warning: Zero PT_NOTE entries found\n");
		}
	}

	return 0;
}

/**
 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
 * headers and the sum of the real sizes of their ELF note segment
 * headers and data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that the program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf64_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}

/**
 * copy_notes_elf64 - copy ELF note segments into a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy the ELF note segment of the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that the
 * size of the buffer @notes_buf is equal to or larger than the sum of
 * the real ELF note segment headers and data.
 *
 * It is assumed that the program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc=0;
	Elf64_Phdr *phdr_ptr;

	phdr_ptr = (Elf64_Phdr*)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote=0, rc=0;
	char *tmp;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;

	rc = update_note_header_size_elf64(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = vmcore_alloc_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type    = PT_NOTE;
	phdr.p_flags   = 0;
	note_off = sizeof(Elf64_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf64_Phdr);
	phdr.p_offset  = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr   = phdr.p_paddr = 0;
	phdr.p_filesz  = phdr.p_memsz = phdr_sz;
	phdr.p_align   = 0;

	/* Add merged PT_NOTE program header*/
	tmp = elfptr + sizeof(Elf64_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf64_Ehdr)-sizeof(Elf64_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	/* Store the size of all notes. We need this to update the note
	 * header when device dumps are added.
	 */
	elfnotes_orig_sz = phdr.p_memsz;

	return 0;
}

/**
 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates the p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to the real size of the
 * ELF note segment.
 */
static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
{
	int i, rc=0;
	Elf32_Phdr *phdr_ptr;
	Elf32_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf32_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf32_Nhdr*)((char*)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0) {
			pr_warn("Warning: Zero PT_NOTE entries found\n");
		}
	}

	return 0;
}

/**
 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
 * headers and the sum of the real sizes of their ELF note segment
 * headers and data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that the program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf32_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}

/**
 * copy_notes_elf32 - copy ELF note segments into a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy the ELF note segment of the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that the
 * size of the buffer @notes_buf is equal to or larger than the sum of
 * the real ELF note segment headers and data.
 *
 * It is assumed that the program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc=0;
	Elf32_Phdr *phdr_ptr;

	phdr_ptr = (Elf32_Phdr*)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote=0, rc=0;
	char *tmp;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;

	rc = update_note_header_size_elf32(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = vmcore_alloc_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type    = PT_NOTE;
	phdr.p_flags   = 0;
	note_off = sizeof(Elf32_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf32_Phdr);
	phdr.p_offset  = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr   = phdr.p_paddr = 0;
	phdr.p_filesz  = phdr.p_memsz = phdr_sz;
	phdr.p_align   = 0;

	/* Add merged PT_NOTE program header*/
	tmp = elfptr + sizeof(Elf32_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf32_Ehdr)-sizeof(Elf32_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	/* Store the size of all notes. We need this to update the note
	 * header when device dumps are added.
	 */
	elfnotes_orig_sz = phdr.p_memsz;

	return 0;
}

/* Add memory chunks represented by program headers to vmcore list. Also update
 * the new offset fields of exported program headers. */
static int __init process_ptload_program_headers_elf64(char *elfptr,
						size_t elfsz,
						size_t elfnotes_sz,
						struct list_head *vc_list)
{
	int i;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to vmcore list.*/
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}

static int __init process_ptload_program_headers_elf32(char *elfptr,
						size_t elfsz,
						size_t elfnotes_sz,
						struct list_head *vc_list)
{
	int i;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to vmcore list.*/
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}

/* Sets offset fields of vmcore elements. */
static void set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
				    struct list_head *vc_list)
{
	loff_t vmcore_off;
	struct vmcore *m;

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;
		vmcore_off += m->size;
	}
}

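/* Free the ELF header buffer and the merged ELF note buffer. */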
static void free_elfcorebuf(void)
{
	free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
	elfcorebuf = NULL;
	vfree(elfnotes_buf);
	elfnotes_buf = NULL;
}

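/*
 * Read and sanity-check the ELF64 header of the old kernel's core image,
 * copy all ELF headers into elfcorebuf, merge the PT_NOTE headers into a
 * single one and build vmcore_list from the PT_LOAD entries.
 */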
static int __init parse_crash_elf64_headers(void)
{
	int rc=0;
	Elf64_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!vmcore_elf64_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
				ehdr.e_phnum * sizeof(Elf64_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

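/* ELF32 counterpart of parse_crash_elf64_headers(). */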
static int __init parse_crash_elf32_headers(void)
{
	int rc=0;
	Elf32_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!vmcore_elf32_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

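/*
 * Dispatch on the ELF class of the core image header at elfcorehdr_addr,
 * parse the headers and compute the total vmcore size.
 */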
static int __init parse_crash_elf_headers(void)
{
	unsigned char e_ident[EI_NIDENT];
	u64 addr;
	int rc=0;

	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
	if (rc < 0)
		return rc;
	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
		pr_warn("Warning: Core image elf header not found\n");
		return -EINVAL;
	}

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		rc = parse_crash_elf64_headers();
		if (rc)
			return rc;
	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
		rc = parse_crash_elf32_headers();
		if (rc)
			return rc;
	} else {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Determine vmcore size. */
	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
				      &vmcore_list);

	return 0;
}

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/**
 * vmcoredd_write_header - Write vmcore device dump header at the
 * beginning of the dump's buffer.
 * @buf: Output buffer where the note is written
 * @data: Dump info
 * @size: Size of the dump
 *
 * Fills beginning of the dump's buffer with vmcore device dump header.
 */
static void vmcoredd_write_header(void *buf, struct vmcoredd_data *data,
				  u32 size)
{
	struct vmcoredd_header *vdd_hdr = (struct vmcoredd_header *)buf;

	vdd_hdr->n_namesz = sizeof(vdd_hdr->name);
	vdd_hdr->n_descsz = size + sizeof(vdd_hdr->dump_name);
	vdd_hdr->n_type = NT_VMCOREDD;

	strncpy((char *)vdd_hdr->name, VMCOREDD_NOTE_NAME,
		sizeof(vdd_hdr->name));
	memcpy(vdd_hdr->dump_name, data->dump_name, sizeof(vdd_hdr->dump_name));
}

/**
 * vmcoredd_update_program_headers - Update all Elf program headers
 * @elfptr: Pointer to elf header
 * @elfnotesz: Size of elf notes aligned to page size
 * @vmcoreddsz: Size of device dumps to be added to elf note header
 *
 * Determine type of Elf header (Elf64 or Elf32) and update the elf note size.
 * Also update the offsets of all the program headers after the elf note header.
 */
static void vmcoredd_update_program_headers(char *elfptr, size_t elfnotesz,
					    size_t vmcoreddsz)
{
	unsigned char *e_ident = (unsigned char *)elfptr;
	u64 start, end, size;
	loff_t vmcore_off;
	u32 i;

	vmcore_off = elfcorebuf_sz + elfnotesz;

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		Elf64_Ehdr *ehdr = (Elf64_Ehdr *)elfptr;
		Elf64_Phdr *phdr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));

		/* Update all program headers */
		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
			if (phdr->p_type == PT_NOTE) {
				/* Update note size */
				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
				phdr->p_filesz = phdr->p_memsz;
				continue;
			}

			start = rounddown(phdr->p_offset, PAGE_SIZE);
			end = roundup(phdr->p_offset + phdr->p_memsz,
				      PAGE_SIZE);
			size = end - start;
			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
			vmcore_off += size;
		}
	} else {
		Elf32_Ehdr *ehdr = (Elf32_Ehdr *)elfptr;
		Elf32_Phdr *phdr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));

		/* Update all program headers */
		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
			if (phdr->p_type == PT_NOTE) {
				/* Update note size */
				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
				phdr->p_filesz = phdr->p_memsz;
				continue;
			}

			start = rounddown(phdr->p_offset, PAGE_SIZE);
			end = roundup(phdr->p_offset + phdr->p_memsz,
				      PAGE_SIZE);
			size = end - start;
			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
			vmcore_off += size;
		}
	}
}

/**
 * vmcoredd_update_size - Update the total size of the device dumps and update
 * Elf header
 * @dump_size: Size of the current device dump to be added to total size
 *
 * Update the total size of all the device dumps and update the Elf program
 * headers. Calculate the new offsets for the vmcore list and update the
 * total vmcore size.
 */
static void vmcoredd_update_size(size_t dump_size)
{
	vmcoredd_orig_sz += dump_size;
	elfnotes_sz = roundup(elfnotes_orig_sz, PAGE_SIZE) + vmcoredd_orig_sz;
	vmcoredd_update_program_headers(elfcorebuf, elfnotes_sz,
					vmcoredd_orig_sz);

	/* Update vmcore list offsets */
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);

	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
				      &vmcore_list);
	proc_vmcore->size = vmcore_size;
}

/**
 * vmcore_add_device_dump - Add a buffer containing device dump to vmcore
 * @data: dump info.
 *
 * Allocate a buffer and invoke the calling driver's dump collect routine.
 * Write Elf note at the beginning of the buffer to indicate vmcore device
 * dump and add the dump to global list.
 */
int vmcore_add_device_dump(struct vmcoredd_data *data)
{
	struct vmcoredd_node *dump;
	void *buf = NULL;
	size_t data_size;
	int ret;

	if (!data || !strlen(data->dump_name) ||
	    !data->vmcoredd_callback || !data->size)
		return -EINVAL;

	dump = vzalloc(sizeof(*dump));
	if (!dump) {
		ret = -ENOMEM;
		goto out_err;
	}

	/* Keep size of the buffer page aligned so that it can be mmaped */
	data_size = roundup(sizeof(struct vmcoredd_header) + data->size,
			    PAGE_SIZE);

	/* Allocate buffer for drivers to write their dumps */
	buf = vmcore_alloc_buf(data_size);
	if (!buf) {
		ret = -ENOMEM;
		goto out_err;
	}

	vmcoredd_write_header(buf, data, data_size -
			      sizeof(struct vmcoredd_header));

	/* Invoke the driver's dump collection routine */
	ret = data->vmcoredd_callback(data, buf +
				      sizeof(struct vmcoredd_header));
	if (ret)
		goto out_err;

	dump->buf = buf;
	dump->size = data_size;

	/* Add the dump to driver sysfs list */
	mutex_lock(&vmcoredd_mutex);
	list_add_tail(&dump->list, &vmcoredd_list);
	mutex_unlock(&vmcoredd_mutex);

	vmcoredd_update_size(data_size);
	return 0;

out_err:
	if (buf)
		vfree(buf);

	if (dump)
		vfree(dump);

	return ret;
}
EXPORT_SYMBOL(vmcore_add_device_dump);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Free all dumps in vmcore device dump list */
static void vmcore_free_device_dumps(void)
{
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
	mutex_lock(&vmcoredd_mutex);
	while (!list_empty(&vmcoredd_list)) {
		struct vmcoredd_node *dump;

		dump = list_first_entry(&vmcoredd_list, struct vmcoredd_node,
					list);
		list_del(&dump->list);
		vfree(dump->buf);
		vfree(dump);
	}
	mutex_unlock(&vmcoredd_mutex);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
}

/* Init function for vmcore module. */
static int __init vmcore_init(void)
{
	int rc = 0;

	/* Allow architectures to allocate ELF header in 2nd kernel */
	rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
	if (rc)
		return rc;
	/*
	 * If elfcorehdr= has been passed in cmdline or created in 2nd kernel,
	 * then capture the dump.
	 */
	if (!(is_vmcore_usable()))
		return rc;
	rc = parse_crash_elf_headers();
	if (rc) {
		pr_warn("Kdump: vmcore not initialized\n");
		return rc;
	}
	elfcorehdr_free(elfcorehdr_addr);
	elfcorehdr_addr = ELFCORE_ADDR_ERR;

	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
	if (proc_vmcore)
		proc_vmcore->size = vmcore_size;
	return 0;
}
fs_initcall(vmcore_init);

/* Cleanup function for vmcore module. */
void vmcore_cleanup(void)
{
	if (proc_vmcore) {
		proc_remove(proc_vmcore);
		proc_vmcore = NULL;
	}

	/* clear the vmcore list. */
	while (!list_empty(&vmcore_list)) {
		struct vmcore *m;

		m = list_first_entry(&vmcore_list, struct vmcore, list);
		list_del(&m->list);
		kfree(m);
	}
	free_elfcorebuf();

	/* clear vmcore device dump list */
	vmcore_free_device_dumps();
}