/*
 * Persistent Storage - platform driver interface parts.
 *
 * Copyright (C) 2007-2008 Google, Inc.
 * Copyright (C) 2010 Intel Corporation <tony.luck@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#define pr_fmt(fmt) "pstore: " fmt

#include <linux/atomic.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kmsg_dump.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/pstore.h>
#if IS_ENABLED(CONFIG_PSTORE_LZO_COMPRESS)
#include <linux/lzo.h>
#endif
#if IS_ENABLED(CONFIG_PSTORE_LZ4_COMPRESS) || IS_ENABLED(CONFIG_PSTORE_LZ4HC_COMPRESS)
#include <linux/lz4.h>
#endif
#if IS_ENABLED(CONFIG_PSTORE_ZSTD_COMPRESS)
#include <linux/zstd.h>
#endif
#include <linux/crypto.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

#include "internal.h"

/*
 * We defer making "oops" entries appear in pstore - see
 * whether the system is actually still running well enough
 * to let someone see the entry
 */
static int pstore_update_ms = -1;
module_param_named(update_ms, pstore_update_ms, int, 0600);
MODULE_PARM_DESC(update_ms, "milliseconds before pstore updates its content "
		 "(default is -1, which means runtime updates are disabled; "
		 "enabling this option is not safe, it may lead to further "
		 "corruption on Oopses)");

static int pstore_new_entry;

static void pstore_timefunc(struct timer_list *);
static DEFINE_TIMER(pstore_timer, pstore_timefunc);

static void pstore_dowork(struct work_struct *);
static DECLARE_WORK(pstore_work, pstore_dowork);

/*
 * pstore_lock just protects "psinfo" during
 * calls to pstore_register()
 */
static DEFINE_SPINLOCK(pstore_lock);
struct pstore_info *psinfo;

static char *backend;
static char *compress =
#ifdef CONFIG_PSTORE_COMPRESS_DEFAULT
		CONFIG_PSTORE_COMPRESS_DEFAULT;
#else
		NULL;
#endif

/* Compression parameters */
static struct crypto_comp *tfm;

struct pstore_zbackend {
	int (*zbufsize)(size_t size);
	const char *name;
};

static char *big_oops_buf;
static size_t big_oops_buf_sz;

/* How much of the console log to snapshot */
unsigned long kmsg_bytes = PSTORE_DEFAULT_KMSG_BYTES;

void pstore_set_kmsg_bytes(int bytes)
{
	kmsg_bytes = bytes;
}

/* Tag each group of saved records with a sequence number */
static int	oopscount;

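/*
 * Translate a kmsg_dump reason code into the human-readable label used
 * in each dump record header (e.g. "Oops#1 Part1").
 */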
static const char *get_reason_str(enum kmsg_dump_reason reason)
{
	switch (reason) {
	case KMSG_DUMP_PANIC:
		return "Panic";
	case KMSG_DUMP_OOPS:
		return "Oops";
	case KMSG_DUMP_EMERG:
		return "Emergency";
	case KMSG_DUMP_RESTART:
		return "Restart";
	case KMSG_DUMP_HALT:
		return "Halt";
	case KMSG_DUMP_POWEROFF:
		return "Poweroff";
	default:
		return "Unknown";
	}
}

bool pstore_cannot_block_path(enum kmsg_dump_reason reason)
{
	/*
	 * In case of NMI path, pstore shouldn't be blocked
	 * regardless of reason.
	 */
	if (in_nmi())
		return true;

	switch (reason) {
	/* In panic case, other cpus are stopped by smp_send_stop(). */
	case KMSG_DUMP_PANIC:
	/* Emergency restart shouldn't be blocked by spin lock. */
	case KMSG_DUMP_EMERG:
		return true;
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(pstore_cannot_block_path);

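/*
 * Per-algorithm sizing of the uncompressed scratch buffer: given the
 * backend's record size, return how large big_oops_buf should be.
 */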
#if IS_ENABLED(CONFIG_PSTORE_DEFLATE_COMPRESS)
static int zbufsize_deflate(size_t size)
{
	size_t cmpr;

	switch (size) {
	/* buffer range for efivars */
	case 1000 ... 2000:
		cmpr = 56;
		break;
	case 2001 ... 3000:
		cmpr = 54;
		break;
	case 3001 ... 3999:
		cmpr = 52;
		break;
	/* buffer range for nvram, erst */
	case 4000 ... 10000:
		cmpr = 45;
		break;
	default:
		cmpr = 60;
		break;
	}

	return (size * 100) / cmpr;
}
#endif

#if IS_ENABLED(CONFIG_PSTORE_LZO_COMPRESS)
static int zbufsize_lzo(size_t size)
{
	return lzo1x_worst_compress(size);
}
#endif

#if IS_ENABLED(CONFIG_PSTORE_LZ4_COMPRESS) || IS_ENABLED(CONFIG_PSTORE_LZ4HC_COMPRESS)
static int zbufsize_lz4(size_t size)
{
	return LZ4_compressBound(size);
}
#endif

#if IS_ENABLED(CONFIG_PSTORE_842_COMPRESS)
static int zbufsize_842(size_t size)
{
	return size;
}
#endif

#if IS_ENABLED(CONFIG_PSTORE_ZSTD_COMPRESS)
static int zbufsize_zstd(size_t size)
{
	return ZSTD_compressBound(size);
}
#endif

static const struct pstore_zbackend *zbackend __ro_after_init;

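/* Compression backends compiled in; pstore_choose_compression() matches by name. */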
static const struct pstore_zbackend zbackends[] = {
#if IS_ENABLED(CONFIG_PSTORE_DEFLATE_COMPRESS)
	{
		.zbufsize	= zbufsize_deflate,
		.name		= "deflate",
	},
#endif
#if IS_ENABLED(CONFIG_PSTORE_LZO_COMPRESS)
	{
		.zbufsize	= zbufsize_lzo,
		.name		= "lzo",
	},
#endif
#if IS_ENABLED(CONFIG_PSTORE_LZ4_COMPRESS)
	{
		.zbufsize	= zbufsize_lz4,
		.name		= "lz4",
	},
#endif
#if IS_ENABLED(CONFIG_PSTORE_LZ4HC_COMPRESS)
	{
		.zbufsize	= zbufsize_lz4,
		.name		= "lz4hc",
	},
#endif
#if IS_ENABLED(CONFIG_PSTORE_842_COMPRESS)
	{
		.zbufsize	= zbufsize_842,
		.name		= "842",
	},
#endif
#if IS_ENABLED(CONFIG_PSTORE_ZSTD_COMPRESS)
	{
		.zbufsize	= zbufsize_zstd,
		.name		= "zstd",
	},
#endif
	{ }
};

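/*
 * Compress "inlen" bytes from "in" into "out" (at most "outlen" bytes)
 * using the crypto_comp transform set up at init time. Returns the
 * compressed length, or the crypto layer's error code on failure.
 */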
static int pstore_compress(const void *in, void *out,
			   unsigned int inlen, unsigned int outlen)
{
	int ret;

	ret = crypto_comp_compress(tfm, in, inlen, out, &outlen);
	if (ret) {
		pr_err("crypto_comp_compress failed, ret = %d!\n", ret);
		return ret;
	}

	return outlen;
}

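/*
 * Prepare the compression transform and the big_oops_buf scratch buffer.
 * Harmless to call repeatedly: it returns early until both a compression
 * algorithm and a pstore backend have been selected.
 */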
static void allocate_buf_for_compression(void)
{
	struct crypto_comp *ctx;
	int size;
	char *buf;

	/* Skip if not built-in or compression backend not selected yet. */
	if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS) || !zbackend)
		return;

	/* Skip if no pstore backend yet or compression init already done. */
	if (!psinfo || tfm)
		return;

	if (!crypto_has_comp(zbackend->name, 0, 0)) {
		pr_err("Unknown compression: %s\n", zbackend->name);
		return;
	}

	size = zbackend->zbufsize(psinfo->bufsize);
	if (size <= 0) {
		pr_err("Invalid compression size for %s: %d\n",
		       zbackend->name, size);
		return;
	}

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf) {
		pr_err("Failed %d byte compression buffer allocation for: %s\n",
		       size, zbackend->name);
		return;
	}

	ctx = crypto_alloc_comp(zbackend->name, 0, 0);
	if (IS_ERR_OR_NULL(ctx)) {
		kfree(buf);
		pr_err("crypto_alloc_comp('%s') failed: %ld\n", zbackend->name,
		       PTR_ERR(ctx));
		return;
	}

	/* A non-NULL big_oops_buf indicates compression is available. */
	tfm = ctx;
	big_oops_buf_sz = size;
	big_oops_buf = buf;

	pr_info("Using crash dump compression: %s\n", zbackend->name);
}

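/* Release the compression transform and the scratch buffer. */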
static void free_buf_for_compression(void)
{
	if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && tfm)
		crypto_free_comp(tfm);
	kfree(big_oops_buf);
	big_oops_buf = NULL;
	big_oops_buf_sz = 0;
}

/*
 * Called when compression fails. The printk buffer has already been
 * fetched once for compression; fetching it again would advance the
 * dump iterator and return older messages instead of the most recent
 * ones. So copy the most recent messages from big_oops_buf into
 * psinfo->buf here.
 */
static size_t copy_kmsg_to_buffer(int hsize, size_t len)
{
	size_t total_len;
	size_t diff;

	total_len = hsize + len;

	if (total_len > psinfo->bufsize) {
		diff = total_len - psinfo->bufsize + hsize;
		memcpy(psinfo->buf, big_oops_buf, hsize);
		memcpy(psinfo->buf + hsize, big_oops_buf + diff,
		       psinfo->bufsize - hsize);
		total_len = psinfo->bufsize;
	} else
		memcpy(psinfo->buf, big_oops_buf, total_len);

	return total_len;
}

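/* Initialize a record: zero it, bind it to a backend, and timestamp it. */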
void pstore_record_init(struct pstore_record *record,
			struct pstore_info *psinfo)
{
	memset(record, 0, sizeof(*record));

	record->psi = psinfo;

	/* Report zeroed timestamp if called before timekeeping has resumed. */
	record->time = ns_to_timespec64(ktime_get_real_fast_ns());
}

/*
 * callback from kmsg_dump. Save as much as we can (up to kmsg_bytes) from the
 * end of the buffer.
 */
static void pstore_dump(struct kmsg_dumper *dumper,
			enum kmsg_dump_reason reason)
{
	unsigned long	total = 0;
	const char	*why;
	unsigned int	part = 1;
	unsigned long	flags = 0;
	int		is_locked;
	int		ret;

	why = get_reason_str(reason);

	if (pstore_cannot_block_path(reason)) {
		is_locked = spin_trylock_irqsave(&psinfo->buf_lock, flags);
		if (!is_locked) {
			pr_err("pstore dump routine blocked in %s path, may corrupt error record\n",
			       in_nmi() ? "NMI" : why);
			return;
		}
	} else {
		spin_lock_irqsave(&psinfo->buf_lock, flags);
		is_locked = 1;
	}
	oopscount++;
	while (total < kmsg_bytes) {
		char *dst;
		size_t dst_size;
		int header_size;
		int zipped_len = -1;
		size_t dump_size;
		struct pstore_record record;

		pstore_record_init(&record, psinfo);
		record.type = PSTORE_TYPE_DMESG;
		record.count = oopscount;
		record.reason = reason;
		record.part = part;
		record.buf = psinfo->buf;

		if (big_oops_buf && is_locked) {
			dst = big_oops_buf;
			dst_size = big_oops_buf_sz;
		} else {
			dst = psinfo->buf;
			dst_size = psinfo->bufsize;
		}

		/* Write dump header. */
		header_size = snprintf(dst, dst_size, "%s#%d Part%u\n", why,
				       oopscount, part);
		dst_size -= header_size;

		/* Write dump contents. */
		if (!kmsg_dump_get_buffer(dumper, true, dst + header_size,
					  dst_size, &dump_size))
			break;

		if (big_oops_buf && is_locked) {
			zipped_len = pstore_compress(dst, psinfo->buf,
						header_size + dump_size,
						psinfo->bufsize);

			if (zipped_len > 0) {
				record.compressed = true;
				record.size = zipped_len;
			} else {
				record.size = copy_kmsg_to_buffer(header_size,
								  dump_size);
			}
		} else {
			record.size = header_size + dump_size;
		}

		ret = psinfo->write(&record);
		if (ret == 0 && reason == KMSG_DUMP_OOPS && pstore_is_mounted())
			pstore_new_entry = 1;

		total += record.size;
		part++;
	}
	if (is_locked)
		spin_unlock_irqrestore(&psinfo->buf_lock, flags);
}

static struct kmsg_dumper pstore_dumper = {
	.dump = pstore_dump,
};

/*
 * Register with kmsg_dump to save last part of console log on panic.
 */
static void pstore_register_kmsg(void)
{
	kmsg_dump_register(&pstore_dumper);
}

static void pstore_unregister_kmsg(void)
{
	kmsg_dump_unregister(&pstore_dumper);
}

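/*
 * Console frontend: when enabled, every console write is also stored in
 * the backend as a PSTORE_TYPE_CONSOLE record.
 */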
#ifdef CONFIG_PSTORE_CONSOLE
static void pstore_console_write(struct console *con, const char *s, unsigned c)
{
	struct pstore_record record;

	pstore_record_init(&record, psinfo);
	record.type = PSTORE_TYPE_CONSOLE;

	record.buf = (char *)s;
	record.size = c;
	psinfo->write(&record);
}

static struct console pstore_console = {
	.name		= "pstore",
	.write		= pstore_console_write,
	.flags		= CON_PRINTBUFFER | CON_ENABLED | CON_ANYTIME,
	.index		= -1,
};

static void pstore_register_console(void)
{
	register_console(&pstore_console);
}

static void pstore_unregister_console(void)
{
	unregister_console(&pstore_console);
}
#else
static void pstore_register_console(void) {}
static void pstore_unregister_console(void) {}
#endif

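/*
 * Fallback write_user() for backends that only implement write():
 * copy the userspace buffer into a kernel allocation, hand it to
 * write(), then release it.
 */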
static int pstore_write_user_compat(struct pstore_record *record,
				    const char __user *buf)
{
	int ret = 0;

	if (record->buf)
		return -EINVAL;

	record->buf = memdup_user(buf, record->size);
	if (IS_ERR(record->buf)) {
		ret = PTR_ERR(record->buf);
		goto out;
	}

	ret = record->psi->write(record);

	kfree(record->buf);
out:
	record->buf = NULL;

	return unlikely(ret < 0) ? ret : record->size;
}

/*
 * Platform-specific persistent storage drivers register with
 * us here. If pstore is already mounted, call the platform
 * read function right away to populate the file system. If not,
 * then the pstore mount code will call us later to fill out
 * the file system.
 */
int pstore_register(struct pstore_info *psi)
{
	struct module *owner = psi->owner;

	if (backend && strcmp(backend, psi->name)) {
		pr_warn("ignoring unexpected backend '%s'\n", psi->name);
		return -EPERM;
	}

	/* Sanity check flags. */
	if (!psi->flags) {
		pr_warn("backend '%s' must support at least one frontend\n",
			psi->name);
		return -EINVAL;
	}

	/* Check for required functions. */
	if (!psi->read || !psi->write) {
		pr_warn("backend '%s' must implement read() and write()\n",
			psi->name);
		return -EINVAL;
	}

	spin_lock(&pstore_lock);
	if (psinfo) {
		pr_warn("backend '%s' already loaded: ignoring '%s'\n",
			psinfo->name, psi->name);
		spin_unlock(&pstore_lock);
		return -EBUSY;
	}

	if (!psi->write_user)
		psi->write_user = pstore_write_user_compat;
	psinfo = psi;
	mutex_init(&psinfo->read_mutex);
	spin_unlock(&pstore_lock);

	if (owner && !try_module_get(owner)) {
		psinfo = NULL;
		return -EINVAL;
	}

	allocate_buf_for_compression();

	if (pstore_is_mounted())
		pstore_get_records(0);

	if (psi->flags & PSTORE_FLAGS_DMESG)
		pstore_register_kmsg();
	if (psi->flags & PSTORE_FLAGS_CONSOLE)
		pstore_register_console();
	if (psi->flags & PSTORE_FLAGS_FTRACE)
		pstore_register_ftrace();
	if (psi->flags & PSTORE_FLAGS_PMSG)
		pstore_register_pmsg();

	/* Start watching for new records, if desired. */
	if (pstore_update_ms >= 0) {
		pstore_timer.expires = jiffies +
			msecs_to_jiffies(pstore_update_ms);
		add_timer(&pstore_timer);
	}

	/*
	 * Update the module parameter backend, so it is visible
	 * through /sys/module/pstore/parameters/backend
	 */
	backend = psi->name;

	pr_info("Registered %s as persistent store backend\n", psi->name);

	module_put(owner);

	return 0;
}
EXPORT_SYMBOL_GPL(pstore_register);

void pstore_unregister(struct pstore_info *psi)
{
	/* Stop timer and make sure all work has finished. */
	pstore_update_ms = -1;
	del_timer_sync(&pstore_timer);
	flush_work(&pstore_work);

	if (psi->flags & PSTORE_FLAGS_PMSG)
		pstore_unregister_pmsg();
	if (psi->flags & PSTORE_FLAGS_FTRACE)
		pstore_unregister_ftrace();
	if (psi->flags & PSTORE_FLAGS_CONSOLE)
		pstore_unregister_console();
	if (psi->flags & PSTORE_FLAGS_DMESG)
		pstore_unregister_kmsg();

	free_buf_for_compression();

	psinfo = NULL;
	backend = NULL;
}
EXPORT_SYMBOL_GPL(pstore_unregister);

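/*
 * If a record read back from the backend is compressed, replace its
 * buffer with the decompressed contents (plus any trailing ECC notice).
 */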
static void decompress_record(struct pstore_record *record)
{
	int ret;
	int unzipped_len;
	char *unzipped, *workspace;

	if (!record->compressed)
		return;

	/* Only PSTORE_TYPE_DMESG records support compression. */
	if (record->type != PSTORE_TYPE_DMESG) {
		pr_warn("ignored compressed record type %d\n", record->type);
		return;
	}

	/* Missing compression buffer means compression was not initialized. */
	if (!big_oops_buf) {
		pr_warn("no decompression method initialized!\n");
		return;
	}

	/* Allocate enough space to hold max decompression and ECC. */
	unzipped_len = big_oops_buf_sz;
	workspace = kmalloc(unzipped_len + record->ecc_notice_size,
			    GFP_KERNEL);
	if (!workspace)
		return;

	/* After decompression "unzipped_len" is almost certainly smaller. */
	ret = crypto_comp_decompress(tfm, record->buf, record->size,
				     workspace, &unzipped_len);
	if (ret) {
		pr_err("crypto_comp_decompress failed, ret = %d!\n", ret);
		kfree(workspace);
		return;
	}

	/* Append ECC notice to decompressed buffer. */
	memcpy(workspace + unzipped_len, record->buf + record->size,
	       record->ecc_notice_size);

	/* Copy decompressed contents into a minimum-sized allocation. */
	unzipped = kmemdup(workspace, unzipped_len + record->ecc_notice_size,
			   GFP_KERNEL);
	kfree(workspace);
	if (!unzipped)
		return;

	/* Swap out compressed contents with decompressed contents. */
	kfree(record->buf);
	record->buf = unzipped;
	record->size = unzipped_len;
	record->compressed = false;
}

/*
 * Read all the records from one persistent store backend. Create
 * files in our filesystem. Don't warn about -EEXIST errors
 * when we are re-scanning the backing store looking to add new
 * error records.
 */
void pstore_get_backend_records(struct pstore_info *psi,
				struct dentry *root, int quiet)
{
	int failed = 0;
	unsigned int stop_loop = 65536;

	if (!psi || !root)
		return;

	mutex_lock(&psi->read_mutex);
	if (psi->open && psi->open(psi))
		goto out;

	/*
	 * Backend callback read() allocates record.buf. decompress_record()
	 * may reallocate record.buf. On success, pstore_mkfile() will keep
	 * the record.buf, so free it only on failure.
	 */
	for (; stop_loop; stop_loop--) {
		struct pstore_record *record;
		int rc;

		record = kzalloc(sizeof(*record), GFP_KERNEL);
		if (!record) {
			pr_err("out of memory creating record\n");
			break;
		}
		pstore_record_init(record, psi);

		record->size = psi->read(record);

		/* No more records left in backend? */
		if (record->size <= 0) {
			kfree(record);
			break;
		}

		decompress_record(record);
		rc = pstore_mkfile(root, record);
		if (rc) {
			/* pstore_mkfile() did not take record, so free it. */
			kfree(record->buf);
			kfree(record);
			if (rc != -EEXIST || !quiet)
				failed++;
		}
	}
	if (psi->close)
		psi->close(psi);
out:
	mutex_unlock(&psi->read_mutex);

	if (failed)
		pr_warn("failed to create %d record(s) from '%s'\n",
			failed, psi->name);
	if (!stop_loop)
		pr_err("looping? Too many records seen from '%s'\n",
			psi->name);
}

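/* Deferred work: rescan the backend for records that appeared at runtime. */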
static void pstore_dowork(struct work_struct *work)
{
	pstore_get_records(1);
}

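/*
 * Periodic timer: if a backend signalled a new record, schedule the
 * work item, then re-arm while runtime updates remain enabled.
 */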
static void pstore_timefunc(struct timer_list *unused)
{
	if (pstore_new_entry) {
		pstore_new_entry = 0;
		schedule_work(&pstore_work);
	}

	if (pstore_update_ms >= 0)
		mod_timer(&pstore_timer,
			  jiffies + msecs_to_jiffies(pstore_update_ms));
}

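/* Resolve the "compress" module parameter against the compiled-in backends. */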
void __init pstore_choose_compression(void)
{
	const struct pstore_zbackend *step;

	if (!compress)
		return;

	for (step = zbackends; step->name; step++) {
		if (!strcmp(compress, step->name)) {
			zbackend = step;
			return;
		}
	}
}

static int __init pstore_init(void)
{
	int ret;

	pstore_choose_compression();

	/*
	 * Check if any pstore backends registered earlier but did not
	 * initialize compression because crypto was not ready. If so,
	 * initialize compression now.
	 */
	allocate_buf_for_compression();

	ret = pstore_init_fs();
	if (ret)
		return ret;

	return 0;
}
late_initcall(pstore_init);

static void __exit pstore_exit(void)
{
	pstore_exit_fs();
}
module_exit(pstore_exit)

module_param(compress, charp, 0444);
MODULE_PARM_DESC(compress, "Pstore compression to use");

module_param(backend, charp, 0444);
MODULE_PARM_DESC(backend, "Pstore backend to use");
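/*
 * Both "compress" and "backend" may also be given on the kernel command
 * line when pstore is built in, e.g. (illustrative values only):
 *	pstore.backend=efi pstore.compress=deflate
 */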

MODULE_AUTHOR("Tony Luck <tony.luck@intel.com>");
MODULE_LICENSE("GPL");