blob: 453242d6cf562b3ca9e3dde9eb44f35545cf6bdf [file] [log] [blame]
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Core registration and callback routines for MTD
 * drivers and users.
 *
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
 * Copyright © 2006 Red Hat UK Limited
 */
9
Linus Torvalds1da177e2005-04-16 15:20:36 -070010#include <linux/module.h>
11#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070012#include <linux/ptrace.h>
Alexey Dobriyan447d9bd2011-05-13 23:34:19 +030013#include <linux/seq_file.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070014#include <linux/string.h>
15#include <linux/timer.h>
16#include <linux/major.h>
17#include <linux/fs.h>
Artem Bityutskiy77993082006-10-11 14:52:44 +030018#include <linux/err.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070019#include <linux/ioctl.h>
20#include <linux/init.h>
Brian Norris215a02f2015-11-11 16:26:04 -080021#include <linux/of.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070022#include <linux/proc_fs.h>
Ben Hutchingsb520e412010-01-29 20:59:42 +000023#include <linux/idr.h>
Jörn Engela33eb6b2010-04-27 09:40:52 +020024#include <linux/backing-dev.h>
Tejun Heo05d71b462010-03-30 02:52:39 +090025#include <linux/gfp.h>
David Howells0d01ff22013-04-11 23:51:01 +010026#include <linux/slab.h>
Brian Norris3efe41b2014-11-26 01:01:08 -080027#include <linux/reboot.h>
Ezequiel Garciafea728c2016-04-12 17:46:42 -030028#include <linux/leds.h>
Mario Rugieroe8e3edb2017-05-29 08:38:41 -030029#include <linux/debugfs.h>
Alban Bedelc4dfa252018-11-13 15:01:10 +010030#include <linux/nvmem-provider.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070031
32#include <linux/mtd/mtd.h>
Jamie Ilesf5671ab2011-05-23 17:15:46 +010033#include <linux/mtd/partitions.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034
Ben Dooks356d70f2007-05-28 20:28:34 +010035#include "mtdcore.h"
Artem Bityutskiy660685d2013-03-14 13:27:40 +020036
/* NOTE(review): shared backing_dev_info for MTD devices; allocated/used
 * outside this chunk — confirm initialization site before relying on it. */
struct backing_dev_info *mtd_bdi;
Ben Dooks356d70f2007-05-28 20:28:34 +010038
#ifdef CONFIG_PM_SLEEP

/* Class-level suspend: forward to the MTD device bound to @dev, if any. */
static int mtd_cls_suspend(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	if (!mtd)
		return 0;

	return mtd_suspend(mtd);
}

/* Class-level resume: forward to the MTD device; always reports success. */
static int mtd_cls_resume(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	if (mtd)
		mtd_resume(mtd);

	return 0;
}

static SIMPLE_DEV_PM_OPS(mtd_cls_pm_ops, mtd_cls_suspend, mtd_cls_resume);
#define MTD_CLS_PM_OPS (&mtd_cls_pm_ops)
#else
#define MTD_CLS_PM_OPS NULL
#endif
David Brownell1f24b5a2009-03-26 00:42:41 -070062
/* The "mtd" device class (/sys/class/mtd); wires in the class PM ops above. */
static struct class mtd_class = {
	.name = "mtd",
	.owner = THIS_MODULE,
	.pm = MTD_CLS_PM_OPS,
};
David Brownell1f24b5a2009-03-26 00:42:41 -070068
/* Maps device index -> mtd_info; all access is under mtd_table_mutex. */
static DEFINE_IDR(mtd_idr);

/* These are exported solely for the purpose of mtd_blkdevs.c. You
   should not use them for _anything_ else */
DEFINE_MUTEX(mtd_table_mutex);
EXPORT_SYMBOL_GPL(mtd_table_mutex);
Ben Hutchingsb520e412010-01-29 20:59:42 +000075
76struct mtd_info *__mtd_next_device(int i)
77{
78 return idr_get_next(&mtd_idr, &i);
79}
80EXPORT_SYMBOL_GPL(__mtd_next_device);
Linus Torvalds1da177e2005-04-16 15:20:36 -070081
82static LIST_HEAD(mtd_notifiers);
83
David Brownell1f24b5a2009-03-26 00:42:41 -070084
David Brownell1f24b5a2009-03-26 00:42:41 -070085#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)
David Brownell1f24b5a2009-03-26 00:42:41 -070086
87/* REVISIT once MTD uses the driver model better, whoever allocates
88 * the mtd_info will probably want to use the release() hook...
89 */
90static void mtd_release(struct device *dev)
91{
Brian Norris5e472122014-07-21 19:06:47 -070092 struct mtd_info *mtd = dev_get_drvdata(dev);
Artem Bityutskiyd5de20a2011-12-29 18:00:29 +020093 dev_t index = MTD_DEVT(mtd->index);
David Brownell1f24b5a2009-03-26 00:42:41 -070094
Brian Norris5e472122014-07-21 19:06:47 -070095 /* remove /dev/mtdXro node */
96 device_destroy(&mtd_class, index + 1);
David Woodhouse15bce402009-04-05 07:40:58 -070097}
98
David Brownell1f24b5a2009-03-26 00:42:41 -070099static ssize_t mtd_type_show(struct device *dev,
100 struct device_attribute *attr, char *buf)
101{
Artem Bityutskiyd5de20a2011-12-29 18:00:29 +0200102 struct mtd_info *mtd = dev_get_drvdata(dev);
David Brownell1f24b5a2009-03-26 00:42:41 -0700103 char *type;
104
105 switch (mtd->type) {
106 case MTD_ABSENT:
107 type = "absent";
108 break;
109 case MTD_RAM:
110 type = "ram";
111 break;
112 case MTD_ROM:
113 type = "rom";
114 break;
115 case MTD_NORFLASH:
116 type = "nor";
117 break;
118 case MTD_NANDFLASH:
119 type = "nand";
120 break;
121 case MTD_DATAFLASH:
122 type = "dataflash";
123 break;
124 case MTD_UBIVOLUME:
125 type = "ubi";
126 break;
Huang Shijief4837242013-09-25 14:58:19 +0800127 case MTD_MLCNANDFLASH:
128 type = "mlc-nand";
129 break;
David Brownell1f24b5a2009-03-26 00:42:41 -0700130 default:
131 type = "unknown";
132 }
133
134 return snprintf(buf, PAGE_SIZE, "%s\n", type);
135}
Kevin Cernekee694bb7f2009-04-03 13:00:45 -0700136static DEVICE_ATTR(type, S_IRUGO, mtd_type_show, NULL);
137
138static ssize_t mtd_flags_show(struct device *dev,
139 struct device_attribute *attr, char *buf)
140{
Artem Bityutskiyd5de20a2011-12-29 18:00:29 +0200141 struct mtd_info *mtd = dev_get_drvdata(dev);
Kevin Cernekee694bb7f2009-04-03 13:00:45 -0700142
143 return snprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)mtd->flags);
Kevin Cernekee694bb7f2009-04-03 13:00:45 -0700144}
145static DEVICE_ATTR(flags, S_IRUGO, mtd_flags_show, NULL);
146
147static ssize_t mtd_size_show(struct device *dev,
148 struct device_attribute *attr, char *buf)
149{
Artem Bityutskiyd5de20a2011-12-29 18:00:29 +0200150 struct mtd_info *mtd = dev_get_drvdata(dev);
Kevin Cernekee694bb7f2009-04-03 13:00:45 -0700151
152 return snprintf(buf, PAGE_SIZE, "%llu\n",
153 (unsigned long long)mtd->size);
Kevin Cernekee694bb7f2009-04-03 13:00:45 -0700154}
155static DEVICE_ATTR(size, S_IRUGO, mtd_size_show, NULL);
156
157static ssize_t mtd_erasesize_show(struct device *dev,
158 struct device_attribute *attr, char *buf)
159{
Artem Bityutskiyd5de20a2011-12-29 18:00:29 +0200160 struct mtd_info *mtd = dev_get_drvdata(dev);
Kevin Cernekee694bb7f2009-04-03 13:00:45 -0700161
162 return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->erasesize);
Kevin Cernekee694bb7f2009-04-03 13:00:45 -0700163}
164static DEVICE_ATTR(erasesize, S_IRUGO, mtd_erasesize_show, NULL);
165
166static ssize_t mtd_writesize_show(struct device *dev,
167 struct device_attribute *attr, char *buf)
168{
Artem Bityutskiyd5de20a2011-12-29 18:00:29 +0200169 struct mtd_info *mtd = dev_get_drvdata(dev);
Kevin Cernekee694bb7f2009-04-03 13:00:45 -0700170
171 return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->writesize);
Kevin Cernekee694bb7f2009-04-03 13:00:45 -0700172}
173static DEVICE_ATTR(writesize, S_IRUGO, mtd_writesize_show, NULL);
174
Artem Bityutskiye7693542009-04-18 12:29:42 +0300175static ssize_t mtd_subpagesize_show(struct device *dev,
176 struct device_attribute *attr, char *buf)
177{
Artem Bityutskiyd5de20a2011-12-29 18:00:29 +0200178 struct mtd_info *mtd = dev_get_drvdata(dev);
Artem Bityutskiye7693542009-04-18 12:29:42 +0300179 unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;
180
181 return snprintf(buf, PAGE_SIZE, "%u\n", subpagesize);
Artem Bityutskiye7693542009-04-18 12:29:42 +0300182}
183static DEVICE_ATTR(subpagesize, S_IRUGO, mtd_subpagesize_show, NULL);
184
Kevin Cernekee694bb7f2009-04-03 13:00:45 -0700185static ssize_t mtd_oobsize_show(struct device *dev,
186 struct device_attribute *attr, char *buf)
187{
Artem Bityutskiyd5de20a2011-12-29 18:00:29 +0200188 struct mtd_info *mtd = dev_get_drvdata(dev);
Kevin Cernekee694bb7f2009-04-03 13:00:45 -0700189
190 return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->oobsize);
Kevin Cernekee694bb7f2009-04-03 13:00:45 -0700191}
192static DEVICE_ATTR(oobsize, S_IRUGO, mtd_oobsize_show, NULL);
193
Xiaolei Li7cc9aa62018-04-02 16:20:10 +0800194static ssize_t mtd_oobavail_show(struct device *dev,
195 struct device_attribute *attr, char *buf)
196{
197 struct mtd_info *mtd = dev_get_drvdata(dev);
198
199 return snprintf(buf, PAGE_SIZE, "%u\n", mtd->oobavail);
200}
201static DEVICE_ATTR(oobavail, S_IRUGO, mtd_oobavail_show, NULL);
202
Kevin Cernekee694bb7f2009-04-03 13:00:45 -0700203static ssize_t mtd_numeraseregions_show(struct device *dev,
204 struct device_attribute *attr, char *buf)
205{
Artem Bityutskiyd5de20a2011-12-29 18:00:29 +0200206 struct mtd_info *mtd = dev_get_drvdata(dev);
Kevin Cernekee694bb7f2009-04-03 13:00:45 -0700207
208 return snprintf(buf, PAGE_SIZE, "%u\n", mtd->numeraseregions);
Kevin Cernekee694bb7f2009-04-03 13:00:45 -0700209}
210static DEVICE_ATTR(numeraseregions, S_IRUGO, mtd_numeraseregions_show,
211 NULL);
212
213static ssize_t mtd_name_show(struct device *dev,
214 struct device_attribute *attr, char *buf)
215{
Artem Bityutskiyd5de20a2011-12-29 18:00:29 +0200216 struct mtd_info *mtd = dev_get_drvdata(dev);
Kevin Cernekee694bb7f2009-04-03 13:00:45 -0700217
218 return snprintf(buf, PAGE_SIZE, "%s\n", mtd->name);
Kevin Cernekee694bb7f2009-04-03 13:00:45 -0700219}
220static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL);
David Brownell1f24b5a2009-03-26 00:42:41 -0700221
Mike Dunna9b672e2012-04-25 12:06:07 -0700222static ssize_t mtd_ecc_strength_show(struct device *dev,
223 struct device_attribute *attr, char *buf)
224{
225 struct mtd_info *mtd = dev_get_drvdata(dev);
226
227 return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_strength);
228}
229static DEVICE_ATTR(ecc_strength, S_IRUGO, mtd_ecc_strength_show, NULL);
230
Mike Dunnd062d4e2012-04-25 12:06:08 -0700231static ssize_t mtd_bitflip_threshold_show(struct device *dev,
232 struct device_attribute *attr,
233 char *buf)
234{
235 struct mtd_info *mtd = dev_get_drvdata(dev);
236
237 return snprintf(buf, PAGE_SIZE, "%u\n", mtd->bitflip_threshold);
238}
239
240static ssize_t mtd_bitflip_threshold_store(struct device *dev,
241 struct device_attribute *attr,
242 const char *buf, size_t count)
243{
244 struct mtd_info *mtd = dev_get_drvdata(dev);
245 unsigned int bitflip_threshold;
246 int retval;
247
248 retval = kstrtouint(buf, 0, &bitflip_threshold);
249 if (retval)
250 return retval;
251
252 mtd->bitflip_threshold = bitflip_threshold;
253 return count;
254}
255static DEVICE_ATTR(bitflip_threshold, S_IRUGO | S_IWUSR,
256 mtd_bitflip_threshold_show,
257 mtd_bitflip_threshold_store);
258
Huang Shijiebf977e32013-08-16 10:10:05 +0800259static ssize_t mtd_ecc_step_size_show(struct device *dev,
260 struct device_attribute *attr, char *buf)
261{
262 struct mtd_info *mtd = dev_get_drvdata(dev);
263
264 return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_step_size);
265
266}
267static DEVICE_ATTR(ecc_step_size, S_IRUGO, mtd_ecc_step_size_show, NULL);
268
Ezequiel Garcia990a3af2014-06-24 10:55:50 -0300269static ssize_t mtd_ecc_stats_corrected_show(struct device *dev,
270 struct device_attribute *attr, char *buf)
271{
272 struct mtd_info *mtd = dev_get_drvdata(dev);
273 struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
274
275 return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->corrected);
276}
277static DEVICE_ATTR(corrected_bits, S_IRUGO,
278 mtd_ecc_stats_corrected_show, NULL);
279
280static ssize_t mtd_ecc_stats_errors_show(struct device *dev,
281 struct device_attribute *attr, char *buf)
282{
283 struct mtd_info *mtd = dev_get_drvdata(dev);
284 struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
285
286 return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->failed);
287}
288static DEVICE_ATTR(ecc_failures, S_IRUGO, mtd_ecc_stats_errors_show, NULL);
289
290static ssize_t mtd_badblocks_show(struct device *dev,
291 struct device_attribute *attr, char *buf)
292{
293 struct mtd_info *mtd = dev_get_drvdata(dev);
294 struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
295
296 return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->badblocks);
297}
298static DEVICE_ATTR(bad_blocks, S_IRUGO, mtd_badblocks_show, NULL);
299
300static ssize_t mtd_bbtblocks_show(struct device *dev,
301 struct device_attribute *attr, char *buf)
302{
303 struct mtd_info *mtd = dev_get_drvdata(dev);
304 struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
305
306 return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->bbtblocks);
307}
308static DEVICE_ATTR(bbt_blocks, S_IRUGO, mtd_bbtblocks_show, NULL);
309
/* Default sysfs attribute group attached to every MTD device. */
static struct attribute *mtd_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_flags.attr,
	&dev_attr_size.attr,
	&dev_attr_erasesize.attr,
	&dev_attr_writesize.attr,
	&dev_attr_subpagesize.attr,
	&dev_attr_oobsize.attr,
	&dev_attr_oobavail.attr,
	&dev_attr_numeraseregions.attr,
	&dev_attr_name.attr,
	&dev_attr_ecc_strength.attr,
	&dev_attr_ecc_step_size.attr,
	&dev_attr_corrected_bits.attr,
	&dev_attr_ecc_failures.attr,
	&dev_attr_bad_blocks.attr,
	&dev_attr_bbt_blocks.attr,
	&dev_attr_bitflip_threshold.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mtd);
David Brownell1f24b5a2009-03-26 00:42:41 -0700331
/* Device type for MTD devices: attaches the sysfs attributes above and
 * the release hook that removes the /dev/mtdXro node. */
static const struct device_type mtd_devtype = {
	.name = "mtd",
	.groups = mtd_groups,
	.release = mtd_release,
};
337
Christoph Hellwigb4caecd2015-01-14 10:42:32 +0100338#ifndef CONFIG_MMU
339unsigned mtd_mmap_capabilities(struct mtd_info *mtd)
340{
341 switch (mtd->type) {
342 case MTD_RAM:
343 return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
344 NOMMU_MAP_READ | NOMMU_MAP_WRITE;
345 case MTD_ROM:
346 return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
347 NOMMU_MAP_READ;
348 default:
349 return NOMMU_MAP_COPY;
350 }
351}
Arnd Bergmann706a4e52015-01-28 11:09:20 -0700352EXPORT_SYMBOL_GPL(mtd_mmap_capabilities);
Christoph Hellwigb4caecd2015-01-14 10:42:32 +0100353#endif
354
Brian Norris3efe41b2014-11-26 01:01:08 -0800355static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state,
356 void *cmd)
357{
358 struct mtd_info *mtd;
359
360 mtd = container_of(n, struct mtd_info, reboot_notifier);
361 mtd->_reboot(mtd);
362
363 return NOTIFY_DONE;
364}
365
Linus Torvalds1da177e2005-04-16 15:20:36 -0700366/**
Boris Brezillon477b0222015-11-16 15:53:13 +0100367 * mtd_wunit_to_pairing_info - get pairing information of a wunit
368 * @mtd: pointer to new MTD device info structure
369 * @wunit: write unit we are interested in
370 * @info: returned pairing information
371 *
372 * Retrieve pairing information associated to the wunit.
373 * This is mainly useful when dealing with MLC/TLC NANDs where pages can be
374 * paired together, and where programming a page may influence the page it is
375 * paired with.
376 * The notion of page is replaced by the term wunit (write-unit) to stay
377 * consistent with the ->writesize field.
378 *
379 * The @wunit argument can be extracted from an absolute offset using
380 * mtd_offset_to_wunit(). @info is filled with the pairing information attached
381 * to @wunit.
382 *
383 * From the pairing info the MTD user can find all the wunits paired with
384 * @wunit using the following loop:
385 *
386 * for (i = 0; i < mtd_pairing_groups(mtd); i++) {
387 * info.pair = i;
388 * mtd_pairing_info_to_wunit(mtd, &info);
389 * ...
390 * }
391 */
392int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
393 struct mtd_pairing_info *info)
394{
395 int npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd);
396
397 if (wunit < 0 || wunit >= npairs)
398 return -EINVAL;
399
400 if (mtd->pairing && mtd->pairing->get_info)
401 return mtd->pairing->get_info(mtd, wunit, info);
402
403 info->group = 0;
404 info->pair = wunit;
405
406 return 0;
407}
408EXPORT_SYMBOL_GPL(mtd_wunit_to_pairing_info);
409
410/**
Xiaolei Lic77a9312018-03-29 09:34:58 +0800411 * mtd_pairing_info_to_wunit - get wunit from pairing information
Boris Brezillon477b0222015-11-16 15:53:13 +0100412 * @mtd: pointer to new MTD device info structure
413 * @info: pairing information struct
414 *
415 * Returns a positive number representing the wunit associated to the info
416 * struct, or a negative error code.
417 *
418 * This is the reverse of mtd_wunit_to_pairing_info(), and can help one to
419 * iterate over all wunits of a given pair (see mtd_wunit_to_pairing_info()
420 * doc).
421 *
422 * It can also be used to only program the first page of each pair (i.e.
423 * page attached to group 0), which allows one to use an MLC NAND in
424 * software-emulated SLC mode:
425 *
426 * info.group = 0;
427 * npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd);
428 * for (info.pair = 0; info.pair < npairs; info.pair++) {
429 * wunit = mtd_pairing_info_to_wunit(mtd, &info);
430 * mtd_write(mtd, mtd_wunit_to_offset(mtd, blkoffs, wunit),
431 * mtd->writesize, &retlen, buf + (i * mtd->writesize));
432 * }
433 */
434int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
435 const struct mtd_pairing_info *info)
436{
437 int ngroups = mtd_pairing_groups(mtd);
438 int npairs = mtd_wunit_per_eb(mtd) / ngroups;
439
440 if (!info || info->pair < 0 || info->pair >= npairs ||
441 info->group < 0 || info->group >= ngroups)
442 return -EINVAL;
443
444 if (mtd->pairing && mtd->pairing->get_wunit)
445 return mtd->pairing->get_wunit(mtd, info);
446
447 return info->pair;
448}
449EXPORT_SYMBOL_GPL(mtd_pairing_info_to_wunit);
450
451/**
452 * mtd_pairing_groups - get the number of pairing groups
453 * @mtd: pointer to new MTD device info structure
454 *
455 * Returns the number of pairing groups.
456 *
457 * This number is usually equal to the number of bits exposed by a single
458 * cell, and can be used in conjunction with mtd_pairing_info_to_wunit()
459 * to iterate over all pages of a given pair.
460 */
461int mtd_pairing_groups(struct mtd_info *mtd)
462{
463 if (!mtd->pairing || !mtd->pairing->ngroups)
464 return 1;
465
466 return mtd->pairing->ngroups;
467}
468EXPORT_SYMBOL_GPL(mtd_pairing_groups);
469
Alban Bedelc4dfa252018-11-13 15:01:10 +0100470static int mtd_nvmem_reg_read(void *priv, unsigned int offset,
471 void *val, size_t bytes)
472{
473 struct mtd_info *mtd = priv;
474 size_t retlen;
475 int err;
476
477 err = mtd_read(mtd, offset, bytes, &retlen, val);
478 if (err && err != -EUCLEAN)
479 return err;
480
481 return retlen == bytes ? 0 : -EIO;
482}
483
484static int mtd_nvmem_add(struct mtd_info *mtd)
485{
486 struct nvmem_config config = {};
487
Aneesh Kumar K.V6e952682019-02-11 19:03:37 +0530488 config.id = -1;
Alban Bedelc4dfa252018-11-13 15:01:10 +0100489 config.dev = &mtd->dev;
490 config.name = mtd->name;
491 config.owner = THIS_MODULE;
492 config.reg_read = mtd_nvmem_reg_read;
493 config.size = mtd->size;
494 config.word_size = 1;
495 config.stride = 1;
496 config.read_only = true;
497 config.root_only = true;
498 config.no_of_node = true;
499 config.priv = mtd;
500
501 mtd->nvmem = nvmem_register(&config);
502 if (IS_ERR(mtd->nvmem)) {
503 /* Just ignore if there is no NVMEM support in the kernel */
Boris Brezillon19e16fb2019-01-02 15:36:53 +0100504 if (PTR_ERR(mtd->nvmem) == -EOPNOTSUPP) {
Alban Bedelc4dfa252018-11-13 15:01:10 +0100505 mtd->nvmem = NULL;
506 } else {
507 dev_err(&mtd->dev, "Failed to register NVMEM device\n");
508 return PTR_ERR(mtd->nvmem);
509 }
510 }
511
512 return 0;
513}
514
/* debugfs parent directory for per-device MTD entries; used as the parent
 * in add_mtd_device(). NOTE(review): creation site not visible in this
 * chunk — presumably done at module init; confirm. */
static struct dentry *dfs_dir_mtd;
516
/**
 * add_mtd_device - register an MTD device
 * @mtd: pointer to new MTD device info structure
 *
 * Add a device to the list of MTD devices present in the system, and
 * notify each currently active MTD 'user' of its arrival. Returns
 * zero on success or a negative error code on failure (-EEXIST if the
 * device is already registered, -EINVAL on an inconsistent mtd_info).
 */

int add_mtd_device(struct mtd_info *mtd)
{
	struct mtd_notifier *not;
	int i, error;

	/*
	 * May occur, for instance, on buggy drivers which call
	 * mtd_device_parse_register() multiple times on the same master MTD,
	 * especially with CONFIG_MTD_PARTITIONED_MASTER=y.
	 * dev.type is only set below, so a non-NULL value means we already
	 * ran for this mtd_info.
	 */
	if (WARN_ONCE(mtd->dev.type, "MTD already registered\n"))
		return -EEXIST;

	BUG_ON(mtd->writesize == 0);

	/*
	 * MTD drivers should implement ->_{write,read}() or
	 * ->_{write,read}_oob(), but not both.
	 */
	if (WARN_ON((mtd->_write && mtd->_write_oob) ||
		    (mtd->_read && mtd->_read_oob)))
		return -EINVAL;

	/* Erase support is mandatory unless the device declares MTD_NO_ERASE. */
	if (WARN_ON((!mtd->erasesize || !mtd->_erase) &&
		    !(mtd->flags & MTD_NO_ERASE)))
		return -EINVAL;

	mutex_lock(&mtd_table_mutex);

	/* The idr slot number becomes the mtd%d device index. */
	i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
	if (i < 0) {
		error = i;
		goto fail_locked;
	}

	mtd->index = i;
	mtd->usecount = 0;

	/* default value if not set by driver */
	if (mtd->bitflip_threshold == 0)
		mtd->bitflip_threshold = mtd->ecc_strength;

	/* Cache log2 of erase/write sizes (0 when not a power of two). */
	if (is_power_of_2(mtd->erasesize))
		mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
	else
		mtd->erasesize_shift = 0;

	if (is_power_of_2(mtd->writesize))
		mtd->writesize_shift = ffs(mtd->writesize) - 1;
	else
		mtd->writesize_shift = 0;

	mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
	mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;

	/* Some chips always power up locked. Unlock them now */
	if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
		error = mtd_unlock(mtd, 0, mtd->size);
		if (error && error != -EOPNOTSUPP)
			printk(KERN_WARNING
			       "%s: unlock failed, writes may not work\n",
			       mtd->name);
		/* Ignore unlock failures? */
		error = 0;
	}

	/* Caller should have set dev.parent to match the
	 * physical device, if appropriate.
	 */
	mtd->dev.type = &mtd_devtype;
	mtd->dev.class = &mtd_class;
	mtd->dev.devt = MTD_DEVT(i);
	dev_set_name(&mtd->dev, "mtd%d", i);
	dev_set_drvdata(&mtd->dev, mtd);
	/* Pin the device-tree node for the lifetime of the registration. */
	of_node_get(mtd_get_of_node(mtd));
	error = device_register(&mtd->dev);
	if (error)
		goto fail_added;

	/* Add the nvmem provider */
	error = mtd_nvmem_add(mtd);
	if (error)
		goto fail_nvmem_add;

	/* debugfs is best-effort: failure only costs the debug entries. */
	if (!IS_ERR_OR_NULL(dfs_dir_mtd)) {
		mtd->dbg.dfs_dir = debugfs_create_dir(dev_name(&mtd->dev), dfs_dir_mtd);
		if (IS_ERR_OR_NULL(mtd->dbg.dfs_dir)) {
			pr_debug("mtd device %s won't show data in debugfs\n",
				 dev_name(&mtd->dev));
		}
	}

	/* Companion read-only node /dev/mtdXro (odd minor numbers). */
	device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
		      "mtd%dro", i);

	pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
	/* No need to get a refcount on the module containing
	   the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->add(mtd);

	mutex_unlock(&mtd_table_mutex);
	/* We _know_ we aren't being removed, because
	   our caller is still holding us here. So none
	   of this try_ nonsense, and no bitching about it
	   either. :) */
	__module_get(THIS_MODULE);
	return 0;

fail_nvmem_add:
	device_unregister(&mtd->dev);
fail_added:
	of_node_put(mtd_get_of_node(mtd));
	idr_remove(&mtd_idr, i);
fail_locked:
	mutex_unlock(&mtd_table_mutex);
	return error;
}
644
/**
 * del_mtd_device - unregister an MTD device
 * @mtd: pointer to MTD device info structure
 *
 * Remove a device from the list of MTD devices present in the system,
 * and notify each currently active MTD 'user' of its departure.
 * Returns zero on success or a negative error code on failure:
 * -ENODEV if the device is not in the registration table, -EBUSY if it
 * is still in use (usecount non-zero).
 */

int del_mtd_device(struct mtd_info *mtd)
{
	int ret;
	struct mtd_notifier *not;

	mutex_lock(&mtd_table_mutex);

	/* Tear down debugfs entries first; safe even if none were created. */
	debugfs_remove_recursive(mtd->dbg.dfs_dir);

	/* Reject devices that were never registered (or already removed). */
	if (idr_find(&mtd_idr, mtd->index) != mtd) {
		ret = -ENODEV;
		goto out_error;
	}

	/* No need to get a refcount on the module containing
		the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->remove(mtd);

	if (mtd->usecount) {
		/* Still open somewhere: refuse removal but keep notifiers told. */
		printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
		       mtd->index, mtd->name, mtd->usecount);
		ret = -EBUSY;
	} else {
		/* Try to remove the NVMEM provider */
		if (mtd->nvmem)
			nvmem_unregister(mtd->nvmem);

		device_unregister(&mtd->dev);

		/* Drop the idr slot and the device-tree node reference taken
		 * in add_mtd_device(). */
		idr_remove(&mtd_idr, mtd->index);
		of_node_put(mtd_get_of_node(mtd));

		module_put(THIS_MODULE);
		ret = 0;
	}

out_error:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}
696
Brian Norris472b4442015-12-11 15:58:01 -0800697/*
698 * Set a few defaults based on the parent devices, if not provided by the
699 * driver
700 */
701static void mtd_set_dev_defaults(struct mtd_info *mtd)
702{
703 if (mtd->dev.parent) {
704 if (!mtd->owner && mtd->dev.parent->driver)
705 mtd->owner = mtd->dev.parent->driver->owner;
706 if (!mtd->name)
707 mtd->name = dev_name(mtd->dev.parent);
708 } else {
709 pr_debug("mtd device won't show a device symlink in sysfs\n");
710 }
Rafał Miłecki1186af42018-11-20 09:55:45 +0100711
712 mtd->orig_flags = mtd->flags;
Brian Norris472b4442015-12-11 15:58:01 -0800713}
Dan Ehrenberg727dc612015-04-02 15:15:10 -0700714
Linus Torvalds1da177e2005-04-16 15:20:36 -0700715/**
Dmitry Eremin-Solenikov1c4c2152011-03-25 22:26:25 +0300716 * mtd_device_parse_register - parse partitions and register an MTD device.
717 *
718 * @mtd: the MTD device to register
719 * @types: the list of MTD partition probes to try, see
720 * 'parse_mtd_partitions()' for more information
Dmitry Eremin-Solenikovc7975332011-06-10 18:18:28 +0400721 * @parser_data: MTD partition parser-specific data
Dmitry Eremin-Solenikov1c4c2152011-03-25 22:26:25 +0300722 * @parts: fallback partition information to register, if parsing fails;
723 * only valid if %nr_parts > %0
724 * @nr_parts: the number of partitions in parts, if zero then the full
725 * MTD device is registered if no partition info is found
726 *
727 * This function aggregates MTD partitions parsing (done by
728 * 'parse_mtd_partitions()') and MTD device and partitions registering. It
729 * basically follows the most common pattern found in many MTD drivers:
730 *
Rafał Miłecki55a999a2018-03-27 15:36:47 +0200731 * * If the MTD_PARTITIONED_MASTER option is set, then the device as a whole is
732 * registered first.
733 * * Then It tries to probe partitions on MTD device @mtd using parsers
Dmitry Eremin-Solenikov1c4c2152011-03-25 22:26:25 +0300734 * specified in @types (if @types is %NULL, then the default list of parsers
735 * is used, see 'parse_mtd_partitions()' for more information). If none are
736 * found this functions tries to fallback to information specified in
737 * @parts/@nr_parts.
Dmitry Eremin-Solenikov1c4c2152011-03-25 22:26:25 +0300738 * * If no partitions were found this function just registers the MTD device
739 * @mtd and exits.
740 *
741 * Returns zero in case of success and a negative error code in case of failure.
742 */
int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
			      struct mtd_part_parser_data *parser_data,
			      const struct mtd_partition *parts,
			      int nr_parts)
{
	int ret;

	/* Initialize core-level defaults before any registration happens. */
	mtd_set_dev_defaults(mtd);

	/*
	 * Optionally expose the whole (master) device in addition to any
	 * partitions registered below.
	 */
	if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
		ret = add_mtd_device(mtd);
		if (ret)
			return ret;
	}

	/* Prefer parsed partitions over driver-provided fallback */
	ret = parse_mtd_partitions(mtd, types, parser_data);
	if (ret > 0)
		ret = 0; /* a parser found and registered partitions */
	else if (nr_parts)
		ret = add_mtd_partitions(mtd, parts, nr_parts);
	else if (!device_is_registered(&mtd->dev))
		ret = add_mtd_device(mtd); /* no partitions: whole device */
	else
		ret = 0; /* master was already registered above */

	if (ret)
		goto out;

	/*
	 * FIXME: some drivers unfortunately call this function more than once.
	 * So we have to check if we've already assigned the reboot notifier.
	 *
	 * Generally, we can make multiple calls work for most cases, but it
	 * does cause problems with parse_mtd_partitions() above (e.g.,
	 * cmdlineparts will register partitions more than once).
	 */
	WARN_ONCE(mtd->_reboot && mtd->reboot_notifier.notifier_call,
		  "MTD already registered\n");
	if (mtd->_reboot && !mtd->reboot_notifier.notifier_call) {
		mtd->reboot_notifier.notifier_call = mtd_reboot_notifier;
		register_reboot_notifier(&mtd->reboot_notifier);
	}

out:
	/* On failure, undo the master registration done earlier (if any). */
	if (ret && device_is_registered(&mtd->dev))
		del_mtd_device(mtd);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_device_parse_register);
794
795/**
Jamie Ilesf5671ab2011-05-23 17:15:46 +0100796 * mtd_device_unregister - unregister an existing MTD device.
797 *
798 * @master: the MTD device to unregister. This will unregister both the master
799 * and any partitions if registered.
800 */
801int mtd_device_unregister(struct mtd_info *master)
802{
803 int err;
804
Brian Norris3efe41b2014-11-26 01:01:08 -0800805 if (master->_reboot)
806 unregister_reboot_notifier(&master->reboot_notifier);
807
Jamie Ilesf5671ab2011-05-23 17:15:46 +0100808 err = del_mtd_partitions(master);
809 if (err)
810 return err;
811
812 if (!device_is_registered(&master->dev))
813 return 0;
814
815 return del_mtd_device(master);
816}
817EXPORT_SYMBOL_GPL(mtd_device_unregister);
818
819/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700820 * register_mtd_user - register a 'user' of MTD devices.
821 * @new: pointer to notifier info structure
822 *
823 * Registers a pair of callbacks function to be called upon addition
824 * or removal of MTD devices. Causes the 'add' callback to be immediately
825 * invoked for each MTD device currently present in the system.
826 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700827void register_mtd_user (struct mtd_notifier *new)
828{
Ben Hutchingsf1332ba2010-01-29 20:57:11 +0000829 struct mtd_info *mtd;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700830
Ingo Molnar48b19262006-03-31 02:29:41 -0800831 mutex_lock(&mtd_table_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700832
833 list_add(&new->list, &mtd_notifiers);
834
Wanlong Gaod5ca5122011-05-20 21:14:30 +0800835 __module_get(THIS_MODULE);
Thomas Gleixner97894cd2005-11-07 11:15:26 +0000836
Ben Hutchingsf1332ba2010-01-29 20:57:11 +0000837 mtd_for_each_device(mtd)
838 new->add(mtd);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700839
Ingo Molnar48b19262006-03-31 02:29:41 -0800840 mutex_unlock(&mtd_table_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700841}
Artem Bityutskiy33c87b42011-12-30 16:11:14 +0200842EXPORT_SYMBOL_GPL(register_mtd_user);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700843
844/**
Artem B. Bityuckiy49450792005-02-18 14:34:54 +0000845 * unregister_mtd_user - unregister a 'user' of MTD devices.
846 * @old: pointer to notifier info structure
Linus Torvalds1da177e2005-04-16 15:20:36 -0700847 *
848 * Removes a callback function pair from the list of 'users' to be
849 * notified upon addition or removal of MTD devices. Causes the
850 * 'remove' callback to be immediately invoked for each MTD device
851 * currently present in the system.
852 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700853int unregister_mtd_user (struct mtd_notifier *old)
854{
Ben Hutchingsf1332ba2010-01-29 20:57:11 +0000855 struct mtd_info *mtd;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700856
Ingo Molnar48b19262006-03-31 02:29:41 -0800857 mutex_lock(&mtd_table_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700858
859 module_put(THIS_MODULE);
860
Ben Hutchingsf1332ba2010-01-29 20:57:11 +0000861 mtd_for_each_device(mtd)
862 old->remove(mtd);
Thomas Gleixner97894cd2005-11-07 11:15:26 +0000863
Linus Torvalds1da177e2005-04-16 15:20:36 -0700864 list_del(&old->list);
Ingo Molnar48b19262006-03-31 02:29:41 -0800865 mutex_unlock(&mtd_table_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700866 return 0;
867}
Artem Bityutskiy33c87b42011-12-30 16:11:14 +0200868EXPORT_SYMBOL_GPL(unregister_mtd_user);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700869
870/**
871 * get_mtd_device - obtain a validated handle for an MTD device
872 * @mtd: last known address of the required MTD device
873 * @num: internal device number of the required MTD device
874 *
875 * Given a number and NULL address, return the num'th entry in the device
876 * table, if any. Given an address and num == -1, search the device table
877 * for a device with that address and return if it's still present. Given
Artem Bityutskiy9c740342006-10-11 14:52:47 +0300878 * both, return the num'th driver only if its address matches. Return
879 * error code if not.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700880 */
struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
{
	struct mtd_info *ret = NULL, *other;
	int err = -ENODEV;

	mutex_lock(&mtd_table_mutex);

	if (num == -1) {
		/* Address-only lookup: confirm @mtd is still in the table. */
		mtd_for_each_device(other) {
			if (other == mtd) {
				ret = mtd;
				break;
			}
		}
	} else if (num >= 0) {
		/* Number lookup; if @mtd was also given, both must agree. */
		ret = idr_find(&mtd_idr, num);
		if (mtd && mtd != ret)
			ret = NULL;
	}

	if (!ret) {
		ret = ERR_PTR(err);
		goto out;
	}

	/* Pin the driver module / per-device state before handing it out. */
	err = __get_mtd_device(ret);
	if (err)
		ret = ERR_PTR(err);
out:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(get_mtd_device);
Artem Bityutskiy9c740342006-10-11 14:52:47 +0300914
Maxim Levitsky3bd45652010-02-22 20:39:28 +0200915
916int __get_mtd_device(struct mtd_info *mtd)
917{
918 int err;
919
920 if (!try_module_get(mtd->owner))
921 return -ENODEV;
922
Artem Bityutskiy3c3c10b2012-01-30 14:58:32 +0200923 if (mtd->_get_device) {
924 err = mtd->_get_device(mtd);
Maxim Levitsky3bd45652010-02-22 20:39:28 +0200925
926 if (err) {
927 module_put(mtd->owner);
928 return err;
929 }
930 }
931 mtd->usecount++;
932 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700933}
Artem Bityutskiy33c87b42011-12-30 16:11:14 +0200934EXPORT_SYMBOL_GPL(__get_mtd_device);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700935
Artem Bityutskiy77993082006-10-11 14:52:44 +0300936/**
937 * get_mtd_device_nm - obtain a validated handle for an MTD device by
938 * device name
939 * @name: MTD device name to open
940 *
941 * This function returns MTD device description structure in case of
942 * success and an error code in case of failure.
943 */
Artem Bityutskiy77993082006-10-11 14:52:44 +0300944struct mtd_info *get_mtd_device_nm(const char *name)
945{
Ben Hutchingsf1332ba2010-01-29 20:57:11 +0000946 int err = -ENODEV;
947 struct mtd_info *mtd = NULL, *other;
Artem Bityutskiy77993082006-10-11 14:52:44 +0300948
949 mutex_lock(&mtd_table_mutex);
950
Ben Hutchingsf1332ba2010-01-29 20:57:11 +0000951 mtd_for_each_device(other) {
952 if (!strcmp(name, other->name)) {
953 mtd = other;
Artem Bityutskiy77993082006-10-11 14:52:44 +0300954 break;
955 }
956 }
957
Artem Bityutskiy9fe912c2006-10-11 14:52:45 +0300958 if (!mtd)
Artem Bityutskiy77993082006-10-11 14:52:44 +0300959 goto out_unlock;
960
Wanlong Gao52534f22011-05-17 22:36:18 +0800961 err = __get_mtd_device(mtd);
962 if (err)
Artem Bityutskiy77993082006-10-11 14:52:44 +0300963 goto out_unlock;
964
Artem Bityutskiy77993082006-10-11 14:52:44 +0300965 mutex_unlock(&mtd_table_mutex);
966 return mtd;
Artem Bityutskiy9fe912c2006-10-11 14:52:45 +0300967
Artem Bityutskiy9fe912c2006-10-11 14:52:45 +0300968out_unlock:
969 mutex_unlock(&mtd_table_mutex);
970 return ERR_PTR(err);
Artem Bityutskiy77993082006-10-11 14:52:44 +0300971}
Artem Bityutskiy33c87b42011-12-30 16:11:14 +0200972EXPORT_SYMBOL_GPL(get_mtd_device_nm);
Artem Bityutskiy77993082006-10-11 14:52:44 +0300973
Linus Torvalds1da177e2005-04-16 15:20:36 -0700974void put_mtd_device(struct mtd_info *mtd)
975{
Ingo Molnar48b19262006-03-31 02:29:41 -0800976 mutex_lock(&mtd_table_mutex);
Maxim Levitsky3bd45652010-02-22 20:39:28 +0200977 __put_mtd_device(mtd);
978 mutex_unlock(&mtd_table_mutex);
979
980}
Artem Bityutskiy33c87b42011-12-30 16:11:14 +0200981EXPORT_SYMBOL_GPL(put_mtd_device);
Maxim Levitsky3bd45652010-02-22 20:39:28 +0200982
983void __put_mtd_device(struct mtd_info *mtd)
984{
985 --mtd->usecount;
986 BUG_ON(mtd->usecount < 0);
987
Artem Bityutskiy3c3c10b2012-01-30 14:58:32 +0200988 if (mtd->_put_device)
989 mtd->_put_device(mtd);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700990
991 module_put(mtd->owner);
992}
Artem Bityutskiy33c87b42011-12-30 16:11:14 +0200993EXPORT_SYMBOL_GPL(__put_mtd_device);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700994
Artem Bityutskiy52b02032011-12-30 15:57:25 +0200995/*
 * Erase is a synchronous operation. Device drivers are expected to return a
 * negative error code if the operation failed and update instr->fail_addr
 * to point to the portion that was not properly erased.
Artem Bityutskiy8273a0c2012-02-03 14:34:14 +0200999 */
int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	/* Default: callers cannot assume a meaningful fail_addr on error. */
	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;

	/* Devices without an erase size or an erase hook cannot erase. */
	if (!mtd->erasesize || !mtd->_erase)
		return -ENOTSUPP;

	/* Bounds check written this way to avoid overflow in addr + len. */
	if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	/* A zero-length erase is a successful no-op. */
	if (!instr->len)
		return 0;

	ledtrig_mtd_activity();
	return mtd->_erase(mtd, instr);
}
EXPORT_SYMBOL_GPL(mtd_erase);
1019
1020/*
1021 * This stuff for eXecute-In-Place. phys is optional and may be set to NULL.
1022 */
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	      void **virt, resource_size_t *phys)
{
	/* Initialize all outputs so callers see sane values on any error. */
	*retlen = 0;
	*virt = NULL;
	if (phys)
		*phys = 0;
	if (!mtd->_point)
		return -EOPNOTSUPP;
	/* Bounds check written this way to avoid overflow in from + len. */
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	/* A zero-length mapping trivially succeeds. */
	if (!len)
		return 0;
	return mtd->_point(mtd, from, len, retlen, virt, phys);
}
EXPORT_SYMBOL_GPL(mtd_point);
1039
1040/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	if (!mtd->_unpoint)
		return -EOPNOTSUPP;
	/* Same overflow-safe bounds rules as mtd_point(). */
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	/* Nothing to undo for a zero-length mapping. */
	if (!len)
		return 0;
	return mtd->_unpoint(mtd, from, len);
}
EXPORT_SYMBOL_GPL(mtd_unpoint);
1052
1053/*
1054 * Allow NOMMU mmap() to directly map the device (if not NULL)
1055 * - return the address to which the offset maps
1056 * - return -ENOSYS to indicate refusal to do the mapping
1057 */
1058unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
1059 unsigned long offset, unsigned long flags)
1060{
Nicolas Pitre9eaa9032017-10-30 14:48:32 -04001061 size_t retlen;
1062 void *virt;
1063 int ret;
1064
1065 ret = mtd_point(mtd, offset, len, &retlen, &virt, NULL);
1066 if (ret)
1067 return ret;
1068 if (retlen != len) {
1069 mtd_unpoint(mtd, offset, retlen);
1070 return -ENOSYS;
1071 }
1072 return (unsigned long)virt;
Artem Bityutskiy8273a0c2012-02-03 14:34:14 +02001073}
1074EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
1075
1076int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
1077 u_char *buf)
1078{
Boris Brezillon2431c4f2018-12-20 15:13:20 +01001079 struct mtd_oob_ops ops = {
1080 .len = len,
1081 .datbuf = buf,
1082 };
1083 int ret;
Mike Dunnedbc45402012-04-25 12:06:11 -07001084
Boris Brezillon2431c4f2018-12-20 15:13:20 +01001085 ret = mtd_read_oob(mtd, from, &ops);
1086 *retlen = ops.retlen;
Boris Brezillon24ff1292018-01-09 09:50:34 +01001087
Boris Brezillon2431c4f2018-12-20 15:13:20 +01001088 return ret;
Artem Bityutskiy8273a0c2012-02-03 14:34:14 +02001089}
1090EXPORT_SYMBOL_GPL(mtd_read);
1091
1092int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
1093 const u_char *buf)
1094{
Boris Brezillon2431c4f2018-12-20 15:13:20 +01001095 struct mtd_oob_ops ops = {
1096 .len = len,
1097 .datbuf = (u8 *)buf,
1098 };
1099 int ret;
Boris Brezillon24ff1292018-01-09 09:50:34 +01001100
Boris Brezillon2431c4f2018-12-20 15:13:20 +01001101 ret = mtd_write_oob(mtd, to, &ops);
1102 *retlen = ops.retlen;
Boris Brezillon24ff1292018-01-09 09:50:34 +01001103
Boris Brezillon2431c4f2018-12-20 15:13:20 +01001104 return ret;
Artem Bityutskiy8273a0c2012-02-03 14:34:14 +02001105}
1106EXPORT_SYMBOL_GPL(mtd_write);
1107
1108/*
1109 * In blackbox flight recorder like scenarios we want to make successful writes
1110 * in interrupt context. panic_write() is only intended to be called when its
1111 * known the kernel is about to panic and we need the write to succeed. Since
1112 * the kernel is not going to be running for much longer, this function can
1113 * break locks and delay to ensure the write succeeds (but not sleep).
1114 */
int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
		    const u_char *buf)
{
	*retlen = 0;
	if (!mtd->_panic_write)
		return -EOPNOTSUPP;
	/* Bounds check written this way to avoid overflow in to + len. */
	if (to < 0 || to >= mtd->size || len > mtd->size - to)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	/* A zero-length write succeeds immediately. */
	if (!len)
		return 0;
	return mtd->_panic_write(mtd, to, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_panic_write);
1130
/* Validate the data/OOB ranges described by @ops against device limits. */
static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
			     struct mtd_oob_ops *ops)
{
	/*
	 * Some users are setting ->datbuf or ->oobbuf to NULL, but are leaving
	 * ->len or ->ooblen uninitialized. Force ->len and ->ooblen to 0 in
	 * this case.
	 */
	if (!ops->datbuf)
		ops->len = 0;

	if (!ops->oobbuf)
		ops->ooblen = 0;

	/* The data span must lie entirely within the device. */
	if (offs < 0 || offs + ops->len > mtd->size)
		return -EINVAL;

	if (ops->ooblen) {
		size_t maxooblen;

		/* The OOB start offset must fall within the available area. */
		if (ops->ooboffs >= mtd_oobavail(mtd, ops))
			return -EINVAL;

		/*
		 * Maximum OOB bytes reachable from offs to the end of the
		 * device: remaining write-size units times available OOB per
		 * unit, minus the starting offset into the first unit's OOB.
		 */
		maxooblen = ((size_t)(mtd_div_by_ws(mtd->size, mtd) -
				      mtd_div_by_ws(offs, mtd)) *
			     mtd_oobavail(mtd, ops)) - ops->ooboffs;
		if (ops->ooblen > maxooblen)
			return -EINVAL;
	}

	return 0;
}
1163
int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	int ret_code;
	ops->retlen = ops->oobretlen = 0;

	/* Validate data/OOB lengths and offsets before touching hardware. */
	ret_code = mtd_check_oob_ops(mtd, from, ops);
	if (ret_code)
		return ret_code;

	ledtrig_mtd_activity();

	/* Check the validity of a potential fallback on mtd->_read */
	if (!mtd->_read_oob && (!mtd->_read || ops->oobbuf))
		return -EOPNOTSUPP;

	if (mtd->_read_oob)
		ret_code = mtd->_read_oob(mtd, from, ops);
	else
		ret_code = mtd->_read(mtd, from, ops->len, &ops->retlen,
				      ops->datbuf);

	/*
	 * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
	 * similar to mtd->_read(), returning a non-negative integer
	 * representing max bitflips. In other cases, mtd->_read_oob() may
	 * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
	 */
	if (unlikely(ret_code < 0))
		return ret_code;
	if (mtd->ecc_strength == 0)
		return 0;	/* device lacks ecc */
	/* Too many corrected bitflips: warn the caller via -EUCLEAN. */
	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}
EXPORT_SYMBOL_GPL(mtd_read_oob);
1198
int mtd_write_oob(struct mtd_info *mtd, loff_t to,
		  struct mtd_oob_ops *ops)
{
	int ret;

	ops->retlen = ops->oobretlen = 0;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	/* Validate data/OOB lengths and offsets before touching hardware. */
	ret = mtd_check_oob_ops(mtd, to, ops);
	if (ret)
		return ret;

	ledtrig_mtd_activity();

	/* Check the validity of a potential fallback on mtd->_write */
	if (!mtd->_write_oob && (!mtd->_write || ops->oobbuf))
		return -EOPNOTSUPP;

	if (mtd->_write_oob)
		return mtd->_write_oob(mtd, to, ops);
	else
		return mtd->_write(mtd, to, ops->len, &ops->retlen,
				   ops->datbuf);
}
EXPORT_SYMBOL_GPL(mtd_write_oob);
1226
Boris Brezillon75eb2ce2016-02-04 09:52:30 +01001227/**
1228 * mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section
1229 * @mtd: MTD device structure
1230 * @section: ECC section. Depending on the layout you may have all the ECC
1231 * bytes stored in a single contiguous section, or one section
1232 * per ECC chunk (and sometime several sections for a single ECC
1233 * ECC chunk)
1234 * @oobecc: OOB region struct filled with the appropriate ECC position
1235 * information
1236 *
Masahiro Yamada7da0fff2016-12-14 09:31:01 +09001237 * This function returns ECC section information in the OOB area. If you want
Boris Brezillon75eb2ce2016-02-04 09:52:30 +01001238 * to get all the ECC bytes information, then you should call
1239 * mtd_ooblayout_ecc(mtd, section++, oobecc) until it returns -ERANGE.
1240 *
1241 * Returns zero on success, a negative error code otherwise.
1242 */
int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
		      struct mtd_oob_region *oobecc)
{
	/* Always hand back a zeroed region, even on the error paths. */
	memset(oobecc, 0, sizeof(*oobecc));

	if (!mtd || section < 0)
		return -EINVAL;

	/* No layout or no ECC callback means no ECC region information. */
	if (!mtd->ooblayout || !mtd->ooblayout->ecc)
		return -ENOTSUPP;

	return mtd->ooblayout->ecc(mtd, section, oobecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);
1257
1258/**
1259 * mtd_ooblayout_free - Get the OOB region definition of a specific free
1260 * section
1261 * @mtd: MTD device structure
1262 * @section: Free section you are interested in. Depending on the layout
1263 * you may have all the free bytes stored in a single contiguous
1264 * section, or one section per ECC chunk plus an extra section
1265 * for the remaining bytes (or other funky layout).
1266 * @oobfree: OOB region struct filled with the appropriate free position
1267 * information
1268 *
Masahiro Yamada7da0fff2016-12-14 09:31:01 +09001269 * This function returns free bytes position in the OOB area. If you want
Boris Brezillon75eb2ce2016-02-04 09:52:30 +01001270 * to get all the free bytes information, then you should call
1271 * mtd_ooblayout_free(mtd, section++, oobfree) until it returns -ERANGE.
1272 *
1273 * Returns zero on success, a negative error code otherwise.
1274 */
int mtd_ooblayout_free(struct mtd_info *mtd, int section,
		       struct mtd_oob_region *oobfree)
{
	/* Always hand back a zeroed region, even on the error paths. */
	memset(oobfree, 0, sizeof(*oobfree));

	if (!mtd || section < 0)
		return -EINVAL;

	/* No layout or no free callback means no free region information. */
	if (!mtd->ooblayout || !mtd->ooblayout->free)
		return -ENOTSUPP;

	return mtd->ooblayout->free(mtd, section, oobfree);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_free);
1289
1290/**
1291 * mtd_ooblayout_find_region - Find the region attached to a specific byte
1292 * @mtd: mtd info structure
1293 * @byte: the byte we are searching for
1294 * @sectionp: pointer where the section id will be stored
1295 * @oobregion: used to retrieve the ECC position
1296 * @iter: iterator function. Should be either mtd_ooblayout_free or
1297 * mtd_ooblayout_ecc depending on the region type you're searching for
1298 *
Masahiro Yamada7da0fff2016-12-14 09:31:01 +09001299 * This function returns the section id and oobregion information of a
Boris Brezillon75eb2ce2016-02-04 09:52:30 +01001300 * specific byte. For example, say you want to know where the 4th ECC byte is
1301 * stored, you'll use:
1302 *
1303 * mtd_ooblayout_find_region(mtd, 3, &section, &oobregion, mtd_ooblayout_ecc);
1304 *
1305 * Returns zero on success, a negative error code otherwise.
1306 */
1307static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte,
1308 int *sectionp, struct mtd_oob_region *oobregion,
1309 int (*iter)(struct mtd_info *,
1310 int section,
1311 struct mtd_oob_region *oobregion))
1312{
1313 int pos = 0, ret, section = 0;
1314
1315 memset(oobregion, 0, sizeof(*oobregion));
1316
1317 while (1) {
1318 ret = iter(mtd, section, oobregion);
1319 if (ret)
1320 return ret;
1321
1322 if (pos + oobregion->length > byte)
1323 break;
1324
1325 pos += oobregion->length;
1326 section++;
1327 }
1328
1329 /*
1330 * Adjust region info to make it start at the beginning at the
1331 * 'start' ECC byte.
1332 */
1333 oobregion->offset += byte - pos;
1334 oobregion->length -= byte - pos;
1335 *sectionp = section;
1336
1337 return 0;
1338}
1339
1340/**
1341 * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific
1342 * ECC byte
1343 * @mtd: mtd info structure
1344 * @eccbyte: the byte we are searching for
1345 * @sectionp: pointer where the section id will be stored
1346 * @oobregion: OOB region information
1347 *
1348 * Works like mtd_ooblayout_find_region() except it searches for a specific ECC
1349 * byte.
1350 *
1351 * Returns zero on success, a negative error code otherwise.
1352 */
int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
				 int *section,
				 struct mtd_oob_region *oobregion)
{
	/* Generic lookup specialized with the ECC-region iterator. */
	return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion,
					 mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion);
1361
1362/**
1363 * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer
1364 * @mtd: mtd info structure
1365 * @buf: destination buffer to store OOB bytes
1366 * @oobbuf: OOB buffer
1367 * @start: first byte to retrieve
1368 * @nbytes: number of bytes to retrieve
1369 * @iter: section iterator
1370 *
1371 * Extract bytes attached to a specific category (ECC or free)
1372 * from the OOB buffer and copy them into buf.
1373 *
1374 * Returns zero on success, a negative error code otherwise.
1375 */
1376static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
1377 const u8 *oobbuf, int start, int nbytes,
1378 int (*iter)(struct mtd_info *,
1379 int section,
1380 struct mtd_oob_region *oobregion))
1381{
Masahiro Yamada8e8fd4d2016-11-09 11:08:08 +09001382 struct mtd_oob_region oobregion;
1383 int section, ret;
Boris Brezillon75eb2ce2016-02-04 09:52:30 +01001384
1385 ret = mtd_ooblayout_find_region(mtd, start, &section,
1386 &oobregion, iter);
1387
1388 while (!ret) {
1389 int cnt;
1390
Masahiro Yamada7c295ef2016-11-09 11:08:09 +09001391 cnt = min_t(int, nbytes, oobregion.length);
Boris Brezillon75eb2ce2016-02-04 09:52:30 +01001392 memcpy(buf, oobbuf + oobregion.offset, cnt);
1393 buf += cnt;
1394 nbytes -= cnt;
1395
1396 if (!nbytes)
1397 break;
1398
1399 ret = iter(mtd, ++section, &oobregion);
1400 }
1401
1402 return ret;
1403}
1404
1405/**
1406 * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer
1407 * @mtd: mtd info structure
1408 * @buf: source buffer to get OOB bytes from
1409 * @oobbuf: OOB buffer
1410 * @start: first OOB byte to set
1411 * @nbytes: number of OOB bytes to set
1412 * @iter: section iterator
1413 *
1414 * Fill the OOB buffer with data provided in buf. The category (ECC or free)
1415 * is selected by passing the appropriate iterator.
1416 *
1417 * Returns zero on success, a negative error code otherwise.
1418 */
1419static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
1420 u8 *oobbuf, int start, int nbytes,
1421 int (*iter)(struct mtd_info *,
1422 int section,
1423 struct mtd_oob_region *oobregion))
1424{
Masahiro Yamada8e8fd4d2016-11-09 11:08:08 +09001425 struct mtd_oob_region oobregion;
1426 int section, ret;
Boris Brezillon75eb2ce2016-02-04 09:52:30 +01001427
1428 ret = mtd_ooblayout_find_region(mtd, start, &section,
1429 &oobregion, iter);
1430
1431 while (!ret) {
1432 int cnt;
1433
Masahiro Yamada7c295ef2016-11-09 11:08:09 +09001434 cnt = min_t(int, nbytes, oobregion.length);
Boris Brezillon75eb2ce2016-02-04 09:52:30 +01001435 memcpy(oobbuf + oobregion.offset, buf, cnt);
1436 buf += cnt;
1437 nbytes -= cnt;
1438
1439 if (!nbytes)
1440 break;
1441
1442 ret = iter(mtd, ++section, &oobregion);
1443 }
1444
1445 return ret;
1446}
1447
1448/**
1449 * mtd_ooblayout_count_bytes - count the number of bytes in a OOB category
1450 * @mtd: mtd info structure
1451 * @iter: category iterator
1452 *
1453 * Count the number of bytes in a given category.
1454 *
1455 * Returns a positive value on success, a negative error code otherwise.
1456 */
1457static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
1458 int (*iter)(struct mtd_info *,
1459 int section,
1460 struct mtd_oob_region *oobregion))
1461{
Masahiro Yamada4d6aecf2016-11-09 11:08:10 +09001462 struct mtd_oob_region oobregion;
Boris Brezillon75eb2ce2016-02-04 09:52:30 +01001463 int section = 0, ret, nbytes = 0;
1464
1465 while (1) {
1466 ret = iter(mtd, section++, &oobregion);
1467 if (ret) {
1468 if (ret == -ERANGE)
1469 ret = nbytes;
1470 break;
1471 }
1472
1473 nbytes += oobregion.length;
1474 }
1475
1476 return ret;
1477}
1478
1479/**
1480 * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer
1481 * @mtd: mtd info structure
1482 * @eccbuf: destination buffer to store ECC bytes
1483 * @oobbuf: OOB buffer
1484 * @start: first ECC byte to retrieve
1485 * @nbytes: number of ECC bytes to retrieve
1486 *
1487 * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes.
1488 *
1489 * Returns zero on success, a negative error code otherwise.
1490 */
int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
			       const u8 *oobbuf, int start, int nbytes)
{
	/* Generic extractor specialized with the ECC-region iterator. */
	return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes,
				       mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes);
1498
1499/**
1500 * mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer
1501 * @mtd: mtd info structure
1502 * @eccbuf: source buffer to get ECC bytes from
1503 * @oobbuf: OOB buffer
1504 * @start: first ECC byte to set
1505 * @nbytes: number of ECC bytes to set
1506 *
1507 * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes.
1508 *
1509 * Returns zero on success, a negative error code otherwise.
1510 */
int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
			       u8 *oobbuf, int start, int nbytes)
{
	/* Generic filler specialized with the ECC-region iterator. */
	return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes,
				       mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes);
1518
1519/**
1520 * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer
1521 * @mtd: mtd info structure
 * @databuf: destination buffer to store data bytes
 * @oobbuf: OOB buffer
 * @start: first data byte to retrieve
 * @nbytes: number of data bytes to retrieve
1526 *
1527 * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
1528 *
1529 * Returns zero on success, a negative error code otherwise.
1530 */
int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
				const u8 *oobbuf, int start, int nbytes)
{
	/* Generic extractor specialized with the free-region iterator. */
	return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes,
				       mtd_ooblayout_free);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes);
1538
/**
 * mtd_ooblayout_set_databytes - set data bytes into the oob buffer
 * @mtd: mtd info structure
 * @databuf: source buffer to get data bytes from
 * @oobbuf: OOB buffer
 * @start: first data byte to set
 * @nbytes: number of data bytes to set
 *
 * Works like mtd_ooblayout_set_bytes(), except it acts on free bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
				u8 *oobbuf, int start, int nbytes)
{
	return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes,
				       mtd_ooblayout_free);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes);
1558
/**
 * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB
 * @mtd: mtd info structure
 *
 * Works like mtd_ooblayout_count_bytes(), except it counts free bytes.
 *
 * Returns the number of free bytes (as reported by
 * mtd_ooblayout_count_bytes()) on success, a negative error code otherwise.
 */
int mtd_ooblayout_count_freebytes(struct mtd_info *mtd)
{
	return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes);
1572
1573/**
Xiaolei Lic77a9312018-03-29 09:34:58 +08001574 * mtd_ooblayout_count_eccbytes - count the number of ECC bytes in OOB
Boris Brezillon75eb2ce2016-02-04 09:52:30 +01001575 * @mtd: mtd info structure
1576 *
1577 * Works like mtd_ooblayout_count_bytes(), except it count ECC bytes.
1578 *
1579 * Returns zero on success, a negative error code otherwise.
1580 */
1581int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd)
1582{
1583 return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc);
1584}
1585EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);
1586
Artem Bityutskiyde3cac92012-02-08 16:37:14 +02001587/*
1588 * Method to access the protection register area, present in some flash
1589 * devices. The user data is one time programmable but the factory data is read
1590 * only.
1591 */
Christian Riesch4b78fc42014-01-28 09:29:44 +01001592int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
1593 struct otp_info *buf)
Artem Bityutskiyde3cac92012-02-08 16:37:14 +02001594{
1595 if (!mtd->_get_fact_prot_info)
1596 return -EOPNOTSUPP;
1597 if (!len)
1598 return 0;
Christian Riesch4b78fc42014-01-28 09:29:44 +01001599 return mtd->_get_fact_prot_info(mtd, len, retlen, buf);
Artem Bityutskiyde3cac92012-02-08 16:37:14 +02001600}
1601EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);
1602
1603int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
1604 size_t *retlen, u_char *buf)
1605{
1606 *retlen = 0;
1607 if (!mtd->_read_fact_prot_reg)
1608 return -EOPNOTSUPP;
1609 if (!len)
1610 return 0;
1611 return mtd->_read_fact_prot_reg(mtd, from, len, retlen, buf);
1612}
1613EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);
1614
Christian Riesch4b78fc42014-01-28 09:29:44 +01001615int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
1616 struct otp_info *buf)
Artem Bityutskiyde3cac92012-02-08 16:37:14 +02001617{
1618 if (!mtd->_get_user_prot_info)
1619 return -EOPNOTSUPP;
1620 if (!len)
1621 return 0;
Christian Riesch4b78fc42014-01-28 09:29:44 +01001622 return mtd->_get_user_prot_info(mtd, len, retlen, buf);
Artem Bityutskiyde3cac92012-02-08 16:37:14 +02001623}
1624EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);
1625
1626int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
1627 size_t *retlen, u_char *buf)
1628{
1629 *retlen = 0;
1630 if (!mtd->_read_user_prot_reg)
1631 return -EOPNOTSUPP;
1632 if (!len)
1633 return 0;
1634 return mtd->_read_user_prot_reg(mtd, from, len, retlen, buf);
1635}
1636EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);
1637
/*
 * Write to the one-time-programmable user area. Requires driver support;
 * a write that transfers no data at all is reported as -ENOSPC.
 */
int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
			    size_t *retlen, u_char *buf)
{
	int ret;

	*retlen = 0;
	if (!mtd->_write_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	ret = mtd->_write_user_prot_reg(mtd, to, len, retlen, buf);
	if (ret)
		return ret;

	/*
	 * If no data could be written at all, the OTP area has no room left
	 * (its bits are one-time programmable) and we must return -ENOSPC.
	 */
	return (*retlen) ? 0 : -ENOSPC;
}
EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);
1659
1660int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
1661{
1662 if (!mtd->_lock_user_prot_reg)
1663 return -EOPNOTSUPP;
1664 if (!len)
1665 return 0;
1666 return mtd->_lock_user_prot_reg(mtd, from, len);
1667}
1668EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);
1669
Artem Bityutskiy8273a0c2012-02-03 14:34:14 +02001670/* Chip-supported device locking */
1671int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1672{
1673 if (!mtd->_lock)
1674 return -EOPNOTSUPP;
Brian Norris0c2b4e22014-07-21 19:06:27 -07001675 if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
Artem Bityutskiy8273a0c2012-02-03 14:34:14 +02001676 return -EINVAL;
Artem Bityutskiybcb1d232012-02-06 13:27:43 +02001677 if (!len)
1678 return 0;
Artem Bityutskiy8273a0c2012-02-03 14:34:14 +02001679 return mtd->_lock(mtd, ofs, len);
1680}
1681EXPORT_SYMBOL_GPL(mtd_lock);
1682
1683int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1684{
1685 if (!mtd->_unlock)
1686 return -EOPNOTSUPP;
Brian Norris0c2b4e22014-07-21 19:06:27 -07001687 if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
Artem Bityutskiy8273a0c2012-02-03 14:34:14 +02001688 return -EINVAL;
Artem Bityutskiybcb1d232012-02-06 13:27:43 +02001689 if (!len)
1690 return 0;
Artem Bityutskiy8273a0c2012-02-03 14:34:14 +02001691 return mtd->_unlock(mtd, ofs, len);
1692}
1693EXPORT_SYMBOL_GPL(mtd_unlock);
1694
1695int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1696{
1697 if (!mtd->_is_locked)
1698 return -EOPNOTSUPP;
Brian Norris0c2b4e22014-07-21 19:06:27 -07001699 if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
Artem Bityutskiy8273a0c2012-02-03 14:34:14 +02001700 return -EINVAL;
Artem Bityutskiybcb1d232012-02-06 13:27:43 +02001701 if (!len)
1702 return 0;
Artem Bityutskiy8273a0c2012-02-03 14:34:14 +02001703 return mtd->_is_locked(mtd, ofs, len);
1704}
1705EXPORT_SYMBOL_GPL(mtd_is_locked);
1706
Ezequiel Garcia8471bb72014-05-21 19:06:12 -03001707int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
Artem Bityutskiy8273a0c2012-02-03 14:34:14 +02001708{
Brian Norris0c2b4e22014-07-21 19:06:27 -07001709 if (ofs < 0 || ofs >= mtd->size)
Artem Bityutskiy8273a0c2012-02-03 14:34:14 +02001710 return -EINVAL;
Ezequiel Garcia8471bb72014-05-21 19:06:12 -03001711 if (!mtd->_block_isreserved)
1712 return 0;
1713 return mtd->_block_isreserved(mtd, ofs);
1714}
1715EXPORT_SYMBOL_GPL(mtd_block_isreserved);
1716
1717int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
1718{
Brian Norris0c2b4e22014-07-21 19:06:27 -07001719 if (ofs < 0 || ofs >= mtd->size)
Ezequiel Garcia8471bb72014-05-21 19:06:12 -03001720 return -EINVAL;
1721 if (!mtd->_block_isbad)
1722 return 0;
Artem Bityutskiy8273a0c2012-02-03 14:34:14 +02001723 return mtd->_block_isbad(mtd, ofs);
1724}
1725EXPORT_SYMBOL_GPL(mtd_block_isbad);
1726
1727int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
1728{
1729 if (!mtd->_block_markbad)
1730 return -EOPNOTSUPP;
Brian Norris0c2b4e22014-07-21 19:06:27 -07001731 if (ofs < 0 || ofs >= mtd->size)
Artem Bityutskiy8273a0c2012-02-03 14:34:14 +02001732 return -EINVAL;
Artem Bityutskiy664addc2012-02-03 18:13:23 +02001733 if (!(mtd->flags & MTD_WRITEABLE))
1734 return -EROFS;
Artem Bityutskiy8273a0c2012-02-03 14:34:14 +02001735 return mtd->_block_markbad(mtd, ofs);
1736}
1737EXPORT_SYMBOL_GPL(mtd_block_markbad);
1738
1739/*
Artem Bityutskiy52b02032011-12-30 15:57:25 +02001740 * default_mtd_writev - the default writev method
1741 * @mtd: mtd device description object pointer
1742 * @vecs: the vectors to write
1743 * @count: count of vectors in @vecs
1744 * @to: the MTD device offset to write to
1745 * @retlen: on exit contains the count of bytes written to the MTD device.
1746 *
1747 * This function returns zero in case of success and a negative error code in
1748 * case of failure.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001749 */
Artem Bityutskiy1dbebd32011-12-30 16:23:41 +02001750static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
1751 unsigned long count, loff_t to, size_t *retlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001752{
1753 unsigned long i;
1754 size_t totlen = 0, thislen;
1755 int ret = 0;
1756
Artem Bityutskiy52b02032011-12-30 15:57:25 +02001757 for (i = 0; i < count; i++) {
1758 if (!vecs[i].iov_len)
1759 continue;
1760 ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
1761 vecs[i].iov_base);
1762 totlen += thislen;
1763 if (ret || thislen != vecs[i].iov_len)
1764 break;
1765 to += vecs[i].iov_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766 }
Artem Bityutskiy52b02032011-12-30 15:57:25 +02001767 *retlen = totlen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001768 return ret;
1769}
Artem Bityutskiy1dbebd32011-12-30 16:23:41 +02001770
1771/*
1772 * mtd_writev - the vector-based MTD write method
1773 * @mtd: mtd device description object pointer
1774 * @vecs: the vectors to write
1775 * @count: count of vectors in @vecs
1776 * @to: the MTD device offset to write to
1777 * @retlen: on exit contains the count of bytes written to the MTD device.
1778 *
1779 * This function returns zero in case of success and a negative error code in
1780 * case of failure.
1781 */
1782int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
1783 unsigned long count, loff_t to, size_t *retlen)
1784{
1785 *retlen = 0;
Artem Bityutskiy664addc2012-02-03 18:13:23 +02001786 if (!(mtd->flags & MTD_WRITEABLE))
1787 return -EROFS;
Artem Bityutskiy3c3c10b2012-01-30 14:58:32 +02001788 if (!mtd->_writev)
Artem Bityutskiy1dbebd32011-12-30 16:23:41 +02001789 return default_mtd_writev(mtd, vecs, count, to, retlen);
Artem Bityutskiy3c3c10b2012-01-30 14:58:32 +02001790 return mtd->_writev(mtd, vecs, count, to, retlen);
Artem Bityutskiy1dbebd32011-12-30 16:23:41 +02001791}
1792EXPORT_SYMBOL_GPL(mtd_writev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001793
Grant Erickson33b53712011-04-08 08:51:32 -07001794/**
1795 * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
Artem Bityutskiy52b02032011-12-30 15:57:25 +02001796 * @mtd: mtd device description object pointer
1797 * @size: a pointer to the ideal or maximum size of the allocation, points
Grant Erickson33b53712011-04-08 08:51:32 -07001798 * to the actual allocation size on success.
1799 *
1800 * This routine attempts to allocate a contiguous kernel buffer up to
1801 * the specified size, backing off the size of the request exponentially
1802 * until the request succeeds or until the allocation size falls below
1803 * the system page size. This attempts to make sure it does not adversely
1804 * impact system performance, so when allocating more than one page, we
Linus Torvaldscaf49192012-12-10 10:51:16 -08001805 * ask the memory allocator to avoid re-trying, swapping, writing back
1806 * or performing I/O.
Grant Erickson33b53712011-04-08 08:51:32 -07001807 *
1808 * Note, this function also makes sure that the allocated buffer is aligned to
1809 * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
1810 *
1811 * This is called, for example by mtd_{read,write} and jffs2_scan_medium,
1812 * to handle smaller (i.e. degraded) buffer allocations under low- or
1813 * fragmented-memory situations where such reduced allocations, from a
1814 * requested ideal, are allowed.
1815 *
1816 * Returns a pointer to the allocated buffer on success; otherwise, NULL.
1817 */
1818void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
1819{
Mel Gormand0164ad2015-11-06 16:28:21 -08001820 gfp_t flags = __GFP_NOWARN | __GFP_DIRECT_RECLAIM | __GFP_NORETRY;
Grant Erickson33b53712011-04-08 08:51:32 -07001821 size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
1822 void *kbuf;
1823
1824 *size = min_t(size_t, *size, KMALLOC_MAX_SIZE);
1825
1826 while (*size > min_alloc) {
1827 kbuf = kmalloc(*size, flags);
1828 if (kbuf)
1829 return kbuf;
1830
1831 *size >>= 1;
1832 *size = ALIGN(*size, mtd->writesize);
1833 }
1834
1835 /*
1836 * For the last resort allocation allow 'kmalloc()' to do all sorts of
1837 * things (write-back, dropping caches, etc) by using GFP_KERNEL.
1838 */
1839 return kmalloc(*size, GFP_KERNEL);
1840}
Grant Erickson33b53712011-04-08 08:51:32 -07001841EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001842
#ifdef CONFIG_PROC_FS

/*====================================================================*/
/* Support for /proc/mtd */

/*
 * seq_file show callback for /proc/mtd: print one header line, then one
 * line per registered MTD device (index, size, erasesize, name).
 * mtd_table_mutex is held while walking the device table so devices
 * cannot be added or removed during the dump.
 */
static int mtd_proc_show(struct seq_file *m, void *v)
{
	struct mtd_info *mtd;

	seq_puts(m, "dev: size erasesize name\n");
	mutex_lock(&mtd_table_mutex);
	mtd_for_each_device(mtd) {
		seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
			   mtd->index, (unsigned long long)mtd->size,
			   mtd->erasesize, mtd->name);
	}
	mutex_unlock(&mtd_table_mutex);
	return 0;
}
#endif /* CONFIG_PROC_FS */
1863
Linus Torvalds1da177e2005-04-16 15:20:36 -07001864/*====================================================================*/
1865/* Init code */
1866
Steve Longerbeam445caaa2016-08-04 19:31:15 +05301867static struct backing_dev_info * __init mtd_bdi_init(char *name)
Jens Axboe0661b1a2010-04-27 09:49:47 +02001868{
Steve Longerbeam445caaa2016-08-04 19:31:15 +05301869 struct backing_dev_info *bdi;
Jens Axboe0661b1a2010-04-27 09:49:47 +02001870 int ret;
1871
Jan Karafa060522017-04-12 12:24:37 +02001872 bdi = bdi_alloc(GFP_KERNEL);
Steve Longerbeam445caaa2016-08-04 19:31:15 +05301873 if (!bdi)
1874 return ERR_PTR(-ENOMEM);
Jens Axboe0661b1a2010-04-27 09:49:47 +02001875
Jan Karafa060522017-04-12 12:24:37 +02001876 bdi->name = name;
1877 /*
1878 * We put '-0' suffix to the name to get the same name format as we
1879 * used to get. Since this is called only once, we get a unique name.
1880 */
Jan Kara7c4cc302017-04-12 12:24:49 +02001881 ret = bdi_register(bdi, "%.28s-0", name);
Jens Axboe0661b1a2010-04-27 09:49:47 +02001882 if (ret)
Jan Karafa060522017-04-12 12:24:37 +02001883 bdi_put(bdi);
Jens Axboe0661b1a2010-04-27 09:49:47 +02001884
Steve Longerbeam445caaa2016-08-04 19:31:15 +05301885 return ret ? ERR_PTR(ret) : bdi;
Jens Axboe0661b1a2010-04-27 09:49:47 +02001886}
1887
/* /proc/mtd entry; created in init_mtd(), removed in cleanup_mtd(). */
static struct proc_dir_entry *proc_mtd;
1889
/*
 * Module init: register the MTD class, the shared backing device,
 * /proc/mtd, the character-device interface and the debugfs root.
 * Unwinds in reverse order via the labels below on failure.
 */
static int __init init_mtd(void)
{
	int ret;

	ret = class_register(&mtd_class);
	if (ret)
		goto err_reg;

	/* A single backing_dev_info is shared by all MTD devices. */
	mtd_bdi = mtd_bdi_init("mtd");
	if (IS_ERR(mtd_bdi)) {
		ret = PTR_ERR(mtd_bdi);
		goto err_bdi;
	}

	/* May return NULL; the cleanup path checks proc_mtd before removal. */
	proc_mtd = proc_create_single("mtd", 0, NULL, mtd_proc_show);

	ret = init_mtdchar();
	if (ret)
		goto out_procfs;

	/* debugfs failure is non-fatal and deliberately not checked. */
	dfs_dir_mtd = debugfs_create_dir("mtd", NULL);

	return 0;

out_procfs:
	if (proc_mtd)
		remove_proc_entry("mtd", NULL);
	bdi_put(mtd_bdi);
err_bdi:
	class_unregister(&mtd_class);
err_reg:
	pr_err("Error registering mtd class or bdi: %d\n", ret);
	return ret;
}
1924
/* Module exit: release everything acquired in init_mtd(). */
static void __exit cleanup_mtd(void)
{
	debugfs_remove_recursive(dfs_dir_mtd);
	cleanup_mtdchar();
	if (proc_mtd)
		remove_proc_entry("mtd", NULL);
	class_unregister(&mtd_class);
	bdi_put(mtd_bdi);
	/* Free the bookkeeping of the mtd index allocator. */
	idr_destroy(&mtd_idr);
}
1935
/* Module entry/exit points and metadata. */
module_init(init_mtd);
module_exit(cleanup_mtd);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Core MTD registration and access routines");