blob: 5f1053d995b083f43e203efd691c5011a286c27d [file] [log] [blame]
/*
 * Core registration and callback routines for MTD
 * drivers and users.
 *
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
 * Copyright © 2006 Red Hat UK Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */
23
Linus Torvalds1da177e2005-04-16 15:20:36 -070024#include <linux/module.h>
25#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070026#include <linux/ptrace.h>
Alexey Dobriyan447d9bd2011-05-13 23:34:19 +030027#include <linux/seq_file.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <linux/string.h>
29#include <linux/timer.h>
30#include <linux/major.h>
31#include <linux/fs.h>
Artem Bityutskiy77993082006-10-11 14:52:44 +030032#include <linux/err.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070033#include <linux/ioctl.h>
34#include <linux/init.h>
Brian Norris215a02f2015-11-11 16:26:04 -080035#include <linux/of.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070036#include <linux/proc_fs.h>
Ben Hutchingsb520e412010-01-29 20:59:42 +000037#include <linux/idr.h>
Jörn Engela33eb6b2010-04-27 09:40:52 +020038#include <linux/backing-dev.h>
Tejun Heo05d71b462010-03-30 02:52:39 +090039#include <linux/gfp.h>
David Howells0d01ff22013-04-11 23:51:01 +010040#include <linux/slab.h>
Brian Norris3efe41b2014-11-26 01:01:08 -080041#include <linux/reboot.h>
Ezequiel Garciafea728c2016-04-12 17:46:42 -030042#include <linux/leds.h>
Mario Rugieroe8e3edb2017-05-29 08:38:41 -030043#include <linux/debugfs.h>
Alban Bedelc4dfa252018-11-13 15:01:10 +010044#include <linux/nvmem-provider.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070045
46#include <linux/mtd/mtd.h>
Jamie Ilesf5671ab2011-05-23 17:15:46 +010047#include <linux/mtd/partitions.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070048
Ben Dooks356d70f2007-05-28 20:28:34 +010049#include "mtdcore.h"
Artem Bityutskiy660685d2013-03-14 13:27:40 +020050
/* Shared backing_dev_info for MTD devices; NOTE(review): appears to be
 * initialized outside this chunk (declared extern in mtdcore.h) — confirm. */
struct backing_dev_info *mtd_bdi;
Ben Dooks356d70f2007-05-28 20:28:34 +010052
#ifdef CONFIG_PM_SLEEP

/* Class-level suspend hook: forward to the bound MTD device, if any. */
static int mtd_cls_suspend(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	if (!mtd)
		return 0;

	return mtd_suspend(mtd);
}

/* Class-level resume hook; the mtd_resume() result is deliberately ignored. */
static int mtd_cls_resume(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	if (mtd)
		mtd_resume(mtd);

	return 0;
}

static SIMPLE_DEV_PM_OPS(mtd_cls_pm_ops, mtd_cls_suspend, mtd_cls_resume);
#define MTD_CLS_PM_OPS (&mtd_cls_pm_ops)
#else
#define MTD_CLS_PM_OPS NULL
#endif
David Brownell1f24b5a2009-03-26 00:42:41 -070076
/* The "mtd" device class; MTD devices and their read-only companion
 * nodes (see mtd_release()/device_create() below) are created under it. */
static struct class mtd_class = {
	.name = "mtd",
	.owner = THIS_MODULE,
	.pm = MTD_CLS_PM_OPS,
};
David Brownell1f24b5a2009-03-26 00:42:41 -070082
/* Map of mtd->index -> registered struct mtd_info, guarded by mtd_table_mutex. */
static DEFINE_IDR(mtd_idr);

/* These are exported solely for the purpose of mtd_blkdevs.c. You
   should not use them for _anything_ else */
DEFINE_MUTEX(mtd_table_mutex);
EXPORT_SYMBOL_GPL(mtd_table_mutex);

/* Return the registered device at or after index @i, or NULL when exhausted. */
struct mtd_info *__mtd_next_device(int i)
{
	return idr_get_next(&mtd_idr, &i);
}
EXPORT_SYMBOL_GPL(__mtd_next_device);

/* MTD 'users' that want add/remove callbacks for every device. */
static LIST_HEAD(mtd_notifiers);
97
David Brownell1f24b5a2009-03-26 00:42:41 -070098
/* Each MTD index owns two char-device minors: rw (even) and ro (odd). */
#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)

/* REVISIT once MTD uses the driver model better, whoever allocates
 * the mtd_info will probably want to use the release() hook...
 */
static void mtd_release(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	dev_t index = MTD_DEVT(mtd->index);

	/* remove /dev/mtdXro node */
	device_destroy(&mtd_class, index + 1);
}
112
David Brownell1f24b5a2009-03-26 00:42:41 -0700113static ssize_t mtd_type_show(struct device *dev,
114 struct device_attribute *attr, char *buf)
115{
Artem Bityutskiyd5de20a2011-12-29 18:00:29 +0200116 struct mtd_info *mtd = dev_get_drvdata(dev);
David Brownell1f24b5a2009-03-26 00:42:41 -0700117 char *type;
118
119 switch (mtd->type) {
120 case MTD_ABSENT:
121 type = "absent";
122 break;
123 case MTD_RAM:
124 type = "ram";
125 break;
126 case MTD_ROM:
127 type = "rom";
128 break;
129 case MTD_NORFLASH:
130 type = "nor";
131 break;
132 case MTD_NANDFLASH:
133 type = "nand";
134 break;
135 case MTD_DATAFLASH:
136 type = "dataflash";
137 break;
138 case MTD_UBIVOLUME:
139 type = "ubi";
140 break;
Huang Shijief4837242013-09-25 14:58:19 +0800141 case MTD_MLCNANDFLASH:
142 type = "mlc-nand";
143 break;
David Brownell1f24b5a2009-03-26 00:42:41 -0700144 default:
145 type = "unknown";
146 }
147
148 return snprintf(buf, PAGE_SIZE, "%s\n", type);
149}
Kevin Cernekee694bb7f2009-04-03 13:00:45 -0700150static DEVICE_ATTR(type, S_IRUGO, mtd_type_show, NULL);
151
152static ssize_t mtd_flags_show(struct device *dev,
153 struct device_attribute *attr, char *buf)
154{
Artem Bityutskiyd5de20a2011-12-29 18:00:29 +0200155 struct mtd_info *mtd = dev_get_drvdata(dev);
Kevin Cernekee694bb7f2009-04-03 13:00:45 -0700156
157 return snprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)mtd->flags);
158
159}
160static DEVICE_ATTR(flags, S_IRUGO, mtd_flags_show, NULL);
161
162static ssize_t mtd_size_show(struct device *dev,
163 struct device_attribute *attr, char *buf)
164{
Artem Bityutskiyd5de20a2011-12-29 18:00:29 +0200165 struct mtd_info *mtd = dev_get_drvdata(dev);
Kevin Cernekee694bb7f2009-04-03 13:00:45 -0700166
167 return snprintf(buf, PAGE_SIZE, "%llu\n",
168 (unsigned long long)mtd->size);
169
170}
171static DEVICE_ATTR(size, S_IRUGO, mtd_size_show, NULL);
172
173static ssize_t mtd_erasesize_show(struct device *dev,
174 struct device_attribute *attr, char *buf)
175{
Artem Bityutskiyd5de20a2011-12-29 18:00:29 +0200176 struct mtd_info *mtd = dev_get_drvdata(dev);
Kevin Cernekee694bb7f2009-04-03 13:00:45 -0700177
178 return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->erasesize);
179
180}
181static DEVICE_ATTR(erasesize, S_IRUGO, mtd_erasesize_show, NULL);
182
183static ssize_t mtd_writesize_show(struct device *dev,
184 struct device_attribute *attr, char *buf)
185{
Artem Bityutskiyd5de20a2011-12-29 18:00:29 +0200186 struct mtd_info *mtd = dev_get_drvdata(dev);
Kevin Cernekee694bb7f2009-04-03 13:00:45 -0700187
188 return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->writesize);
189
190}
191static DEVICE_ATTR(writesize, S_IRUGO, mtd_writesize_show, NULL);
192
Artem Bityutskiye7693542009-04-18 12:29:42 +0300193static ssize_t mtd_subpagesize_show(struct device *dev,
194 struct device_attribute *attr, char *buf)
195{
Artem Bityutskiyd5de20a2011-12-29 18:00:29 +0200196 struct mtd_info *mtd = dev_get_drvdata(dev);
Artem Bityutskiye7693542009-04-18 12:29:42 +0300197 unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;
198
199 return snprintf(buf, PAGE_SIZE, "%u\n", subpagesize);
200
201}
202static DEVICE_ATTR(subpagesize, S_IRUGO, mtd_subpagesize_show, NULL);
203
Kevin Cernekee694bb7f2009-04-03 13:00:45 -0700204static ssize_t mtd_oobsize_show(struct device *dev,
205 struct device_attribute *attr, char *buf)
206{
Artem Bityutskiyd5de20a2011-12-29 18:00:29 +0200207 struct mtd_info *mtd = dev_get_drvdata(dev);
Kevin Cernekee694bb7f2009-04-03 13:00:45 -0700208
209 return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->oobsize);
210
211}
212static DEVICE_ATTR(oobsize, S_IRUGO, mtd_oobsize_show, NULL);
213
Xiaolei Li7cc9aa62018-04-02 16:20:10 +0800214static ssize_t mtd_oobavail_show(struct device *dev,
215 struct device_attribute *attr, char *buf)
216{
217 struct mtd_info *mtd = dev_get_drvdata(dev);
218
219 return snprintf(buf, PAGE_SIZE, "%u\n", mtd->oobavail);
220}
221static DEVICE_ATTR(oobavail, S_IRUGO, mtd_oobavail_show, NULL);
222
Kevin Cernekee694bb7f2009-04-03 13:00:45 -0700223static ssize_t mtd_numeraseregions_show(struct device *dev,
224 struct device_attribute *attr, char *buf)
225{
Artem Bityutskiyd5de20a2011-12-29 18:00:29 +0200226 struct mtd_info *mtd = dev_get_drvdata(dev);
Kevin Cernekee694bb7f2009-04-03 13:00:45 -0700227
228 return snprintf(buf, PAGE_SIZE, "%u\n", mtd->numeraseregions);
229
230}
231static DEVICE_ATTR(numeraseregions, S_IRUGO, mtd_numeraseregions_show,
232 NULL);
233
234static ssize_t mtd_name_show(struct device *dev,
235 struct device_attribute *attr, char *buf)
236{
Artem Bityutskiyd5de20a2011-12-29 18:00:29 +0200237 struct mtd_info *mtd = dev_get_drvdata(dev);
Kevin Cernekee694bb7f2009-04-03 13:00:45 -0700238
239 return snprintf(buf, PAGE_SIZE, "%s\n", mtd->name);
240
241}
242static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL);
David Brownell1f24b5a2009-03-26 00:42:41 -0700243
Mike Dunna9b672e2012-04-25 12:06:07 -0700244static ssize_t mtd_ecc_strength_show(struct device *dev,
245 struct device_attribute *attr, char *buf)
246{
247 struct mtd_info *mtd = dev_get_drvdata(dev);
248
249 return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_strength);
250}
251static DEVICE_ATTR(ecc_strength, S_IRUGO, mtd_ecc_strength_show, NULL);
252
Mike Dunnd062d4e2012-04-25 12:06:08 -0700253static ssize_t mtd_bitflip_threshold_show(struct device *dev,
254 struct device_attribute *attr,
255 char *buf)
256{
257 struct mtd_info *mtd = dev_get_drvdata(dev);
258
259 return snprintf(buf, PAGE_SIZE, "%u\n", mtd->bitflip_threshold);
260}
261
262static ssize_t mtd_bitflip_threshold_store(struct device *dev,
263 struct device_attribute *attr,
264 const char *buf, size_t count)
265{
266 struct mtd_info *mtd = dev_get_drvdata(dev);
267 unsigned int bitflip_threshold;
268 int retval;
269
270 retval = kstrtouint(buf, 0, &bitflip_threshold);
271 if (retval)
272 return retval;
273
274 mtd->bitflip_threshold = bitflip_threshold;
275 return count;
276}
277static DEVICE_ATTR(bitflip_threshold, S_IRUGO | S_IWUSR,
278 mtd_bitflip_threshold_show,
279 mtd_bitflip_threshold_store);
280
Huang Shijiebf977e32013-08-16 10:10:05 +0800281static ssize_t mtd_ecc_step_size_show(struct device *dev,
282 struct device_attribute *attr, char *buf)
283{
284 struct mtd_info *mtd = dev_get_drvdata(dev);
285
286 return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_step_size);
287
288}
289static DEVICE_ATTR(ecc_step_size, S_IRUGO, mtd_ecc_step_size_show, NULL);
290
Ezequiel Garcia990a3af2014-06-24 10:55:50 -0300291static ssize_t mtd_ecc_stats_corrected_show(struct device *dev,
292 struct device_attribute *attr, char *buf)
293{
294 struct mtd_info *mtd = dev_get_drvdata(dev);
295 struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
296
297 return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->corrected);
298}
299static DEVICE_ATTR(corrected_bits, S_IRUGO,
300 mtd_ecc_stats_corrected_show, NULL);
301
302static ssize_t mtd_ecc_stats_errors_show(struct device *dev,
303 struct device_attribute *attr, char *buf)
304{
305 struct mtd_info *mtd = dev_get_drvdata(dev);
306 struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
307
308 return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->failed);
309}
310static DEVICE_ATTR(ecc_failures, S_IRUGO, mtd_ecc_stats_errors_show, NULL);
311
312static ssize_t mtd_badblocks_show(struct device *dev,
313 struct device_attribute *attr, char *buf)
314{
315 struct mtd_info *mtd = dev_get_drvdata(dev);
316 struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
317
318 return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->badblocks);
319}
320static DEVICE_ATTR(bad_blocks, S_IRUGO, mtd_badblocks_show, NULL);
321
322static ssize_t mtd_bbtblocks_show(struct device *dev,
323 struct device_attribute *attr, char *buf)
324{
325 struct mtd_info *mtd = dev_get_drvdata(dev);
326 struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
327
328 return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->bbtblocks);
329}
330static DEVICE_ATTR(bbt_blocks, S_IRUGO, mtd_bbtblocks_show, NULL);
331
/* Attributes exposed under /sys/class/mtd/mtdX/ for every MTD device. */
static struct attribute *mtd_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_flags.attr,
	&dev_attr_size.attr,
	&dev_attr_erasesize.attr,
	&dev_attr_writesize.attr,
	&dev_attr_subpagesize.attr,
	&dev_attr_oobsize.attr,
	&dev_attr_oobavail.attr,
	&dev_attr_numeraseregions.attr,
	&dev_attr_name.attr,
	&dev_attr_ecc_strength.attr,
	&dev_attr_ecc_step_size.attr,
	&dev_attr_corrected_bits.attr,
	&dev_attr_ecc_failures.attr,
	&dev_attr_bad_blocks.attr,
	&dev_attr_bbt_blocks.attr,
	&dev_attr_bitflip_threshold.attr,
	NULL,
};
/* Generates the mtd_groups array referenced by mtd_devtype below. */
ATTRIBUTE_GROUPS(mtd);

/* Device type shared by all MTD devices; wires up sysfs attrs and release. */
static const struct device_type mtd_devtype = {
	.name = "mtd",
	.groups = mtd_groups,
	.release = mtd_release,
};
359
Christoph Hellwigb4caecd2015-01-14 10:42:32 +0100360#ifndef CONFIG_MMU
361unsigned mtd_mmap_capabilities(struct mtd_info *mtd)
362{
363 switch (mtd->type) {
364 case MTD_RAM:
365 return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
366 NOMMU_MAP_READ | NOMMU_MAP_WRITE;
367 case MTD_ROM:
368 return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
369 NOMMU_MAP_READ;
370 default:
371 return NOMMU_MAP_COPY;
372 }
373}
Arnd Bergmann706a4e52015-01-28 11:09:20 -0700374EXPORT_SYMBOL_GPL(mtd_mmap_capabilities);
Christoph Hellwigb4caecd2015-01-14 10:42:32 +0100375#endif
376
/*
 * Reboot notifier: invoke the driver's _reboot() hook so it can quiesce
 * the device before the system restarts. Only registered for devices
 * that provide _reboot (see mtd_device_parse_register()).
 */
static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state,
			       void *cmd)
{
	struct mtd_info *mtd;

	mtd = container_of(n, struct mtd_info, reboot_notifier);
	mtd->_reboot(mtd);

	return NOTIFY_DONE;
}
387
/**
 * mtd_wunit_to_pairing_info - get pairing information of a wunit
 * @mtd: pointer to new MTD device info structure
 * @wunit: write unit we are interested in
 * @info: returned pairing information
 *
 * Retrieve pairing information associated to the wunit.
 * This is mainly useful when dealing with MLC/TLC NANDs where pages can be
 * paired together, and where programming a page may influence the page it is
 * paired with.
 * The notion of page is replaced by the term wunit (write-unit) to stay
 * consistent with the ->writesize field.
 *
 * The @wunit argument can be extracted from an absolute offset using
 * mtd_offset_to_wunit(). @info is filled with the pairing information attached
 * to @wunit.
 *
 * From the pairing info the MTD user can find all the wunits paired with
 * @wunit using the following loop:
 *
 * for (i = 0; i < mtd_pairing_groups(mtd); i++) {
 *	info.pair = i;
 *	mtd_pairing_info_to_wunit(mtd, &info);
 *	...
 * }
 */
int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
			      struct mtd_pairing_info *info)
{
	int npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd);

	if (wunit < 0 || wunit >= npairs)
		return -EINVAL;

	/* Delegate to the driver's pairing scheme when one is provided. */
	if (mtd->pairing && mtd->pairing->get_info)
		return mtd->pairing->get_info(mtd, wunit, info);

	/* Default scheme: a single group, pair index equals the wunit. */
	info->group = 0;
	info->pair = wunit;

	return 0;
}
EXPORT_SYMBOL_GPL(mtd_wunit_to_pairing_info);
431
/**
 * mtd_pairing_info_to_wunit - get wunit from pairing information
 * @mtd: pointer to new MTD device info structure
 * @info: pairing information struct
 *
 * Returns a positive number representing the wunit associated to the info
 * struct, or a negative error code.
 *
 * This is the reverse of mtd_wunit_to_pairing_info(), and can help one to
 * iterate over all wunits of a given pair (see mtd_wunit_to_pairing_info()
 * doc).
 *
 * It can also be used to only program the first page of each pair (i.e.
 * page attached to group 0), which allows one to use an MLC NAND in
 * software-emulated SLC mode:
 *
 * info.group = 0;
 * npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd);
 * for (info.pair = 0; info.pair < npairs; info.pair++) {
 *	wunit = mtd_pairing_info_to_wunit(mtd, &info);
 *	mtd_write(mtd, mtd_wunit_to_offset(mtd, blkoffs, wunit),
 *		  mtd->writesize, &retlen, buf + (i * mtd->writesize));
 * }
 */
int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
			      const struct mtd_pairing_info *info)
{
	int ngroups = mtd_pairing_groups(mtd);
	int npairs = mtd_wunit_per_eb(mtd) / ngroups;

	/* Both pair and group indices must be within the device's geometry. */
	if (!info || info->pair < 0 || info->pair >= npairs ||
	    info->group < 0 || info->group >= ngroups)
		return -EINVAL;

	/* Delegate to the driver's pairing scheme when one is provided. */
	if (mtd->pairing && mtd->pairing->get_wunit)
		return mtd->pairing->get_wunit(mtd, info);

	/* Default scheme: wunit equals the pair index. */
	return info->pair;
}
EXPORT_SYMBOL_GPL(mtd_pairing_info_to_wunit);
472
473/**
474 * mtd_pairing_groups - get the number of pairing groups
475 * @mtd: pointer to new MTD device info structure
476 *
477 * Returns the number of pairing groups.
478 *
479 * This number is usually equal to the number of bits exposed by a single
480 * cell, and can be used in conjunction with mtd_pairing_info_to_wunit()
481 * to iterate over all pages of a given pair.
482 */
483int mtd_pairing_groups(struct mtd_info *mtd)
484{
485 if (!mtd->pairing || !mtd->pairing->ngroups)
486 return 1;
487
488 return mtd->pairing->ngroups;
489}
490EXPORT_SYMBOL_GPL(mtd_pairing_groups);
491
Alban Bedelc4dfa252018-11-13 15:01:10 +0100492static int mtd_nvmem_reg_read(void *priv, unsigned int offset,
493 void *val, size_t bytes)
494{
495 struct mtd_info *mtd = priv;
496 size_t retlen;
497 int err;
498
499 err = mtd_read(mtd, offset, bytes, &retlen, val);
500 if (err && err != -EUCLEAN)
501 return err;
502
503 return retlen == bytes ? 0 : -EIO;
504}
505
506static int mtd_nvmem_add(struct mtd_info *mtd)
507{
508 struct nvmem_config config = {};
509
510 config.dev = &mtd->dev;
511 config.name = mtd->name;
512 config.owner = THIS_MODULE;
513 config.reg_read = mtd_nvmem_reg_read;
514 config.size = mtd->size;
515 config.word_size = 1;
516 config.stride = 1;
517 config.read_only = true;
518 config.root_only = true;
519 config.no_of_node = true;
520 config.priv = mtd;
521
522 mtd->nvmem = nvmem_register(&config);
523 if (IS_ERR(mtd->nvmem)) {
524 /* Just ignore if there is no NVMEM support in the kernel */
525 if (PTR_ERR(mtd->nvmem) == -ENOSYS) {
526 mtd->nvmem = NULL;
527 } else {
528 dev_err(&mtd->dev, "Failed to register NVMEM device\n");
529 return PTR_ERR(mtd->nvmem);
530 }
531 }
532
533 return 0;
534}
535
/* Parent debugfs directory for per-device entries; NOTE(review): created
 * outside this chunk — add_mtd_device() only uses it when non-NULL/valid. */
static struct dentry *dfs_dir_mtd;
537
/**
 * add_mtd_device - register an MTD device
 * @mtd: pointer to new MTD device info structure
 *
 * Add a device to the list of MTD devices present in the system, and
 * notify each currently active MTD 'user' of its arrival. Returns
 * zero on success or non-zero on failure.
 */

int add_mtd_device(struct mtd_info *mtd)
{
	struct mtd_notifier *not;
	int i, error;

	/*
	 * May occur, for instance, on buggy drivers which call
	 * mtd_device_parse_register() multiple times on the same master MTD,
	 * especially with CONFIG_MTD_PARTITIONED_MASTER=y.
	 */
	if (WARN_ONCE(mtd->dev.type, "MTD already registered\n"))
		return -EEXIST;

	BUG_ON(mtd->writesize == 0);

	/* A device must be erasable unless it is explicitly MTD_NO_ERASE. */
	if (WARN_ON((!mtd->erasesize || !mtd->_erase) &&
		    !(mtd->flags & MTD_NO_ERASE)))
		return -EINVAL;

	mutex_lock(&mtd_table_mutex);

	i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
	if (i < 0) {
		error = i;
		goto fail_locked;
	}

	mtd->index = i;
	mtd->usecount = 0;

	/* default value if not set by driver */
	if (mtd->bitflip_threshold == 0)
		mtd->bitflip_threshold = mtd->ecc_strength;

	/* Precompute shift/mask helpers for power-of-two geometries. */
	if (is_power_of_2(mtd->erasesize))
		mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
	else
		mtd->erasesize_shift = 0;

	if (is_power_of_2(mtd->writesize))
		mtd->writesize_shift = ffs(mtd->writesize) - 1;
	else
		mtd->writesize_shift = 0;

	mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
	mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;

	/* Some chips always power up locked. Unlock them now */
	if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
		error = mtd_unlock(mtd, 0, mtd->size);
		if (error && error != -EOPNOTSUPP)
			printk(KERN_WARNING
			       "%s: unlock failed, writes may not work\n",
			       mtd->name);
		/* Ignore unlock failures? */
		error = 0;
	}

	/* Caller should have set dev.parent to match the
	 * physical device, if appropriate.
	 */
	mtd->dev.type = &mtd_devtype;
	mtd->dev.class = &mtd_class;
	mtd->dev.devt = MTD_DEVT(i);
	dev_set_name(&mtd->dev, "mtd%d", i);
	dev_set_drvdata(&mtd->dev, mtd);
	/* Hold a reference on the OF node for the device's lifetime. */
	of_node_get(mtd_get_of_node(mtd));
	error = device_register(&mtd->dev);
	if (error)
		goto fail_added;

	/* Add the nvmem provider */
	error = mtd_nvmem_add(mtd);
	if (error)
		goto fail_nvmem_add;

	/* Per-device debugfs directory; failure here is non-fatal. */
	if (!IS_ERR_OR_NULL(dfs_dir_mtd)) {
		mtd->dbg.dfs_dir = debugfs_create_dir(dev_name(&mtd->dev), dfs_dir_mtd);
		if (IS_ERR_OR_NULL(mtd->dbg.dfs_dir)) {
			pr_debug("mtd device %s won't show data in debugfs\n",
				 dev_name(&mtd->dev));
		}
	}

	/* Companion read-only node /dev/mtdXro (odd minor). */
	device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
		      "mtd%dro", i);

	pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
	/* No need to get a refcount on the module containing
	   the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->add(mtd);

	mutex_unlock(&mtd_table_mutex);
	/* We _know_ we aren't being removed, because
	   our caller is still holding us here. So none
	   of this try_ nonsense, and no bitching about it
	   either. :) */
	__module_get(THIS_MODULE);
	return 0;

fail_nvmem_add:
	device_unregister(&mtd->dev);
fail_added:
	of_node_put(mtd_get_of_node(mtd));
	idr_remove(&mtd_idr, i);
fail_locked:
	mutex_unlock(&mtd_table_mutex);
	return error;
}
657
/**
 * del_mtd_device - unregister an MTD device
 * @mtd: pointer to MTD device info structure
 *
 * Remove a device from the list of MTD devices present in the system,
 * and notify each currently active MTD 'user' of its departure.
 * Returns zero on success or 1 on failure, which currently will happen
 * if the requested device does not appear to be present in the list.
 */

int del_mtd_device(struct mtd_info *mtd)
{
	int ret;
	struct mtd_notifier *not;

	mutex_lock(&mtd_table_mutex);

	debugfs_remove_recursive(mtd->dbg.dfs_dir);

	/* The device must still be the one registered at its index. */
	if (idr_find(&mtd_idr, mtd->index) != mtd) {
		ret = -ENODEV;
		goto out_error;
	}

	/* No need to get a refcount on the module containing
	   the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->remove(mtd);

	/* Refuse removal while anyone still holds a use count. */
	if (mtd->usecount) {
		printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
		       mtd->index, mtd->name, mtd->usecount);
		ret = -EBUSY;
	} else {
		/* Try to remove the NVMEM provider */
		if (mtd->nvmem)
			nvmem_unregister(mtd->nvmem);

		device_unregister(&mtd->dev);

		idr_remove(&mtd_idr, mtd->index);
		/* Drop the OF node reference taken in add_mtd_device(). */
		of_node_put(mtd_get_of_node(mtd));

		module_put(THIS_MODULE);
		ret = 0;
	}

out_error:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}
709
Brian Norris472b4442015-12-11 15:58:01 -0800710/*
711 * Set a few defaults based on the parent devices, if not provided by the
712 * driver
713 */
714static void mtd_set_dev_defaults(struct mtd_info *mtd)
715{
716 if (mtd->dev.parent) {
717 if (!mtd->owner && mtd->dev.parent->driver)
718 mtd->owner = mtd->dev.parent->driver->owner;
719 if (!mtd->name)
720 mtd->name = dev_name(mtd->dev.parent);
721 } else {
722 pr_debug("mtd device won't show a device symlink in sysfs\n");
723 }
724}
Dan Ehrenberg727dc612015-04-02 15:15:10 -0700725
/**
 * mtd_device_parse_register - parse partitions and register an MTD device.
 *
 * @mtd: the MTD device to register
 * @types: the list of MTD partition probes to try, see
 *         'parse_mtd_partitions()' for more information
 * @parser_data: MTD partition parser-specific data
 * @parts: fallback partition information to register, if parsing fails;
 *         only valid if %nr_parts > %0
 * @nr_parts: the number of partitions in parts, if zero then the full
 *            MTD device is registered if no partition info is found
 *
 * This function aggregates MTD partitions parsing (done by
 * 'parse_mtd_partitions()') and MTD device and partitions registering. It
 * basically follows the most common pattern found in many MTD drivers:
 *
 * * If the MTD_PARTITIONED_MASTER option is set, then the device as a whole is
 *   registered first.
 * * Then It tries to probe partitions on MTD device @mtd using parsers
 *   specified in @types (if @types is %NULL, then the default list of parsers
 *   is used, see 'parse_mtd_partitions()' for more information). If none are
 *   found this functions tries to fallback to information specified in
 *   @parts/@nr_parts.
 * * If no partitions were found this function just registers the MTD device
 *   @mtd and exits.
 *
 * Returns zero in case of success and a negative error code in case of failure.
 */
int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
			      struct mtd_part_parser_data *parser_data,
			      const struct mtd_partition *parts,
			      int nr_parts)
{
	int ret;

	mtd_set_dev_defaults(mtd);

	/* Optionally expose the unpartitioned master device first. */
	if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
		ret = add_mtd_device(mtd);
		if (ret)
			return ret;
	}

	/* Prefer parsed partitions over driver-provided fallback */
	ret = parse_mtd_partitions(mtd, types, parser_data);
	if (ret > 0)
		ret = 0;
	else if (nr_parts)
		ret = add_mtd_partitions(mtd, parts, nr_parts);
	else if (!device_is_registered(&mtd->dev))
		ret = add_mtd_device(mtd);
	else
		ret = 0;

	if (ret)
		goto out;

	/*
	 * FIXME: some drivers unfortunately call this function more than once.
	 * So we have to check if we've already assigned the reboot notifier.
	 *
	 * Generally, we can make multiple calls work for most cases, but it
	 * does cause problems with parse_mtd_partitions() above (e.g.,
	 * cmdlineparts will register partitions more than once).
	 */
	WARN_ONCE(mtd->_reboot && mtd->reboot_notifier.notifier_call,
		  "MTD already registered\n");
	if (mtd->_reboot && !mtd->reboot_notifier.notifier_call) {
		mtd->reboot_notifier.notifier_call = mtd_reboot_notifier;
		register_reboot_notifier(&mtd->reboot_notifier);
	}

out:
	/* On failure, tear down the master device if it was registered above. */
	if (ret && device_is_registered(&mtd->dev))
		del_mtd_device(mtd);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_device_parse_register);
805
806/**
Jamie Ilesf5671ab2011-05-23 17:15:46 +0100807 * mtd_device_unregister - unregister an existing MTD device.
808 *
809 * @master: the MTD device to unregister. This will unregister both the master
810 * and any partitions if registered.
811 */
812int mtd_device_unregister(struct mtd_info *master)
813{
814 int err;
815
Brian Norris3efe41b2014-11-26 01:01:08 -0800816 if (master->_reboot)
817 unregister_reboot_notifier(&master->reboot_notifier);
818
Jamie Ilesf5671ab2011-05-23 17:15:46 +0100819 err = del_mtd_partitions(master);
820 if (err)
821 return err;
822
823 if (!device_is_registered(&master->dev))
824 return 0;
825
826 return del_mtd_device(master);
827}
828EXPORT_SYMBOL_GPL(mtd_device_unregister);
829
830/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700831 * register_mtd_user - register a 'user' of MTD devices.
832 * @new: pointer to notifier info structure
833 *
834 * Registers a pair of callbacks function to be called upon addition
835 * or removal of MTD devices. Causes the 'add' callback to be immediately
836 * invoked for each MTD device currently present in the system.
837 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700838void register_mtd_user (struct mtd_notifier *new)
839{
Ben Hutchingsf1332ba2010-01-29 20:57:11 +0000840 struct mtd_info *mtd;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700841
Ingo Molnar48b19262006-03-31 02:29:41 -0800842 mutex_lock(&mtd_table_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700843
844 list_add(&new->list, &mtd_notifiers);
845
Wanlong Gaod5ca5122011-05-20 21:14:30 +0800846 __module_get(THIS_MODULE);
Thomas Gleixner97894cd2005-11-07 11:15:26 +0000847
Ben Hutchingsf1332ba2010-01-29 20:57:11 +0000848 mtd_for_each_device(mtd)
849 new->add(mtd);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700850
Ingo Molnar48b19262006-03-31 02:29:41 -0800851 mutex_unlock(&mtd_table_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700852}
Artem Bityutskiy33c87b42011-12-30 16:11:14 +0200853EXPORT_SYMBOL_GPL(register_mtd_user);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700854
855/**
Artem B. Bityuckiy49450792005-02-18 14:34:54 +0000856 * unregister_mtd_user - unregister a 'user' of MTD devices.
857 * @old: pointer to notifier info structure
Linus Torvalds1da177e2005-04-16 15:20:36 -0700858 *
859 * Removes a callback function pair from the list of 'users' to be
860 * notified upon addition or removal of MTD devices. Causes the
861 * 'remove' callback to be immediately invoked for each MTD device
862 * currently present in the system.
863 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700864int unregister_mtd_user (struct mtd_notifier *old)
865{
Ben Hutchingsf1332ba2010-01-29 20:57:11 +0000866 struct mtd_info *mtd;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700867
Ingo Molnar48b19262006-03-31 02:29:41 -0800868 mutex_lock(&mtd_table_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700869
870 module_put(THIS_MODULE);
871
Ben Hutchingsf1332ba2010-01-29 20:57:11 +0000872 mtd_for_each_device(mtd)
873 old->remove(mtd);
Thomas Gleixner97894cd2005-11-07 11:15:26 +0000874
Linus Torvalds1da177e2005-04-16 15:20:36 -0700875 list_del(&old->list);
Ingo Molnar48b19262006-03-31 02:29:41 -0800876 mutex_unlock(&mtd_table_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700877 return 0;
878}
Artem Bityutskiy33c87b42011-12-30 16:11:14 +0200879EXPORT_SYMBOL_GPL(unregister_mtd_user);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700880
881/**
882 * get_mtd_device - obtain a validated handle for an MTD device
883 * @mtd: last known address of the required MTD device
884 * @num: internal device number of the required MTD device
885 *
886 * Given a number and NULL address, return the num'th entry in the device
887 * table, if any. Given an address and num == -1, search the device table
888 * for a device with that address and return if it's still present. Given
Artem Bityutskiy9c740342006-10-11 14:52:47 +0300889 * both, return the num'th driver only if its address matches. Return
890 * error code if not.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700891 */
struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
{
	struct mtd_info *ret = NULL, *other;
	int err = -ENODEV;

	mutex_lock(&mtd_table_mutex);

	if (num == -1) {
		/* Address-only lookup: confirm 'mtd' is still in the table. */
		mtd_for_each_device(other) {
			if (other == mtd) {
				ret = mtd;
				break;
			}
		}
	} else if (num >= 0) {
		/* Number lookup; if an address was also given, both must match. */
		ret = idr_find(&mtd_idr, num);
		if (mtd && mtd != ret)
			ret = NULL;
	}

	if (!ret) {
		ret = ERR_PTR(err);
		goto out;
	}

	/* Take a reference (module + usecount) before handing it out. */
	err = __get_mtd_device(ret);
	if (err)
		ret = ERR_PTR(err);
out:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(get_mtd_device);
Artem Bityutskiy9c740342006-10-11 14:52:47 +0300925
Maxim Levitsky3bd45652010-02-22 20:39:28 +0200926
927int __get_mtd_device(struct mtd_info *mtd)
928{
929 int err;
930
931 if (!try_module_get(mtd->owner))
932 return -ENODEV;
933
Artem Bityutskiy3c3c10b2012-01-30 14:58:32 +0200934 if (mtd->_get_device) {
935 err = mtd->_get_device(mtd);
Maxim Levitsky3bd45652010-02-22 20:39:28 +0200936
937 if (err) {
938 module_put(mtd->owner);
939 return err;
940 }
941 }
942 mtd->usecount++;
943 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700944}
Artem Bityutskiy33c87b42011-12-30 16:11:14 +0200945EXPORT_SYMBOL_GPL(__get_mtd_device);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700946
Artem Bityutskiy77993082006-10-11 14:52:44 +0300947/**
948 * get_mtd_device_nm - obtain a validated handle for an MTD device by
949 * device name
950 * @name: MTD device name to open
951 *
952 * This function returns MTD device description structure in case of
953 * success and an error code in case of failure.
954 */
Artem Bityutskiy77993082006-10-11 14:52:44 +0300955struct mtd_info *get_mtd_device_nm(const char *name)
956{
Ben Hutchingsf1332ba2010-01-29 20:57:11 +0000957 int err = -ENODEV;
958 struct mtd_info *mtd = NULL, *other;
Artem Bityutskiy77993082006-10-11 14:52:44 +0300959
960 mutex_lock(&mtd_table_mutex);
961
Ben Hutchingsf1332ba2010-01-29 20:57:11 +0000962 mtd_for_each_device(other) {
963 if (!strcmp(name, other->name)) {
964 mtd = other;
Artem Bityutskiy77993082006-10-11 14:52:44 +0300965 break;
966 }
967 }
968
Artem Bityutskiy9fe912c2006-10-11 14:52:45 +0300969 if (!mtd)
Artem Bityutskiy77993082006-10-11 14:52:44 +0300970 goto out_unlock;
971
Wanlong Gao52534f22011-05-17 22:36:18 +0800972 err = __get_mtd_device(mtd);
973 if (err)
Artem Bityutskiy77993082006-10-11 14:52:44 +0300974 goto out_unlock;
975
Artem Bityutskiy77993082006-10-11 14:52:44 +0300976 mutex_unlock(&mtd_table_mutex);
977 return mtd;
Artem Bityutskiy9fe912c2006-10-11 14:52:45 +0300978
Artem Bityutskiy9fe912c2006-10-11 14:52:45 +0300979out_unlock:
980 mutex_unlock(&mtd_table_mutex);
981 return ERR_PTR(err);
Artem Bityutskiy77993082006-10-11 14:52:44 +0300982}
Artem Bityutskiy33c87b42011-12-30 16:11:14 +0200983EXPORT_SYMBOL_GPL(get_mtd_device_nm);
Artem Bityutskiy77993082006-10-11 14:52:44 +0300984
/* Drop a reference taken by get_mtd_device()/get_mtd_device_nm(). */
void put_mtd_device(struct mtd_info *mtd)
{
	/* Serialize against table changes; the real work is in the helper. */
	mutex_lock(&mtd_table_mutex);
	__put_mtd_device(mtd);
	mutex_unlock(&mtd_table_mutex);

}
EXPORT_SYMBOL_GPL(put_mtd_device);
Maxim Levitsky3bd45652010-02-22 20:39:28 +0200993
994void __put_mtd_device(struct mtd_info *mtd)
995{
996 --mtd->usecount;
997 BUG_ON(mtd->usecount < 0);
998
Artem Bityutskiy3c3c10b2012-01-30 14:58:32 +0200999 if (mtd->_put_device)
1000 mtd->_put_device(mtd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001001
1002 module_put(mtd->owner);
1003}
Artem Bityutskiy33c87b42011-12-30 16:11:14 +02001004EXPORT_SYMBOL_GPL(__put_mtd_device);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001005
/*
 * Erase is a synchronous operation. Device drivers are expected to return a
 * negative error code if the operation failed and update instr->fail_addr
 * to point to the portion that was not properly erased.
 */
int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	/* Assume nothing about the failure location until the driver says so. */
	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;

	/* No erase size or no erase hook: the device cannot erase at all. */
	if (!mtd->erasesize || !mtd->_erase)
		return -ENOTSUPP;

	/* The requested range must lie entirely within the device. */
	if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	/* A zero-length erase is a successful no-op. */
	if (!instr->len)
		return 0;

	ledtrig_mtd_activity();
	return mtd->_erase(mtd, instr);
}
EXPORT_SYMBOL_GPL(mtd_erase);
1030
1031/*
1032 * This stuff for eXecute-In-Place. phys is optional and may be set to NULL.
1033 */
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	      void **virt, resource_size_t *phys)
{
	/* Initialize outputs so callers see sane values even on error. */
	*retlen = 0;
	*virt = NULL;
	if (phys)
		*phys = 0;
	if (!mtd->_point)
		return -EOPNOTSUPP;
	/* The requested range must lie entirely within the device. */
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	/* A zero-length point is a successful no-op. */
	if (!len)
		return 0;
	return mtd->_point(mtd, from, len, retlen, virt, phys);
}
EXPORT_SYMBOL_GPL(mtd_point);
1050
1051/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	/* Devices that cannot point cannot unpoint either. */
	if (!mtd->_unpoint)
		return -EOPNOTSUPP;
	/* The range must match something that could have been pointed. */
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	/* A zero-length unpoint is a successful no-op. */
	if (!len)
		return 0;
	return mtd->_unpoint(mtd, from, len);
}
EXPORT_SYMBOL_GPL(mtd_unpoint);
1063
1064/*
1065 * Allow NOMMU mmap() to directly map the device (if not NULL)
1066 * - return the address to which the offset maps
1067 * - return -ENOSYS to indicate refusal to do the mapping
1068 */
1069unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
1070 unsigned long offset, unsigned long flags)
1071{
Nicolas Pitre9eaa9032017-10-30 14:48:32 -04001072 size_t retlen;
1073 void *virt;
1074 int ret;
1075
1076 ret = mtd_point(mtd, offset, len, &retlen, &virt, NULL);
1077 if (ret)
1078 return ret;
1079 if (retlen != len) {
1080 mtd_unpoint(mtd, offset, retlen);
1081 return -ENOSYS;
1082 }
1083 return (unsigned long)virt;
Artem Bityutskiy8273a0c2012-02-03 14:34:14 +02001084}
1085EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
1086
1087int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
1088 u_char *buf)
1089{
Mike Dunnedbc45402012-04-25 12:06:11 -07001090 int ret_code;
Artem Bityutskiy834247e2012-02-06 12:39:07 +02001091 *retlen = 0;
Brian Norris0c2b4e22014-07-21 19:06:27 -07001092 if (from < 0 || from >= mtd->size || len > mtd->size - from)
Artem Bityutskiy8273a0c2012-02-03 14:34:14 +02001093 return -EINVAL;
Artem Bityutskiybcb1d232012-02-06 13:27:43 +02001094 if (!len)
1095 return 0;
Mike Dunnedbc45402012-04-25 12:06:11 -07001096
Ezequiel Garciafea728c2016-04-12 17:46:42 -03001097 ledtrig_mtd_activity();
Mike Dunnedbc45402012-04-25 12:06:11 -07001098 /*
1099 * In the absence of an error, drivers return a non-negative integer
1100 * representing the maximum number of bitflips that were corrected on
1101 * any one ecc region (if applicable; zero otherwise).
1102 */
Boris Brezillon24ff1292018-01-09 09:50:34 +01001103 if (mtd->_read) {
1104 ret_code = mtd->_read(mtd, from, len, retlen, buf);
1105 } else if (mtd->_read_oob) {
1106 struct mtd_oob_ops ops = {
1107 .len = len,
1108 .datbuf = buf,
1109 };
1110
1111 ret_code = mtd->_read_oob(mtd, from, &ops);
1112 *retlen = ops.retlen;
1113 } else {
1114 return -ENOTSUPP;
1115 }
1116
Mike Dunnedbc45402012-04-25 12:06:11 -07001117 if (unlikely(ret_code < 0))
1118 return ret_code;
1119 if (mtd->ecc_strength == 0)
1120 return 0; /* device lacks ecc */
1121 return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
Artem Bityutskiy8273a0c2012-02-03 14:34:14 +02001122}
1123EXPORT_SYMBOL_GPL(mtd_read);
1124
1125int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
1126 const u_char *buf)
1127{
1128 *retlen = 0;
Brian Norris0c2b4e22014-07-21 19:06:27 -07001129 if (to < 0 || to >= mtd->size || len > mtd->size - to)
Artem Bityutskiy8273a0c2012-02-03 14:34:14 +02001130 return -EINVAL;
Boris Brezillon24ff1292018-01-09 09:50:34 +01001131 if ((!mtd->_write && !mtd->_write_oob) ||
1132 !(mtd->flags & MTD_WRITEABLE))
Artem Bityutskiy664addc2012-02-03 18:13:23 +02001133 return -EROFS;
Artem Bityutskiybcb1d232012-02-06 13:27:43 +02001134 if (!len)
1135 return 0;
Ezequiel Garciafea728c2016-04-12 17:46:42 -03001136 ledtrig_mtd_activity();
Boris Brezillon24ff1292018-01-09 09:50:34 +01001137
1138 if (!mtd->_write) {
1139 struct mtd_oob_ops ops = {
1140 .len = len,
1141 .datbuf = (u8 *)buf,
1142 };
1143 int ret;
1144
1145 ret = mtd->_write_oob(mtd, to, &ops);
1146 *retlen = ops.retlen;
1147 return ret;
1148 }
1149
Artem Bityutskiy8273a0c2012-02-03 14:34:14 +02001150 return mtd->_write(mtd, to, len, retlen, buf);
1151}
1152EXPORT_SYMBOL_GPL(mtd_write);
1153
1154/*
1155 * In blackbox flight recorder like scenarios we want to make successful writes
1156 * in interrupt context. panic_write() is only intended to be called when its
1157 * known the kernel is about to panic and we need the write to succeed. Since
1158 * the kernel is not going to be running for much longer, this function can
1159 * break locks and delay to ensure the write succeeds (but not sleep).
1160 */
int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
		    const u_char *buf)
{
	*retlen = 0;
	/* Only devices with a dedicated panic-safe write path can do this. */
	if (!mtd->_panic_write)
		return -EOPNOTSUPP;
	/* The requested range must lie entirely within the device. */
	if (to < 0 || to >= mtd->size || len > mtd->size - to)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	/* A zero-length write is a successful no-op. */
	if (!len)
		return 0;
	return mtd->_panic_write(mtd, to, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_panic_write);
1176
Boris Brezillon5cdd9292017-06-27 21:22:21 +02001177static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
1178 struct mtd_oob_ops *ops)
1179{
1180 /*
1181 * Some users are setting ->datbuf or ->oobbuf to NULL, but are leaving
1182 * ->len or ->ooblen uninitialized. Force ->len and ->ooblen to 0 in
1183 * this case.
1184 */
1185 if (!ops->datbuf)
1186 ops->len = 0;
1187
1188 if (!ops->oobbuf)
1189 ops->ooblen = 0;
1190
Miquel Raynald82c3682017-12-18 08:26:28 +01001191 if (offs < 0 || offs + ops->len > mtd->size)
Boris Brezillon5cdd9292017-06-27 21:22:21 +02001192 return -EINVAL;
1193
1194 if (ops->ooblen) {
1195 u64 maxooblen;
1196
1197 if (ops->ooboffs >= mtd_oobavail(mtd, ops))
1198 return -EINVAL;
1199
1200 maxooblen = ((mtd_div_by_ws(mtd->size, mtd) -
1201 mtd_div_by_ws(offs, mtd)) *
1202 mtd_oobavail(mtd, ops)) - ops->ooboffs;
1203 if (ops->ooblen > maxooblen)
1204 return -EINVAL;
1205 }
1206
1207 return 0;
1208}
1209
Brian Norrisd2d48482012-06-22 16:35:38 -07001210int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
1211{
Brian Norrise47f6852012-06-22 16:35:39 -07001212 int ret_code;
Brian Norrisd2d48482012-06-22 16:35:38 -07001213 ops->retlen = ops->oobretlen = 0;
Ezequiel Garciafea728c2016-04-12 17:46:42 -03001214
Boris Brezillon5cdd9292017-06-27 21:22:21 +02001215 ret_code = mtd_check_oob_ops(mtd, from, ops);
1216 if (ret_code)
1217 return ret_code;
1218
Ezequiel Garciafea728c2016-04-12 17:46:42 -03001219 ledtrig_mtd_activity();
Miquel Raynal89fd23e2018-07-14 14:42:00 +02001220
1221 /* Check the validity of a potential fallback on mtd->_read */
1222 if (!mtd->_read_oob && (!mtd->_read || ops->oobbuf))
1223 return -EOPNOTSUPP;
1224
1225 if (mtd->_read_oob)
1226 ret_code = mtd->_read_oob(mtd, from, ops);
1227 else
1228 ret_code = mtd->_read(mtd, from, ops->len, &ops->retlen,
1229 ops->datbuf);
1230
Brian Norrise47f6852012-06-22 16:35:39 -07001231 /*
1232 * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
1233 * similar to mtd->_read(), returning a non-negative integer
1234 * representing max bitflips. In other cases, mtd->_read_oob() may
1235 * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
1236 */
Brian Norrise47f6852012-06-22 16:35:39 -07001237 if (unlikely(ret_code < 0))
1238 return ret_code;
1239 if (mtd->ecc_strength == 0)
1240 return 0; /* device lacks ecc */
1241 return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
Brian Norrisd2d48482012-06-22 16:35:38 -07001242}
1243EXPORT_SYMBOL_GPL(mtd_read_oob);
1244
Ezequiel Garcia0c034fe2016-04-12 17:46:39 -03001245int mtd_write_oob(struct mtd_info *mtd, loff_t to,
1246 struct mtd_oob_ops *ops)
1247{
Boris Brezillon5cdd9292017-06-27 21:22:21 +02001248 int ret;
1249
Ezequiel Garcia0c034fe2016-04-12 17:46:39 -03001250 ops->retlen = ops->oobretlen = 0;
Miquel Raynal89fd23e2018-07-14 14:42:00 +02001251
Ezequiel Garcia0c034fe2016-04-12 17:46:39 -03001252 if (!(mtd->flags & MTD_WRITEABLE))
1253 return -EROFS;
Boris Brezillon5cdd9292017-06-27 21:22:21 +02001254
1255 ret = mtd_check_oob_ops(mtd, to, ops);
1256 if (ret)
1257 return ret;
1258
Ezequiel Garciafea728c2016-04-12 17:46:42 -03001259 ledtrig_mtd_activity();
Miquel Raynal89fd23e2018-07-14 14:42:00 +02001260
1261 /* Check the validity of a potential fallback on mtd->_write */
1262 if (!mtd->_write_oob && (!mtd->_write || ops->oobbuf))
1263 return -EOPNOTSUPP;
1264
1265 if (mtd->_write_oob)
1266 return mtd->_write_oob(mtd, to, ops);
1267 else
1268 return mtd->_write(mtd, to, ops->len, &ops->retlen,
1269 ops->datbuf);
Ezequiel Garcia0c034fe2016-04-12 17:46:39 -03001270}
1271EXPORT_SYMBOL_GPL(mtd_write_oob);
1272
Boris Brezillon75eb2ce2016-02-04 09:52:30 +01001273/**
1274 * mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section
1275 * @mtd: MTD device structure
1276 * @section: ECC section. Depending on the layout you may have all the ECC
1277 * bytes stored in a single contiguous section, or one section
1278 * per ECC chunk (and sometime several sections for a single ECC
1279 * ECC chunk)
1280 * @oobecc: OOB region struct filled with the appropriate ECC position
1281 * information
1282 *
Masahiro Yamada7da0fff2016-12-14 09:31:01 +09001283 * This function returns ECC section information in the OOB area. If you want
Boris Brezillon75eb2ce2016-02-04 09:52:30 +01001284 * to get all the ECC bytes information, then you should call
1285 * mtd_ooblayout_ecc(mtd, section++, oobecc) until it returns -ERANGE.
1286 *
1287 * Returns zero on success, a negative error code otherwise.
1288 */
int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
		      struct mtd_oob_region *oobecc)
{
	/* Always hand back a zeroed region, even on failure. */
	memset(oobecc, 0, sizeof(*oobecc));

	if (!mtd || section < 0)
		return -EINVAL;

	/* The device must provide an OOB layout with an ECC hook. */
	if (!mtd->ooblayout || !mtd->ooblayout->ecc)
		return -ENOTSUPP;

	return mtd->ooblayout->ecc(mtd, section, oobecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);
1303
1304/**
1305 * mtd_ooblayout_free - Get the OOB region definition of a specific free
1306 * section
1307 * @mtd: MTD device structure
1308 * @section: Free section you are interested in. Depending on the layout
1309 * you may have all the free bytes stored in a single contiguous
1310 * section, or one section per ECC chunk plus an extra section
1311 * for the remaining bytes (or other funky layout).
1312 * @oobfree: OOB region struct filled with the appropriate free position
1313 * information
1314 *
Masahiro Yamada7da0fff2016-12-14 09:31:01 +09001315 * This function returns free bytes position in the OOB area. If you want
Boris Brezillon75eb2ce2016-02-04 09:52:30 +01001316 * to get all the free bytes information, then you should call
1317 * mtd_ooblayout_free(mtd, section++, oobfree) until it returns -ERANGE.
1318 *
1319 * Returns zero on success, a negative error code otherwise.
1320 */
int mtd_ooblayout_free(struct mtd_info *mtd, int section,
		       struct mtd_oob_region *oobfree)
{
	/* Always hand back a zeroed region, even on failure. */
	memset(oobfree, 0, sizeof(*oobfree));

	if (!mtd || section < 0)
		return -EINVAL;

	/* The device must provide an OOB layout with a free-bytes hook. */
	if (!mtd->ooblayout || !mtd->ooblayout->free)
		return -ENOTSUPP;

	return mtd->ooblayout->free(mtd, section, oobfree);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_free);
1335
1336/**
1337 * mtd_ooblayout_find_region - Find the region attached to a specific byte
1338 * @mtd: mtd info structure
1339 * @byte: the byte we are searching for
1340 * @sectionp: pointer where the section id will be stored
1341 * @oobregion: used to retrieve the ECC position
1342 * @iter: iterator function. Should be either mtd_ooblayout_free or
1343 * mtd_ooblayout_ecc depending on the region type you're searching for
1344 *
Masahiro Yamada7da0fff2016-12-14 09:31:01 +09001345 * This function returns the section id and oobregion information of a
Boris Brezillon75eb2ce2016-02-04 09:52:30 +01001346 * specific byte. For example, say you want to know where the 4th ECC byte is
1347 * stored, you'll use:
1348 *
1349 * mtd_ooblayout_find_region(mtd, 3, &section, &oobregion, mtd_ooblayout_ecc);
1350 *
1351 * Returns zero on success, a negative error code otherwise.
1352 */
1353static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte,
1354 int *sectionp, struct mtd_oob_region *oobregion,
1355 int (*iter)(struct mtd_info *,
1356 int section,
1357 struct mtd_oob_region *oobregion))
1358{
1359 int pos = 0, ret, section = 0;
1360
1361 memset(oobregion, 0, sizeof(*oobregion));
1362
1363 while (1) {
1364 ret = iter(mtd, section, oobregion);
1365 if (ret)
1366 return ret;
1367
1368 if (pos + oobregion->length > byte)
1369 break;
1370
1371 pos += oobregion->length;
1372 section++;
1373 }
1374
1375 /*
1376 * Adjust region info to make it start at the beginning at the
1377 * 'start' ECC byte.
1378 */
1379 oobregion->offset += byte - pos;
1380 oobregion->length -= byte - pos;
1381 *sectionp = section;
1382
1383 return 0;
1384}
1385
1386/**
1387 * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific
1388 * ECC byte
1389 * @mtd: mtd info structure
1390 * @eccbyte: the byte we are searching for
1391 * @sectionp: pointer where the section id will be stored
1392 * @oobregion: OOB region information
1393 *
1394 * Works like mtd_ooblayout_find_region() except it searches for a specific ECC
1395 * byte.
1396 *
1397 * Returns zero on success, a negative error code otherwise.
1398 */
int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
				 int *section,
				 struct mtd_oob_region *oobregion)
{
	/* Thin wrapper: search using the ECC section iterator. */
	return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion,
					 mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion);
1407
1408/**
1409 * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer
1410 * @mtd: mtd info structure
1411 * @buf: destination buffer to store OOB bytes
1412 * @oobbuf: OOB buffer
1413 * @start: first byte to retrieve
1414 * @nbytes: number of bytes to retrieve
1415 * @iter: section iterator
1416 *
1417 * Extract bytes attached to a specific category (ECC or free)
1418 * from the OOB buffer and copy them into buf.
1419 *
1420 * Returns zero on success, a negative error code otherwise.
1421 */
1422static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
1423 const u8 *oobbuf, int start, int nbytes,
1424 int (*iter)(struct mtd_info *,
1425 int section,
1426 struct mtd_oob_region *oobregion))
1427{
Masahiro Yamada8e8fd4d2016-11-09 11:08:08 +09001428 struct mtd_oob_region oobregion;
1429 int section, ret;
Boris Brezillon75eb2ce2016-02-04 09:52:30 +01001430
1431 ret = mtd_ooblayout_find_region(mtd, start, &section,
1432 &oobregion, iter);
1433
1434 while (!ret) {
1435 int cnt;
1436
Masahiro Yamada7c295ef2016-11-09 11:08:09 +09001437 cnt = min_t(int, nbytes, oobregion.length);
Boris Brezillon75eb2ce2016-02-04 09:52:30 +01001438 memcpy(buf, oobbuf + oobregion.offset, cnt);
1439 buf += cnt;
1440 nbytes -= cnt;
1441
1442 if (!nbytes)
1443 break;
1444
1445 ret = iter(mtd, ++section, &oobregion);
1446 }
1447
1448 return ret;
1449}
1450
1451/**
1452 * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer
1453 * @mtd: mtd info structure
1454 * @buf: source buffer to get OOB bytes from
1455 * @oobbuf: OOB buffer
1456 * @start: first OOB byte to set
1457 * @nbytes: number of OOB bytes to set
1458 * @iter: section iterator
1459 *
1460 * Fill the OOB buffer with data provided in buf. The category (ECC or free)
1461 * is selected by passing the appropriate iterator.
1462 *
1463 * Returns zero on success, a negative error code otherwise.
1464 */
1465static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
1466 u8 *oobbuf, int start, int nbytes,
1467 int (*iter)(struct mtd_info *,
1468 int section,
1469 struct mtd_oob_region *oobregion))
1470{
Masahiro Yamada8e8fd4d2016-11-09 11:08:08 +09001471 struct mtd_oob_region oobregion;
1472 int section, ret;
Boris Brezillon75eb2ce2016-02-04 09:52:30 +01001473
1474 ret = mtd_ooblayout_find_region(mtd, start, &section,
1475 &oobregion, iter);
1476
1477 while (!ret) {
1478 int cnt;
1479
Masahiro Yamada7c295ef2016-11-09 11:08:09 +09001480 cnt = min_t(int, nbytes, oobregion.length);
Boris Brezillon75eb2ce2016-02-04 09:52:30 +01001481 memcpy(oobbuf + oobregion.offset, buf, cnt);
1482 buf += cnt;
1483 nbytes -= cnt;
1484
1485 if (!nbytes)
1486 break;
1487
1488 ret = iter(mtd, ++section, &oobregion);
1489 }
1490
1491 return ret;
1492}
1493
1494/**
1495 * mtd_ooblayout_count_bytes - count the number of bytes in a OOB category
1496 * @mtd: mtd info structure
1497 * @iter: category iterator
1498 *
1499 * Count the number of bytes in a given category.
1500 *
1501 * Returns a positive value on success, a negative error code otherwise.
1502 */
1503static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
1504 int (*iter)(struct mtd_info *,
1505 int section,
1506 struct mtd_oob_region *oobregion))
1507{
Masahiro Yamada4d6aecf2016-11-09 11:08:10 +09001508 struct mtd_oob_region oobregion;
Boris Brezillon75eb2ce2016-02-04 09:52:30 +01001509 int section = 0, ret, nbytes = 0;
1510
1511 while (1) {
1512 ret = iter(mtd, section++, &oobregion);
1513 if (ret) {
1514 if (ret == -ERANGE)
1515 ret = nbytes;
1516 break;
1517 }
1518
1519 nbytes += oobregion.length;
1520 }
1521
1522 return ret;
1523}
1524
1525/**
1526 * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer
1527 * @mtd: mtd info structure
1528 * @eccbuf: destination buffer to store ECC bytes
1529 * @oobbuf: OOB buffer
1530 * @start: first ECC byte to retrieve
1531 * @nbytes: number of ECC bytes to retrieve
1532 *
1533 * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes.
1534 *
1535 * Returns zero on success, a negative error code otherwise.
1536 */
int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
			       const u8 *oobbuf, int start, int nbytes)
{
	/* Thin wrapper: gather bytes using the ECC section iterator. */
	return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes,
				       mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes);
1544
1545/**
1546 * mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer
1547 * @mtd: mtd info structure
1548 * @eccbuf: source buffer to get ECC bytes from
1549 * @oobbuf: OOB buffer
1550 * @start: first ECC byte to set
1551 * @nbytes: number of ECC bytes to set
1552 *
1553 * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes.
1554 *
1555 * Returns zero on success, a negative error code otherwise.
1556 */
int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
			       u8 *oobbuf, int start, int nbytes)
{
	/* Thin wrapper: scatter bytes using the ECC section iterator. */
	return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes,
				       mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes);
1564
1565/**
1566 * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer
1567 * @mtd: mtd info structure
 * @databuf: destination buffer to store data bytes
 * @oobbuf: OOB buffer
 * @start: first data byte to retrieve
 * @nbytes: number of data bytes to retrieve
1572 *
1573 * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
1574 *
1575 * Returns zero on success, a negative error code otherwise.
1576 */
1577int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
1578 const u8 *oobbuf, int start, int nbytes)
1579{
1580 return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes,
1581 mtd_ooblayout_free);
1582}
1583EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes);
1584
1585/**
Xiaolei Lic77a9312018-03-29 09:34:58 +08001586 * mtd_ooblayout_set_databytes - set data bytes into the oob buffer
Boris Brezillon75eb2ce2016-02-04 09:52:30 +01001587 * @mtd: mtd info structure
Xiaolei Lic77a9312018-03-29 09:34:58 +08001588 * @databuf: source buffer to get data bytes from
Boris Brezillon75eb2ce2016-02-04 09:52:30 +01001589 * @oobbuf: OOB buffer
1590 * @start: first ECC byte to set
1591 * @nbytes: number of ECC bytes to set
1592 *
1593 * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
1594 *
1595 * Returns zero on success, a negative error code otherwise.
1596 */
1597int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
1598 u8 *oobbuf, int start, int nbytes)
1599{
1600 return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes,
1601 mtd_ooblayout_free);
1602}
1603EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes);
1604
/**
 * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB
 * @mtd: mtd info structure
 *
 * Works like mtd_ooblayout_count_bytes(), except it counts free bytes.
 *
 * Returns the number of free bytes in the OOB area, or a negative error
 * code on failure.
 */
int mtd_ooblayout_count_freebytes(struct mtd_info *mtd)
{
	return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes);
1618
1619/**
Xiaolei Lic77a9312018-03-29 09:34:58 +08001620 * mtd_ooblayout_count_eccbytes - count the number of ECC bytes in OOB
Boris Brezillon75eb2ce2016-02-04 09:52:30 +01001621 * @mtd: mtd info structure
1622 *
1623 * Works like mtd_ooblayout_count_bytes(), except it count ECC bytes.
1624 *
1625 * Returns zero on success, a negative error code otherwise.
1626 */
1627int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd)
1628{
1629 return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc);
1630}
1631EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);
1632
/*
 * Method to access the protection register area, present in some flash
 * devices. The user data is one time programmable but the factory data is read
 * only.
 */
int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
			   struct otp_info *buf)
{
	/* Factory OTP support is optional; drivers may leave the hook unset. */
	if (!mtd->_get_fact_prot_info)
		return -EOPNOTSUPP;
	/* A zero-length query trivially succeeds with nothing reported. */
	if (!len)
		return 0;
	return mtd->_get_fact_prot_info(mtd, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);
1648
/* Read from the factory (read-only) OTP area, delegating to the driver. */
int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf)
{
	/* Ensure *retlen is well-defined even on the early-return paths. */
	*retlen = 0;
	if (!mtd->_read_fact_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_read_fact_prot_reg(mtd, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);
1660
/* Query layout of the user (one-time-programmable) OTP area. */
int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
			   struct otp_info *buf)
{
	/* User OTP support is optional; drivers may leave the hook unset. */
	if (!mtd->_get_user_prot_info)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_get_user_prot_info(mtd, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);
1671
/* Read from the user OTP area, delegating to the driver hook. */
int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf)
{
	/* Ensure *retlen is well-defined even on the early-return paths. */
	*retlen = 0;
	if (!mtd->_read_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_read_user_prot_reg(mtd, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);
1683
/* Program the user OTP area; programming is one-time, so a short write
 * that stored nothing is treated as out-of-space. */
int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
			    size_t *retlen, u_char *buf)
{
	int ret;

	/* Ensure *retlen is well-defined even on the early-return paths. */
	*retlen = 0;
	if (!mtd->_write_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	ret = mtd->_write_user_prot_reg(mtd, to, len, retlen, buf);
	if (ret)
		return ret;

	/*
	 * If no data could be written at all, we are out of memory and
	 * must return -ENOSPC.
	 */
	return (*retlen) ? 0 : -ENOSPC;
}
EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);
1705
/* Permanently lock a range of the user OTP area, if the driver supports it. */
int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
{
	if (!mtd->_lock_user_prot_reg)
		return -EOPNOTSUPP;
	/* Locking an empty range is a no-op, not an error. */
	if (!len)
		return 0;
	return mtd->_lock_user_prot_reg(mtd, from, len);
}
EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);
1715
Artem Bityutskiy8273a0c2012-02-03 14:34:14 +02001716/* Chip-supported device locking */
1717int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1718{
1719 if (!mtd->_lock)
1720 return -EOPNOTSUPP;
Brian Norris0c2b4e22014-07-21 19:06:27 -07001721 if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
Artem Bityutskiy8273a0c2012-02-03 14:34:14 +02001722 return -EINVAL;
Artem Bityutskiybcb1d232012-02-06 13:27:43 +02001723 if (!len)
1724 return 0;
Artem Bityutskiy8273a0c2012-02-03 14:34:14 +02001725 return mtd->_lock(mtd, ofs, len);
1726}
1727EXPORT_SYMBOL_GPL(mtd_lock);
1728
1729int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1730{
1731 if (!mtd->_unlock)
1732 return -EOPNOTSUPP;
Brian Norris0c2b4e22014-07-21 19:06:27 -07001733 if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
Artem Bityutskiy8273a0c2012-02-03 14:34:14 +02001734 return -EINVAL;
Artem Bityutskiybcb1d232012-02-06 13:27:43 +02001735 if (!len)
1736 return 0;
Artem Bityutskiy8273a0c2012-02-03 14:34:14 +02001737 return mtd->_unlock(mtd, ofs, len);
1738}
1739EXPORT_SYMBOL_GPL(mtd_unlock);
1740
1741int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1742{
1743 if (!mtd->_is_locked)
1744 return -EOPNOTSUPP;
Brian Norris0c2b4e22014-07-21 19:06:27 -07001745 if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
Artem Bityutskiy8273a0c2012-02-03 14:34:14 +02001746 return -EINVAL;
Artem Bityutskiybcb1d232012-02-06 13:27:43 +02001747 if (!len)
1748 return 0;
Artem Bityutskiy8273a0c2012-02-03 14:34:14 +02001749 return mtd->_is_locked(mtd, ofs, len);
1750}
1751EXPORT_SYMBOL_GPL(mtd_is_locked);
1752
/* Report whether the eraseblock at @ofs is reserved (e.g. for a BBT). */
int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
	if (ofs < 0 || ofs >= mtd->size)
		return -EINVAL;
	/* Without driver support, no block is considered reserved. */
	if (!mtd->_block_isreserved)
		return 0;
	return mtd->_block_isreserved(mtd, ofs);
}
EXPORT_SYMBOL_GPL(mtd_block_isreserved);
1762
/* Report whether the eraseblock at @ofs is marked bad. */
int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	if (ofs < 0 || ofs >= mtd->size)
		return -EINVAL;
	/* Devices without bad-block support report every block as good. */
	if (!mtd->_block_isbad)
		return 0;
	return mtd->_block_isbad(mtd, ofs);
}
EXPORT_SYMBOL_GPL(mtd_block_isbad);
1772
/* Mark the eraseblock at @ofs bad, if the driver supports it. */
int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	if (!mtd->_block_markbad)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size)
		return -EINVAL;
	/* Marking a block bad writes to the device, so it must be writable. */
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	return mtd->_block_markbad(mtd, ofs);
}
EXPORT_SYMBOL_GPL(mtd_block_markbad);
1784
1785/*
Artem Bityutskiy52b02032011-12-30 15:57:25 +02001786 * default_mtd_writev - the default writev method
1787 * @mtd: mtd device description object pointer
1788 * @vecs: the vectors to write
1789 * @count: count of vectors in @vecs
1790 * @to: the MTD device offset to write to
1791 * @retlen: on exit contains the count of bytes written to the MTD device.
1792 *
1793 * This function returns zero in case of success and a negative error code in
1794 * case of failure.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001795 */
Artem Bityutskiy1dbebd32011-12-30 16:23:41 +02001796static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
1797 unsigned long count, loff_t to, size_t *retlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001798{
1799 unsigned long i;
1800 size_t totlen = 0, thislen;
1801 int ret = 0;
1802
Artem Bityutskiy52b02032011-12-30 15:57:25 +02001803 for (i = 0; i < count; i++) {
1804 if (!vecs[i].iov_len)
1805 continue;
1806 ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
1807 vecs[i].iov_base);
1808 totlen += thislen;
1809 if (ret || thislen != vecs[i].iov_len)
1810 break;
1811 to += vecs[i].iov_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001812 }
Artem Bityutskiy52b02032011-12-30 15:57:25 +02001813 *retlen = totlen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814 return ret;
1815}
Artem Bityutskiy1dbebd32011-12-30 16:23:41 +02001816
1817/*
1818 * mtd_writev - the vector-based MTD write method
1819 * @mtd: mtd device description object pointer
1820 * @vecs: the vectors to write
1821 * @count: count of vectors in @vecs
1822 * @to: the MTD device offset to write to
1823 * @retlen: on exit contains the count of bytes written to the MTD device.
1824 *
1825 * This function returns zero in case of success and a negative error code in
1826 * case of failure.
1827 */
1828int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
1829 unsigned long count, loff_t to, size_t *retlen)
1830{
1831 *retlen = 0;
Artem Bityutskiy664addc2012-02-03 18:13:23 +02001832 if (!(mtd->flags & MTD_WRITEABLE))
1833 return -EROFS;
Artem Bityutskiy3c3c10b2012-01-30 14:58:32 +02001834 if (!mtd->_writev)
Artem Bityutskiy1dbebd32011-12-30 16:23:41 +02001835 return default_mtd_writev(mtd, vecs, count, to, retlen);
Artem Bityutskiy3c3c10b2012-01-30 14:58:32 +02001836 return mtd->_writev(mtd, vecs, count, to, retlen);
Artem Bityutskiy1dbebd32011-12-30 16:23:41 +02001837}
1838EXPORT_SYMBOL_GPL(mtd_writev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001839
Grant Erickson33b53712011-04-08 08:51:32 -07001840/**
1841 * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
Artem Bityutskiy52b02032011-12-30 15:57:25 +02001842 * @mtd: mtd device description object pointer
1843 * @size: a pointer to the ideal or maximum size of the allocation, points
Grant Erickson33b53712011-04-08 08:51:32 -07001844 * to the actual allocation size on success.
1845 *
1846 * This routine attempts to allocate a contiguous kernel buffer up to
1847 * the specified size, backing off the size of the request exponentially
1848 * until the request succeeds or until the allocation size falls below
1849 * the system page size. This attempts to make sure it does not adversely
1850 * impact system performance, so when allocating more than one page, we
Linus Torvaldscaf49192012-12-10 10:51:16 -08001851 * ask the memory allocator to avoid re-trying, swapping, writing back
1852 * or performing I/O.
Grant Erickson33b53712011-04-08 08:51:32 -07001853 *
1854 * Note, this function also makes sure that the allocated buffer is aligned to
1855 * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
1856 *
1857 * This is called, for example by mtd_{read,write} and jffs2_scan_medium,
1858 * to handle smaller (i.e. degraded) buffer allocations under low- or
1859 * fragmented-memory situations where such reduced allocations, from a
1860 * requested ideal, are allowed.
1861 *
1862 * Returns a pointer to the allocated buffer on success; otherwise, NULL.
1863 */
1864void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
1865{
Mel Gormand0164ad2015-11-06 16:28:21 -08001866 gfp_t flags = __GFP_NOWARN | __GFP_DIRECT_RECLAIM | __GFP_NORETRY;
Grant Erickson33b53712011-04-08 08:51:32 -07001867 size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
1868 void *kbuf;
1869
1870 *size = min_t(size_t, *size, KMALLOC_MAX_SIZE);
1871
1872 while (*size > min_alloc) {
1873 kbuf = kmalloc(*size, flags);
1874 if (kbuf)
1875 return kbuf;
1876
1877 *size >>= 1;
1878 *size = ALIGN(*size, mtd->writesize);
1879 }
1880
1881 /*
1882 * For the last resort allocation allow 'kmalloc()' to do all sorts of
1883 * things (write-back, dropping caches, etc) by using GFP_KERNEL.
1884 */
1885 return kmalloc(*size, GFP_KERNEL);
1886}
Grant Erickson33b53712011-04-08 08:51:32 -07001887EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001888
#ifdef CONFIG_PROC_FS

/*====================================================================*/
/* Support for /proc/mtd */

/* seq_file show callback: one line per registered MTD device. */
static int mtd_proc_show(struct seq_file *m, void *v)
{
	struct mtd_info *dev;

	seq_puts(m, "dev: size erasesize name\n");
	/* Hold the table lock so devices cannot vanish mid-iteration. */
	mutex_lock(&mtd_table_mutex);
	mtd_for_each_device(dev) {
		seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
			   dev->index, (unsigned long long)dev->size,
			   dev->erasesize, dev->name);
	}
	mutex_unlock(&mtd_table_mutex);
	return 0;
}
#endif /* CONFIG_PROC_FS */
1909
Linus Torvalds1da177e2005-04-16 15:20:36 -07001910/*====================================================================*/
1911/* Init code */
1912
Steve Longerbeam445caaa2016-08-04 19:31:15 +05301913static struct backing_dev_info * __init mtd_bdi_init(char *name)
Jens Axboe0661b1a2010-04-27 09:49:47 +02001914{
Steve Longerbeam445caaa2016-08-04 19:31:15 +05301915 struct backing_dev_info *bdi;
Jens Axboe0661b1a2010-04-27 09:49:47 +02001916 int ret;
1917
Jan Karafa060522017-04-12 12:24:37 +02001918 bdi = bdi_alloc(GFP_KERNEL);
Steve Longerbeam445caaa2016-08-04 19:31:15 +05301919 if (!bdi)
1920 return ERR_PTR(-ENOMEM);
Jens Axboe0661b1a2010-04-27 09:49:47 +02001921
Jan Karafa060522017-04-12 12:24:37 +02001922 bdi->name = name;
1923 /*
1924 * We put '-0' suffix to the name to get the same name format as we
1925 * used to get. Since this is called only once, we get a unique name.
1926 */
Jan Kara7c4cc302017-04-12 12:24:49 +02001927 ret = bdi_register(bdi, "%.28s-0", name);
Jens Axboe0661b1a2010-04-27 09:49:47 +02001928 if (ret)
Jan Karafa060522017-04-12 12:24:37 +02001929 bdi_put(bdi);
Jens Axboe0661b1a2010-04-27 09:49:47 +02001930
Steve Longerbeam445caaa2016-08-04 19:31:15 +05301931 return ret ? ERR_PTR(ret) : bdi;
Jens Axboe0661b1a2010-04-27 09:49:47 +02001932}
1933
/* /proc/mtd entry; NULL when procfs is disabled or creation failed. */
static struct proc_dir_entry *proc_mtd;

/*
 * Module init: register the MTD class, set up the shared backing device,
 * create /proc/mtd, initialise the character-device interface and the
 * debugfs directory. Error paths unwind in reverse order via gotos.
 */
static int __init init_mtd(void)
{
	int ret;

	ret = class_register(&mtd_class);
	if (ret)
		goto err_reg;

	mtd_bdi = mtd_bdi_init("mtd");
	if (IS_ERR(mtd_bdi)) {
		ret = PTR_ERR(mtd_bdi);
		goto err_bdi;
	}

	/* proc_create_single() may return NULL; that is tolerated. */
	proc_mtd = proc_create_single("mtd", 0, NULL, mtd_proc_show);

	ret = init_mtdchar();
	if (ret)
		goto out_procfs;

	dfs_dir_mtd = debugfs_create_dir("mtd", NULL);

	return 0;

	/* Unwind in reverse order of the setup above. */
out_procfs:
	if (proc_mtd)
		remove_proc_entry("mtd", NULL);
	bdi_put(mtd_bdi);
err_bdi:
	class_unregister(&mtd_class);
err_reg:
	pr_err("Error registering mtd class or bdi: %d\n", ret);
	return ret;
}
1970
/* Module exit: tear everything down in reverse order of init_mtd(). */
static void __exit cleanup_mtd(void)
{
	debugfs_remove_recursive(dfs_dir_mtd);
	cleanup_mtdchar();
	if (proc_mtd)
		remove_proc_entry("mtd", NULL);
	class_unregister(&mtd_class);
	bdi_put(mtd_bdi);
	/* Release the device-number IDR bookkeeping itself. */
	idr_destroy(&mtd_idr);
}
1981
1982module_init(init_mtd);
1983module_exit(cleanup_mtd);
1984
Linus Torvalds1da177e2005-04-16 15:20:36 -07001985MODULE_LICENSE("GPL");
1986MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
1987MODULE_DESCRIPTION("Core MTD registration and access routines");