/*
 * fs/logfs/dev_mtd.c	- Device access methods for MTD
 *
 * As should be obvious for Linux kernel code, license is GPLv2
 *
 * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
 */
#include "logfs.h"
#include <linux/completion.h>
#include <linux/mount.h>
#include <linux/sched.h>
#include <linux/slab.h>

#define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1))

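/*
 * Read @len bytes at device offset @ofs into @buf.  Any error from
 * mtd_read() is passed through; a short read is turned into -EIO.
 */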
static int logfs_mtd_read(struct super_block *sb, loff_t ofs, size_t len,
			void *buf)
{
	struct mtd_info *mtd = logfs_super(sb)->s_mtd;
	size_t retlen;
	int ret;

	ret = mtd_read(mtd, ofs, len, &retlen, buf);
	BUG_ON(ret == -EINVAL);
	if (ret)
		return ret;

	/* Not sure if we should loop instead. */
	if (retlen != len)
		return -EIO;

	return 0;
}

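/*
 * Write @len bytes from @buf to device offset @ofs.  The offset must be
 * aligned to the device write size and the length must not exceed one
 * page; both conditions are enforced by BUG_ON().
 */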
static int logfs_mtd_write(struct super_block *sb, loff_t ofs, size_t len,
			void *buf)
{
	struct logfs_super *super = logfs_super(sb);
	struct mtd_info *mtd = super->s_mtd;
	size_t retlen;
	int ret;

	if (super->s_flags & LOGFS_SB_FLAG_RO)
		return -EROFS;

	BUG_ON((ofs >= mtd->size) || (len > mtd->size - ofs));
	BUG_ON(ofs != (ofs >> super->s_writeshift) << super->s_writeshift);
	BUG_ON(len > PAGE_CACHE_SIZE);
	ret = mtd->write(mtd, ofs, len, &retlen, buf);
	if (ret || (retlen != len))
		return -EIO;

	return 0;
}

/*
 * For as long as I can remember (since about 2001) mtd->erase has been an
 * asynchronous interface that is still waiting for the first driver to
 * actually use its asynchronous properties.  So just to prevent the first
 * implementor of such a driver from breaking logfs in 2350, we do the usual
 * pointless dance to declare a completion variable and wait for completion
 * before returning from logfs_mtd_erase().  What an exercise in futility!
 */
static void logfs_erase_callback(struct erase_info *ei)
{
	complete((struct completion *)ei->priv);
}

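/*
 * After a physical erase, bring any cached pages for the region back in
 * sync by filling them with 0xFF, the erased state of flash.
 */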
static int logfs_mtd_erase_mapping(struct super_block *sb, loff_t ofs,
				size_t len)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	struct page *page;
	pgoff_t index;

	for (index = ofs >> PAGE_SHIFT; index < (ofs + len) >> PAGE_SHIFT; index++) {
		page = find_get_page(mapping, index);
		if (!page)
			continue;
		memset(page_address(page), 0xFF, PAGE_SIZE);
		page_cache_release(page);
	}
	return 0;
}

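/*
 * Erase @len bytes starting at @ofs, waiting for the (nominally
 * asynchronous) erase to finish before updating the page cache.
 */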
static int logfs_mtd_erase(struct super_block *sb, loff_t ofs, size_t len,
		int ensure_write)
{
	struct mtd_info *mtd = logfs_super(sb)->s_mtd;
	struct erase_info ei;
	DECLARE_COMPLETION_ONSTACK(complete);
	int ret;

	BUG_ON(len % mtd->erasesize);
	if (logfs_super(sb)->s_flags & LOGFS_SB_FLAG_RO)
		return -EROFS;

	memset(&ei, 0, sizeof(ei));
	ei.mtd = mtd;
	ei.addr = ofs;
	ei.len = len;
	ei.callback = logfs_erase_callback;
	ei.priv = (long)&complete;
	ret = mtd_erase(mtd, &ei);
	if (ret)
		return -EIO;

	wait_for_completion(&complete);
	if (ei.state != MTD_ERASE_DONE)
		return -EIO;
	return logfs_mtd_erase_mapping(sb, ofs, len);
}

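/* Flush the underlying MTD driver's caches, if it implements sync. */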
static void logfs_mtd_sync(struct super_block *sb)
{
	struct mtd_info *mtd = logfs_super(sb)->s_mtd;

	if (mtd->sync)
		mtd->sync(mtd);
}

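/*
 * Filler for read_cache_page() and the ->readpage device op: read a single
 * page from the device.  Corrected bitflips (-EUCLEAN) and uncorrectable
 * ECC errors (-EBADMSG) are tolerated; any other error marks the page as
 * failed.
 */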
static int logfs_mtd_readpage(void *_sb, struct page *page)
{
	struct super_block *sb = _sb;
	int err;

	err = logfs_mtd_read(sb, page->index << PAGE_SHIFT, PAGE_SIZE,
			page_address(page));
	if (err == -EUCLEAN || err == -EBADMSG) {
		/* -EBADMSG happens regularly on power failures */
		err = 0;
		/* FIXME: force GC this segment */
	}
	if (err) {
		ClearPageUptodate(page);
		SetPageError(page);
	} else {
		SetPageUptodate(page);
		ClearPageError(page);
	}
	unlock_page(page);
	return err;
}

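/*
 * Locate the first superblock: skip leading bad blocks and read the first
 * page of the first good erase block.
 */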
static struct page *logfs_mtd_find_first_sb(struct super_block *sb, u64 *ofs)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	filler_t *filler = logfs_mtd_readpage;
	struct mtd_info *mtd = super->s_mtd;

	if (!mtd->block_isbad)
		return NULL;

	*ofs = 0;
	while (mtd->block_isbad(mtd, *ofs)) {
		*ofs += mtd->erasesize;
		if (*ofs >= mtd->size)
			return NULL;
	}
	BUG_ON(*ofs & ~PAGE_MASK);
	return read_cache_page(mapping, *ofs >> PAGE_SHIFT, filler, sb);
}

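/*
 * Locate the superblock copy at the end of the device: skip trailing bad
 * blocks and read the last 0x1000 bytes of the last good erase block.
 */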
static struct page *logfs_mtd_find_last_sb(struct super_block *sb, u64 *ofs)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	filler_t *filler = logfs_mtd_readpage;
	struct mtd_info *mtd = super->s_mtd;

	if (!mtd->block_isbad)
		return NULL;

	*ofs = mtd->size - mtd->erasesize;
	while (mtd->block_isbad(mtd, *ofs)) {
		*ofs -= mtd->erasesize;
		if (*ofs == 0)	/* *ofs is unsigned; ran out of good blocks */
			return NULL;
	}
	/* The superblock occupies the last 0x1000 bytes of the erase block. */
	*ofs = *ofs + mtd->erasesize - 0x1000;
	BUG_ON(*ofs & ~PAGE_MASK);
	return read_cache_page(mapping, *ofs >> PAGE_SHIFT, filler, sb);
}

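/*
 * Write @nr_pages consecutive pages from the mapping inode's page cache to
 * the device; every page is expected to already exist in the cache.
 */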
static int __logfs_mtd_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
		size_t nr_pages)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	struct page *page;
	int i, err;

	for (i = 0; i < nr_pages; i++) {
		page = find_lock_page(mapping, index + i);
		BUG_ON(!page);

		err = logfs_mtd_write(sb, page->index << PAGE_SHIFT, PAGE_SIZE,
					page_address(page));
		unlock_page(page);
		page_cache_release(page);
		if (err)
			return err;
	}
	return 0;
}

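/*
 * Expand an arbitrary byte range to whole pages and write it out, since the
 * underlying writes are always page-sized.
 */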
static void logfs_mtd_writeseg(struct super_block *sb, u64 ofs, size_t len)
{
	struct logfs_super *super = logfs_super(sb);
	int head;

	if (super->s_flags & LOGFS_SB_FLAG_RO)
		return;

	if (len == 0) {
		/* This can happen when the object fit perfectly into a
		 * segment, the segment gets written per sync and subsequently
		 * closed.
		 */
		return;
	}
	head = ofs & (PAGE_SIZE - 1);
	if (head) {
		ofs -= head;
		len += head;
	}
	len = PAGE_ALIGN(len);
	__logfs_mtd_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT);
}

static void logfs_mtd_put_device(struct logfs_super *s)
{
	put_mtd_device(s->s_mtd);
}

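/*
 * Check whether the write-size unit at @ofs is still erased (reads back as
 * all 0xff) and can therefore still be written without a prior erase.
 */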
static int logfs_mtd_can_write_buf(struct super_block *sb, u64 ofs)
{
	struct logfs_super *super = logfs_super(sb);
	void *buf;
	int err;

	buf = kmalloc(super->s_writesize, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	err = logfs_mtd_read(sb, ofs, super->s_writesize, buf);
	if (err)
		goto out;
	if (memchr_inv(buf, 0xff, super->s_writesize))
		err = -EIO;
out:
	kfree(buf);
	return err;
}

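/* Device access methods handed to the generic logfs code. */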
static const struct logfs_device_ops mtd_devops = {
	.find_first_sb	= logfs_mtd_find_first_sb,
	.find_last_sb	= logfs_mtd_find_last_sb,
	.readpage	= logfs_mtd_readpage,
	.writeseg	= logfs_mtd_writeseg,
	.erase		= logfs_mtd_erase,
	.can_write_buf	= logfs_mtd_can_write_buf,
	.sync		= logfs_mtd_sync,
	.put_device	= logfs_mtd_put_device,
};

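/*
 * Attach MTD device number @mtdnr to the logfs superblock and install the
 * MTD device operations.
 */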
int logfs_get_sb_mtd(struct logfs_super *s, int mtdnr)
{
	struct mtd_info *mtd = get_mtd_device(NULL, mtdnr);
	if (IS_ERR(mtd))
		return PTR_ERR(mtd);

	s->s_bdev = NULL;
	s->s_mtd = mtd;
	s->s_devops = &mtd_devops;
	return 0;
}