/*
 * drivers/mtd/mtd_blkdevs.c
 *
 * (C) 2003 David Woodhouse <dwmw2@infradead.org>
 *
 * Interface to Linux 2.5 block layer for MTD 'translation layers'.
 */
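
/*
 * Usage sketch (illustrative only, not part of this driver): a minimal
 * read-only translation layer.  The "foo"-prefixed names and FOO_MAJOR
 * are hypothetical; the shape is closely modelled on the real user in
 * drivers/mtd/mtdblock_ro.c.  Note that this version of the code treats
 * any nonzero return from register_blkdev() as an error, so the layer
 * must supply a fixed, reserved block major.
 *
 *	static int foo_readsect(struct mtd_blktrans_dev *dev,
 *				unsigned long block, char *buf)
 *	{
 *		size_t retlen;
 *
 *		if (dev->mtd->read(dev->mtd, block << 9, 512, &retlen, buf))
 *			return 1;
 *		return 0;
 *	}
 *
 *	static void foo_add_mtd(struct mtd_blktrans_ops *tr,
 *				struct mtd_info *mtd)
 *	{
 *		struct mtd_blktrans_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 *
 *		if (!dev)
 *			return;
 *		dev->mtd = mtd;
 *		dev->devnum = mtd->index;
 *		dev->size = mtd->size >> 9;
 *		dev->tr = tr;
 *		if (add_mtd_blktrans_dev(dev))
 *			kfree(dev);
 *	}
 *
 *	static void foo_remove_dev(struct mtd_blktrans_dev *dev)
 *	{
 *		del_mtd_blktrans_dev(dev);
 *	}
 *
 *	static struct mtd_blktrans_ops foo_tr = {
 *		.name		= "fooblk",
 *		.major		= FOO_MAJOR,
 *		.part_bits	= 0,
 *		.blksize	= 512,
 *		.readsect	= foo_readsect,
 *		.add_mtd	= foo_add_mtd,
 *		.remove_dev	= foo_remove_dev,
 *		.owner		= THIS_MODULE,
 *	};
 *
 * The layer is then registered with register_mtd_blktrans(&foo_tr) from
 * module_init() and torn down with deregister_mtd_blktrans(&foo_tr)
 * from module_exit().
 */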

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <asm/uaccess.h>

#include "mtdcore.h"

static LIST_HEAD(blktrans_majors);
static DEFINE_MUTEX(blktrans_ref_mutex);

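/*
 * Reference counting: every access through the gendisk goes via
 * blktrans_dev_get()/blktrans_dev_put().  Both take blktrans_ref_mutex
 * around the kref operation, so a lookup through disk->private_data
 * cannot race with blktrans_dev_release() tearing the device down and
 * freeing it.
 */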
void blktrans_dev_release(struct kref *kref)
{
	struct mtd_blktrans_dev *dev =
		container_of(kref, struct mtd_blktrans_dev, ref);

	dev->disk->private_data = NULL;
	blk_cleanup_queue(dev->rq);
	put_disk(dev->disk);
	list_del(&dev->list);
	kfree(dev);
}

static struct mtd_blktrans_dev *blktrans_dev_get(struct gendisk *disk)
{
	struct mtd_blktrans_dev *dev;

	mutex_lock(&blktrans_ref_mutex);
	dev = disk->private_data;

	if (!dev)
		goto unlock;
	kref_get(&dev->ref);
unlock:
	mutex_unlock(&blktrans_ref_mutex);
	return dev;
}

void blktrans_dev_put(struct mtd_blktrans_dev *dev)
{
	mutex_lock(&blktrans_ref_mutex);
	kref_put(&dev->ref, blktrans_dev_release);
	mutex_unlock(&blktrans_ref_mutex);
}

static int do_blktrans_request(struct mtd_blktrans_ops *tr,
			       struct mtd_blktrans_dev *dev,
			       struct request *req)
{
	unsigned long block, nsect;
	char *buf;

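	/*
	 * blk_rq_pos() counts in 512-byte sectors; shifting left by 9
	 * converts to bytes, and shifting right by tr->blkshift converts
	 * to translation-layer blocks.  E.g. with blksize 1024
	 * (blkshift 10), sector 8 is byte 4096, i.e. block 4.
	 */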
	block = blk_rq_pos(req) << 9 >> tr->blkshift;
	nsect = blk_rq_cur_bytes(req) >> tr->blkshift;

	buf = req->buffer;

	if (req->cmd_type != REQ_TYPE_FS)
		return -EIO;

	if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
	    get_capacity(req->rq_disk))
		return -EIO;

	if (req->cmd_flags & REQ_DISCARD)
		return tr->discard(dev, block, nsect);

	switch (rq_data_dir(req)) {
	case READ:
		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
			if (tr->readsect(dev, block, buf))
				return -EIO;
		rq_flush_dcache_pages(req);
		return 0;
	case WRITE:
		if (!tr->writesect)
			return -EIO;

		rq_flush_dcache_pages(req);
		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
			if (tr->writesect(dev, block, buf))
				return -EIO;
		return 0;
	default:
		printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
		return -EIO;
	}
}

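/*
 * Per-device worker thread: requests are fetched from the queue with
 * queue_lock held, but the actual flash I/O in do_blktrans_request()
 * runs with the lock dropped, since it may sleep.  When the queue is
 * empty the thread sleeps until mtd_blktrans_request() wakes it.
 */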
static int mtd_blktrans_thread(void *arg)
{
	struct mtd_blktrans_dev *dev = arg;
	struct request_queue *rq = dev->rq;
	struct request *req = NULL;

	spin_lock_irq(rq->queue_lock);

	while (!kthread_should_stop()) {
		int res;

		if (!req && !(req = blk_fetch_request(rq))) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(rq->queue_lock);
			schedule();
			spin_lock_irq(rq->queue_lock);
			continue;
		}

		spin_unlock_irq(rq->queue_lock);

		mutex_lock(&dev->lock);
		res = do_blktrans_request(dev->tr, dev, req);
		mutex_unlock(&dev->lock);

		spin_lock_irq(rq->queue_lock);

		if (!__blk_end_request_cur(req, res))
			req = NULL;
	}

	if (req)
		__blk_end_request_all(req, -EIO);

	spin_unlock_irq(rq->queue_lock);

	return 0;
}

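/*
 * The block layer's request_fn, called with queue_lock held.  All real
 * work is deferred to the worker thread above; if the device is already
 * gone (queuedata cleared in del_mtd_blktrans_dev()), fail everything.
 */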
static void mtd_blktrans_request(struct request_queue *rq)
{
	struct mtd_blktrans_dev *dev;
	struct request *req = NULL;

	dev = rq->queuedata;

	if (!dev)
		while ((req = blk_fetch_request(rq)) != NULL)
			__blk_end_request_all(req, -ENODEV);
	else
		wake_up_process(dev->thread);
}

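/*
 * open/release: each successful open takes an extra reference on the
 * device (dropped again in blktrans_release()), so the
 * mtd_blktrans_dev stays around while the block device is held open,
 * even if the underlying MTD device disappears in the meantime.
 */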
static int blktrans_open(struct block_device *bdev, fmode_t mode)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
	int ret;

	if (!dev)
		return -ERESTARTSYS;

	mutex_lock(&dev->lock);

	if (!dev->mtd) {
		ret = -ENXIO;
		goto unlock;
	}

	ret = !dev->open++ && dev->tr->open ? dev->tr->open(dev) : 0;

	/* Take another reference on the device so it won't go away
	   until the last release */
	if (!ret)
		kref_get(&dev->ref);
unlock:
	mutex_unlock(&dev->lock);
	blktrans_dev_put(dev);
	return ret;
}

static int blktrans_release(struct gendisk *disk, fmode_t mode)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(disk);
	int ret = -ENXIO;

	if (!dev)
		return ret;

	mutex_lock(&dev->lock);

	/* Release one reference; we're sure it's not the last one here */
	kref_put(&dev->ref, blktrans_dev_release);

	if (!dev->mtd)
		goto unlock;

	ret = !--dev->open && dev->tr->release ? dev->tr->release(dev) : 0;
unlock:
	mutex_unlock(&dev->lock);
	blktrans_dev_put(dev);
	return ret;
}

static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
	int ret = -ENXIO;

	if (!dev)
		return ret;

	mutex_lock(&dev->lock);

	if (!dev->mtd)
		goto unlock;

	ret = dev->tr->getgeo ? dev->tr->getgeo(dev, geo) : 0;
unlock:
	mutex_unlock(&dev->lock);
	blktrans_dev_put(dev);
	return ret;
}

static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
			      unsigned int cmd, unsigned long arg)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
	int ret = -ENXIO;

	if (!dev)
		return ret;

	lock_kernel();
	mutex_lock(&dev->lock);

	if (!dev->mtd)
		goto unlock;

	switch (cmd) {
	case BLKFLSBUF:
		ret = dev->tr->flush ? dev->tr->flush(dev) : 0;
		break;
	default:
		ret = -ENOTTY;
	}
unlock:
	mutex_unlock(&dev->lock);
	unlock_kernel();
	blktrans_dev_put(dev);
	return ret;
}

static const struct block_device_operations mtd_blktrans_ops = {
	.owner		= THIS_MODULE,
	.open		= blktrans_open,
	.release	= blktrans_release,
	.ioctl		= blktrans_ioctl,
	.getgeo		= blktrans_getgeo,
};

int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
	struct mtd_blktrans_ops *tr = new->tr;
	struct mtd_blktrans_dev *d;
	int last_devnum = -1;
	struct gendisk *gd;
	int ret;

	if (mutex_trylock(&mtd_table_mutex)) {
		mutex_unlock(&mtd_table_mutex);
		BUG();
	}

	mutex_lock(&blktrans_ref_mutex);
	list_for_each_entry(d, &tr->devs, list) {
		if (new->devnum == -1) {
			/* Use first free number */
			if (d->devnum != last_devnum+1) {
				/* Found a free devnum. Plug it in here */
				new->devnum = last_devnum+1;
				list_add_tail(&new->list, &d->list);
				goto added;
			}
		} else if (d->devnum == new->devnum) {
			/* Required number taken */
			mutex_unlock(&blktrans_ref_mutex);
			return -EBUSY;
		} else if (d->devnum > new->devnum) {
			/* Required number was free */
			list_add_tail(&new->list, &d->list);
			goto added;
		}
		last_devnum = d->devnum;
	}

	ret = -EBUSY;
	if (new->devnum == -1)
		new->devnum = last_devnum+1;

	/* Check that the device and any partitions will get valid
	 * minor numbers and that the disk naming code below can cope
	 * with this number. */
	if (new->devnum > (MINORMASK >> tr->part_bits) ||
	    (tr->part_bits && new->devnum >= 27 * 26)) {
		mutex_unlock(&blktrans_ref_mutex);
		goto error1;
	}

	list_add_tail(&new->list, &tr->devs);
 added:
	mutex_unlock(&blktrans_ref_mutex);

	mutex_init(&new->lock);
	kref_init(&new->ref);
	if (!tr->writesect)
		new->readonly = 1;

	/* Create gendisk */
	ret = -ENOMEM;
	gd = alloc_disk(1 << tr->part_bits);

	if (!gd)
		goto error2;

	new->disk = gd;
	gd->private_data = new;
	gd->major = tr->major;
	gd->first_minor = (new->devnum) << tr->part_bits;
	gd->fops = &mtd_blktrans_ops;

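	/*
	 * Disk naming: without partition bits the devices are simply
	 * "<name>0", "<name>1", ...  With partition bits they follow
	 * the sd-style scheme: devnum 0..25 map to "<name>a".."<name>z",
	 * devnum 26 onwards to "<name>aa".."<name>zz" (hence the 27*26
	 * limit checked above).
	 */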
	if (tr->part_bits) {
		if (new->devnum < 26)
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c", tr->name, 'a' + new->devnum);
		else
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c%c", tr->name,
				 'a' - 1 + new->devnum / 26,
				 'a' + new->devnum % 26);
	} else {
		snprintf(gd->disk_name, sizeof(gd->disk_name),
			 "%s%d", tr->name, new->devnum);
	}

	set_capacity(gd, (new->size * tr->blksize) >> 9);

	/* Create the request queue */
	spin_lock_init(&new->queue_lock);
	new->rq = blk_init_queue(mtd_blktrans_request, &new->queue_lock);

	if (!new->rq)
		goto error3;

	new->rq->queuedata = new;
	blk_queue_logical_block_size(new->rq, tr->blksize);

	if (tr->discard)
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
					new->rq);

	gd->queue = new->rq;

	__get_mtd_device(new->mtd);
	__module_get(tr->owner);

	/* Create processing thread */
	/* TODO: workqueue ? */
	new->thread = kthread_run(mtd_blktrans_thread, new,
			"%s%d", tr->name, new->mtd->index);
	if (IS_ERR(new->thread)) {
		ret = PTR_ERR(new->thread);
		goto error4;
	}
	gd->driverfs_dev = &new->mtd->dev;

	if (new->readonly)
		set_disk_ro(gd, 1);

	add_disk(gd);

	if (new->disk_attributes) {
		ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
					new->disk_attributes);
		WARN_ON(ret);
	}
	return 0;
error4:
	module_put(tr->owner);
	__put_mtd_device(new->mtd);
	blk_cleanup_queue(new->rq);
error3:
	put_disk(new->disk);
error2:
	list_del(&new->list);
error1:
	kfree(new);
	return ret;
}

int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
	unsigned long flags;

	if (mutex_trylock(&mtd_table_mutex)) {
		mutex_unlock(&mtd_table_mutex);
		BUG();
	}

	/* Stop new requests from arriving */
	del_gendisk(old->disk);

	if (old->disk_attributes)
		sysfs_remove_group(&disk_to_dev(old->disk)->kobj,
						old->disk_attributes);

	/* Stop the thread */
	kthread_stop(old->thread);

	/* Kill current requests */
	spin_lock_irqsave(&old->queue_lock, flags);
	old->rq->queuedata = NULL;
	blk_start_queue(old->rq);
	spin_unlock_irqrestore(&old->queue_lock, flags);

	/* Ask the translation driver to release the mtd device */
	mutex_lock(&old->lock);
	if (old->open && old->tr->release) {
		old->tr->release(old);
		old->open = 0;
	}

	__put_mtd_device(old->mtd);
	module_put(old->tr->owner);

	/* From this point on, we don't touch the mtd anymore */
	old->mtd = NULL;

	mutex_unlock(&old->lock);
	blktrans_dev_put(old);
	return 0;
}

static void blktrans_notify_remove(struct mtd_info *mtd)
{
	struct mtd_blktrans_ops *tr;
	struct mtd_blktrans_dev *dev, *next;

	list_for_each_entry(tr, &blktrans_majors, list)
		list_for_each_entry_safe(dev, next, &tr->devs, list)
			if (dev->mtd == mtd)
				tr->remove_dev(dev);
}

static void blktrans_notify_add(struct mtd_info *mtd)
{
	struct mtd_blktrans_ops *tr;

	if (mtd->type == MTD_ABSENT)
		return;

	list_for_each_entry(tr, &blktrans_majors, list)
		tr->add_mtd(tr, mtd);
}

static struct mtd_notifier blktrans_notifier = {
	.add = blktrans_notify_add,
	.remove = blktrans_notify_remove,
};

int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	struct mtd_info *mtd;
	int ret;

	/* Register the notifier if/when the first device type is
	   registered, to prevent the link/init ordering from tripping
	   us up. */
	if (!blktrans_notifier.list.next)
		register_mtd_user(&blktrans_notifier);

	mutex_lock(&mtd_table_mutex);

	ret = register_blkdev(tr->major, tr->name);
	if (ret) {
		printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
		       tr->name, tr->major, ret);
		mutex_unlock(&mtd_table_mutex);
		return ret;
	}

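	/*
	 * tr->blksize must be a power of two; ffs() returns the 1-based
	 * position of the lowest set bit, so this yields log2(blksize).
	 */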
	tr->blkshift = ffs(tr->blksize) - 1;

	INIT_LIST_HEAD(&tr->devs);
	list_add(&tr->list, &blktrans_majors);

	mtd_for_each_device(mtd)
		if (mtd->type != MTD_ABSENT)
			tr->add_mtd(tr, mtd);

	mutex_unlock(&mtd_table_mutex);
	return 0;
}

int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	struct mtd_blktrans_dev *dev, *next;

	mutex_lock(&mtd_table_mutex);

	/* Remove it from the list of active majors */
	list_del(&tr->list);

	list_for_each_entry_safe(dev, next, &tr->devs, list)
		tr->remove_dev(dev);

	unregister_blkdev(tr->major, tr->name);
	mutex_unlock(&mtd_table_mutex);

	BUG_ON(!list_empty(&tr->devs));
	return 0;
}

static void __exit mtd_blktrans_exit(void)
{
	/* No race here -- if someone's currently in register_mtd_blktrans
	   we're screwed anyway. */
	if (blktrans_notifier.list.next)
		unregister_mtd_user(&blktrans_notifier);
}

module_exit(mtd_blktrans_exit);

EXPORT_SYMBOL_GPL(register_mtd_blktrans);
EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);

MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");