1 /*
2    md.c : Multiple Devices driver for Linux
3           Copyright (C) 1998, 1999, 2000 Ingo Molnar
4
5      completely rewritten, based on the MD driver code from Marc Zyngier
6
7    Changes:
8
9    - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
10    - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
11    - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
12    - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
13    - kmod support by: Cyrus Durgin
14    - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
15    - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
16
17    - lots of fixes and improvements to the RAID1/RAID5 and generic
18      RAID code (such as request based resynchronization):
19
20      Neil Brown <neilb@cse.unsw.edu.au>.
21
22    - persistent bitmap code
23      Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
24
25    This program is free software; you can redistribute it and/or modify
26    it under the terms of the GNU General Public License as published by
27    the Free Software Foundation; either version 2, or (at your option)
28    any later version.
29
30    You should have received a copy of the GNU General Public License
31    (for example /usr/src/linux/COPYING); if not, write to the Free
32    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
33 */
34
35 #include <linux/kthread.h>
36 #include <linux/blkdev.h>
37 #include <linux/sysctl.h>
38 #include <linux/seq_file.h>
39 #include <linux/buffer_head.h> /* for invalidate_bdev */
40 #include <linux/poll.h>
41 #include <linux/ctype.h>
42 #include <linux/hdreg.h>
43 #include <linux/proc_fs.h>
44 #include <linux/random.h>
45 #include <linux/reboot.h>
46 #include <linux/file.h>
47 #include <linux/delay.h>
48 #include <linux/raid/md_p.h>
49 #include <linux/raid/md_u.h>
50 #include "md.h"
51 #include "bitmap.h"
52
53 #define DEBUG 0
54 #define dprintk(x...) ((void)(DEBUG && printk(x)))
55
56
57 #ifndef MODULE
58 static void autostart_arrays(int part);
59 #endif
60
61 static LIST_HEAD(pers_list);
62 static DEFINE_SPINLOCK(pers_lock);
63
64 static void md_print_devices(void);
65
66 static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
67
68 #define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
69
70 /*
71  * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
72  * is 1000 KB/sec, so the extra system load does not show up that much.
73  * Increase it if you want to have more _guaranteed_ speed. Note that
74  * the RAID driver will use the maximum available bandwidth if the IO
75  * subsystem is idle. There is also an 'absolute maximum' reconstruction
76  * speed limit - in case reconstruction slows down your system despite
77  * idle IO detection.
78  *
79  * You can change it via /proc/sys/dev/raid/speed_limit_min and _max,
80  * or /sys/block/mdX/md/sync_speed_{min,max}.
81  */
82
83 static int sysctl_speed_limit_min = 1000;
84 static int sysctl_speed_limit_max = 200000;
85 static inline int speed_min(mddev_t *mddev)
86 {
87         return mddev->sync_speed_min ?
88                 mddev->sync_speed_min : sysctl_speed_limit_min;
89 }
90
91 static inline int speed_max(mddev_t *mddev)
92 {
93         return mddev->sync_speed_max ?
94                 mddev->sync_speed_max : sysctl_speed_limit_max;
95 }
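
/* Example (userspace, illustrative; "md0" and the value are placeholders):
 * raising the resync floor for one array through the sysfs knob named
 * in the comment above.  Values are in KB/sec, like the sysctl defaults.
 *
 *      #include <fcntl.h>
 *      #include <unistd.h>
 *
 *      int fd = open("/sys/block/md0/md/sync_speed_min", O_WRONLY);
 *      if (fd >= 0) {
 *              write(fd, "50000", 5);
 *              close(fd);
 *      }
 *
 * Writing the string "system" instead restores the system-wide default,
 * which is the mddev->sync_speed_min == 0 case that speed_min() encodes.
 */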
96
97 static struct ctl_table_header *raid_table_header;
98
99 static ctl_table raid_table[] = {
100         {
101                 .ctl_name       = DEV_RAID_SPEED_LIMIT_MIN,
102                 .procname       = "speed_limit_min",
103                 .data           = &sysctl_speed_limit_min,
104                 .maxlen         = sizeof(int),
105                 .mode           = S_IRUGO|S_IWUSR,
106                 .proc_handler   = &proc_dointvec,
107         },
108         {
109                 .ctl_name       = DEV_RAID_SPEED_LIMIT_MAX,
110                 .procname       = "speed_limit_max",
111                 .data           = &sysctl_speed_limit_max,
112                 .maxlen         = sizeof(int),
113                 .mode           = S_IRUGO|S_IWUSR,
114                 .proc_handler   = &proc_dointvec,
115         },
116         { .ctl_name = 0 }
117 };
118
119 static ctl_table raid_dir_table[] = {
120         {
121                 .ctl_name       = DEV_RAID,
122                 .procname       = "raid",
123                 .maxlen         = 0,
124                 .mode           = S_IRUGO|S_IXUGO,
125                 .child          = raid_table,
126         },
127         { .ctl_name = 0 }
128 };
129
130 static ctl_table raid_root_table[] = {
131         {
132                 .ctl_name       = CTL_DEV,
133                 .procname       = "dev",
134                 .maxlen         = 0,
135                 .mode           = 0555,
136                 .child          = raid_dir_table,
137         },
138         { .ctl_name = 0 }
139 };
140
141 static struct block_device_operations md_fops;
142
143 static int start_readonly;
144
145 /*
146  * We have a system wide 'event count' that is incremented
147  * on any 'interesting' event, and readers of /proc/mdstat
148  * can use 'poll' or 'select' to find out when the event
149  * count increases.
150  *
151  * Events are:
152  *  start array, stop array, error, add device, remove device,
153  *  start build, activate spare
154  */
155 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
156 static atomic_t md_event_count;
157 void md_new_event(mddev_t *mddev)
158 {
159         atomic_inc(&md_event_count);
160         wake_up(&md_event_waiters);
161 }
162 EXPORT_SYMBOL_GPL(md_new_event);
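
/* Sketch (userspace, illustrative): how a monitor waits for the event
 * count to change.  /proc/mdstat must be read once first; the wakeup is
 * reported as "exceptional" readiness (POLLERR|POLLPRI), after which the
 * file is re-read from offset 0.
 *
 *      #include <poll.h>
 *      #include <fcntl.h>
 *      #include <unistd.h>
 *
 *      char buf[4096];
 *      int fd = open("/proc/mdstat", O_RDONLY);
 *      struct pollfd pfd = { .fd = fd, .events = POLLPRI };
 *
 *      read(fd, buf, sizeof(buf));
 *      poll(&pfd, 1, -1);
 *      lseek(fd, 0, SEEK_SET);
 *      read(fd, buf, sizeof(buf));
 */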
163
164 /* Alternate version that can be called from interrupts
165  * when calling sysfs_notify isn't needed.
166  */
167 static void md_new_event_inintr(mddev_t *mddev)
168 {
169         atomic_inc(&md_event_count);
170         wake_up(&md_event_waiters);
171 }
172
173 /*
174  * Enables iteration over all existing md arrays;
175  * all_mddevs_lock protects this list.
176  */
177 static LIST_HEAD(all_mddevs);
178 static DEFINE_SPINLOCK(all_mddevs_lock);
179
180
181 /*
182  * iterates through all used mddevs in the system.
183  * We take care to grab the all_mddevs_lock whenever navigating
184  * the list, and to always hold a refcount when unlocked.
185  * Any code which breaks out of this loop while owning
186  * a reference to the current mddev must mddev_put it.
187  */
188 #define for_each_mddev(mddev,tmp)                                       \
189                                                                         \
190         for (({ spin_lock(&all_mddevs_lock);                            \
191                 tmp = all_mddevs.next;                                  \
192                 mddev = NULL;});                                        \
193              ({ if (tmp != &all_mddevs)                                 \
194                         mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
195                 spin_unlock(&all_mddevs_lock);                          \
196                 if (mddev) mddev_put(mddev);                            \
197                 mddev = list_entry(tmp, mddev_t, all_mddevs);           \
198                 tmp != &all_mddevs;});                                  \
199              ({ spin_lock(&all_mddevs_lock);                            \
200                 tmp = tmp->next;})                                      \
201                 )
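
/* Usage sketch (illustrative): the macro hides the lock/refcount dance,
 * so the loop body may sleep.  'tmp' is the raw list cursor.
 *
 *      mddev_t *mddev;
 *      struct list_head *tmp;
 *
 *      for_each_mddev(mddev, tmp)
 *              printk(KERN_INFO "md%d has %d raid disks\n",
 *                     mddev->md_minor, mddev->raid_disks);
 */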
202
203
204 /* Rather than calling directly into the personality make_request function,
205  * IO requests come here first so that we can check if the device is
206  * being suspended pending a reconfiguration.
207  * We hold a refcount over the call to ->make_request.  By the time that
208  * call has finished, the bio has been linked into some internal structure
209  * and so is visible to ->quiesce(), so we don't need the refcount any more.
210  */
211 static int md_make_request(struct request_queue *q, struct bio *bio)
212 {
213         mddev_t *mddev = q->queuedata;
214         int rv;
215         if (mddev == NULL || mddev->pers == NULL) {
216                 bio_io_error(bio);
217                 return 0;
218         }
219         rcu_read_lock();
220         if (mddev->suspended) {
221                 DEFINE_WAIT(__wait);
222                 for (;;) {
223                         prepare_to_wait(&mddev->sb_wait, &__wait,
224                                         TASK_UNINTERRUPTIBLE);
225                         if (!mddev->suspended)
226                                 break;
227                         rcu_read_unlock();
228                         schedule();
229                         rcu_read_lock();
230                 }
231                 finish_wait(&mddev->sb_wait, &__wait);
232         }
233         atomic_inc(&mddev->active_io);
234         rcu_read_unlock();
235         rv = mddev->pers->make_request(q, bio);
236         if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
237                 wake_up(&mddev->sb_wait);
238
239         return rv;
240 }
241
242 static void mddev_suspend(mddev_t *mddev)
243 {
244         BUG_ON(mddev->suspended);
245         mddev->suspended = 1;
246         synchronize_rcu();
247         wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
248         mddev->pers->quiesce(mddev, 1);
249         md_unregister_thread(mddev->thread);
250         mddev->thread = NULL;
251         /* we now know that no code is executing in the personality module,
252          * except possibly the tail end of a ->bi_end_io function, but that
253          * is certain to complete before the module has a chance to get
254          * unloaded
255          */
256 }
257
258 static void mddev_resume(mddev_t *mddev)
259 {
260         mddev->suspended = 0;
261         wake_up(&mddev->sb_wait);
262         mddev->pers->quiesce(mddev, 0);
263 }
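
/* Sketch (illustrative, not a real md entry point): a reconfiguration
 * that must run with no I/O in flight brackets itself with the pair
 * above.
 *
 *      mddev_suspend(mddev);
 *      ... swap personality-private data structures ...
 *      mddev_resume(mddev);
 *
 * md_make_request() holds active_io across ->make_request, which is what
 * lets mddev_suspend() wait for the array to drain; note that this
 * version also tears down mddev->thread, so the caller is expected to
 * restart it.
 */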
264
265
266 static inline mddev_t *mddev_get(mddev_t *mddev)
267 {
268         atomic_inc(&mddev->active);
269         return mddev;
270 }
271
272 static void mddev_delayed_delete(struct work_struct *ws)
273 {
274         mddev_t *mddev = container_of(ws, mddev_t, del_work);
275         kobject_del(&mddev->kobj);
276         kobject_put(&mddev->kobj);
277 }
278
279 static void mddev_put(mddev_t *mddev)
280 {
281         if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
282                 return;
283         if (!mddev->raid_disks && list_empty(&mddev->disks) &&
284             !mddev->hold_active) {
285                 list_del(&mddev->all_mddevs);
286                 if (mddev->gendisk) {
287                         /* we did a probe so we need to clean up.
288                          * Call schedule_work inside the spinlock
289                          * so that flush_scheduled_work() after
290                          * mddev_find will succeed in waiting for the
291                          * work to be done.
292                          */
293                         INIT_WORK(&mddev->del_work, mddev_delayed_delete);
294                         schedule_work(&mddev->del_work);
295                 } else
296                         kfree(mddev);
297         }
298         spin_unlock(&all_mddevs_lock);
299 }
300
301 static mddev_t * mddev_find(dev_t unit)
302 {
303         mddev_t *mddev, *new = NULL;
304
305  retry:
306         spin_lock(&all_mddevs_lock);
307
308         if (unit) {
309                 list_for_each_entry(mddev, &all_mddevs, all_mddevs)
310                         if (mddev->unit == unit) {
311                                 mddev_get(mddev);
312                                 spin_unlock(&all_mddevs_lock);
313                                 kfree(new);
314                                 return mddev;
315                         }
316
317                 if (new) {
318                         list_add(&new->all_mddevs, &all_mddevs);
319                         spin_unlock(&all_mddevs_lock);
320                         new->hold_active = UNTIL_IOCTL;
321                         return new;
322                 }
323         } else if (new) {
324                 /* find an unused unit number */
325                 static int next_minor = 512;
326                 int start = next_minor;
327                 int is_free = 0;
328                 int dev = 0;
329                 while (!is_free) {
330                         dev = MKDEV(MD_MAJOR, next_minor);
331                         next_minor++;
332                         if (next_minor > MINORMASK)
333                                 next_minor = 0;
334                         if (next_minor == start) {
335                                 /* Oh dear, all in use. */
336                                 spin_unlock(&all_mddevs_lock);
337                                 kfree(new);
338                                 return NULL;
339                         }
340                                 
341                         is_free = 1;
342                         list_for_each_entry(mddev, &all_mddevs, all_mddevs)
343                                 if (mddev->unit == dev) {
344                                         is_free = 0;
345                                         break;
346                                 }
347                 }
348                 new->unit = dev;
349                 new->md_minor = MINOR(dev);
350                 new->hold_active = UNTIL_STOP;
351                 list_add(&new->all_mddevs, &all_mddevs);
352                 spin_unlock(&all_mddevs_lock);
353                 return new;
354         }
355         spin_unlock(&all_mddevs_lock);
356
357         new = kzalloc(sizeof(*new), GFP_KERNEL);
358         if (!new)
359                 return NULL;
360
361         new->unit = unit;
362         if (MAJOR(unit) == MD_MAJOR)
363                 new->md_minor = MINOR(unit);
364         else
365                 new->md_minor = MINOR(unit) >> MdpMinorShift;
366
367         mutex_init(&new->reconfig_mutex);
368         INIT_LIST_HEAD(&new->disks);
369         INIT_LIST_HEAD(&new->all_mddevs);
370         init_timer(&new->safemode_timer);
371         atomic_set(&new->active, 1);
372         atomic_set(&new->openers, 0);
373         atomic_set(&new->active_io, 0);
374         spin_lock_init(&new->write_lock);
375         init_waitqueue_head(&new->sb_wait);
376         init_waitqueue_head(&new->recovery_wait);
377         new->reshape_position = MaxSector;
378         new->resync_min = 0;
379         new->resync_max = MaxSector;
380         new->level = LEVEL_NONE;
381
382         goto retry;
383 }
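
/* Pairing sketch (illustrative): the lookup returns with a reference
 * held, which the caller must drop with mddev_put() when done.
 *
 *      mddev_t *mddev = mddev_find(dev);
 *      if (!mddev)
 *              return -ENODEV;
 *      ... use the array ...
 *      mddev_put(mddev);
 */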
384
385 static inline int mddev_lock(mddev_t * mddev)
386 {
387         return mutex_lock_interruptible(&mddev->reconfig_mutex);
388 }
389
390 static inline int mddev_trylock(mddev_t * mddev)
391 {
392         return mutex_trylock(&mddev->reconfig_mutex);
393 }
394
395 static inline void mddev_unlock(mddev_t * mddev)
396 {
397         mutex_unlock(&mddev->reconfig_mutex);
398
399         md_wakeup_thread(mddev->thread);
400 }
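
/* Sketch (illustrative): the usual reconfiguration critical section.
 * mddev_lock() is interruptible, so the error must be propagated.
 *
 *      int err = mddev_lock(mddev);
 *      if (err)
 *              return err;
 *      ... inspect or modify mddev under reconfig_mutex ...
 *      mddev_unlock(mddev);
 */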
401
402 static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
403 {
404         mdk_rdev_t *rdev;
405
406         list_for_each_entry(rdev, &mddev->disks, same_set)
407                 if (rdev->desc_nr == nr)
408                         return rdev;
409
410         return NULL;
411 }
412
413 static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
414 {
415         mdk_rdev_t *rdev;
416
417         list_for_each_entry(rdev, &mddev->disks, same_set)
418                 if (rdev->bdev->bd_dev == dev)
419                         return rdev;
420
421         return NULL;
422 }
423
424 static struct mdk_personality *find_pers(int level, char *clevel)
425 {
426         struct mdk_personality *pers;
427         list_for_each_entry(pers, &pers_list, list) {
428                 if (level != LEVEL_NONE && pers->level == level)
429                         return pers;
430                 if (strcmp(pers->name, clevel)==0)
431                         return pers;
432         }
433         return NULL;
434 }
435
436 /* return the offset of the superblock in 512-byte sectors */
437 static inline sector_t calc_dev_sboffset(struct block_device *bdev)
438 {
439         sector_t num_sectors = bdev->bd_inode->i_size / 512;
440         return MD_NEW_SIZE_SECTORS(num_sectors);
441 }
442
443 static sector_t calc_num_sectors(mdk_rdev_t *rdev, unsigned chunk_size)
444 {
445         sector_t num_sectors = rdev->sb_start;
446
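        /* e.g. chunk_size = 65536: 65536/512 - 1 = 127, so the mask
         * clears the low 7 bits, rounding the device size down to a
         * whole number of 64K chunks */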
447         if (chunk_size)
448                 num_sectors &= ~((sector_t)chunk_size/512 - 1);
449         return num_sectors;
450 }
451
452 static int alloc_disk_sb(mdk_rdev_t * rdev)
453 {
454         if (rdev->sb_page)
455                 MD_BUG();
456
457         rdev->sb_page = alloc_page(GFP_KERNEL);
458         if (!rdev->sb_page) {
459                 printk(KERN_ALERT "md: out of memory.\n");
460                 return -ENOMEM;
461         }
462
463         return 0;
464 }
465
466 static void free_disk_sb(mdk_rdev_t * rdev)
467 {
468         if (rdev->sb_page) {
469                 put_page(rdev->sb_page);
470                 rdev->sb_loaded = 0;
471                 rdev->sb_page = NULL;
472                 rdev->sb_start = 0;
473                 rdev->sectors = 0;
474         }
475 }
476
477
478 static void super_written(struct bio *bio, int error)
479 {
480         mdk_rdev_t *rdev = bio->bi_private;
481         mddev_t *mddev = rdev->mddev;
482
483         if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
484                 printk("md: super_written gets error=%d, uptodate=%d\n",
485                        error, test_bit(BIO_UPTODATE, &bio->bi_flags));
486                 WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
487                 md_error(mddev, rdev);
488         }
489
490         if (atomic_dec_and_test(&mddev->pending_writes))
491                 wake_up(&mddev->sb_wait);
492         bio_put(bio);
493 }
494
495 static void super_written_barrier(struct bio *bio, int error)
496 {
497         struct bio *bio2 = bio->bi_private;
498         mdk_rdev_t *rdev = bio2->bi_private;
499         mddev_t *mddev = rdev->mddev;
500
501         if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
502             error == -EOPNOTSUPP) {
503                 unsigned long flags;
504                 /* barriers don't appear to be supported :-( */
505                 set_bit(BarriersNotsupp, &rdev->flags);
506                 mddev->barriers_work = 0;
507                 spin_lock_irqsave(&mddev->write_lock, flags);
508                 bio2->bi_next = mddev->biolist;
509                 mddev->biolist = bio2;
510                 spin_unlock_irqrestore(&mddev->write_lock, flags);
511                 wake_up(&mddev->sb_wait);
512                 bio_put(bio);
513         } else {
514                 bio_put(bio2);
515                 bio->bi_private = rdev;
516                 super_written(bio, error);
517         }
518 }
519
520 void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
521                    sector_t sector, int size, struct page *page)
522 {
523         /* Write the first 'size' bytes of 'page' to 'sector' of 'rdev'.
524          * Increment mddev->pending_writes before returning
525          * and decrement it on completion, waking up sb_wait
526          * if zero is reached.
527          * If an error occurred, call md_error
528          *
529          * As we might need to resubmit the request if BIO_RW_BARRIER
530          * fails with -EOPNOTSUPP, we allocate a spare bio...
531          */
532         struct bio *bio = bio_alloc(GFP_NOIO, 1);
533         int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNCIO) | (1<<BIO_RW_UNPLUG);
534
535         bio->bi_bdev = rdev->bdev;
536         bio->bi_sector = sector;
537         bio_add_page(bio, page, size, 0);
538         bio->bi_private = rdev;
539         bio->bi_end_io = super_written;
540         bio->bi_rw = rw;
541
542         atomic_inc(&mddev->pending_writes);
543         if (!test_bit(BarriersNotsupp, &rdev->flags)) {
544                 struct bio *rbio;
545                 rw |= (1<<BIO_RW_BARRIER);
546                 rbio = bio_clone(bio, GFP_NOIO);
547                 rbio->bi_private = bio;
548                 rbio->bi_end_io = super_written_barrier;
549                 submit_bio(rw, rbio);
550         } else
551                 submit_bio(rw, bio);
552 }
553
554 void md_super_wait(mddev_t *mddev)
555 {
556         /* Wait for all superblock writes that were scheduled to complete.
557          * If any had to be retried (due to BARRIER problems), retry them.
558          */
559         DEFINE_WAIT(wq);
560         for(;;) {
561                 prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
562                 if (atomic_read(&mddev->pending_writes)==0)
563                         break;
564                 while (mddev->biolist) {
565                         struct bio *bio;
566                         spin_lock_irq(&mddev->write_lock);
567                         bio = mddev->biolist;
568                         mddev->biolist = bio->bi_next ;
569                         bio->bi_next = NULL;
570                         spin_unlock_irq(&mddev->write_lock);
571                         submit_bio(bio->bi_rw, bio);
572                 }
573                 schedule();
574         }
575         finish_wait(&mddev->sb_wait, &wq);
576 }
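
/* Usage sketch: the normal pairing, as in super_90_rdev_size_change()
 * further down -- fire the write, then block until pending_writes
 * drains (resubmitting any barrier-rejected bios along the way).
 *
 *      md_super_write(mddev, rdev, rdev->sb_start, rdev->sb_size,
 *                     rdev->sb_page);
 *      md_super_wait(mddev);
 */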
577
578 static void bi_complete(struct bio *bio, int error)
579 {
580         complete((struct completion*)bio->bi_private);
581 }
582
583 int sync_page_io(struct block_device *bdev, sector_t sector, int size,
584                    struct page *page, int rw)
585 {
586         struct bio *bio = bio_alloc(GFP_NOIO, 1);
587         struct completion event;
588         int ret;
589
590         rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
591
592         bio->bi_bdev = bdev;
593         bio->bi_sector = sector;
594         bio_add_page(bio, page, size, 0);
595         init_completion(&event);
596         bio->bi_private = &event;
597         bio->bi_end_io = bi_complete;
598         submit_bio(rw, bio);
599         wait_for_completion(&event);
600
601         ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
602         bio_put(bio);
603         return ret;
604 }
605 EXPORT_SYMBOL_GPL(sync_page_io);
606
607 static int read_disk_sb(mdk_rdev_t * rdev, int size)
608 {
609         char b[BDEVNAME_SIZE];
610         if (!rdev->sb_page) {
611                 MD_BUG();
612                 return -EINVAL;
613         }
614         if (rdev->sb_loaded)
615                 return 0;
616
617
618         if (!sync_page_io(rdev->bdev, rdev->sb_start, size, rdev->sb_page, READ))
619                 goto fail;
620         rdev->sb_loaded = 1;
621         return 0;
622
623 fail:
624         printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
625                 bdevname(rdev->bdev,b));
626         return -EINVAL;
627 }
628
629 static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
630 {
631         return  sb1->set_uuid0 == sb2->set_uuid0 &&
632                 sb1->set_uuid1 == sb2->set_uuid1 &&
633                 sb1->set_uuid2 == sb2->set_uuid2 &&
634                 sb1->set_uuid3 == sb2->set_uuid3;
635 }
636
637 static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
638 {
639         int ret;
640         mdp_super_t *tmp1, *tmp2;
641
642         tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
643         tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);
644
645         if (!tmp1 || !tmp2) {
646                 ret = 0;
647                 printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
648                 goto abort;
649         }
650
651         *tmp1 = *sb1;
652         *tmp2 = *sb2;
653
654         /*
655          * nr_disks is not constant
656          */
657         tmp1->nr_disks = 0;
658         tmp2->nr_disks = 0;
659
660         ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
661 abort:
662         kfree(tmp1);
663         kfree(tmp2);
664         return ret;
665 }
666
667
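/* Worked example: md_csum_fold() folds with end-around carry, so
 * md_csum_fold(0xffff0001) -> 0x0001 + 0xffff = 0x10000, then
 * 0x0000 + 0x1 = 0x0001.  Two rounds cover any 32-bit input. */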
668 static u32 md_csum_fold(u32 csum)
669 {
670         csum = (csum & 0xffff) + (csum >> 16);
671         return (csum & 0xffff) + (csum >> 16);
672 }
673
674 static unsigned int calc_sb_csum(mdp_super_t * sb)
675 {
676         u64 newcsum = 0;
677         u32 *sb32 = (u32*)sb;
678         int i;
679         unsigned int disk_csum, csum;
680
681         disk_csum = sb->sb_csum;
682         sb->sb_csum = 0;
683
684         for (i = 0; i < MD_SB_BYTES/4 ; i++)
685                 newcsum += sb32[i];
686         csum = (newcsum & 0xffffffff) + (newcsum>>32);
687
688
689 #ifdef CONFIG_ALPHA
690         /* This used to use csum_partial, which was wrong for several
691          * reasons including that different results are returned on
692          * different architectures.  It isn't critical that we get exactly
693          * the same return value as before (we always csum_fold before
694          * testing, and that removes any differences).  However as we
695          * know that csum_partial always returned a 16bit value on
696          * alphas, do a fold to maximise conformity to previous behaviour.
697          */
698         sb->sb_csum = md_csum_fold(disk_csum);
699 #else
700         sb->sb_csum = disk_csum;
701 #endif
702         return csum;
703 }
704
705
706 /*
707  * Handle superblock details.
708  * We want to be able to handle multiple superblock formats
709  * so we have a common interface to them all, and an array of
710  * different handlers.
711  * We rely on user-space to write the initial superblock, and support
712  * reading and updating of superblocks.
713  * Interface methods are:
714  *   int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
715  *      loads and validates a superblock on dev.
716  *      if refdev != NULL, compare superblocks on both devices
717  *    Return:
718  *      0 - dev has a superblock that is compatible with refdev
719  *      1 - dev has a superblock that is compatible and newer than refdev
720  *          so dev should be used as the refdev in future
721  *     -EINVAL superblock incompatible or invalid
722  *     -othererror e.g. -EIO
723  *
724  *   int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
725  *      Verify that dev is acceptable into mddev.
726  *       The first time, mddev->raid_disks will be 0, and data from
727  *       dev should be merged in.  Subsequent calls check that dev
728  *       is new enough.  Return 0 or -EINVAL
729  *
730  *   void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
731  *     Update the superblock for rdev with data in mddev
732  *     This does not write to disc.
733  *
734  */
735
736 struct super_type  {
737         char                *name;
738         struct module       *owner;
739         int                 (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev,
740                                           int minor_version);
741         int                 (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
742         void                (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
743         unsigned long long  (*rdev_size_change)(mdk_rdev_t *rdev,
744                                                 sector_t num_sectors);
745 };
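
/* Dispatch sketch (illustrative): elsewhere in md.c these methods are
 * invoked through a table indexed by the major superblock version, e.g.
 *
 *      err = super_types[mddev->major_version].
 *              load_super(rdev, refdev, mddev->minor_version);
 *
 * super_types[] itself is defined after the version 1 handlers below.
 */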
746
747 /*
748  * load_super for 0.90.0 
749  */
750 static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
751 {
752         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
753         mdp_super_t *sb;
754         int ret;
755
756         /*
757          * Calculate the position of the superblock (in 512-byte sectors);
758          * it's at the end of the disk.
759          *
760          * It also happens to be a multiple of 4KB.
761          */
762         rdev->sb_start = calc_dev_sboffset(rdev->bdev);
763
764         ret = read_disk_sb(rdev, MD_SB_BYTES);
765         if (ret) return ret;
766
767         ret = -EINVAL;
768
769         bdevname(rdev->bdev, b);
770         sb = (mdp_super_t*)page_address(rdev->sb_page);
771
772         if (sb->md_magic != MD_SB_MAGIC) {
773                 printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
774                        b);
775                 goto abort;
776         }
777
778         if (sb->major_version != 0 ||
779             sb->minor_version < 90 ||
780             sb->minor_version > 91) {
781                 printk(KERN_WARNING "Bad version number %d.%d on %s\n",
782                         sb->major_version, sb->minor_version,
783                         b);
784                 goto abort;
785         }
786
787         if (sb->raid_disks <= 0)
788                 goto abort;
789
790         if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
791                 printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
792                         b);
793                 goto abort;
794         }
795
796         rdev->preferred_minor = sb->md_minor;
797         rdev->data_offset = 0;
798         rdev->sb_size = MD_SB_BYTES;
799
800         if (sb->state & (1<<MD_SB_BITMAP_PRESENT)) {
801                 if (sb->level != 1 && sb->level != 4
802                     && sb->level != 5 && sb->level != 6
803                     && sb->level != 10) {
804                         /* FIXME use a better test */
805                         printk(KERN_WARNING
806                                "md: bitmaps not supported for this level.\n");
807                         goto abort;
808                 }
809         }
810
811         if (sb->level == LEVEL_MULTIPATH)
812                 rdev->desc_nr = -1;
813         else
814                 rdev->desc_nr = sb->this_disk.number;
815
816         if (!refdev) {
817                 ret = 1;
818         } else {
819                 __u64 ev1, ev2;
820                 mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
821                 if (!uuid_equal(refsb, sb)) {
822                         printk(KERN_WARNING "md: %s has different UUID to %s\n",
823                                 b, bdevname(refdev->bdev,b2));
824                         goto abort;
825                 }
826                 if (!sb_equal(refsb, sb)) {
827                         printk(KERN_WARNING "md: %s has same UUID"
828                                " but different superblock to %s\n",
829                                b, bdevname(refdev->bdev, b2));
830                         goto abort;
831                 }
832                 ev1 = md_event(sb);
833                 ev2 = md_event(refsb);
834                 if (ev1 > ev2)
835                         ret = 1;
836                 else 
837                         ret = 0;
838         }
839         rdev->sectors = calc_num_sectors(rdev, sb->chunk_size);
840
841         if (rdev->sectors < sb->size * 2 && sb->level > 1)
842                 /* "this cannot possibly happen" ... */
843                 ret = -EINVAL;
844
845  abort:
846         return ret;
847 }
848
849 /*
850  * validate_super for 0.90.0
851  */
852 static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
853 {
854         mdp_disk_t *desc;
855         mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
856         __u64 ev1 = md_event(sb);
857
858         rdev->raid_disk = -1;
859         clear_bit(Faulty, &rdev->flags);
860         clear_bit(In_sync, &rdev->flags);
861         clear_bit(WriteMostly, &rdev->flags);
862         clear_bit(BarriersNotsupp, &rdev->flags);
863
864         if (mddev->raid_disks == 0) {
865                 mddev->major_version = 0;
866                 mddev->minor_version = sb->minor_version;
867                 mddev->patch_version = sb->patch_version;
868                 mddev->external = 0;
869                 mddev->chunk_size = sb->chunk_size;
870                 mddev->ctime = sb->ctime;
871                 mddev->utime = sb->utime;
872                 mddev->level = sb->level;
873                 mddev->clevel[0] = 0;
874                 mddev->layout = sb->layout;
875                 mddev->raid_disks = sb->raid_disks;
876                 mddev->dev_sectors = sb->size * 2;
877                 mddev->events = ev1;
878                 mddev->bitmap_offset = 0;
879                 mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
880
881                 if (mddev->minor_version >= 91) {
882                         mddev->reshape_position = sb->reshape_position;
883                         mddev->delta_disks = sb->delta_disks;
884                         mddev->new_level = sb->new_level;
885                         mddev->new_layout = sb->new_layout;
886                         mddev->new_chunk = sb->new_chunk;
887                 } else {
888                         mddev->reshape_position = MaxSector;
889                         mddev->delta_disks = 0;
890                         mddev->new_level = mddev->level;
891                         mddev->new_layout = mddev->layout;
892                         mddev->new_chunk = mddev->chunk_size;
893                 }
894
895                 if (sb->state & (1<<MD_SB_CLEAN))
896                         mddev->recovery_cp = MaxSector;
897                 else {
898                         if (sb->events_hi == sb->cp_events_hi && 
899                                 sb->events_lo == sb->cp_events_lo) {
900                                 mddev->recovery_cp = sb->recovery_cp;
901                         } else
902                                 mddev->recovery_cp = 0;
903                 }
904
905                 memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
906                 memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
907                 memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
908                 memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
909
910                 mddev->max_disks = MD_SB_DISKS;
911
912                 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
913                     mddev->bitmap_file == NULL)
914                         mddev->bitmap_offset = mddev->default_bitmap_offset;
915
916         } else if (mddev->pers == NULL) {
917                 /* Insist on a good event counter while assembling */
918                 ++ev1;
919                 if (ev1 < mddev->events) 
920                         return -EINVAL;
921         } else if (mddev->bitmap) {
922                 /* if adding to array with a bitmap, then we can accept an
923                  * older device ... but not too old.
924                  */
925                 if (ev1 < mddev->bitmap->events_cleared)
926                         return 0;
927         } else {
928                 if (ev1 < mddev->events)
929                         /* just a hot-add of a new device, leave raid_disk at -1 */
930                         return 0;
931         }
932
933         if (mddev->level != LEVEL_MULTIPATH) {
934                 desc = sb->disks + rdev->desc_nr;
935
936                 if (desc->state & (1<<MD_DISK_FAULTY))
937                         set_bit(Faulty, &rdev->flags);
938                 else if (desc->state & (1<<MD_DISK_SYNC) /* &&
939                             desc->raid_disk < mddev->raid_disks */) {
940                         set_bit(In_sync, &rdev->flags);
941                         rdev->raid_disk = desc->raid_disk;
942                 }
943                 if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
944                         set_bit(WriteMostly, &rdev->flags);
945         } else /* MULTIPATH are always insync */
946                 set_bit(In_sync, &rdev->flags);
947         return 0;
948 }
949
950 /*
951  * sync_super for 0.90.0
952  */
953 static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
954 {
955         mdp_super_t *sb;
956         mdk_rdev_t *rdev2;
957         int next_spare = mddev->raid_disks;
958
959
960         /* make rdev->sb match mddev data:
961          *
962          * 1/ zero out disks
963          * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
964          * 3/ any empty disks < next_spare become removed
965          *
966          * disks[0] gets initialised to REMOVED because
967          * we cannot be sure from other fields if it has
968          * been initialised or not.
969          */
970         int i;
971         int active=0, working=0,failed=0,spare=0,nr_disks=0;
972
973         rdev->sb_size = MD_SB_BYTES;
974
975         sb = (mdp_super_t*)page_address(rdev->sb_page);
976
977         memset(sb, 0, sizeof(*sb));
978
979         sb->md_magic = MD_SB_MAGIC;
980         sb->major_version = mddev->major_version;
981         sb->patch_version = mddev->patch_version;
982         sb->gvalid_words  = 0; /* ignored */
983         memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
984         memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
985         memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
986         memcpy(&sb->set_uuid3, mddev->uuid+12,4);
987
988         sb->ctime = mddev->ctime;
989         sb->level = mddev->level;
990         sb->size = mddev->dev_sectors / 2;
991         sb->raid_disks = mddev->raid_disks;
992         sb->md_minor = mddev->md_minor;
993         sb->not_persistent = 0;
994         sb->utime = mddev->utime;
995         sb->state = 0;
996         sb->events_hi = (mddev->events>>32);
997         sb->events_lo = (u32)mddev->events;
998
999         if (mddev->reshape_position == MaxSector)
1000                 sb->minor_version = 90;
1001         else {
1002                 sb->minor_version = 91;
1003                 sb->reshape_position = mddev->reshape_position;
1004                 sb->new_level = mddev->new_level;
1005                 sb->delta_disks = mddev->delta_disks;
1006                 sb->new_layout = mddev->new_layout;
1007                 sb->new_chunk = mddev->new_chunk;
1008         }
1009         mddev->minor_version = sb->minor_version;
1010         if (mddev->in_sync)
1011         {
1012                 sb->recovery_cp = mddev->recovery_cp;
1013                 sb->cp_events_hi = (mddev->events>>32);
1014                 sb->cp_events_lo = (u32)mddev->events;
1015                 if (mddev->recovery_cp == MaxSector)
1016                         sb->state = (1<< MD_SB_CLEAN);
1017         } else
1018                 sb->recovery_cp = 0;
1019
1020         sb->layout = mddev->layout;
1021         sb->chunk_size = mddev->chunk_size;
1022
1023         if (mddev->bitmap && mddev->bitmap_file == NULL)
1024                 sb->state |= (1<<MD_SB_BITMAP_PRESENT);
1025
1026         sb->disks[0].state = (1<<MD_DISK_REMOVED);
1027         list_for_each_entry(rdev2, &mddev->disks, same_set) {
1028                 mdp_disk_t *d;
1029                 int desc_nr;
1030                 if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
1031                     && !test_bit(Faulty, &rdev2->flags))
1032                         desc_nr = rdev2->raid_disk;
1033                 else
1034                         desc_nr = next_spare++;
1035                 rdev2->desc_nr = desc_nr;
1036                 d = &sb->disks[rdev2->desc_nr];
1037                 nr_disks++;
1038                 d->number = rdev2->desc_nr;
1039                 d->major = MAJOR(rdev2->bdev->bd_dev);
1040                 d->minor = MINOR(rdev2->bdev->bd_dev);
1041                 if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
1042                     && !test_bit(Faulty, &rdev2->flags))
1043                         d->raid_disk = rdev2->raid_disk;
1044                 else
1045                         d->raid_disk = rdev2->desc_nr; /* compatibility */
1046                 if (test_bit(Faulty, &rdev2->flags))
1047                         d->state = (1<<MD_DISK_FAULTY);
1048                 else if (test_bit(In_sync, &rdev2->flags)) {
1049                         d->state = (1<<MD_DISK_ACTIVE);
1050                         d->state |= (1<<MD_DISK_SYNC);
1051                         active++;
1052                         working++;
1053                 } else {
1054                         d->state = 0;
1055                         spare++;
1056                         working++;
1057                 }
1058                 if (test_bit(WriteMostly, &rdev2->flags))
1059                         d->state |= (1<<MD_DISK_WRITEMOSTLY);
1060         }
1061         /* now set the "removed" and "faulty" bits on any missing devices */
1062         for (i=0 ; i < mddev->raid_disks ; i++) {
1063                 mdp_disk_t *d = &sb->disks[i];
1064                 if (d->state == 0 && d->number == 0) {
1065                         d->number = i;
1066                         d->raid_disk = i;
1067                         d->state = (1<<MD_DISK_REMOVED);
1068                         d->state |= (1<<MD_DISK_FAULTY);
1069                         failed++;
1070                 }
1071         }
1072         sb->nr_disks = nr_disks;
1073         sb->active_disks = active;
1074         sb->working_disks = working;
1075         sb->failed_disks = failed;
1076         sb->spare_disks = spare;
1077
1078         sb->this_disk = sb->disks[rdev->desc_nr];
1079         sb->sb_csum = calc_sb_csum(sb);
1080 }
1081
1082 /*
1083  * rdev_size_change for 0.90.0
1084  */
1085 static unsigned long long
1086 super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
1087 {
1088         if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
1089                 return 0; /* component must fit device */
1090         if (rdev->mddev->bitmap_offset)
1091                 return 0; /* can't move bitmap */
1092         rdev->sb_start = calc_dev_sboffset(rdev->bdev);
1093         if (!num_sectors || num_sectors > rdev->sb_start)
1094                 num_sectors = rdev->sb_start;
1095         md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
1096                        rdev->sb_page);
1097         md_super_wait(rdev->mddev);
1098         return num_sectors / 2; /* kB for sysfs */
1099 }
1100
1101
1102 /*
1103  * version 1 superblock
1104  */
1105
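/* A v1 superblock is 256 bytes of fixed header plus a two-byte role per
 * device, so e.g. max_dev = 128 gives a 512-byte checksummed area; the
 * trailing-__le16 case below handles odd device counts. */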
1106 static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
1107 {
1108         __le32 disk_csum;
1109         u32 csum;
1110         unsigned long long newcsum;
1111         int size = 256 + le32_to_cpu(sb->max_dev)*2;
1112         __le32 *isuper = (__le32*)sb;
1113         int i;
1114
1115         disk_csum = sb->sb_csum;
1116         sb->sb_csum = 0;
1117         newcsum = 0;
1118         for (i=0; size>=4; size -= 4 )
1119                 newcsum += le32_to_cpu(*isuper++);
1120
1121         if (size == 2)
1122                 newcsum += le16_to_cpu(*(__le16*) isuper);
1123
1124         csum = (newcsum & 0xffffffff) + (newcsum >> 32);
1125         sb->sb_csum = disk_csum;
1126         return cpu_to_le32(csum);
1127 }
1128
1129 static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
1130 {
1131         struct mdp_superblock_1 *sb;
1132         int ret;
1133         sector_t sb_start;
1134         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1135         int bmask;
1136
1137         /*
1138          * Calculate the position of the superblock in 512-byte sectors.
1139          * It is always aligned to a 4K boundary and,
1140          * depending on minor_version, can be:
1141          * 0: At least 8K, but less than 12K, from end of device
1142          * 1: At start of device
1143          * 2: 4K from start of device.
1144          */
1145         switch(minor_version) {
1146         case 0:
1147                 sb_start = rdev->bdev->bd_inode->i_size >> 9;
1148                 sb_start -= 8*2;
1149                 sb_start &= ~(sector_t)(4*2-1);
1150                 break;
1151         case 1:
1152                 sb_start = 0;
1153                 break;
1154         case 2:
1155                 sb_start = 8;
1156                 break;
1157         default:
1158                 return -EINVAL;
1159         }
1160         rdev->sb_start = sb_start;
1161
1162         /* The superblock is rarely larger than 1K, but it can be larger,
1163          * and it is safe to read 4K, so we do that.
1164          */
1165         ret = read_disk_sb(rdev, 4096);
1166         if (ret) return ret;
1167
1168
1169         sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
1170
1171         if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
1172             sb->major_version != cpu_to_le32(1) ||
1173             le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
1174             le64_to_cpu(sb->super_offset) != rdev->sb_start ||
1175             (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
1176                 return -EINVAL;
1177
1178         if (calc_sb_1_csum(sb) != sb->sb_csum) {
1179                 printk("md: invalid superblock checksum on %s\n",
1180                         bdevname(rdev->bdev,b));
1181                 return -EINVAL;
1182         }
1183         if (le64_to_cpu(sb->data_size) < 10) {
1184                 printk("md: data_size too small on %s\n",
1185                        bdevname(rdev->bdev,b));
1186                 return -EINVAL;
1187         }
1188         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET)) {
1189                 if (sb->level != cpu_to_le32(1) &&
1190                     sb->level != cpu_to_le32(4) &&
1191                     sb->level != cpu_to_le32(5) &&
1192                     sb->level != cpu_to_le32(6) &&
1193                     sb->level != cpu_to_le32(10)) {
1194                         printk(KERN_WARNING
1195                                "md: bitmaps not supported for this level.\n");
1196                         return -EINVAL;
1197                 }
1198         }
1199
1200         rdev->preferred_minor = 0xffff;
1201         rdev->data_offset = le64_to_cpu(sb->data_offset);
1202         atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
1203
1204         rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
1205         bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1;
1206         if (rdev->sb_size & bmask)
1207                 rdev->sb_size = (rdev->sb_size | bmask) + 1;
1208
1209         if (minor_version
1210             && rdev->data_offset < sb_start + (rdev->sb_size/512))
1211                 return -EINVAL;
1212
1213         if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
1214                 rdev->desc_nr = -1;
1215         else
1216                 rdev->desc_nr = le32_to_cpu(sb->dev_number);
1217
1218         if (!refdev) {
1219                 ret = 1;
1220         } else {
1221                 __u64 ev1, ev2;
1222                 struct mdp_superblock_1 *refsb = 
1223                         (struct mdp_superblock_1*)page_address(refdev->sb_page);
1224
1225                 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
1226                     sb->level != refsb->level ||
1227                     sb->layout != refsb->layout ||
1228                     sb->chunksize != refsb->chunksize) {
1229                         printk(KERN_WARNING "md: %s has strangely different"
1230                                 " superblock to %s\n",
1231                                 bdevname(rdev->bdev,b),
1232                                 bdevname(refdev->bdev,b2));
1233                         return -EINVAL;
1234                 }
1235                 ev1 = le64_to_cpu(sb->events);
1236                 ev2 = le64_to_cpu(refsb->events);
1237
1238                 if (ev1 > ev2)
1239                         ret = 1;
1240                 else
1241                         ret = 0;
1242         }
1243         if (minor_version)
1244                 rdev->sectors = (rdev->bdev->bd_inode->i_size >> 9) -
1245                         le64_to_cpu(sb->data_offset);
1246         else
1247                 rdev->sectors = rdev->sb_start;
1248         if (rdev->sectors < le64_to_cpu(sb->data_size))
1249                 return -EINVAL;
1250         rdev->sectors = le64_to_cpu(sb->data_size);
1251         if (le32_to_cpu(sb->chunksize))
1252                 rdev->sectors &= ~((sector_t)le32_to_cpu(sb->chunksize) - 1);
1253
1254         if (le64_to_cpu(sb->size) > rdev->sectors)
1255                 return -EINVAL;
1256         return ret;
1257 }
1258
1259 static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
1260 {
1261         struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
1262         __u64 ev1 = le64_to_cpu(sb->events);
1263
1264         rdev->raid_disk = -1;
1265         clear_bit(Faulty, &rdev->flags);
1266         clear_bit(In_sync, &rdev->flags);
1267         clear_bit(WriteMostly, &rdev->flags);
1268         clear_bit(BarriersNotsupp, &rdev->flags);
1269
1270         if (mddev->raid_disks == 0) {
1271                 mddev->major_version = 1;
1272                 mddev->patch_version = 0;
1273                 mddev->external = 0;
1274                 mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9;
1275                 mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
1276                 mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
1277                 mddev->level = le32_to_cpu(sb->level);
1278                 mddev->clevel[0] = 0;
1279                 mddev->layout = le32_to_cpu(sb->layout);
1280                 mddev->raid_disks = le32_to_cpu(sb->raid_disks);
1281                 mddev->dev_sectors = le64_to_cpu(sb->size);
1282                 mddev->events = ev1;
1283                 mddev->bitmap_offset = 0;
1284                 mddev->default_bitmap_offset = 1024 >> 9;
1285                 
1286                 mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
1287                 memcpy(mddev->uuid, sb->set_uuid, 16);
1288
1289                 mddev->max_disks =  (4096-256)/2;
1290
1291                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
1292                     mddev->bitmap_file == NULL )
1293                         mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);
1294
1295                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
1296                         mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1297                         mddev->delta_disks = le32_to_cpu(sb->delta_disks);
1298                         mddev->new_level = le32_to_cpu(sb->new_level);
1299                         mddev->new_layout = le32_to_cpu(sb->new_layout);
1300                         mddev->new_chunk = le32_to_cpu(sb->new_chunk)<<9;
1301                 } else {
1302                         mddev->reshape_position = MaxSector;
1303                         mddev->delta_disks = 0;
1304                         mddev->new_level = mddev->level;
1305                         mddev->new_layout = mddev->layout;
1306                         mddev->new_chunk = mddev->chunk_size;
1307                 }
1308
1309         } else if (mddev->pers == NULL) {
1310                 /* Insist on a good event counter while assembling */
1311                 ++ev1;
1312                 if (ev1 < mddev->events)
1313                         return -EINVAL;
1314         } else if (mddev->bitmap) {
1315                 /* If adding to array with a bitmap, then we can accept an
1316                  * older device, but not too old.
1317                  */
1318                 if (ev1 < mddev->bitmap->events_cleared)
1319                         return 0;
1320         } else {
1321                 if (ev1 < mddev->events)
1322                         /* just a hot-add of a new device, leave raid_disk at -1 */
1323                         return 0;
1324         }
1325         if (mddev->level != LEVEL_MULTIPATH) {
1326                 int role;
1327                 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
1328                 switch(role) {
1329                 case 0xffff: /* spare */
1330                         break;
1331                 case 0xfffe: /* faulty */
1332                         set_bit(Faulty, &rdev->flags);
1333                         break;
1334                 default:
1335                         if ((le32_to_cpu(sb->feature_map) &
1336                              MD_FEATURE_RECOVERY_OFFSET))
1337                                 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
1338                         else
1339                                 set_bit(In_sync, &rdev->flags);
1340                         rdev->raid_disk = role;
1341                         break;
1342                 }
1343                 if (sb->devflags & WriteMostly1)
1344                         set_bit(WriteMostly, &rdev->flags);
1345         } else /* MULTIPATH are always insync */
1346                 set_bit(In_sync, &rdev->flags);
1347
1348         return 0;
1349 }
1350
1351 static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1352 {
1353         struct mdp_superblock_1 *sb;
1354         mdk_rdev_t *rdev2;
1355         int max_dev, i;
1356         /* make rdev->sb match mddev and rdev data. */
1357
1358         sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
1359
1360         sb->feature_map = 0;
1361         sb->pad0 = 0;
1362         sb->recovery_offset = cpu_to_le64(0);
1363         memset(sb->pad1, 0, sizeof(sb->pad1));
1364         memset(sb->pad2, 0, sizeof(sb->pad2));
1365         memset(sb->pad3, 0, sizeof(sb->pad3));
1366
1367         sb->utime = cpu_to_le64((__u64)mddev->utime);
1368         sb->events = cpu_to_le64(mddev->events);
1369         if (mddev->in_sync)
1370                 sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
1371         else
1372                 sb->resync_offset = cpu_to_le64(0);
1373
1374         sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
1375
1376         sb->raid_disks = cpu_to_le32(mddev->raid_disks);
1377         sb->size = cpu_to_le64(mddev->dev_sectors);
1378
1379         if (mddev->bitmap && mddev->bitmap_file == NULL) {
1380                 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
1381                 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
1382         }
1383
1384         if (rdev->raid_disk >= 0 &&
1385             !test_bit(In_sync, &rdev->flags)) {
1386                 if (mddev->curr_resync_completed > rdev->recovery_offset)
1387                         rdev->recovery_offset = mddev->curr_resync_completed;
1388                 if (rdev->recovery_offset > 0) {
1389                         sb->feature_map |=
1390                                 cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
1391                         sb->recovery_offset =
1392                                 cpu_to_le64(rdev->recovery_offset);
1393                 }
1394         }
1395
1396         if (mddev->reshape_position != MaxSector) {
1397                 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
1398                 sb->reshape_position = cpu_to_le64(mddev->reshape_position);
1399                 sb->new_layout = cpu_to_le32(mddev->new_layout);
1400                 sb->delta_disks = cpu_to_le32(mddev->delta_disks);
1401                 sb->new_level = cpu_to_le32(mddev->new_level);
1402                 sb->new_chunk = cpu_to_le32(mddev->new_chunk>>9);
1403         }
1404
1405         max_dev = 0;
1406         list_for_each_entry(rdev2, &mddev->disks, same_set)
1407                 if (rdev2->desc_nr+1 > max_dev)
1408                         max_dev = rdev2->desc_nr+1;
1409
1410         if (max_dev > le32_to_cpu(sb->max_dev))
1411                 sb->max_dev = cpu_to_le32(max_dev);
1412         for (i=0; i<max_dev;i++)
1413                 sb->dev_roles[i] = cpu_to_le16(0xfffe);
1414         
1415         list_for_each_entry(rdev2, &mddev->disks, same_set) {
1416                 i = rdev2->desc_nr;
1417                 if (test_bit(Faulty, &rdev2->flags))
1418                         sb->dev_roles[i] = cpu_to_le16(0xfffe);
1419                 else if (test_bit(In_sync, &rdev2->flags))
1420                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1421                 else if (rdev2->raid_disk >= 0 && rdev2->recovery_offset > 0)
1422                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1423                 else
1424                         sb->dev_roles[i] = cpu_to_le16(0xffff);
1425         }
1426
1427         sb->sb_csum = calc_sb_1_csum(sb);
1428 }
1429
1430 static unsigned long long
1431 super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
1432 {
1433         struct mdp_superblock_1 *sb;
1434         sector_t max_sectors;
1435         if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
1436                 return 0; /* component must fit device */
1437         if (rdev->sb_start < rdev->data_offset) {
1438                 /* minor versions 1 and 2; superblock before data */
1439                 max_sectors = rdev->bdev->bd_inode->i_size >> 9;
1440                 max_sectors -= rdev->data_offset;
1441                 if (!num_sectors || num_sectors > max_sectors)
1442                         num_sectors = max_sectors;
1443         } else if (rdev->mddev->bitmap_offset) {
1444                 /* minor version 0 with a bitmap; we can't move it */
1445                 return 0;
1446         } else {
1447                 /* minor version 0; superblock after data */
1448                 sector_t sb_start;
1449                 sb_start = (rdev->bdev->bd_inode->i_size >> 9) - 8*2;
1450                 sb_start &= ~(sector_t)(4*2 - 1);
1451                 max_sectors = rdev->sectors + sb_start - rdev->sb_start;
1452                 if (!num_sectors || num_sectors > max_sectors)
1453                         num_sectors = max_sectors;
1454                 rdev->sb_start = sb_start;
1455         }
1456         sb = (struct mdp_superblock_1 *) page_address(rdev->sb_page);
1457         sb->data_size = cpu_to_le64(num_sectors);
1458         sb->super_offset = rdev->sb_start;
1459         sb->sb_csum = calc_sb_1_csum(sb);
1460         md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
1461                        rdev->sb_page);
1462         md_super_wait(rdev->mddev);
1463         return num_sectors / 2; /* kB for sysfs */
1464 }
1465
1466 static struct super_type super_types[] = {
1467         [0] = {
1468                 .name   = "0.90.0",
1469                 .owner  = THIS_MODULE,
1470                 .load_super         = super_90_load,
1471                 .validate_super     = super_90_validate,
1472                 .sync_super         = super_90_sync,
1473                 .rdev_size_change   = super_90_rdev_size_change,
1474         },
1475         [1] = {
1476                 .name   = "md-1",
1477                 .owner  = THIS_MODULE,
1478                 .load_super         = super_1_load,
1479                 .validate_super     = super_1_validate,
1480                 .sync_super         = super_1_sync,
1481                 .rdev_size_change   = super_1_rdev_size_change,
1482         },
1483 };
1484
1485 static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
1486 {
1487         mdk_rdev_t *rdev, *rdev2;
1488
1489         rcu_read_lock();
1490         rdev_for_each_rcu(rdev, mddev1)
1491                 rdev_for_each_rcu(rdev2, mddev2)
1492                         if (rdev->bdev->bd_contains ==
1493                             rdev2->bdev->bd_contains) {
1494                                 rcu_read_unlock();
1495                                 return 1;
1496                         }
1497         rcu_read_unlock();
1498         return 0;
1499 }
1500
1501 static LIST_HEAD(pending_raid_disks);
1502
1503 static void md_integrity_check(mdk_rdev_t *rdev, mddev_t *mddev)
1504 {
1505         struct mdk_personality *pers = mddev->pers;
1506         struct gendisk *disk = mddev->gendisk;
1507         struct blk_integrity *bi_rdev = bdev_get_integrity(rdev->bdev);
1508         struct blk_integrity *bi_mddev = blk_get_integrity(disk);
1509
1510         /* Data integrity passthrough not supported on RAID 4, 5 and 6 */
1511         if (pers && pers->level >= 4 && pers->level <= 6)
1512                 return;
1513
1514         /* If rdev is integrity capable, register profile for mddev */
1515         if (!bi_mddev && bi_rdev) {
1516                 if (blk_integrity_register(disk, bi_rdev))
1517                         printk(KERN_ERR "%s: %s Could not register integrity!\n",
1518                                __func__, disk->disk_name);
1519                 else
1520                         printk(KERN_NOTICE "Enabling data integrity on %s\n",
1521                                disk->disk_name);
1522                 return;
1523         }
1524
1525         /* Check that mddev and rdev have matching profiles */
1526         if (blk_integrity_compare(disk, rdev->bdev->bd_disk) < 0) {
1527                 printk(KERN_ERR "%s: %s/%s integrity mismatch!\n", __func__,
1528                        disk->disk_name, rdev->bdev->bd_disk->disk_name);
1529                 printk(KERN_NOTICE "Disabling data integrity on %s\n",
1530                        disk->disk_name);
1531                 blk_integrity_unregister(disk);
1532         }
1533 }
1534
1535 static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
1536 {
1537         char b[BDEVNAME_SIZE];
1538         struct kobject *ko;
1539         char *s;
1540         int err;
1541
1542         if (rdev->mddev) {
1543                 MD_BUG();
1544                 return -EINVAL;
1545         }
1546
1547         /* prevent duplicates */
1548         if (find_rdev(mddev, rdev->bdev->bd_dev))
1549                 return -EEXIST;
1550
1551         /* make sure rdev->sectors exceeds mddev->dev_sectors */
1552         if (rdev->sectors && (mddev->dev_sectors == 0 ||
1553                         rdev->sectors < mddev->dev_sectors)) {
1554                 if (mddev->pers) {
1555                         /* Cannot change size, so fail.
1556                          * If mddev->level <= 0, then we don't care
1557                          * about aligning sizes (e.g. linear).
1558                          */
1559                         if (mddev->level > 0)
1560                                 return -ENOSPC;
1561                 } else
1562                         mddev->dev_sectors = rdev->sectors;
1563         }
1564
1565         /* Verify rdev->desc_nr is unique.
1566          * If it is -1, assign a free number, else
1567          * check number is not in use
1568          */
1569         if (rdev->desc_nr < 0) {
1570                 int choice = 0;
1571                 if (mddev->pers) choice = mddev->raid_disks;
1572                 while (find_rdev_nr(mddev, choice))
1573                         choice++;
1574                 rdev->desc_nr = choice;
1575         } else {
1576                 if (find_rdev_nr(mddev, rdev->desc_nr))
1577                         return -EBUSY;
1578         }
1579         if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
1580                 printk(KERN_WARNING "md: %s: array is limited to %d devices\n",
1581                        mdname(mddev), mddev->max_disks);
1582                 return -EBUSY;
1583         }
1584         bdevname(rdev->bdev,b);
1585         while ( (s=strchr(b, '/')) != NULL)
1586                 *s = '!';
1587
1588         rdev->mddev = mddev;
1589         printk(KERN_INFO "md: bind<%s>\n", b);
1590
1591         if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
1592                 goto fail;
1593
1594         ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
1595         if ((err = sysfs_create_link(&rdev->kobj, ko, "block"))) {
1596                 kobject_del(&rdev->kobj);
1597                 goto fail;
1598         }
1599         rdev->sysfs_state = sysfs_get_dirent(rdev->kobj.sd, "state");
1600
1601         list_add_rcu(&rdev->same_set, &mddev->disks);
1602         bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk);
1603
1604         /* May as well allow recovery to be retried once */
1605         mddev->recovery_disabled = 0;
1606
1607         md_integrity_check(rdev, mddev);
1608         return 0;
1609
1610  fail:
1611         printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
1612                b, mdname(mddev));
1613         return err;
1614 }
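/*
 * A sketch of the sysfs layout the function above creates (array and
 * device names are illustrative, not fixed): binding /dev/sda1 into md0
 * would yield
 *
 *   /sys/block/md0/md/dev-sda1/        (rdev kobject, "dev-%s")
 *   /sys/block/md0/md/dev-sda1/block   (symlink back to the device)
 *   /sys/block/md0/md/dev-sda1/state   (the dirent cached above)
 *
 * with any '/' in the device name replaced by '!'.
 */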
1615
1616 static void md_delayed_delete(struct work_struct *ws)
1617 {
1618         mdk_rdev_t *rdev = container_of(ws, mdk_rdev_t, del_work);
1619         kobject_del(&rdev->kobj);
1620         kobject_put(&rdev->kobj);
1621 }
1622
1623 static void unbind_rdev_from_array(mdk_rdev_t * rdev)
1624 {
1625         char b[BDEVNAME_SIZE];
1626         if (!rdev->mddev) {
1627                 MD_BUG();
1628                 return;
1629         }
1630         bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk);
1631         list_del_rcu(&rdev->same_set);
1632         printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
1633         rdev->mddev = NULL;
1634         sysfs_remove_link(&rdev->kobj, "block");
1635         sysfs_put(rdev->sysfs_state);
1636         rdev->sysfs_state = NULL;
1637         /* We need to delay this, otherwise we can deadlock when
1638          * writing 'remove' to "dev/state".  We also need
1639          * to delay it due to rcu usage.
1640          */
1641         synchronize_rcu();
1642         INIT_WORK(&rdev->del_work, md_delayed_delete);
1643         kobject_get(&rdev->kobj);
1644         schedule_work(&rdev->del_work);
1645 }
1646
1647 /*
1648  * prevent the device from being mounted, repartitioned or
1649  * otherwise reused by a RAID array (or any other kernel
1650  * subsystem), by bd_claiming the device.
1651  */
1652 static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared)
1653 {
1654         int err = 0;
1655         struct block_device *bdev;
1656         char b[BDEVNAME_SIZE];
1657
1658         bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
1659         if (IS_ERR(bdev)) {
1660                 printk(KERN_ERR "md: could not open %s.\n",
1661                         __bdevname(dev, b));
1662                 return PTR_ERR(bdev);
1663         }
1664         err = bd_claim(bdev, shared ? (mdk_rdev_t *)lock_rdev : rdev);
1665         if (err) {
1666                 printk(KERN_ERR "md: could not bd_claim %s.\n",
1667                         bdevname(bdev, b));
1668                 blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
1669                 return err;
1670         }
1671         if (!shared)
1672                 set_bit(AllReserved, &rdev->flags);
1673         rdev->bdev = bdev;
1674         return err;
1675 }
1676
1677 static void unlock_rdev(mdk_rdev_t *rdev)
1678 {
1679         struct block_device *bdev = rdev->bdev;
1680         rdev->bdev = NULL;
1681         if (!bdev)
1682                 MD_BUG();
1683         bd_release(bdev);
1684         blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
1685 }
1686
1687 void md_autodetect_dev(dev_t dev);
1688
1689 static void export_rdev(mdk_rdev_t * rdev)
1690 {
1691         char b[BDEVNAME_SIZE];
1692         printk(KERN_INFO "md: export_rdev(%s)\n",
1693                 bdevname(rdev->bdev,b));
1694         if (rdev->mddev)
1695                 MD_BUG();
1696         free_disk_sb(rdev);
1697 #ifndef MODULE
1698         if (test_bit(AutoDetected, &rdev->flags))
1699                 md_autodetect_dev(rdev->bdev->bd_dev);
1700 #endif
1701         unlock_rdev(rdev);
1702         kobject_put(&rdev->kobj);
1703 }
1704
1705 static void kick_rdev_from_array(mdk_rdev_t * rdev)
1706 {
1707         unbind_rdev_from_array(rdev);
1708         export_rdev(rdev);
1709 }
1710
1711 static void export_array(mddev_t *mddev)
1712 {
1713         mdk_rdev_t *rdev, *tmp;
1714
1715         rdev_for_each(rdev, tmp, mddev) {
1716                 if (!rdev->mddev) {
1717                         MD_BUG();
1718                         continue;
1719                 }
1720                 kick_rdev_from_array(rdev);
1721         }
1722         if (!list_empty(&mddev->disks))
1723                 MD_BUG();
1724         mddev->raid_disks = 0;
1725         mddev->major_version = 0;
1726 }
1727
1728 static void print_desc(mdp_disk_t *desc)
1729 {
1730         printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
1731                 desc->major,desc->minor,desc->raid_disk,desc->state);
1732 }
1733
1734 static void print_sb_90(mdp_super_t *sb)
1735 {
1736         int i;
1737
1738         printk(KERN_INFO 
1739                 "md:  SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
1740                 sb->major_version, sb->minor_version, sb->patch_version,
1741                 sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
1742                 sb->ctime);
1743         printk(KERN_INFO "md:     L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
1744                 sb->level, sb->size, sb->nr_disks, sb->raid_disks,
1745                 sb->md_minor, sb->layout, sb->chunk_size);
1746         printk(KERN_INFO "md:     UT:%08x ST:%d AD:%d WD:%d"
1747                 " FD:%d SD:%d CSUM:%08x E:%08lx\n",
1748                 sb->utime, sb->state, sb->active_disks, sb->working_disks,
1749                 sb->failed_disks, sb->spare_disks,
1750                 sb->sb_csum, (unsigned long)sb->events_lo);
1751
1752         printk(KERN_INFO);
1753         for (i = 0; i < MD_SB_DISKS; i++) {
1754                 mdp_disk_t *desc;
1755
1756                 desc = sb->disks + i;
1757                 if (desc->number || desc->major || desc->minor ||
1758                     desc->raid_disk || (desc->state && (desc->state != 4))) {
1759                         printk("     D %2d: ", i);
1760                         print_desc(desc);
1761                 }
1762         }
1763         printk(KERN_INFO "md:     THIS: ");
1764         print_desc(&sb->this_disk);
1765 }
1766
1767 static void print_sb_1(struct mdp_superblock_1 *sb)
1768 {
1769         __u8 *uuid;
1770
1771         uuid = sb->set_uuid;
1772         printk(KERN_INFO "md:  SB: (V:%u) (F:0x%08x) Array-ID:<%02x%02x%02x%02x"
1773                         ":%02x%02x:%02x%02x:%02x%02x:%02x%02x%02x%02x%02x%02x>\n"
1774                KERN_INFO "md:    Name: \"%s\" CT:%llu\n",
1775                 le32_to_cpu(sb->major_version),
1776                 le32_to_cpu(sb->feature_map),
1777                 uuid[0], uuid[1], uuid[2], uuid[3],
1778                 uuid[4], uuid[5], uuid[6], uuid[7],
1779                 uuid[8], uuid[9], uuid[10], uuid[11],
1780                 uuid[12], uuid[13], uuid[14], uuid[15],
1781                 sb->set_name,
1782                 (unsigned long long)le64_to_cpu(sb->ctime)
1783                        & MD_SUPERBLOCK_1_TIME_SEC_MASK);
1784
1785         uuid = sb->device_uuid;
1786         printk(KERN_INFO "md:       L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu"
1787                         " RO:%llu\n"
1788                KERN_INFO "md:     Dev:%08x UUID: %02x%02x%02x%02x:%02x%02x:%02x%02x:%02x%02x"
1789                         ":%02x%02x%02x%02x%02x%02x\n"
1790                KERN_INFO "md:       (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n"
1791                KERN_INFO "md:         (MaxDev:%u) \n",
1792                 le32_to_cpu(sb->level),
1793                 (unsigned long long)le64_to_cpu(sb->size),
1794                 le32_to_cpu(sb->raid_disks),
1795                 le32_to_cpu(sb->layout),
1796                 le32_to_cpu(sb->chunksize),
1797                 (unsigned long long)le64_to_cpu(sb->data_offset),
1798                 (unsigned long long)le64_to_cpu(sb->data_size),
1799                 (unsigned long long)le64_to_cpu(sb->super_offset),
1800                 (unsigned long long)le64_to_cpu(sb->recovery_offset),
1801                 le32_to_cpu(sb->dev_number),
1802                 uuid[0], uuid[1], uuid[2], uuid[3],
1803                 uuid[4], uuid[5], uuid[6], uuid[7],
1804                 uuid[8], uuid[9], uuid[10], uuid[11],
1805                 uuid[12], uuid[13], uuid[14], uuid[15],
1806                 sb->devflags,
1807                 (unsigned long long)le64_to_cpu(sb->utime) & MD_SUPERBLOCK_1_TIME_SEC_MASK,
1808                 (unsigned long long)le64_to_cpu(sb->events),
1809                 (unsigned long long)le64_to_cpu(sb->resync_offset),
1810                 le32_to_cpu(sb->sb_csum),
1811                 le32_to_cpu(sb->max_dev)
1812                 );
1813 }
1814
1815 static void print_rdev(mdk_rdev_t *rdev, int major_version)
1816 {
1817         char b[BDEVNAME_SIZE];
1818         printk(KERN_INFO "md: rdev %s, Sect:%08llu F:%d S:%d DN:%u\n",
1819                 bdevname(rdev->bdev, b), (unsigned long long)rdev->sectors,
1820                 test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
1821                 rdev->desc_nr);
1822         if (rdev->sb_loaded) {
1823                 printk(KERN_INFO "md: rdev superblock (MJ:%d):\n", major_version);
1824                 switch (major_version) {
1825                 case 0:
1826                         print_sb_90((mdp_super_t*)page_address(rdev->sb_page));
1827                         break;
1828                 case 1:
1829                         print_sb_1((struct mdp_superblock_1 *)page_address(rdev->sb_page));
1830                         break;
1831                 }
1832         } else
1833                 printk(KERN_INFO "md: no rdev superblock!\n");
1834 }
1835
1836 static void md_print_devices(void)
1837 {
1838         struct list_head *tmp;
1839         mdk_rdev_t *rdev;
1840         mddev_t *mddev;
1841         char b[BDEVNAME_SIZE];
1842
1843         printk("\n");
1844         printk("md:     **********************************\n");
1845         printk("md:     * <COMPLETE RAID STATE PRINTOUT> *\n");
1846         printk("md:     **********************************\n");
1847         for_each_mddev(mddev, tmp) {
1848
1849                 if (mddev->bitmap)
1850                         bitmap_print_sb(mddev->bitmap);
1851                 else
1852                         printk("%s: ", mdname(mddev));
1853                 list_for_each_entry(rdev, &mddev->disks, same_set)
1854                         printk("<%s>", bdevname(rdev->bdev,b));
1855                 printk("\n");
1856
1857                 list_for_each_entry(rdev, &mddev->disks, same_set)
1858                         print_rdev(rdev, mddev->major_version);
1859         }
1860         printk("md:     **********************************\n");
1861         printk("\n");
1862 }
1863
1864
1865 static void sync_sbs(mddev_t * mddev, int nospares)
1866 {
1867         /* Update each superblock (in-memory image), but
1868          * if we are allowed to, skip spares which already
1869          * have the right event counter, or have one earlier
1870          * (which would mean they aren't being marked as dirty
1871          * with the rest of the array)
1872          */
1873         mdk_rdev_t *rdev;
1874
1875         list_for_each_entry(rdev, &mddev->disks, same_set) {
1876                 if (rdev->sb_events == mddev->events ||
1877                     (nospares &&
1878                      rdev->raid_disk < 0 &&
1879                      (rdev->sb_events&1)==0 &&
1880                      rdev->sb_events+1 == mddev->events)) {
1881                         /* Don't update this superblock */
1882                         rdev->sb_loaded = 2;
1883                 } else {
1884                         super_types[mddev->major_version].
1885                                 sync_super(mddev, rdev);
1886                         rdev->sb_loaded = 1;
1887                 }
1888         }
1889 }
1890
1891 static void md_update_sb(mddev_t * mddev, int force_change)
1892 {
1893         mdk_rdev_t *rdev;
1894         int sync_req;
1895         int nospares = 0;
1896
1897         if (mddev->external)
1898                 return;
1899 repeat:
1900         spin_lock_irq(&mddev->write_lock);
1901
1902         set_bit(MD_CHANGE_PENDING, &mddev->flags);
1903         if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
1904                 force_change = 1;
1905         if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
1906                 /* just a clean <-> dirty transition; we can possibly leave
1907                  * spares alone, though if 'events' isn't the right even/odd,
1908                  * we will have to update the spares after all
1909                  */
1910                 nospares = 1;
1911         if (force_change)
1912                 nospares = 0;
1913         if (mddev->degraded)
1914                 /* If the array is degraded, then skipping spares is both
1915                  * dangerous and fairly pointless.
1916                  * Dangerous because a device that was removed from the array
1917                  * might have an event count that still looks up-to-date,
1918                  * so it can be re-added without a resync.
1919                  * Pointless because if there are any spares to skip,
1920                  * then a recovery will happen, the array will soon no longer
1921                  * be degraded, and the spare can go back to sleep.
1922                  */
1923                 nospares = 0;
1924
1925         sync_req = mddev->in_sync;
1926         mddev->utime = get_seconds();
1927
1928         /* If this is just a dirty<->clean transition, and the array is clean
1929          * and 'events' is odd, we can roll back to the previous clean state */
1930         if (nospares
1931             && (mddev->in_sync && mddev->recovery_cp == MaxSector)
1932             && (mddev->events & 1)
1933             && mddev->events != 1)
1934                 mddev->events--;
1935         else {
1936                 /* otherwise we have to go forward and ... */
1937                 mddev->events++;
1938                 if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
1939                         /* .. if the array isn't clean, insist on an odd 'events' */
1940                         if ((mddev->events&1)==0) {
1941                                 mddev->events++;
1942                                 nospares = 0;
1943                         }
1944                 } else {
1945                         /* otherwise insist on an even 'events' (for clean states) */
1946                         if ((mddev->events&1)) {
1947                                 mddev->events++;
1948                                 nospares = 0;
1949                         }
1950                 }
1951         }
1952
1953         if (!mddev->events) {
1954                 /*
1955                  * oops, this 64-bit counter should never wrap.
1956                  * Either we are somewhere around the year one trillion
1957                  * (assuming one reboot per second), or we have a bug:
1958                  */
1959                 MD_BUG();
1960                 mddev->events--;
1961         }
1962
1963         /*
1964          * do not write anything to disk if using
1965          * nonpersistent superblocks
1966          */
1967         if (!mddev->persistent) {
1968                 if (!mddev->external)
1969                         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
1970
1971                 spin_unlock_irq(&mddev->write_lock);
1972                 wake_up(&mddev->sb_wait);
1973                 return;
1974         }
1975         sync_sbs(mddev, nospares);
1976         spin_unlock_irq(&mddev->write_lock);
1977
1978         dprintk(KERN_INFO 
1979                 "md: updating %s RAID superblock on device (in sync %d)\n",
1980                 mdname(mddev),mddev->in_sync);
1981
1982         bitmap_update_sb(mddev->bitmap);
1983         list_for_each_entry(rdev, &mddev->disks, same_set) {
1984                 char b[BDEVNAME_SIZE];
1985                 dprintk(KERN_INFO "md: ");
1986                 if (rdev->sb_loaded != 1)
1987                         continue; /* no noise on spare devices */
1988                 if (test_bit(Faulty, &rdev->flags))
1989                         dprintk("(skipping faulty ");
1990
1991                 dprintk("%s ", bdevname(rdev->bdev,b));
1992                 if (!test_bit(Faulty, &rdev->flags)) {
1993                         md_super_write(mddev,rdev,
1994                                        rdev->sb_start, rdev->sb_size,
1995                                        rdev->sb_page);
1996                         dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
1997                                 bdevname(rdev->bdev,b),
1998                                 (unsigned long long)rdev->sb_start);
1999                         rdev->sb_events = mddev->events;
2000
2001                 } else
2002                         dprintk(")\n");
2003                 if (mddev->level == LEVEL_MULTIPATH)
2004                         /* only need to write one superblock... */
2005                         break;
2006         }
2007         md_super_wait(mddev);
2008         /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
2009
2010         spin_lock_irq(&mddev->write_lock);
2011         if (mddev->in_sync != sync_req ||
2012             test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
2013                 /* have to write it out again */
2014                 spin_unlock_irq(&mddev->write_lock);
2015                 goto repeat;
2016         }
2017         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
2018         spin_unlock_irq(&mddev->write_lock);
2019         wake_up(&mddev->sb_wait);
2020
2021 }
2022
2023 /* words written to sysfs files may, or may not, be \n terminated.
2024  * We want to accept either case. For this we use cmd_match.
2025  */
2026 static int cmd_match(const char *cmd, const char *str)
2027 {
2028         /* See if cmd, written into a sysfs file, matches
2029          * str.  They must either be the same, or cmd can
2030          * have a trailing newline
2031          */
2032         while (*cmd && *str && *cmd == *str) {
2033                 cmd++;
2034                 str++;
2035         }
2036         if (*cmd == '\n')
2037                 cmd++;
2038         if (*str || *cmd)
2039                 return 0;
2040         return 1;
2041 }
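/*
 * Worked example for the matcher above: cmd_match("faulty\n", "faulty")
 * and cmd_match("faulty", "faulty") both return 1, while
 * cmd_match("fault", "faulty") and cmd_match("faulty2", "faulty")
 * both return 0.
 */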
2042
2043 struct rdev_sysfs_entry {
2044         struct attribute attr;
2045         ssize_t (*show)(mdk_rdev_t *, char *);
2046         ssize_t (*store)(mdk_rdev_t *, const char *, size_t);
2047 };
2048
2049 static ssize_t
2050 state_show(mdk_rdev_t *rdev, char *page)
2051 {
2052         char *sep = "";
2053         size_t len = 0;
2054
2055         if (test_bit(Faulty, &rdev->flags)) {
2056                 len+= sprintf(page+len, "%sfaulty",sep);
2057                 sep = ",";
2058         }
2059         if (test_bit(In_sync, &rdev->flags)) {
2060                 len += sprintf(page+len, "%sin_sync",sep);
2061                 sep = ",";
2062         }
2063         if (test_bit(WriteMostly, &rdev->flags)) {
2064                 len += sprintf(page+len, "%swrite_mostly",sep);
2065                 sep = ",";
2066         }
2067         if (test_bit(Blocked, &rdev->flags)) {
2068                 len += sprintf(page+len, "%sblocked", sep);
2069                 sep = ",";
2070         }
2071         if (!test_bit(Faulty, &rdev->flags) &&
2072             !test_bit(In_sync, &rdev->flags)) {
2073                 len += sprintf(page+len, "%sspare", sep);
2074                 sep = ",";
2075         }
2076         return len+sprintf(page+len, "\n");
2077 }
2078
2079 static ssize_t
2080 state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2081 {
2082         /* can write
2083          *  faulty  - simulates an error
2084          *  remove  - disconnects the device
2085          *  writemostly - sets write_mostly
2086          *  -writemostly - clears write_mostly
2087          *  blocked - sets the Blocked flag
2088          *  -blocked - clears the Blocked flag
2089          */
2090         int err = -EINVAL;
2091         if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
2092                 md_error(rdev->mddev, rdev);
2093                 err = 0;
2094         } else if (cmd_match(buf, "remove")) {
2095                 if (rdev->raid_disk >= 0)
2096                         err = -EBUSY;
2097                 else {
2098                         mddev_t *mddev = rdev->mddev;
2099                         kick_rdev_from_array(rdev);
2100                         if (mddev->pers)
2101                                 md_update_sb(mddev, 1);
2102                         md_new_event(mddev);
2103                         err = 0;
2104                 }
2105         } else if (cmd_match(buf, "writemostly")) {
2106                 set_bit(WriteMostly, &rdev->flags);
2107                 err = 0;
2108         } else if (cmd_match(buf, "-writemostly")) {
2109                 clear_bit(WriteMostly, &rdev->flags);
2110                 err = 0;
2111         } else if (cmd_match(buf, "blocked")) {
2112                 set_bit(Blocked, &rdev->flags);
2113                 err = 0;
2114         } else if (cmd_match(buf, "-blocked")) {
2115                 clear_bit(Blocked, &rdev->flags);
2116                 wake_up(&rdev->blocked_wait);
2117                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2118                 md_wakeup_thread(rdev->mddev->thread);
2119
2120                 err = 0;
2121         }
2122         if (!err && rdev->sysfs_state)
2123                 sysfs_notify_dirent(rdev->sysfs_state);
2124         return err ? err : len;
2125 }
2126 static struct rdev_sysfs_entry rdev_state =
2127 __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
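/*
 * Illustrative usage from user space (array and device names are
 * examples only; some transitions apply only in particular states):
 *
 *   cat /sys/block/md0/md/dev-sda1/state          # e.g. "in_sync"
 *   echo writemostly  > /sys/block/md0/md/dev-sda1/state
 *   echo -writemostly > /sys/block/md0/md/dev-sda1/state
 *   echo remove       > /sys/block/md0/md/dev-sda1/state
 */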
2128
2129 static ssize_t
2130 errors_show(mdk_rdev_t *rdev, char *page)
2131 {
2132         return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
2133 }
2134
2135 static ssize_t
2136 errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2137 {
2138         char *e;
2139         unsigned long n = simple_strtoul(buf, &e, 10);
2140         if (*buf && (*e == 0 || *e == '\n')) {
2141                 atomic_set(&rdev->corrected_errors, n);
2142                 return len;
2143         }
2144         return -EINVAL;
2145 }
2146 static struct rdev_sysfs_entry rdev_errors =
2147 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
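/* Illustrative (paths as above): "cat .../dev-sda1/errors" reads the
 * corrected-read count, and "echo 0 > .../dev-sda1/errors" resets it.
 */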
2148
2149 static ssize_t
2150 slot_show(mdk_rdev_t *rdev, char *page)
2151 {
2152         if (rdev->raid_disk < 0)
2153                 return sprintf(page, "none\n");
2154         else
2155                 return sprintf(page, "%d\n", rdev->raid_disk);
2156 }
2157
2158 static ssize_t
2159 slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2160 {
2161         char *e;
2162         int err;
2163         char nm[20];
2164         int slot = simple_strtoul(buf, &e, 10);
2165         if (strncmp(buf, "none", 4)==0)
2166                 slot = -1;
2167         else if (e==buf || (*e && *e!= '\n'))
2168                 return -EINVAL;
2169         if (rdev->mddev->pers && slot == -1) {
2170                 /* Setting 'slot' on an active array requires also
2171                  * updating the 'rd%d' link, and communicating
2172                  * with the personality via ->hot_*_disk.
2173                  * For now we only support removing
2174                  * failed/spare devices.  This normally happens automatically,
2175                  * but not when the metadata is externally managed.
2176                  */
2177                 if (rdev->raid_disk == -1)
2178                         return -EEXIST;
2179                 /* personality does all needed checks */
2180                 if (rdev->mddev->pers->hot_add_disk == NULL)
2181                         return -EINVAL;
2182                 err = rdev->mddev->pers->
2183                         hot_remove_disk(rdev->mddev, rdev->raid_disk);
2184                 if (err)
2185                         return err;
2186                 sprintf(nm, "rd%d", rdev->raid_disk);
2187                 sysfs_remove_link(&rdev->mddev->kobj, nm);
2188                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2189                 md_wakeup_thread(rdev->mddev->thread);
2190         } else if (rdev->mddev->pers) {
2191                 mdk_rdev_t *rdev2;
2192                 /* Activating a spare .. or possibly reactivating
2193                  * if we ever get bitmaps working here.
2194                  */
2195
2196                 if (rdev->raid_disk != -1)
2197                         return -EBUSY;
2198
2199                 if (rdev->mddev->pers->hot_add_disk == NULL)
2200                         return -EINVAL;
2201
2202                 list_for_each_entry(rdev2, &rdev->mddev->disks, same_set)
2203                         if (rdev2->raid_disk == slot)
2204                                 return -EEXIST;
2205
2206                 rdev->raid_disk = slot;
2207                 if (test_bit(In_sync, &rdev->flags))
2208                         rdev->saved_raid_disk = slot;
2209                 else
2210                         rdev->saved_raid_disk = -1;
2211                 err = rdev->mddev->pers->
2212                         hot_add_disk(rdev->mddev, rdev);
2213                 if (err) {
2214                         rdev->raid_disk = -1;
2215                         return err;
2216                 } else
2217                         sysfs_notify_dirent(rdev->sysfs_state);
2218                 sprintf(nm, "rd%d", rdev->raid_disk);
2219                 if (sysfs_create_link(&rdev->mddev->kobj, &rdev->kobj, nm))
2220                         printk(KERN_WARNING
2221                                "md: cannot register "
2222                                "%s for %s\n",
2223                                nm, mdname(rdev->mddev));
2224
2225                 /* don't wakeup anyone, leave that to userspace. */
2226         } else {
2227                 if (slot >= rdev->mddev->raid_disks)
2228                         return -ENOSPC;
2229                 rdev->raid_disk = slot;
2230                 /* assume it is working */
2231                 clear_bit(Faulty, &rdev->flags);
2232                 clear_bit(WriteMostly, &rdev->flags);
2233                 set_bit(In_sync, &rdev->flags);
2234                 sysfs_notify_dirent(rdev->sysfs_state);
2235         }
2236         return len;
2237 }
2238
2239
2240 static struct rdev_sysfs_entry rdev_slot =
2241 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
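/*
 * Illustrative usage (names are examples): on a running array,
 * "echo none > .../dev-sda1/slot" detaches a failed or spare device,
 * while "echo 2 > .../dev-sdb1/slot" asks the personality to activate
 * that device in slot 2; on an inactive array the slot is simply
 * recorded.
 */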
2242
2243 static ssize_t
2244 offset_show(mdk_rdev_t *rdev, char *page)
2245 {
2246         return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
2247 }
2248
2249 static ssize_t
2250 offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2251 {
2252         char *e;
2253         unsigned long long offset = simple_strtoull(buf, &e, 10);
2254         if (e==buf || (*e && *e != '\n'))
2255                 return -EINVAL;
2256         if (rdev->mddev->pers && rdev->raid_disk >= 0)
2257                 return -EBUSY;
2258         if (rdev->sectors && rdev->mddev->external)
2259                 /* Must set offset before size, so overlap checks
2260                  * can be sane */
2261                 return -EBUSY;
2262         rdev->data_offset = offset;
2263         return len;
2264 }
2265
2266 static struct rdev_sysfs_entry rdev_offset =
2267 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
2268
2269 static ssize_t
2270 rdev_size_show(mdk_rdev_t *rdev, char *page)
2271 {
2272         return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
2273 }
2274
2275 static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
2276 {
2277         /* check if two start/length pairs overlap */
2278         if (s1+l1 <= s2)
2279                 return 0;
2280         if (s2+l2 <= s1)
2281                 return 0;
2282         return 1;
2283 }
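/*
 * For example, overlaps(0, 100, 50, 100) returns 1 (the ranges share
 * sectors 50..99), while overlaps(0, 50, 50, 10) returns 0 (the ranges
 * merely touch at sector 50).
 */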
2284
2285 static ssize_t
2286 rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2287 {
2288         mddev_t *my_mddev = rdev->mddev;
2289         sector_t oldsectors = rdev->sectors;
2290         unsigned long long sectors;
2291
2292         if (strict_strtoull(buf, 10, &sectors) < 0)
2293                 return -EINVAL;
2294         sectors *= 2;
2295         if (my_mddev->pers && rdev->raid_disk >= 0) {
2296                 if (my_mddev->persistent) {
2297                         sectors = super_types[my_mddev->major_version].
2298                                 rdev_size_change(rdev, sectors);
2299                         if (!sectors)
2300                                 return -EBUSY;
2301                 } else if (!sectors)
2302                         sectors = (rdev->bdev->bd_inode->i_size >> 9) -
2303                                 rdev->data_offset;
2304         }
2305         if (sectors < my_mddev->dev_sectors)
2306                 return -EINVAL; /* component must fit device */
2307
2308         rdev->sectors = sectors;
2309         if (sectors > oldsectors && my_mddev->external) {
2310                 /* need to check that all other rdevs with the same ->bdev
2311                  * do not overlap.  We need to unlock the mddev to avoid
2312                  * a deadlock.  We have already changed rdev->sectors, and if
2313                  * we have to change it back, we will have the lock again.
2314                  */
2315                 mddev_t *mddev;
2316                 int overlap = 0;
2317                 struct list_head *tmp;
2318
2319                 mddev_unlock(my_mddev);
2320                 for_each_mddev(mddev, tmp) {
2321                         mdk_rdev_t *rdev2;
2322
2323                         mddev_lock(mddev);
2324                         list_for_each_entry(rdev2, &mddev->disks, same_set)
2325                                 if (test_bit(AllReserved, &rdev2->flags) ||
2326                                     (rdev->bdev == rdev2->bdev &&
2327                                      rdev != rdev2 &&
2328                                      overlaps(rdev->data_offset, rdev->sectors,
2329                                               rdev2->data_offset,
2330                                               rdev2->sectors))) {
2331                                         overlap = 1;
2332                                         break;
2333                                 }
2334                         mddev_unlock(mddev);
2335                         if (overlap) {
2336                                 mddev_put(mddev);
2337                                 break;
2338                         }
2339                 }
2340                 mddev_lock(my_mddev);
2341                 if (overlap) {
2342                         /* Someone else could have slipped in a size
2343                          * change here, but doing so is just silly.
2344                          * We put oldsectors back because we *know* it is
2345                          * safe, and trust userspace not to race with
2346                          * itself.
2347                          */
2348                         rdev->sectors = oldsectors;
2349                         return -EBUSY;
2350                 }
2351         }
2352         return len;
2353 }
2354
2355 static struct rdev_sysfs_entry rdev_size =
2356 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
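/* Illustrative: the value is in KiB, so "echo 1048576 > .../size"
 * requests a 1 GiB component, and "echo 0 > .../size" on a running,
 * persistent array member asks for the largest size the superblock
 * layout allows.
 */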
2357
2358 static struct attribute *rdev_default_attrs[] = {
2359         &rdev_state.attr,
2360         &rdev_errors.attr,
2361         &rdev_slot.attr,
2362         &rdev_offset.attr,
2363         &rdev_size.attr,
2364         NULL,
2365 };
2366 static ssize_t
2367 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
2368 {
2369         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
2370         mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
2371         mddev_t *mddev = rdev->mddev;
2372         ssize_t rv;
2373
2374         if (!entry->show)
2375                 return -EIO;
2376
2377         rv = mddev ? mddev_lock(mddev) : -EBUSY;
2378         if (!rv) {
2379                 if (rdev->mddev == NULL)
2380                         rv = -EBUSY;
2381                 else
2382                         rv = entry->show(rdev, page);
2383                 mddev_unlock(mddev);
2384         }
2385         return rv;
2386 }
2387
2388 static ssize_t
2389 rdev_attr_store(struct kobject *kobj, struct attribute *attr,
2390               const char *page, size_t length)
2391 {
2392         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
2393         mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
2394         ssize_t rv;
2395         mddev_t *mddev = rdev->mddev;
2396
2397         if (!entry->store)
2398                 return -EIO;
2399         if (!capable(CAP_SYS_ADMIN))
2400                 return -EACCES;
2401         rv = mddev ? mddev_lock(mddev): -EBUSY;
2402         if (!rv) {
2403                 if (rdev->mddev == NULL)
2404                         rv = -EBUSY;
2405                 else
2406                         rv = entry->store(rdev, page, length);
2407                 mddev_unlock(mddev);
2408         }
2409         return rv;
2410 }
2411
2412 static void rdev_free(struct kobject *ko)
2413 {
2414         mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
2415         kfree(rdev);
2416 }
2417 static struct sysfs_ops rdev_sysfs_ops = {
2418         .show           = rdev_attr_show,
2419         .store          = rdev_attr_store,
2420 };
2421 static struct kobj_type rdev_ktype = {
2422         .release        = rdev_free,
2423         .sysfs_ops      = &rdev_sysfs_ops,
2424         .default_attrs  = rdev_default_attrs,
2425 };
2426
2427 /*
2428  * Import a device. If 'super_format' >= 0, then sanity check the superblock
2429  *
2430  * mark the device faulty if:
2431  *
2432  *   - the device is nonexistent (zero size)
2433  *   - the device has no valid superblock
2434  *
2435  * a faulty rdev _never_ has rdev->sb set.
2436  */
2437 static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
2438 {
2439         char b[BDEVNAME_SIZE];
2440         int err;
2441         mdk_rdev_t *rdev;
2442         sector_t size;
2443
2444         rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
2445         if (!rdev) {
2446                 printk(KERN_ERR "md: could not alloc mem for new device!\n");
2447                 return ERR_PTR(-ENOMEM);
2448         }
2449
2450         if ((err = alloc_disk_sb(rdev)))
2451                 goto abort_free;
2452
2453         err = lock_rdev(rdev, newdev, super_format == -2);
2454         if (err)
2455                 goto abort_free;
2456
2457         kobject_init(&rdev->kobj, &rdev_ktype);
2458
2459         rdev->desc_nr = -1;
2460         rdev->saved_raid_disk = -1;
2461         rdev->raid_disk = -1;
2462         rdev->flags = 0;
2463         rdev->data_offset = 0;
2464         rdev->sb_events = 0;
2465         atomic_set(&rdev->nr_pending, 0);
2466         atomic_set(&rdev->read_errors, 0);
2467         atomic_set(&rdev->corrected_errors, 0);
2468
2469         size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
2470         if (!size) {
2471                 printk(KERN_WARNING 
2472                         "md: %s has zero or unknown size, marking faulty!\n",
2473                         bdevname(rdev->bdev,b));
2474                 err = -EINVAL;
2475                 goto abort_free;
2476         }
2477
2478         if (super_format >= 0) {
2479                 err = super_types[super_format].
2480                         load_super(rdev, NULL, super_minor);
2481                 if (err == -EINVAL) {
2482                         printk(KERN_WARNING
2483                                 "md: %s does not have a valid v%d.%d "
2484                                "superblock, not importing!\n",
2485                                 bdevname(rdev->bdev,b),
2486                                super_format, super_minor);
2487                         goto abort_free;
2488                 }
2489                 if (err < 0) {
2490                         printk(KERN_WARNING 
2491                                 "md: could not read %s's sb, not importing!\n",
2492                                 bdevname(rdev->bdev,b));
2493                         goto abort_free;
2494                 }
2495         }
2496
2497         INIT_LIST_HEAD(&rdev->same_set);
2498         init_waitqueue_head(&rdev->blocked_wait);
2499
2500         return rdev;
2501
2502 abort_free:
2503         if (rdev->sb_page) {
2504                 if (rdev->bdev)
2505                         unlock_rdev(rdev);
2506                 free_disk_sb(rdev);
2507         }
2508         kfree(rdev);
2509         return ERR_PTR(err);
2510 }
2511
2512 /*
2513  * Check a full RAID array for plausibility
2514  */
2515
2516
2517 static void analyze_sbs(mddev_t * mddev)
2518 {
2519         int i;
2520         mdk_rdev_t *rdev, *freshest, *tmp;
2521         char b[BDEVNAME_SIZE];
2522
2523         freshest = NULL;
2524         rdev_for_each(rdev, tmp, mddev)
2525                 switch (super_types[mddev->major_version].
2526                         load_super(rdev, freshest, mddev->minor_version)) {
2527                 case 1:
2528                         freshest = rdev;
2529                         break;
2530                 case 0:
2531                         break;
2532                 default:
2533                         printk(KERN_ERR
2534                                 "md: fatal superblock inconsistency in %s"
2535                                 " -- removing from array\n", 
2536                                 bdevname(rdev->bdev,b));
2537                         kick_rdev_from_array(rdev);
2538                 }
2539
2540
2541         super_types[mddev->major_version].
2542                 validate_super(mddev, freshest);
2543
2544         i = 0;
2545         rdev_for_each(rdev, tmp, mddev) {
2546                 if (rdev->desc_nr >= mddev->max_disks ||
2547                     i > mddev->max_disks) {
2548                         printk(KERN_WARNING
2549                                "md: %s: %s: only %d devices permitted\n",
2550                                mdname(mddev), bdevname(rdev->bdev, b),
2551                                mddev->max_disks);
2552                         kick_rdev_from_array(rdev);
2553                         continue;
2554                 }
2555                 if (rdev != freshest)
2556                         if (super_types[mddev->major_version].
2557                             validate_super(mddev, rdev)) {
2558                                 printk(KERN_WARNING "md: kicking non-fresh %s"
2559                                         " from array!\n",
2560                                         bdevname(rdev->bdev,b));
2561                                 kick_rdev_from_array(rdev);
2562                                 continue;
2563                         }
2564                 if (mddev->level == LEVEL_MULTIPATH) {
2565                         rdev->desc_nr = i++;
2566                         rdev->raid_disk = rdev->desc_nr;
2567                         set_bit(In_sync, &rdev->flags);
2568                 } else if (rdev->raid_disk >= mddev->raid_disks) {
2569                         rdev->raid_disk = -1;
2570                         clear_bit(In_sync, &rdev->flags);
2571                 }
2572         }
2573
2574
2575
2576         if (mddev->recovery_cp != MaxSector &&
2577             mddev->level >= 1)
2578                 printk(KERN_ERR "md: %s: raid array is not clean"
2579                        " -- starting background reconstruction\n",
2580                        mdname(mddev));
2581
2582 }
2583
2584 static void md_safemode_timeout(unsigned long data);
2585
2586 static ssize_t
2587 safe_delay_show(mddev_t *mddev, char *page)
2588 {
2589         int msec = (mddev->safemode_delay*1000)/HZ;
2590         return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
2591 }
2592 static ssize_t
2593 safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
2594 {
2595         int scale=1;
2596         int dot=0;
2597         int i;
2598         unsigned long msec;
2599         char buf[30];
2600
2601         /* remove a period, and count digits after it */
2602         if (len >= sizeof(buf))
2603                 return -EINVAL;
2604         strlcpy(buf, cbuf, sizeof(buf));
2605         for (i=0; i<len; i++) {
2606                 if (dot) {
2607                         if (isdigit(buf[i])) {
2608                                 buf[i-1] = buf[i];
2609                                 scale *= 10;
2610                         }
2611                         buf[i] = 0;
2612                 } else if (buf[i] == '.') {
2613                         dot=1;
2614                         buf[i] = 0;
2615                 }
2616         }
2617         if (strict_strtoul(buf, 10, &msec) < 0)
2618                 return -EINVAL;
2619         msec = (msec * 1000) / scale;
2620         if (msec == 0)
2621                 mddev->safemode_delay = 0;
2622         else {
2623                 unsigned long old_delay = mddev->safemode_delay;
2624                 mddev->safemode_delay = (msec*HZ)/1000;
2625                 if (mddev->safemode_delay == 0)
2626                         mddev->safemode_delay = 1;
2627                 if (mddev->safemode_delay < old_delay)
2628                         md_safemode_timeout((unsigned long)mddev);
2629         }
2630         return len;
2631 }
2632 static struct md_sysfs_entry md_safe_delay =
2633 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
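/*
 * Worked example for the parser above: writing "0.200" leaves buf as
 * "0200" with scale == 1000, so msec becomes 200 and safemode_delay is
 * set to roughly 200ms worth of jiffies (200*HZ/1000).
 */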
2634
2635 static ssize_t
2636 level_show(mddev_t *mddev, char *page)
2637 {
2638         struct mdk_personality *p = mddev->pers;
2639         if (p)
2640                 return sprintf(page, "%s\n", p->name);
2641         else if (mddev->clevel[0])
2642                 return sprintf(page, "%s\n", mddev->clevel);
2643         else if (mddev->level != LEVEL_NONE)
2644                 return sprintf(page, "%d\n", mddev->level);
2645         else
2646                 return 0;
2647 }
2648
2649 static ssize_t
2650 level_store(mddev_t *mddev, const char *buf, size_t len)
2651 {
2652         char level[16];
2653         ssize_t rv = len;
2654         struct mdk_personality *pers;
2655         void *priv;
2656
2657         if (mddev->pers == NULL) {
2658                 if (len == 0)
2659                         return 0;
2660                 if (len >= sizeof(mddev->clevel))
2661                         return -ENOSPC;
2662                 strncpy(mddev->clevel, buf, len);
2663                 if (mddev->clevel[len-1] == '\n')
2664                         len--;
2665                 mddev->clevel[len] = 0;
2666                 mddev->level = LEVEL_NONE;
2667                 return rv;
2668         }
2669
2670         /* request to change the personality.  Need to ensure:
2671          *  - array is not engaged in resync/recovery/reshape
2672          *  - old personality can be suspended
2673          *  - new personality will accept the array.
2674          */
2675
2676         if (mddev->sync_thread || mddev->reshape_position != MaxSector)
2677                 return -EBUSY;
2678
2679         if (!mddev->pers->quiesce) {
2680                 printk(KERN_WARNING "md: %s: %s does not support online personality change\n",
2681                        mdname(mddev), mddev->pers->name);
2682                 return -EINVAL;
2683         }
2684
2685         /* Now find the new personality */
2686         if (len == 0 || len >= sizeof(level))
2687                 return -EINVAL;
2688         strncpy(level, buf, len);
2689         if (level[len-1] == '\n')
2690                 len--;
2691         level[len] = 0;
2692
2693         request_module("md-%s", level);
2694         spin_lock(&pers_lock);
2695         pers = find_pers(LEVEL_NONE, level);
2696         if (!pers || !try_module_get(pers->owner)) {
2697                 spin_unlock(&pers_lock);
2698                 printk(KERN_WARNING "md: personality %s not loaded\n", level);
2699                 return -EINVAL;
2700         }
2701         spin_unlock(&pers_lock);
2702
2703         if (pers == mddev->pers) {
2704                 /* Nothing to do! */
2705                 module_put(pers->owner);
2706                 return rv;
2707         }
2708         if (!pers->takeover) {
2709                 module_put(pers->owner);
2710                 printk(KERN_WARNING "md: %s: %s does not support personality takeover\n",
2711                        mdname(mddev), level);
2712                 return -EINVAL;
2713         }
2714
2715         /* ->takeover must set new_* and/or delta_disks
2716          * if it succeeds, and may set them when it fails.
2717          */
2718         priv = pers->takeover(mddev);
2719         if (IS_ERR(priv)) {
2720                 mddev->new_level = mddev->level;
2721                 mddev->new_layout = mddev->layout;
2722                 mddev->new_chunk = mddev->chunk_size;
2723                 mddev->raid_disks -= mddev->delta_disks;
2724                 mddev->delta_disks = 0;
2725                 module_put(pers->owner);
2726                 printk(KERN_WARNING "md: %s: %s would not accept array\n",
2727                        mdname(mddev), level);
2728                 return PTR_ERR(priv);
2729         }
2730
2731         /* Looks like we have a winner */
2732         mddev_suspend(mddev);
2733         mddev->pers->stop(mddev);
2734         module_put(mddev->pers->owner);
2735         mddev->pers = pers;
2736         mddev->private = priv;
2737         strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
2738         mddev->level = mddev->new_level;
2739         mddev->layout = mddev->new_layout;
2740         mddev->chunk_size = mddev->new_chunk;
2741         mddev->delta_disks = 0;
2742         pers->run(mddev);
2743         mddev_resume(mddev);
2744         set_bit(MD_CHANGE_DEVS, &mddev->flags);
2745         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2746         md_wakeup_thread(mddev->thread);
2747         return rv;
2748 }
2749
2750 static struct md_sysfs_entry md_level =
2751 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
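/*
 * Illustrative: "echo raid5 > /sys/block/md0/md/level" on a running
 * array requests a personality takeover, which succeeds only if the
 * current personality can quiesce and the new one provides ->takeover;
 * on an unstarted array it merely records the level to use.
 */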
2752
2753
2754 static ssize_t
2755 layout_show(mddev_t *mddev, char *page)
2756 {
2757         /* just a number, not meaningful for all levels */
2758         if (mddev->reshape_position != MaxSector &&
2759             mddev->layout != mddev->new_layout)
2760                 return sprintf(page, "%d (%d)\n",
2761                                mddev->new_layout, mddev->layout);
2762         return sprintf(page, "%d\n", mddev->layout);
2763 }
2764
2765 static ssize_t
2766 layout_store(mddev_t *mddev, const char *buf, size_t len)
2767 {
2768         char *e;
2769         unsigned long n = simple_strtoul(buf, &e, 10);
2770
2771         if (!*buf || (*e && *e != '\n'))
2772                 return -EINVAL;
2773
2774         if (mddev->pers) {
2775                 int err;
2776                 if (mddev->pers->reconfig == NULL)
2777                         return -EBUSY;
2778                 err = mddev->pers->reconfig(mddev, n, -1);
2779                 if (err)
2780                         return err;
2781         } else {
2782                 mddev->new_layout = n;
2783                 if (mddev->reshape_position == MaxSector)
2784                         mddev->layout = n;
2785         }
2786         return len;
2787 }
2788 static struct md_sysfs_entry md_layout =
2789 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
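/* Illustrative: "echo 2 > /sys/block/md0/md/layout" requests layout 2
 * (commonly left-symmetric for raid5); on a running array this goes
 * through the personality's ->reconfig, otherwise it is just recorded.
 */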
2790
2791
2792 static ssize_t
2793 raid_disks_show(mddev_t *mddev, char *page)
2794 {
2795         if (mddev->raid_disks == 0)
2796                 return 0;
2797         if (mddev->reshape_position != MaxSector &&
2798             mddev->delta_disks != 0)
2799                 return sprintf(page, "%d (%d)\n", mddev->raid_disks,
2800                                mddev->raid_disks - mddev->delta_disks);
2801         return sprintf(page, "%d\n", mddev->raid_disks);
2802 }
2803
2804 static int update_raid_disks(mddev_t *mddev, int raid_disks);
2805
2806 static ssize_t
2807 raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
2808 {
2809         char *e;
2810         int rv = 0;
2811         unsigned long n = simple_strtoul(buf, &e, 10);
2812
2813         if (!*buf || (*e && *e != '\n'))
2814                 return -EINVAL;
2815
2816         if (mddev->pers)
2817                 rv = update_raid_disks(mddev, n);
2818         else if (mddev->reshape_position != MaxSector) {
2819                 int olddisks = mddev->raid_disks - mddev->delta_disks;
2820                 mddev->delta_disks = n - olddisks;
2821                 mddev->raid_disks = n;
2822         } else
2823                 mddev->raid_disks = n;
2824         return rv ? rv : len;
2825 }
2826 static struct md_sysfs_entry md_raid_disks =
2827 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
2828
2829 static ssize_t
2830 chunk_size_show(mddev_t *mddev, char *page)
2831 {
2832         if (mddev->reshape_position != MaxSector &&
2833             mddev->chunk_size != mddev->new_chunk)
2834                 return sprintf(page, "%d (%d)\n", mddev->new_chunk,
2835                                mddev->chunk_size);
2836         return sprintf(page, "%d\n", mddev->chunk_size);
2837 }
2838
2839 static ssize_t
2840 chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
2841 {
2842         char *e;
2843         unsigned long n = simple_strtoul(buf, &e, 10);
2844
2845         if (!*buf || (*e && *e != '\n'))
2846                 return -EINVAL;
2847
2848         if (mddev->pers) {
2849                 int err;
2850                 if (mddev->pers->reconfig == NULL)
2851                         return -EBUSY;
2852                 err = mddev->pers->reconfig(mddev, -1, n);
2853                 if (err)
2854                         return err;
2855         } else {
2856                 mddev->new_chunk = n;
2857                 if (mddev->reshape_position == MaxSector)
2858                         mddev->chunk_size = n;
2859         }
2860         return len;
2861 }
2862 static struct md_sysfs_entry md_chunk_size =
2863 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
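/*
 * Example usage (illustrative; assumes "md0"): chunk_size is expressed
 * in bytes, so 65536 selects 64KiB chunks.  On an active array the
 * change is routed through pers->reconfig(mddev, -1, n):
 *
 *   # echo 65536 > /sys/block/md0/md/chunk_size
 */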
2864
2865 static ssize_t
2866 resync_start_show(mddev_t *mddev, char *page)
2867 {
2868         return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
2869 }
2870
2871 static ssize_t
2872 resync_start_store(mddev_t *mddev, const char *buf, size_t len)
2873 {
2874         char *e;
2875         unsigned long long n = simple_strtoull(buf, &e, 10);
2876
2877         if (mddev->pers)
2878                 return -EBUSY;
2879         if (!*buf || (*e && *e != '\n'))
2880                 return -EINVAL;
2881
2882         mddev->recovery_cp = n;
2883         return len;
2884 }
2885 static struct md_sysfs_entry md_resync_start =
2886 __ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
2887
2888 /*
2889  * The array state can be:
2890  *
2891  * clear
2892  *     No devices, no size, no level
2893  *     Equivalent to STOP_ARRAY ioctl
2894  * inactive
2895  *     May have some settings, but array is not active
2896  *     all IO results in error
2897  *     When written, doesn't tear down array, but just stops it
2898  * suspended (not supported yet)
2899  *     All IO requests will block. The array can be reconfigured.
2900  *     Writing this, if accepted, will block until array is quiescent
2901  * readonly
2902  *     no resync can happen.  no superblocks get written.
2903  *     write requests fail
2904  * read-auto
2905  *     like readonly, but behaves like 'clean' on a write request.
2906  *
2907  * clean
 *     no pending writes, but otherwise active.
2908  *     When written to inactive array, starts without resync
2909  *     If a write request arrives then
2910  *       if metadata is known, mark 'dirty' and switch to 'active'.
2911  *       if not known, block and switch to write-pending
2912  *     If written to an active array that has pending writes, then fails.
2913  * active
2914  *     fully active: IO and resync can be happening.
2915  *     When written to inactive array, starts with resync
2916  *
2917  * write-pending
2918  *     clean, but writes are blocked waiting for 'active' to be written.
2919  *
2920  * active-idle
2921  *     like active, but no writes have been seen for a while (100msec).
2922  *
2923  */
2924 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
2925                    write_pending, active_idle, bad_word};
2926 static char *array_states[] = {
2927         "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
2928         "write-pending", "active-idle", NULL };
2929
2930 static int match_word(const char *word, char **list)
2931 {
2932         int n;
2933         for (n=0; list[n]; n++)
2934                 if (cmd_match(word, list[n]))
2935                         break;
2936         return n;
2937 }
2938
2939 static ssize_t
2940 array_state_show(mddev_t *mddev, char *page)
2941 {
2942         enum array_state st = inactive;
2943
2944         if (mddev->pers)
2945                 switch(mddev->ro) {
2946                 case 1:
2947                         st = readonly;
2948                         break;
2949                 case 2:
2950                         st = read_auto;
2951                         break;
2952                 case 0:
2953                         if (mddev->in_sync)
2954                                 st = clean;
2955                         else if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
2956                                 st = write_pending;
2957                         else if (mddev->safemode)
2958                                 st = active_idle;
2959                         else
2960                                 st = active;
2961                 }
2962         else {
2963                 if (list_empty(&mddev->disks) &&
2964                     mddev->raid_disks == 0 &&
2965                     mddev->dev_sectors == 0)
2966                         st = clear;
2967                 else
2968                         st = inactive;
2969         }
2970         return sprintf(page, "%s\n", array_states[st]);
2971 }
2972
2973 static int do_md_stop(mddev_t * mddev, int ro, int is_open);
2974 static int do_md_run(mddev_t * mddev);
2975 static int restart_array(mddev_t *mddev);
2976
2977 static ssize_t
2978 array_state_store(mddev_t *mddev, const char *buf, size_t len)
2979 {
2980         int err = -EINVAL;
2981         enum array_state st = match_word(buf, array_states);
2982         switch(st) {
2983         case bad_word:
2984                 break;
2985         case clear:
2986                 /* stopping an active array */
2987                 if (atomic_read(&mddev->openers) > 0)
2988                         return -EBUSY;
2989                 err = do_md_stop(mddev, 0, 0);
2990                 break;
2991         case inactive:
2992                 /* stopping an active array */
2993                 if (mddev->pers) {
2994                         if (atomic_read(&mddev->openers) > 0)
2995                                 return -EBUSY;
2996                         err = do_md_stop(mddev, 2, 0);
2997                 } else
2998                         err = 0; /* already inactive */
2999                 break;
3000         case suspended:
3001                 break; /* not supported yet */
3002         case readonly:
3003                 if (mddev->pers)
3004                         err = do_md_stop(mddev, 1, 0);
3005                 else {
3006                         mddev->ro = 1;
3007                         set_disk_ro(mddev->gendisk, 1);
3008                         err = do_md_run(mddev);
3009                 }
3010                 break;
3011         case read_auto:
3012                 if (mddev->pers) {
3013                         if (mddev->ro == 0)
3014                                 err = do_md_stop(mddev, 1, 0);
3015                         else if (mddev->ro == 1)
3016                                 err = restart_array(mddev);
3017                         if (err == 0) {
3018                                 mddev->ro = 2;
3019                                 set_disk_ro(mddev->gendisk, 0);
3020                         }
3021                 } else {
3022                         mddev->ro = 2;
3023                         err = do_md_run(mddev);
3024                 }
3025                 break;
3026         case clean:
3027                 if (mddev->pers) {
3028                         restart_array(mddev);
3029                         spin_lock_irq(&mddev->write_lock);
3030                         if (atomic_read(&mddev->writes_pending) == 0) {
3031                                 if (mddev->in_sync == 0) {
3032                                         mddev->in_sync = 1;
3033                                         if (mddev->safemode == 1)
3034                                                 mddev->safemode = 0;
3035                                         if (mddev->persistent)
3036                                                 set_bit(MD_CHANGE_CLEAN,
3037                                                         &mddev->flags);
3038                                 }
3039                                 err = 0;
3040                         } else
3041                                 err = -EBUSY;
3042                         spin_unlock_irq(&mddev->write_lock);
3043                 } else {
3044                         mddev->ro = 0;
3045                         mddev->recovery_cp = MaxSector;
3046                         err = do_md_run(mddev);
3047                 }
3048                 break;
3049         case active:
3050                 if (mddev->pers) {
3051                         restart_array(mddev);
3052                         if (mddev->external)
3053                                 clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
3054                         wake_up(&mddev->sb_wait);
3055                         err = 0;
3056                 } else {
3057                         mddev->ro = 0;
3058                         set_disk_ro(mddev->gendisk, 0);
3059                         err = do_md_run(mddev);
3060                 }
3061                 break;
3062         case write_pending:
3063         case active_idle:
3064                 /* these cannot be set */
3065                 break;
3066         }
3067         if (err)
3068                 return err;
3069         else {
3070                 sysfs_notify_dirent(mddev->sysfs_state);
3071                 return len;
3072         }
3073 }
3074 static struct md_sysfs_entry md_array_state =
3075 __ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
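/*
 * Example usage (illustrative; assumes "md0"), matching the states
 * documented above:
 *
 *   # cat /sys/block/md0/md/array_state
 *   clean
 *   # echo readonly > /sys/block/md0/md/array_state   (do_md_stop mode 1)
 *   # echo inactive > /sys/block/md0/md/array_state   (do_md_stop mode 2)
 *   # echo clear > /sys/block/md0/md/array_state      (do_md_stop mode 0)
 */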
3076
3077 static ssize_t
3078 null_show(mddev_t *mddev, char *page)
3079 {
3080         return -EINVAL;
3081 }
3082
3083 static ssize_t
3084 new_dev_store(mddev_t *mddev, const char *buf, size_t len)
3085 {
3086         /* buf must be "%d:%d", optionally followed by '\n', giving major and minor numbers */
3087         /* The new device is added to the array.
3088          * If the array has a persistent superblock, we read the
3089          * superblock to initialise info and check validity.
3090          * Otherwise, only checking done is that in bind_rdev_to_array,
3091          * which mainly checks size.
3092          */
3093         char *e;
3094         int major = simple_strtoul(buf, &e, 10);
3095         int minor;
3096         dev_t dev;
3097         mdk_rdev_t *rdev;
3098         int err;
3099
3100         if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
3101                 return -EINVAL;
3102         minor = simple_strtoul(e+1, &e, 10);
3103         if (*e && *e != '\n')
3104                 return -EINVAL;
3105         dev = MKDEV(major, minor);
3106         if (major != MAJOR(dev) ||
3107             minor != MINOR(dev))
3108                 return -EOVERFLOW;
3109
3110
3111         if (mddev->persistent) {
3112                 rdev = md_import_device(dev, mddev->major_version,
3113                                         mddev->minor_version);
3114                 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
3115                         mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
3116                                                        mdk_rdev_t, same_set);
3117                         err = super_types[mddev->major_version]
3118                                 .load_super(rdev, rdev0, mddev->minor_version);
3119                         if (err < 0)
3120                                 goto out;
3121                 }
3122         } else if (mddev->external)
3123                 rdev = md_import_device(dev, -2, -1);
3124         else
3125                 rdev = md_import_device(dev, -1, -1);
3126
3127         if (IS_ERR(rdev))
3128                 return PTR_ERR(rdev);
3129         err = bind_rdev_to_array(rdev, mddev);
3130  out:
3131         if (err)
3132                 export_rdev(rdev);
3133         return err ? err : len;
3134 }
3135
3136 static struct md_sysfs_entry md_new_device =
3137 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
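/*
 * Example usage (illustrative): the value is "major:minor" of the
 * component device, e.g. 8:17 for /dev/sdb1, written to an assumed
 * "md0":
 *
 *   # echo 8:17 > /sys/block/md0/md/new_dev
 */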
3138
3139 static ssize_t
3140 bitmap_store(mddev_t *mddev, const char *buf, size_t len)
3141 {
3142         char *end;
3143         unsigned long chunk, end_chunk;
3144
3145         if (!mddev->bitmap)
3146                 goto out;
3147         /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
3148         while (*buf) {
3149                 chunk = end_chunk = simple_strtoul(buf, &end, 0);
3150                 if (buf == end) break;
3151                 if (*end == '-') { /* range */
3152                         buf = end + 1;
3153                         end_chunk = simple_strtoul(buf, &end, 0);
3154                         if (buf == end) break;
3155                 }
3156                 if (*end && !isspace(*end)) break;
3157                 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
3158                 buf = end;
3159                 while (isspace(*buf)) buf++;
3160         }
3161         bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
3162 out:
3163         return len;
3164 }
3165
3166 static struct md_sysfs_entry md_bitmap =
3167 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
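/*
 * Example usage (illustrative; assumes "md0"): single chunk numbers
 * and inclusive ranges may be mixed, separated by whitespace.  This
 * marks chunks 0-15 and chunk 100 dirty:
 *
 *   # echo "0-15 100" > /sys/block/md0/md/bitmap_set_bits
 */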
3168
3169 static ssize_t
3170 size_show(mddev_t *mddev, char *page)
3171 {
3172         return sprintf(page, "%llu\n",
3173                 (unsigned long long)mddev->dev_sectors / 2);
3174 }
3175
3176 static int update_size(mddev_t *mddev, sector_t num_sectors);
3177
3178 static ssize_t
3179 size_store(mddev_t *mddev, const char *buf, size_t len)
3180 {
3181         /* If array is inactive, we can reduce the component size, but
3182          * not increase it (except from 0).
3183          * If array is active, we can try an on-line resize
3184          */
3185         unsigned long long sectors;
3186         int err = strict_strtoull(buf, 10, &sectors);
3187
3188         if (err < 0)
3189                 return err;
3190         sectors *= 2;
3191         if (mddev->pers) {
3192                 err = update_size(mddev, sectors);
3193                 md_update_sb(mddev, 1);
3194         } else {
3195                 if (mddev->dev_sectors == 0 ||
3196                     mddev->dev_sectors > sectors)
3197                         mddev->dev_sectors = sectors;
3198                 else
3199                         err = -ENOSPC;
3200         }
3201         return err ? err : len;
3202 }
3203
3204 static struct md_sysfs_entry md_size =
3205 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
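/*
 * Example usage (illustrative; assumes "md0"): component_size is read
 * and written in KiB; the store path converts to 512-byte sectors by
 * multiplying by two.  A 1GiB component size is therefore:
 *
 *   # echo 1048576 > /sys/block/md0/md/component_size
 */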
3206
3207
3208 /* Metadata version.
3209  * This is one of
3210  *   'none' for arrays with no metadata (good luck...)
3211  *   'external' for arrays with externally managed metadata,
3212  * or N.M for internally known formats
3213  */
3214 static ssize_t
3215 metadata_show(mddev_t *mddev, char *page)
3216 {
3217         if (mddev->persistent)
3218                 return sprintf(page, "%d.%d\n",
3219                                mddev->major_version, mddev->minor_version);
3220         else if (mddev->external)
3221                 return sprintf(page, "external:%s\n", mddev->metadata_type);
3222         else
3223                 return sprintf(page, "none\n");
3224 }
3225
3226 static ssize_t
3227 metadata_store(mddev_t *mddev, const char *buf, size_t len)
3228 {
3229         int major, minor;
3230         char *e;
3231         /* Changing the details of 'external' metadata is
3232          * always permitted.  Otherwise there must be
3233          * no devices attached to the array.
3234          */
3235         if (mddev->external && strncmp(buf, "external:", 9) == 0)
3236                 ;
3237         else if (!list_empty(&mddev->disks))
3238                 return -EBUSY;
3239
3240         if (cmd_match(buf, "none")) {
3241                 mddev->persistent = 0;
3242                 mddev->external = 0;
3243                 mddev->major_version = 0;
3244                 mddev->minor_version = 90;
3245                 return len;
3246         }
3247         if (strncmp(buf, "external:", 9) == 0) {
3248                 size_t namelen = len-9;
3249                 if (namelen >= sizeof(mddev->metadata_type))
3250                         namelen = sizeof(mddev->metadata_type)-1;
3251                 strncpy(mddev->metadata_type, buf+9, namelen);
3252                 mddev->metadata_type[namelen] = 0;
3253                 if (namelen && mddev->metadata_type[namelen-1] == '\n')
3254                         mddev->metadata_type[--namelen] = 0;
3255                 mddev->persistent = 0;
3256                 mddev->external = 1;
3257                 mddev->major_version = 0;
3258                 mddev->minor_version = 90;
3259                 return len;
3260         }
3261         major = simple_strtoul(buf, &e, 10);
3262         if (e==buf || *e != '.')
3263                 return -EINVAL;
3264         buf = e+1;
3265         minor = simple_strtoul(buf, &e, 10);
3266         if (e==buf || (*e && *e != '\n'))
3267                 return -EINVAL;
3268         if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
3269                 return -ENOENT;
3270         mddev->major_version = major;
3271         mddev->minor_version = minor;
3272         mddev->persistent = 1;
3273         mddev->external = 0;
3274         return len;
3275 }
3276
3277 static struct md_sysfs_entry md_metadata =
3278 __ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
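/*
 * Example usage (illustrative; assumes "md0" with no devices attached,
 * and "imsm" as an example external handler name):
 *
 *   # echo 1.2 > /sys/block/md0/md/metadata_version
 *   # echo external:imsm > /sys/block/md0/md/metadata_version
 *   # echo none > /sys/block/md0/md/metadata_version
 */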
3279
3280 static ssize_t
3281 action_show(mddev_t *mddev, char *page)
3282 {
3283         char *type = "idle";
3284         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3285             (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
3286                 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
3287                         type = "reshape";
3288                 else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3289                         if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
3290                                 type = "resync";
3291                         else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
3292                                 type = "check";
3293                         else
3294                                 type = "repair";
3295                 } else if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
3296                         type = "recover";
3297         }
3298         return sprintf(page, "%s\n", type);
3299 }
3300
3301 static ssize_t
3302 action_store(mddev_t *mddev, const char *page, size_t len)
3303 {
3304         if (!mddev->pers || !mddev->pers->sync_request)
3305                 return -EINVAL;
3306
3307         if (cmd_match(page, "idle")) {
3308                 if (mddev->sync_thread) {
3309                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3310                         md_unregister_thread(mddev->sync_thread);
3311                         mddev->sync_thread = NULL;
3312                         mddev->recovery = 0;
3313                 }
3314         } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3315                    test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
3316                 return -EBUSY;
3317         else if (cmd_match(page, "resync"))
3318                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3319         else if (cmd_match(page, "recover")) {
3320                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
3321                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3322         } else if (cmd_match(page, "reshape")) {
3323                 int err;
3324                 if (mddev->pers->start_reshape == NULL)
3325                         return -EINVAL;
3326                 err = mddev->pers->start_reshape(mddev);
3327                 if (err)
3328                         return err;
3329                 sysfs_notify(&mddev->kobj, NULL, "degraded");
3330         } else {
3331                 if (cmd_match(page, "check"))
3332                         set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
3333                 else if (!cmd_match(page, "repair"))
3334                         return -EINVAL;
3335                 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
3336                 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3337         }
3338         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3339         md_wakeup_thread(mddev->thread);
3340         sysfs_notify_dirent(mddev->sysfs_action);
3341         return len;
3342 }
3343
3344 static ssize_t
3345 mismatch_cnt_show(mddev_t *mddev, char *page)
3346 {
3347         return sprintf(page, "%llu\n",
3348                        (unsigned long long) mddev->resync_mismatches);
3349 }
3350
3351 static struct md_sysfs_entry md_scan_mode =
3352 __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
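/*
 * Example usage (illustrative; assumes "md0"): "check" scans without
 * correcting and counts mismatches, "repair" also rewrites them, and
 * "idle" interrupts a running sync thread:
 *
 *   # echo check > /sys/block/md0/md/sync_action
 *   # cat /sys/block/md0/md/mismatch_cnt
 *   0
 *   # echo idle > /sys/block/md0/md/sync_action
 */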
3353
3354
3355 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
3356
3357 static ssize_t
3358 sync_min_show(mddev_t *mddev, char *page)
3359 {
3360         return sprintf(page, "%d (%s)\n", speed_min(mddev),
3361                        mddev->sync_speed_min ? "local": "system");
3362 }
3363
3364 static ssize_t
3365 sync_min_store(mddev_t *mddev, const char *buf, size_t len)
3366 {
3367         int min;
3368         char *e;
3369         if (strncmp(buf, "system", 6)==0) {
3370                 mddev->sync_speed_min = 0;
3371                 return len;
3372         }
3373         min = simple_strtoul(buf, &e, 10);
3374         if (buf == e || (*e && *e != '\n') || min <= 0)
3375                 return -EINVAL;
3376         mddev->sync_speed_min = min;
3377         return len;
3378 }
3379
3380 static struct md_sysfs_entry md_sync_min =
3381 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
3382
3383 static ssize_t
3384 sync_max_show(mddev_t *mddev, char *page)
3385 {
3386         return sprintf(page, "%d (%s)\n", speed_max(mddev),
3387                        mddev->sync_speed_max ? "local": "system");
3388 }
3389
3390 static ssize_t
3391 sync_max_store(mddev_t *mddev, const char *buf, size_t len)
3392 {
3393         int max;
3394         char *e;
3395         if (strncmp(buf, "system", 6)==0) {
3396                 mddev->sync_speed_max = 0;
3397                 return len;
3398         }
3399         max = simple_strtoul(buf, &e, 10);
3400         if (buf == e || (*e && *e != '\n') || max <= 0)
3401                 return -EINVAL;
3402         mddev->sync_speed_max = max;
3403         return len;
3404 }
3405
3406 static struct md_sysfs_entry md_sync_max =
3407 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
3408
3409 static ssize_t
3410 degraded_show(mddev_t *mddev, char *page)
3411 {
3412         return sprintf(page, "%d\n", mddev->degraded);
3413 }
3414 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
3415
3416 static ssize_t
3417 sync_force_parallel_show(mddev_t *mddev, char *page)
3418 {
3419         return sprintf(page, "%d\n", mddev->parallel_resync);
3420 }
3421
3422 static ssize_t
3423 sync_force_parallel_store(mddev_t *mddev, const char *buf, size_t len)
3424 {
3425         long n;
3426
3427         if (strict_strtol(buf, 10, &n))
3428                 return -EINVAL;
3429
3430         if (n != 0 && n != 1)
3431                 return -EINVAL;
3432
3433         mddev->parallel_resync = n;
3434
3435         if (mddev->sync_thread)
3436                 wake_up(&resync_wait);
3437
3438         return len;
3439 }
3440
3441 /* force parallel resync, even with shared block devices */
3442 static struct md_sysfs_entry md_sync_force_parallel =
3443 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
3444        sync_force_parallel_show, sync_force_parallel_store);
3445
3446 static ssize_t
3447 sync_speed_show(mddev_t *mddev, char *page)
3448 {
3449         unsigned long resync, dt, db;
3450         resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
3451         dt = (jiffies - mddev->resync_mark) / HZ;
3452         if (!dt) dt++;
3453         db = resync - mddev->resync_mark_cnt;
3454         return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
3455 }
3456
3457 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
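/*
 * For example, if db = 204800 sectors were resynced over dt = 10
 * seconds, sync_speed reports 204800 / 10 / 2 = 10240 KiB/sec; the
 * final division by two converts 512-byte sectors to KiB.
 */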
3458
3459 static ssize_t
3460 sync_completed_show(mddev_t *mddev, char *page)
3461 {
3462         unsigned long max_sectors, resync;
3463
3464         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
3465                 max_sectors = mddev->resync_max_sectors;
3466         else
3467                 max_sectors = mddev->dev_sectors;
3468
3469         resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active));
3470         return sprintf(page, "%lu / %lu\n", resync, max_sectors);
3471 }
3472
3473 static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
3474
3475 static ssize_t
3476 min_sync_show(mddev_t *mddev, char *page)
3477 {
3478         return sprintf(page, "%llu\n",
3479                        (unsigned long long)mddev->resync_min);
3480 }
3481 static ssize_t
3482 min_sync_store(mddev_t *mddev, const char *buf, size_t len)
3483 {
3484         unsigned long long min;
3485         if (strict_strtoull(buf, 10, &min))
3486                 return -EINVAL;
3487         if (min > mddev->resync_max)
3488                 return -EINVAL;
3489         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3490                 return -EBUSY;
3491
3492         /* Must be a multiple of chunk_size */
3493         if (mddev->chunk_size) {
3494                 if (min & (sector_t)((mddev->chunk_size>>9)-1))
3495                         return -EINVAL;
3496         }
3497         mddev->resync_min = min;
3498
3499         return len;
3500 }
3501
3502 static struct md_sysfs_entry md_min_sync =
3503 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
3504
3505 static ssize_t
3506 max_sync_show(mddev_t *mddev, char *page)
3507 {
3508         if (mddev->resync_max == MaxSector)
3509                 return sprintf(page, "max\n");
3510         else
3511                 return sprintf(page, "%llu\n",
3512                                (unsigned long long)mddev->resync_max);
3513 }
3514 static ssize_t
3515 max_sync_store(mddev_t *mddev, const char *buf, size_t len)
3516 {
3517         if (strncmp(buf, "max", 3) == 0)
3518                 mddev->resync_max = MaxSector;
3519         else {
3520                 unsigned long long max;
3521                 if (strict_strtoull(buf, 10, &max))
3522                         return -EINVAL;
3523                 if (max < mddev->resync_min)
3524                         return -EINVAL;
3525                 if (max < mddev->resync_max &&
3526                     test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3527                         return -EBUSY;
3528
3529                 /* Must be a multiple of chunk_size */
3530                 if (mddev->chunk_size) {
3531                         if (max & (sector_t)((mddev->chunk_size>>9)-1))
3532                                 return -EINVAL;
3533                 }
3534                 mddev->resync_max = max;
3535         }
3536         wake_up(&mddev->recovery_wait);
3537         return len;
3538 }
3539
3540 static struct md_sysfs_entry md_max_sync =
3541 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
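/*
 * For example, with chunk_size = 65536 bytes the mask above is
 * (65536 >> 9) - 1 = 127, so sync_min and sync_max must be multiples
 * of 128 sectors: writing 256 succeeds, writing 100 fails with EINVAL.
 */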
3542
3543 static ssize_t
3544 suspend_lo_show(mddev_t *mddev, char *page)
3545 {
3546         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
3547 }
3548
3549 static ssize_t
3550 suspend_lo_store(mddev_t *mddev, const char *buf, size_t len)
3551 {
3552         char *e;
3553         unsigned long long new = simple_strtoull(buf, &e, 10);
3554
3555         if (mddev->pers->quiesce == NULL)
3556                 return -EINVAL;
3557         if (buf == e || (*e && *e != '\n'))
3558                 return -EINVAL;
3559         if (new >= mddev->suspend_hi ||
3560             (new > mddev->suspend_lo && new < mddev->suspend_hi)) {
3561                 mddev->suspend_lo = new;
3562                 mddev->pers->quiesce(mddev, 2);
3563                 return len;
3564         } else
3565                 return -EINVAL;
3566 }
3567 static struct md_sysfs_entry md_suspend_lo =
3568 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
3569
3570
3571 static ssize_t
3572 suspend_hi_show(mddev_t *mddev, char *page)
3573 {
3574         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
3575 }
3576
3577 static ssize_t
3578 suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
3579 {
3580         char *e;
3581         unsigned long long new = simple_strtoull(buf, &e, 10);
3582
3583         if (mddev->pers->quiesce == NULL)
3584                 return -EINVAL;
3585         if (buf == e || (*e && *e != '\n'))
3586                 return -EINVAL;
3587         if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) ||
3588             (new > mddev->suspend_lo && new > mddev->suspend_hi)) {
3589                 mddev->suspend_hi = new;
3590                 mddev->pers->quiesce(mddev, 1);
3591                 mddev->pers->quiesce(mddev, 0);
3592                 return len;
3593         } else
3594                 return -EINVAL;
3595 }
3596 static struct md_sysfs_entry md_suspend_hi =
3597 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
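/*
 * Example usage (illustrative; assumes "md0" whose personality
 * provides quiesce()): with suspend_lo still at its initial 0, this
 * suspends IO to sectors [0, 8192) and then empties the range again:
 *
 *   # echo 8192 > /sys/block/md0/md/suspend_hi
 *   # echo 8192 > /sys/block/md0/md/suspend_lo
 */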
3598
3599 static ssize_t
3600 reshape_position_show(mddev_t *mddev, char *page)
3601 {
3602         if (mddev->reshape_position != MaxSector)
3603                 return sprintf(page, "%llu\n",
3604                                (unsigned long long)mddev->reshape_position);
3605         strcpy(page, "none\n");
3606         return 5;
3607 }
3608
3609 static ssize_t
3610 reshape_position_store(mddev_t *mddev, const char *buf, size_t len)
3611 {
3612         char *e;
3613         unsigned long long new = simple_strtoull(buf, &e, 10);
3614         if (mddev->pers)
3615                 return -EBUSY;
3616         if (buf == e || (*e && *e != '\n'))
3617                 return -EINVAL;
3618         mddev->reshape_position = new;
3619         mddev->delta_disks = 0;
3620         mddev->new_level = mddev->level;
3621         mddev->new_layout = mddev->layout;
3622         mddev->new_chunk = mddev->chunk_size;
3623         return len;
3624 }
3625
3626 static struct md_sysfs_entry md_reshape_position =
3627 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
3628        reshape_position_store);
3629
3630
3631 static struct attribute *md_default_attrs[] = {
3632         &md_level.attr,
3633         &md_layout.attr,
3634         &md_raid_disks.attr,
3635         &md_chunk_size.attr,
3636         &md_size.attr,
3637         &md_resync_start.attr,
3638         &md_metadata.attr,
3639         &md_new_device.attr,
3640         &md_safe_delay.attr,
3641         &md_array_state.attr,
3642         &md_reshape_position.attr,
3643         NULL,
3644 };
3645
3646 static struct attribute *md_redundancy_attrs[] = {
3647         &md_scan_mode.attr,
3648         &md_mismatches.attr,
3649         &md_sync_min.attr,
3650         &md_sync_max.attr,
3651         &md_sync_speed.attr,
3652         &md_sync_force_parallel.attr,
3653         &md_sync_completed.attr,
3654         &md_min_sync.attr,
3655         &md_max_sync.attr,
3656         &md_suspend_lo.attr,
3657         &md_suspend_hi.attr,
3658         &md_bitmap.attr,
3659         &md_degraded.attr,
3660         NULL,
3661 };
3662 static struct attribute_group md_redundancy_group = {
3663         .name = NULL,
3664         .attrs = md_redundancy_attrs,
3665 };
3666
3667
3668 static ssize_t
3669 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
3670 {
3671         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
3672         mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
3673         ssize_t rv;
3674
3675         if (!entry->show)
3676                 return -EIO;
3677         rv = mddev_lock(mddev);
3678         if (!rv) {
3679                 rv = entry->show(mddev, page);
3680                 mddev_unlock(mddev);
3681         }
3682         return rv;
3683 }
3684
3685 static ssize_t
3686 md_attr_store(struct kobject *kobj, struct attribute *attr,
3687               const char *page, size_t length)
3688 {
3689         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
3690         mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
3691         ssize_t rv;
3692
3693         if (!entry->store)
3694                 return -EIO;
3695         if (!capable(CAP_SYS_ADMIN))
3696                 return -EACCES;
3697         rv = mddev_lock(mddev);
3698         if (mddev->hold_active == UNTIL_IOCTL)
3699                 mddev->hold_active = 0;
3700         if (!rv) {
3701                 rv = entry->store(mddev, page, length);
3702                 mddev_unlock(mddev);
3703         }
3704         return rv;
3705 }
3706
3707 static void md_free(struct kobject *ko)
3708 {
3709         mddev_t *mddev = container_of(ko, mddev_t, kobj);
3710
3711         if (mddev->sysfs_state)
3712                 sysfs_put(mddev->sysfs_state);
3713
3714         if (mddev->gendisk) {
3715                 del_gendisk(mddev->gendisk);
3716                 put_disk(mddev->gendisk);
3717         }
3718         if (mddev->queue)
3719                 blk_cleanup_queue(mddev->queue);
3720
3721         kfree(mddev);
3722 }
3723
3724 static struct sysfs_ops md_sysfs_ops = {
3725         .show   = md_attr_show,
3726         .store  = md_attr_store,
3727 };
3728 static struct kobj_type md_ktype = {
3729         .release        = md_free,
3730         .sysfs_ops      = &md_sysfs_ops,
3731         .default_attrs  = md_default_attrs,
3732 };
3733
3734 int mdp_major = 0;
3735
3736 static int md_alloc(dev_t dev, char *name)
3737 {
3738         static DEFINE_MUTEX(disks_mutex);
3739         mddev_t *mddev = mddev_find(dev);
3740         struct gendisk *disk;
3741         int partitioned;
3742         int shift;
3743         int unit;
3744         int error;
3745
3746         if (!mddev)
3747                 return -ENODEV;
3748
3749         partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
3750         shift = partitioned ? MdpMinorShift : 0;
3751         unit = MINOR(mddev->unit) >> shift;
3752
3753         /* wait for any previous instance of this device
3754          * to be completely removed (mddev_delayed_delete).
3755          */
3756         flush_scheduled_work();
3757
3758         mutex_lock(&disks_mutex);
3759         if (mddev->gendisk) {
3760                 mutex_unlock(&disks_mutex);
3761                 mddev_put(mddev);
3762                 return -EEXIST;
3763         }
3764
3765         if (name) {
3766                 /* Need to ensure that 'name' is not a duplicate.
3767                  */
3768                 mddev_t *mddev2;
3769                 spin_lock(&all_mddevs_lock);
3770
3771                 list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
3772                         if (mddev2->gendisk &&
3773                             strcmp(mddev2->gendisk->disk_name, name) == 0) {
3774                                 spin_unlock(&all_mddevs_lock);
3775                                 return -EEXIST;
3776                         }
3777                 spin_unlock(&all_mddevs_lock);
3778         }
3779
3780         mddev->queue = blk_alloc_queue(GFP_KERNEL);
3781         if (!mddev->queue) {
3782                 mutex_unlock(&disks_mutex);
3783                 mddev_put(mddev);
3784                 return -ENOMEM;
3785         }
3786         mddev->queue->queuedata = mddev;
3787
3788         /* Can be unlocked because the queue is new: no concurrency */
3789         queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, mddev->queue);
3790
3791         blk_queue_make_request(mddev->queue, md_make_request);
3792
3793         disk = alloc_disk(1 << shift);
3794         if (!disk) {
3795                 mutex_unlock(&disks_mutex);
3796                 blk_cleanup_queue(mddev->queue);
3797                 mddev->queue = NULL;
3798                 mddev_put(mddev);
3799                 return -ENOMEM;
3800         }
3801         disk->major = MAJOR(mddev->unit);
3802         disk->first_minor = unit << shift;
3803         if (name)
3804                 strcpy(disk->disk_name, name);
3805         else if (partitioned)
3806                 sprintf(disk->disk_name, "md_d%d", unit);
3807         else
3808                 sprintf(disk->disk_name, "md%d", unit);
3809         disk->fops = &md_fops;
3810         disk->private_data = mddev;
3811         disk->queue = mddev->queue;
3812         /* Allow extended partitions.  This makes the
3813          * 'mdp' device redundant, but we can't really
3814          * remove it now.
3815          */
3816         disk->flags |= GENHD_FL_EXT_DEVT;
3817         add_disk(disk);
3818         mddev->gendisk = disk;
3819         error = kobject_init_and_add(&mddev->kobj, &md_ktype,
3820                                      &disk_to_dev(disk)->kobj, "%s", "md");
3821         mutex_unlock(&disks_mutex);
3822         if (error)
3823                 printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
3824                        disk->disk_name);
3825         else {
3826                 kobject_uevent(&mddev->kobj, KOBJ_ADD);
3827                 mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, "array_state");
3828         }
3829         mddev_put(mddev);
3830         return 0;
3831 }
3832
3833 static struct kobject *md_probe(dev_t dev, int *part, void *data)
3834 {
3835         md_alloc(dev, NULL);
3836         return NULL;
3837 }
3838
3839 static int add_named_array(const char *val, struct kernel_param *kp)
3840 {
3841         /* val must be "md_*" where * is not all digits.
3842          * We allocate an array with a large free minor number, and
3843          * set the name to val.  val must not already be an active name.
3844          */
3845         int len = strlen(val);
3846         char buf[DISK_NAME_LEN];
3847
3848         while (len && val[len-1] == '\n')
3849                 len--;
3850         if (len >= DISK_NAME_LEN)
3851                 return -E2BIG;
3852         strlcpy(buf, val, len+1);
3853         if (strncmp(buf, "md_", 3) != 0)
3854                 return -EINVAL;
3855         return md_alloc(0, buf);
3856 }
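/*
 * Example usage (illustrative; assumes this handler is wired up as a
 * writable module parameter called "new_array"):
 *
 *   # echo md_home > /sys/module/md_mod/parameters/new_array
 *
 * which creates an array named /dev/md_home on a free minor number.
 */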
3857
3858 static void md_safemode_timeout(unsigned long data)
3859 {
3860         mddev_t *mddev = (mddev_t *) data;
3861
3862         if (!atomic_read(&mddev->writes_pending)) {
3863                 mddev->safemode = 1;
3864                 if (mddev->external)
3865                         sysfs_notify_dirent(mddev->sysfs_state);
3866         }
3867         md_wakeup_thread(mddev->thread);
3868 }
3869
3870 static int start_dirty_degraded;
3871
3872 static int do_md_run(mddev_t * mddev)
3873 {
3874         int err;
3875         int chunk_size;
3876         mdk_rdev_t *rdev;
3877         struct gendisk *disk;
3878         struct mdk_personality *pers;
3879         char b[BDEVNAME_SIZE];
3880
3881         if (list_empty(&mddev->disks))
3882                 /* cannot run an array with no devices.. */
3883                 return -EINVAL;
3884
3885         if (mddev->pers)
3886                 return -EBUSY;
3887
3888         /*
3889          * Analyze all RAID superblock(s)
3890          */
3891         if (!mddev->raid_disks) {
3892                 if (!mddev->persistent)
3893                         return -EINVAL;
3894                 analyze_sbs(mddev);
3895         }
3896
3897         chunk_size = mddev->chunk_size;
3898
3899         if (chunk_size) {
3900                 if (chunk_size > MAX_CHUNK_SIZE) {
3901                         printk(KERN_ERR "too big chunk_size: %d > %d\n",
3902                                 chunk_size, MAX_CHUNK_SIZE);
3903                         return -EINVAL;
3904                 }
3905                 /*
3906                  * chunk-size has to be a power of 2
3907                  */
3908                 if ((1 << ffz(~chunk_size)) != chunk_size) {
3909                         printk(KERN_ERR "chunk_size of %d not valid\n", chunk_size);
3910                         return -EINVAL;
3911                 }
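                /* For example, chunk_size = 65536 passes the test above
                 * (ffz(~65536) == 16 and 1 << 16 == 65536) while 65537
                 * fails; the test is equivalent to requiring
                 * (chunk_size & (chunk_size - 1)) == 0.
                 */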
3912
3913                 /* devices must have minimum size of one chunk */
3914                 list_for_each_entry(rdev, &mddev->disks, same_set) {
3915                         if (test_bit(Faulty, &rdev->flags))
3916                                 continue;
3917                         if (rdev->sectors < chunk_size / 512) {
3918                                 printk(KERN_WARNING
3919                                         "md: Dev %s smaller than chunk_size:"
3920                                         " %llu < %d\n",
3921                                         bdevname(rdev->bdev,b),
3922                                         (unsigned long long)rdev->sectors,
3923                                         chunk_size / 512);
3924                                 return -EINVAL;
3925                         }
3926                 }
3927         }
3928
3929         if (mddev->level != LEVEL_NONE)
3930                 request_module("md-level-%d", mddev->level);
3931         else if (mddev->clevel[0])
3932                 request_module("md-%s", mddev->clevel);
3933
3934         /*
3935          * Drop all container device buffers, from now on
3936          * the only valid external interface is through the md
3937          * device.
3938          */
3939         list_for_each_entry(rdev, &mddev->disks, same_set) {
3940                 if (test_bit(Faulty, &rdev->flags))
3941                         continue;
3942                 sync_blockdev(rdev->bdev);
3943                 invalidate_bdev(rdev->bdev);
3944
3945                 /* perform some consistency tests on the device.
3946          * We don't want the data to overlap the metadata;
3947          * internal bitmap issues have been handled elsewhere.
3948                  */
3949                 if (rdev->data_offset < rdev->sb_start) {
3950                         if (mddev->dev_sectors &&
3951                             rdev->data_offset + mddev->dev_sectors
3952                             > rdev->sb_start) {
3953                                 printk("md: %s: data overlaps metadata\n",
3954                                        mdname(mddev));
3955                                 return -EINVAL;
3956                         }
3957                 } else {
3958                         if (rdev->sb_start + rdev->sb_size/512
3959                             > rdev->data_offset) {
3960                                 printk("md: %s: metadata overlaps data\n",
3961                                        mdname(mddev));
3962                                 return -EINVAL;
3963                         }
3964                 }
3965                 sysfs_notify_dirent(rdev->sysfs_state);
3966         }
3967
3968         md_probe(mddev->unit, NULL, NULL);
3969         disk = mddev->gendisk;
3970         if (!disk)
3971                 return -ENOMEM;
3972
3973         spin_lock(&pers_lock);
3974         pers = find_pers(mddev->level, mddev->clevel);
3975         if (!pers || !try_module_get(pers->owner)) {
3976                 spin_unlock(&pers_lock);
3977                 if (mddev->level != LEVEL_NONE)
3978                         printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
3979                                mddev->level);
3980                 else
3981                         printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
3982                                mddev->clevel);
3983                 return -EINVAL;
3984         }
3985         mddev->pers = pers;
3986         spin_unlock(&pers_lock);
3987         if (mddev->level != pers->level) {
3988                 mddev->level = pers->level;
3989                 mddev->new_level = pers->level;
3990         }
3991         strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
3992
3993         if (pers->level >= 4 && pers->level <= 6)
3994                 /* Cannot support integrity (yet) */
3995                 blk_integrity_unregister(mddev->gendisk);
3996
3997         if (mddev->reshape_position != MaxSector &&
3998             pers->start_reshape == NULL) {
3999                 /* This personality cannot handle reshaping... */
4000                 mddev->pers = NULL;
4001                 module_put(pers->owner);
4002                 return -EINVAL;
4003         }
4004
4005         if (pers->sync_request) {
4006                 /* Warn if this is a potentially silly
4007                  * configuration.
4008                  */
4009                 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
4010                 mdk_rdev_t *rdev2;
4011                 int warned = 0;
4012
4013                 list_for_each_entry(rdev, &mddev->disks, same_set)
4014                         list_for_each_entry(rdev2, &mddev->disks, same_set) {
4015                                 if (rdev < rdev2 &&
4016                                     rdev->bdev->bd_contains ==
4017                                     rdev2->bdev->bd_contains) {
4018                                         printk(KERN_WARNING
4019                                                "%s: WARNING: %s appears to be"
4020                                                " on the same physical disk as"
4021                                                " %s.\n",
4022                                                mdname(mddev),
4023                                                bdevname(rdev->bdev,b),
4024                                                bdevname(rdev2->bdev,b2));
4025                                         warned = 1;
4026                                 }
4027                         }
4028
4029                 if (warned)
4030                         printk(KERN_WARNING
4031                                "True protection against single-disk"
4032                                " failure might be compromised.\n");
4033         }
4034
4035         mddev->recovery = 0;
4036         /* may be overridden by personality */
4037         mddev->resync_max_sectors = mddev->dev_sectors;
4038
4039         mddev->barriers_work = 1;
4040         mddev->ok_start_degraded = start_dirty_degraded;
4041
4042         if (start_readonly)
4043                 mddev->ro = 2; /* read-only, but switch on first write */
4044
4045         err = mddev->pers->run(mddev);
4046         if (err)
4047                 printk(KERN_ERR "md: pers->run() failed ...\n");
4048         else if (mddev->pers->sync_request) {
4049                 err = bitmap_create(mddev);
4050                 if (err) {
4051                         printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
4052                                mdname(mddev), err);
4053                         mddev->pers->stop(mddev);
4054                 }
4055         }
4056         if (err) {
4057                 module_put(mddev->pers->owner);
4058                 mddev->pers = NULL;
4059                 bitmap_destroy(mddev);
4060                 return err;
4061         }
4062         if (mddev->pers->sync_request) {
4063                 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
4064                         printk(KERN_WARNING
4065                                "md: cannot register extra attributes for %s\n",
4066                                mdname(mddev));
4067                 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
4068         } else if (mddev->ro == 2) /* auto-readonly not meaningful */
4069                 mddev->ro = 0;
4070
4071         atomic_set(&mddev->writes_pending,0);
4072         mddev->safemode = 0;
4073         mddev->safemode_timer.function = md_safemode_timeout;
4074         mddev->safemode_timer.data = (unsigned long) mddev;
4075         mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
4076         mddev->in_sync = 1;
4077
4078         list_for_each_entry(rdev, &mddev->disks, same_set)
4079                 if (rdev->raid_disk >= 0) {
4080                         char nm[20];
4081                         sprintf(nm, "rd%d", rdev->raid_disk);
4082                         if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm))
4083                                 printk("md: cannot register %s for %s\n",
4084                                        nm, mdname(mddev));
4085                 }
4086
4087         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4088
4089         if (mddev->flags)
4090                 md_update_sb(mddev, 0);
4091
4092         set_capacity(disk, mddev->array_sectors);
4093
4094         /* If there is a partially-recovered drive, we need to
4095          * start recovery here.  If we leave it to md_check_recovery,
4096          * it will remove the drives and not do the right thing.
4097          */
4098         if (mddev->degraded && !mddev->sync_thread) {
4099                 int spares = 0;
4100                 list_for_each_entry(rdev, &mddev->disks, same_set)
4101                         if (rdev->raid_disk >= 0 &&
4102                             !test_bit(In_sync, &rdev->flags) &&
4103                             !test_bit(Faulty, &rdev->flags))
4104                                 /* complete an interrupted recovery */
4105                                 spares++;
4106                 if (spares && mddev->pers->sync_request) {
4107                         mddev->recovery = 0;
4108                         set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4109                         mddev->sync_thread = md_register_thread(md_do_sync,
4110                                                                 mddev,
4111                                                                 "%s_resync");
4112                         if (!mddev->sync_thread) {
4113                                 printk(KERN_ERR "%s: could not start resync"
4114                                        " thread...\n",
4115                                        mdname(mddev));
4116                                 /* leave the spares where they are, it shouldn't hurt */
4117                                 mddev->recovery = 0;
4118                         }
4119                 }
4120         }
4121         md_wakeup_thread(mddev->thread);
4122         md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
4123
4124         mddev->changed = 1;
4125         md_new_event(mddev);
4126         sysfs_notify_dirent(mddev->sysfs_state);
4127         if (mddev->sysfs_action)
4128                 sysfs_notify_dirent(mddev->sysfs_action);
4129         sysfs_notify(&mddev->kobj, NULL, "degraded");
4130         kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
4131         return 0;
4132 }
4133
4134 static int restart_array(mddev_t *mddev)
4135 {
4136         struct gendisk *disk = mddev->gendisk;
4137
4138         /* Complain if it has no devices */
4139         if (list_empty(&mddev->disks))
4140                 return -ENXIO;
4141         if (!mddev->pers)
4142                 return -EINVAL;
4143         if (!mddev->ro)
4144                 return -EBUSY;
4145         mddev->safemode = 0;
4146         mddev->ro = 0;
4147         set_disk_ro(disk, 0);
4148         printk(KERN_INFO "md: %s switched to read-write mode.\n",
4149                 mdname(mddev));
4150         /* Kick recovery or resync if necessary */
4151         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4152         md_wakeup_thread(mddev->thread);
4153         md_wakeup_thread(mddev->sync_thread);
4154         sysfs_notify_dirent(mddev->sysfs_state);
4155         return 0;
4156 }
4157
4158 /* similar to deny_write_access, but accounts for our holding a reference
4159  * to the file ourselves */
4160 static int deny_bitmap_write_access(struct file * file)
4161 {
4162         struct inode *inode = file->f_mapping->host;
4163
4164         spin_lock(&inode->i_lock);
4165         if (atomic_read(&inode->i_writecount) > 1) {
4166                 spin_unlock(&inode->i_lock);
4167                 return -ETXTBSY;
4168         }
4169         atomic_set(&inode->i_writecount, -1);
4170         spin_unlock(&inode->i_lock);
4171
4172         return 0;
4173 }
4174
4175 static void restore_bitmap_write_access(struct file *file)
4176 {
4177         struct inode *inode = file->f_mapping->host;
4178
4179         spin_lock(&inode->i_lock);
4180         atomic_set(&inode->i_writecount, 1);
4181         spin_unlock(&inode->i_lock);
4182 }
4183
4184 /* mode:
4185  *   0 - completely stop and dis-assemble array
4186  *   1 - switch to readonly
4187  *   2 - stop but do not disassemble array
4188  */
4189 static int do_md_stop(mddev_t * mddev, int mode, int is_open)
4190 {
4191         int err = 0;
4192         struct gendisk *disk = mddev->gendisk;
4193
4194         if (atomic_read(&mddev->openers) > is_open) {
4195                 printk("md: %s still in use.\n",mdname(mddev));
4196                 return -EBUSY;
4197         }
4198
4199         if (mddev->pers) {
4200
4201                 if (mddev->sync_thread) {
4202                         set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4203                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4204                         md_unregister_thread(mddev->sync_thread);
4205                         mddev->sync_thread = NULL;
4206                 }
4207
4208                 del_timer_sync(&mddev->safemode_timer);
4209
4210                 switch(mode) {
4211                 case 1: /* readonly */
4212                         err  = -ENXIO;
4213                         if (mddev->ro==1)
4214                                 goto out;
4215                         mddev->ro = 1;
4216                         break;
4217                 case 0: /* disassemble */
4218                 case 2: /* stop */
4219                         bitmap_flush(mddev);
4220                         md_super_wait(mddev);
4221                         if (mddev->ro)
4222                                 set_disk_ro(disk, 0);
4223
4224                         mddev->pers->stop(mddev);
4225                         mddev->queue->merge_bvec_fn = NULL;
4226                         mddev->queue->unplug_fn = NULL;
4227                         mddev->queue->backing_dev_info.congested_fn = NULL;
4228                         if (mddev->pers->sync_request) {
4229                                 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
4230                                 if (mddev->sysfs_action)
4231                                         sysfs_put(mddev->sysfs_action);
4232                                 mddev->sysfs_action = NULL;
4233                         }
4234                         module_put(mddev->pers->owner);
4235                         mddev->pers = NULL;
4236                         /* tell userspace to handle 'inactive' */
4237                         sysfs_notify_dirent(mddev->sysfs_state);
4238
4239                         set_capacity(disk, 0);
4240                         mddev->changed = 1;
4241
4242                         if (mddev->ro)
4243                                 mddev->ro = 0;
4244                 }
4245                 if (!mddev->in_sync || mddev->flags) {
4246                         /* mark array as shutdown cleanly */
4247                         mddev->in_sync = 1;
4248                         md_update_sb(mddev, 1);
4249                 }
4250                 if (mode == 1)
4251                         set_disk_ro(disk, 1);
4252                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4253         }
4254
4255         /*
4256          * Free resources if final stop
4257          */
4258         if (mode == 0) {
4259                 mdk_rdev_t *rdev;
4260
4261                 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
4262
4263                 bitmap_destroy(mddev);
4264                 if (mddev->bitmap_file) {
4265                         restore_bitmap_write_access(mddev->bitmap_file);
4266                         fput(mddev->bitmap_file);
4267                         mddev->bitmap_file = NULL;
4268                 }
4269                 mddev->bitmap_offset = 0;
4270
4271                 list_for_each_entry(rdev, &mddev->disks, same_set)
4272                         if (rdev->raid_disk >= 0) {
4273                                 char nm[20];
4274                                 sprintf(nm, "rd%d", rdev->raid_disk);
4275                                 sysfs_remove_link(&mddev->kobj, nm);
4276                         }
4277
4278                 /* make sure all md_delayed_delete calls have finished */
4279                 flush_scheduled_work();
4280
4281                 export_array(mddev);
4282
4283                 mddev->array_sectors = 0;
4284                 mddev->dev_sectors = 0;
4285                 mddev->raid_disks = 0;
4286                 mddev->recovery_cp = 0;
4287                 mddev->resync_min = 0;
4288                 mddev->resync_max = MaxSector;
4289                 mddev->reshape_position = MaxSector;
4290                 mddev->external = 0;
4291                 mddev->persistent = 0;
4292                 mddev->level = LEVEL_NONE;
4293                 mddev->clevel[0] = 0;
4294                 mddev->flags = 0;
4295                 mddev->ro = 0;
4296                 mddev->metadata_type[0] = 0;
4297                 mddev->chunk_size = 0;
4298                 mddev->ctime = mddev->utime = 0;
4299                 mddev->layout = 0;
4300                 mddev->max_disks = 0;
4301                 mddev->events = 0;
4302                 mddev->delta_disks = 0;
4303                 mddev->new_level = LEVEL_NONE;
4304                 mddev->new_layout = 0;
4305                 mddev->new_chunk = 0;
4306                 mddev->curr_resync = 0;
4307                 mddev->resync_mismatches = 0;
4308                 mddev->suspend_lo = mddev->suspend_hi = 0;
4309                 mddev->sync_speed_min = mddev->sync_speed_max = 0;
4310                 mddev->recovery = 0;
4311                 mddev->in_sync = 0;
4312                 mddev->changed = 0;
4313                 mddev->degraded = 0;
4314                 mddev->barriers_work = 0;
4315                 mddev->safemode = 0;
4316                 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
4317                 if (mddev->hold_active == UNTIL_STOP)
4318                         mddev->hold_active = 0;
4319
4320         } else if (mddev->pers)
4321                 printk(KERN_INFO "md: %s switched to read-only mode.\n",
4322                         mdname(mddev));
4323         err = 0;
4324         blk_integrity_unregister(disk);
4325         md_new_event(mddev);
4326         sysfs_notify_dirent(mddev->sysfs_state);
4327 out:
4328         return err;
4329 }
4330
4331 #ifndef MODULE
4332 static void autorun_array(mddev_t *mddev)
4333 {
4334         mdk_rdev_t *rdev;
4335         int err;
4336
4337         if (list_empty(&mddev->disks))
4338                 return;
4339
4340         printk(KERN_INFO "md: running: ");
4341
4342         list_for_each_entry(rdev, &mddev->disks, same_set) {
4343                 char b[BDEVNAME_SIZE];
4344                 printk("<%s>", bdevname(rdev->bdev,b));
4345         }
4346         printk("\n");
4347
4348         err = do_md_run(mddev);
4349         if (err) {
4350                 printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
4351                 do_md_stop(mddev, 0, 0);
4352         }
4353 }
4354
4355 /*
4356  * let's try to run arrays based on all disks that have arrived
4357  * until now (those are in pending_raid_disks).
4358  *
4359  * The method: pick the first pending disk, collect all disks with
4360  * the same UUID, remove them all from the pending list and put them
4361  * into the 'candidates' list. Then order this list based on superblock
4362  * update time (freshest comes first), kick out 'old' disks and
4363  * compare superblocks. If everything's fine then run it.
4364  *
4365  * If "unit" is allocated, then bump its reference count.
4366  */
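/*
 * A hedged walk-through (illustrative assumption, not driver logic):
 * suppose the pending list holds sda1 and sdb1 sharing one UUID, and
 * sdc1 carrying another.  The first pass of the loop below moves sda1
 * and sdb1 onto 'candidates' and assembles them into one array; the
 * next pass then handles sdc1 on its own.  The device names here are
 * made up for illustration.
 */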
4367 static void autorun_devices(int part)
4368 {
4369         mdk_rdev_t *rdev0, *rdev, *tmp;
4370         mddev_t *mddev;
4371         char b[BDEVNAME_SIZE];
4372
4373         printk(KERN_INFO "md: autorun ...\n");
4374         while (!list_empty(&pending_raid_disks)) {
4375                 int unit;
4376                 dev_t dev;
4377                 LIST_HEAD(candidates);
4378                 rdev0 = list_entry(pending_raid_disks.next,
4379                                          mdk_rdev_t, same_set);
4380
4381                 printk(KERN_INFO "md: considering %s ...\n",
4382                         bdevname(rdev0->bdev,b));
4384                 rdev_for_each_list(rdev, tmp, &pending_raid_disks)
4385                         if (super_90_load(rdev, rdev0, 0) >= 0) {
4386                                 printk(KERN_INFO "md:  adding %s ...\n",
4387                                         bdevname(rdev->bdev,b));
4388                                 list_move(&rdev->same_set, &candidates);
4389                         }
4390                 /*
4391                  * now we have a set of devices, with all of them having
4392                  * mostly sane superblocks. It's time to allocate the
4393                  * mddev.
4394                  */
4395                 if (part) {
4396                         dev = MKDEV(mdp_major,
4397                                     rdev0->preferred_minor << MdpMinorShift);
4398                         unit = MINOR(dev) >> MdpMinorShift;
4399                 } else {
4400                         dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
4401                         unit = MINOR(dev);
4402                 }
4403                 if (rdev0->preferred_minor != unit) {
4404                         printk(KERN_INFO "md: unit number in %s is bad: %d\n",
4405                                bdevname(rdev0->bdev, b), rdev0->preferred_minor);
4406                         break;
4407                 }
4408
4409                 md_probe(dev, NULL, NULL);
4410                 mddev = mddev_find(dev);
4411                 if (!mddev || !mddev->gendisk) {
4412                         if (mddev)
4413                                 mddev_put(mddev);
4414                         printk(KERN_ERR
4415                                 "md: cannot allocate memory for md drive.\n");
4416                         break;
4417                 }
4418                 if (mddev_lock(mddev)) 
4419                         printk(KERN_WARNING "md: %s locked, cannot run\n",
4420                                mdname(mddev));
4421                 else if (mddev->raid_disks || mddev->major_version
4422                          || !list_empty(&mddev->disks)) {
4423                         printk(KERN_WARNING 
4424                                 "md: %s already running, cannot run %s\n",
4425                                 mdname(mddev), bdevname(rdev0->bdev,b));
4426                         mddev_unlock(mddev);
4427                 } else {
4428                         printk(KERN_INFO "md: created %s\n", mdname(mddev));
4429                         mddev->persistent = 1;
4430                         rdev_for_each_list(rdev, tmp, &candidates) {
4431                                 list_del_init(&rdev->same_set);
4432                                 if (bind_rdev_to_array(rdev, mddev))
4433                                         export_rdev(rdev);
4434                         }
4435                         autorun_array(mddev);
4436                         mddev_unlock(mddev);
4437                 }
4438                 /* on success, candidates will be empty; on error
4439                  * it won't be...
4440                  */
4441                 rdev_for_each_list(rdev, tmp, &candidates) {
4442                         list_del_init(&rdev->same_set);
4443                         export_rdev(rdev);
4444                 }
4445                 mddev_put(mddev);
4446         }
4447         printk(KERN_INFO "md: ... autorun DONE.\n");
4448 }
4449 #endif /* !MODULE */
4450
4451 static int get_version(void __user * arg)
4452 {
4453         mdu_version_t ver;
4454
4455         ver.major = MD_MAJOR_VERSION;
4456         ver.minor = MD_MINOR_VERSION;
4457         ver.patchlevel = MD_PATCHLEVEL_VERSION;
4458
4459         if (copy_to_user(arg, &ver, sizeof(ver)))
4460                 return -EFAULT;
4461
4462         return 0;
4463 }
4464
4465 static int get_array_info(mddev_t * mddev, void __user * arg)
4466 {
4467         mdu_array_info_t info;
4468         int nr,working,active,failed,spare;
4469         mdk_rdev_t *rdev;
4470
4471         nr=working=active=failed=spare=0;
4472         list_for_each_entry(rdev, &mddev->disks, same_set) {
4473                 nr++;
4474                 if (test_bit(Faulty, &rdev->flags))
4475                         failed++;
4476                 else {
4477                         working++;
4478                         if (test_bit(In_sync, &rdev->flags))
4479                                 active++;       
4480                         else
4481                                 spare++;
4482                 }
4483         }
4484
4485         info.major_version = mddev->major_version;
4486         info.minor_version = mddev->minor_version;
4487         info.patch_version = MD_PATCHLEVEL_VERSION;
4488         info.ctime         = mddev->ctime;
4489         info.level         = mddev->level;
4490         info.size          = mddev->dev_sectors / 2;
4491         if (info.size != mddev->dev_sectors / 2) /* overflow */
4492                 info.size = -1;
4493         info.nr_disks      = nr;
4494         info.raid_disks    = mddev->raid_disks;
4495         info.md_minor      = mddev->md_minor;
4496         info.not_persistent = !mddev->persistent;
4497
4498         info.utime         = mddev->utime;
4499         info.state         = 0;
4500         if (mddev->in_sync)
4501                 info.state = (1<<MD_SB_CLEAN);
4502         if (mddev->bitmap && mddev->bitmap_offset)
4503                 info.state |= (1<<MD_SB_BITMAP_PRESENT);
4504         info.active_disks  = active;
4505         info.working_disks = working;
4506         info.failed_disks  = failed;
4507         info.spare_disks   = spare;
4508
4509         info.layout        = mddev->layout;
4510         info.chunk_size    = mddev->chunk_size;
4511
4512         if (copy_to_user(arg, &info, sizeof(info)))
4513                 return -EFAULT;
4514
4515         return 0;
4516 }
4517
4518 static int get_bitmap_file(mddev_t * mddev, void __user * arg)
4519 {
4520         mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
4521         char *ptr, *buf = NULL;
4522         int err = -ENOMEM;
4523
4524         if (md_allow_write(mddev))
4525                 file = kmalloc(sizeof(*file), GFP_NOIO);
4526         else
4527                 file = kmalloc(sizeof(*file), GFP_KERNEL);
4528
4529         if (!file)
4530                 goto out;
4531
4532         /* bitmap disabled, zero the first byte and copy out */
4533         if (!mddev->bitmap || !mddev->bitmap->file) {
4534                 file->pathname[0] = '\0';
4535                 goto copy_out;
4536         }
4537
4538         buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
4539         if (!buf)
4540                 goto out;
4541
4542         ptr = d_path(&mddev->bitmap->file->f_path, buf, sizeof(file->pathname));
4543         if (IS_ERR(ptr))
4544                 goto out;
4545
4546         strcpy(file->pathname, ptr);
4547
4548 copy_out:
4549         err = 0;
4550         if (copy_to_user(arg, file, sizeof(*file)))
4551                 err = -EFAULT;
4552 out:
4553         kfree(buf);
4554         kfree(file);
4555         return err;
4556 }
4557
4558 static int get_disk_info(mddev_t * mddev, void __user * arg)
4559 {
4560         mdu_disk_info_t info;
4561         mdk_rdev_t *rdev;
4562
4563         if (copy_from_user(&info, arg, sizeof(info)))
4564                 return -EFAULT;
4565
4566         rdev = find_rdev_nr(mddev, info.number);
4567         if (rdev) {
4568                 info.major = MAJOR(rdev->bdev->bd_dev);
4569                 info.minor = MINOR(rdev->bdev->bd_dev);
4570                 info.raid_disk = rdev->raid_disk;
4571                 info.state = 0;
4572                 if (test_bit(Faulty, &rdev->flags))
4573                         info.state |= (1<<MD_DISK_FAULTY);
4574                 else if (test_bit(In_sync, &rdev->flags)) {
4575                         info.state |= (1<<MD_DISK_ACTIVE);
4576                         info.state |= (1<<MD_DISK_SYNC);
4577                 }
4578                 if (test_bit(WriteMostly, &rdev->flags))
4579                         info.state |= (1<<MD_DISK_WRITEMOSTLY);
4580         } else {
4581                 info.major = info.minor = 0;
4582                 info.raid_disk = -1;
4583                 info.state = (1<<MD_DISK_REMOVED);
4584         }
4585
4586         if (copy_to_user(arg, &info, sizeof(info)))
4587                 return -EFAULT;
4588
4589         return 0;
4590 }
4591
4592 static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
4593 {
4594         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
4595         mdk_rdev_t *rdev;
4596         dev_t dev = MKDEV(info->major,info->minor);
4597
4598         if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
4599                 return -EOVERFLOW;
4600
4601         if (!mddev->raid_disks) {
4602                 int err;
4603                 /* expecting a device which has a superblock */
4604                 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
4605                 if (IS_ERR(rdev)) {
4606                         printk(KERN_WARNING 
4607                                 "md: md_import_device returned %ld\n",
4608                                 PTR_ERR(rdev));
4609                         return PTR_ERR(rdev);
4610                 }
4611                 if (!list_empty(&mddev->disks)) {
4612                         mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
4613                                                         mdk_rdev_t, same_set);
4614                         int err = super_types[mddev->major_version]
4615                                 .load_super(rdev, rdev0, mddev->minor_version);
4616                         if (err < 0) {
4617                                 printk(KERN_WARNING 
4618                                         "md: %s has different UUID to %s\n",
4619                                         bdevname(rdev->bdev,b), 
4620                                         bdevname(rdev0->bdev,b2));
4621                                 export_rdev(rdev);
4622                                 return -EINVAL;
4623                         }
4624                 }
4625                 err = bind_rdev_to_array(rdev, mddev);
4626                 if (err)
4627                         export_rdev(rdev);
4628                 return err;
4629         }
4630
4631         /*
4632          * add_new_disk can be used once the array is assembled
4633          * to add "hot spares".  They must already have a superblock
4634          * written
4635          */
4636         if (mddev->pers) {
4637                 int err;
4638                 if (!mddev->pers->hot_add_disk) {
4639                         printk(KERN_WARNING 
4640                                 "%s: personality does not support diskops!\n",
4641                                mdname(mddev));
4642                         return -EINVAL;
4643                 }
4644                 if (mddev->persistent)
4645                         rdev = md_import_device(dev, mddev->major_version,
4646                                                 mddev->minor_version);
4647                 else
4648                         rdev = md_import_device(dev, -1, -1);
4649                 if (IS_ERR(rdev)) {
4650                         printk(KERN_WARNING 
4651                                 "md: md_import_device returned %ld\n",
4652                                 PTR_ERR(rdev));
4653                         return PTR_ERR(rdev);
4654                 }
4655                 /* set save_raid_disk if appropriate */
4656                 if (!mddev->persistent) {
4657                         if (info->state & (1<<MD_DISK_SYNC)  &&
4658                             info->raid_disk < mddev->raid_disks)
4659                                 rdev->raid_disk = info->raid_disk;
4660                         else
4661                                 rdev->raid_disk = -1;
4662                 } else
4663                         super_types[mddev->major_version].
4664                                 validate_super(mddev, rdev);
4665                 rdev->saved_raid_disk = rdev->raid_disk;
4666
4667                 clear_bit(In_sync, &rdev->flags); /* just to be sure */
4668                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
4669                         set_bit(WriteMostly, &rdev->flags);
4670                 else
4671                         clear_bit(WriteMostly, &rdev->flags);
4672
4673                 rdev->raid_disk = -1;
4674                 err = bind_rdev_to_array(rdev, mddev);
4675                 if (!err && !mddev->pers->hot_remove_disk) {
4676                         /* If there is hot_add_disk but no hot_remove_disk
4677                          * then added disks are for geometry changes,
4678                          * and should be added immediately.
4679                          */
4680                         super_types[mddev->major_version].
4681                                 validate_super(mddev, rdev);
4682                         err = mddev->pers->hot_add_disk(mddev, rdev);
4683                         if (err)
4684                                 unbind_rdev_from_array(rdev);
4685                 }
4686                 if (err)
4687                         export_rdev(rdev);
4688                 else
4689                         sysfs_notify_dirent(rdev->sysfs_state);
4690
4691                 md_update_sb(mddev, 1);
4692                 if (mddev->degraded)
4693                         set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
4694                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4695                 md_wakeup_thread(mddev->thread);
4696                 return err;
4697         }
4698
4699         /* otherwise, add_new_disk is only allowed
4700          * for major_version==0 superblocks
4701          */
4702         if (mddev->major_version != 0) {
4703                 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
4704                        mdname(mddev));
4705                 return -EINVAL;
4706         }
4707
4708         if (!(info->state & (1<<MD_DISK_FAULTY))) {
4709                 int err;
4710                 rdev = md_import_device(dev, -1, 0);
4711                 if (IS_ERR(rdev)) {
4712                         printk(KERN_WARNING 
4713                                 "md: error, md_import_device() returned %ld\n",
4714                                 PTR_ERR(rdev));
4715                         return PTR_ERR(rdev);
4716                 }
4717                 rdev->desc_nr = info->number;
4718                 if (info->raid_disk < mddev->raid_disks)
4719                         rdev->raid_disk = info->raid_disk;
4720                 else
4721                         rdev->raid_disk = -1;
4722
4723                 if (rdev->raid_disk < mddev->raid_disks)
4724                         if (info->state & (1<<MD_DISK_SYNC))
4725                                 set_bit(In_sync, &rdev->flags);
4726
4727                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
4728                         set_bit(WriteMostly, &rdev->flags);
4729
4730                 if (!mddev->persistent) {
4731                         printk(KERN_INFO "md: nonpersistent superblock ...\n");
4732                         rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
4733                 } else 
4734                         rdev->sb_start = calc_dev_sboffset(rdev->bdev);
4735                 rdev->sectors = calc_num_sectors(rdev, mddev->chunk_size);
4736
4737                 err = bind_rdev_to_array(rdev, mddev);
4738                 if (err) {
4739                         export_rdev(rdev);
4740                         return err;
4741                 }
4742         }
4743
4744         return 0;
4745 }
4746
4747 static int hot_remove_disk(mddev_t * mddev, dev_t dev)
4748 {
4749         char b[BDEVNAME_SIZE];
4750         mdk_rdev_t *rdev;
4751
4752         rdev = find_rdev(mddev, dev);
4753         if (!rdev)
4754                 return -ENXIO;
4755
4756         if (rdev->raid_disk >= 0)
4757                 goto busy;
4758
4759         kick_rdev_from_array(rdev);
4760         md_update_sb(mddev, 1);
4761         md_new_event(mddev);
4762
4763         return 0;
4764 busy:
4765         printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
4766                 bdevname(rdev->bdev,b), mdname(mddev));
4767         return -EBUSY;
4768 }
4769
4770 static int hot_add_disk(mddev_t * mddev, dev_t dev)
4771 {
4772         char b[BDEVNAME_SIZE];
4773         int err;
4774         mdk_rdev_t *rdev;
4775
4776         if (!mddev->pers)
4777                 return -ENODEV;
4778
4779         if (mddev->major_version != 0) {
4780                 printk(KERN_WARNING "%s: HOT_ADD may only be used with"
4781                         " version-0 superblocks.\n",
4782                         mdname(mddev));
4783                 return -EINVAL;
4784         }
4785         if (!mddev->pers->hot_add_disk) {
4786                 printk(KERN_WARNING 
4787                         "%s: personality does not support diskops!\n",
4788                         mdname(mddev));
4789                 return -EINVAL;
4790         }
4791
4792         rdev = md_import_device(dev, -1, 0);
4793         if (IS_ERR(rdev)) {
4794                 printk(KERN_WARNING 
4795                         "md: error, md_import_device() returned %ld\n",
4796                         PTR_ERR(rdev));
4797                 return -EINVAL;
4798         }
4799
4800         if (mddev->persistent)
4801                 rdev->sb_start = calc_dev_sboffset(rdev->bdev);
4802         else
4803                 rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
4804
4805         rdev->sectors = calc_num_sectors(rdev, mddev->chunk_size);
4806
4807         if (test_bit(Faulty, &rdev->flags)) {
4808                 printk(KERN_WARNING 
4809                         "md: can not hot-add faulty %s disk to %s!\n",
4810                         bdevname(rdev->bdev,b), mdname(mddev));
4811                 err = -EINVAL;
4812                 goto abort_export;
4813         }
4814         clear_bit(In_sync, &rdev->flags);
4815         rdev->desc_nr = -1;
4816         rdev->saved_raid_disk = -1;
4817         err = bind_rdev_to_array(rdev, mddev);
4818         if (err)
4819                 goto abort_export;
4820
4821         /*
4822          * The rest had better be atomic: disk failures can be
4823          * noticed in interrupt context ...
4824          */
4825
4826         rdev->raid_disk = -1;
4827
4828         md_update_sb(mddev, 1);
4829
4830         /*
4831          * Kick recovery, maybe this spare has to be added to the
4832          * array immediately.
4833          */
4834         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4835         md_wakeup_thread(mddev->thread);
4836         md_new_event(mddev);
4837         return 0;
4838
4839 abort_export:
4840         export_rdev(rdev);
4841         return err;
4842 }
4843
4844 static int set_bitmap_file(mddev_t *mddev, int fd)
4845 {
4846         int err;
4847
4848         if (mddev->pers) {
4849                 if (!mddev->pers->quiesce)
4850                         return -EBUSY;
4851                 if (mddev->recovery || mddev->sync_thread)
4852                         return -EBUSY;
4853                 /* we should be able to change the bitmap.. */
4854         }
4855
4856
4857         if (fd >= 0) {
4858                 if (mddev->bitmap)
4859                         return -EEXIST; /* cannot add when bitmap is present */
4860                 mddev->bitmap_file = fget(fd);
4861
4862                 if (mddev->bitmap_file == NULL) {
4863                         printk(KERN_ERR "%s: error: failed to get bitmap file\n",
4864                                mdname(mddev));
4865                         return -EBADF;
4866                 }
4867
4868                 err = deny_bitmap_write_access(mddev->bitmap_file);
4869                 if (err) {
4870                         printk(KERN_ERR "%s: error: bitmap file is already in use\n",
4871                                mdname(mddev));
4872                         fput(mddev->bitmap_file);
4873                         mddev->bitmap_file = NULL;
4874                         return err;
4875                 }
4876                 mddev->bitmap_offset = 0; /* file overrides offset */
4877         } else if (mddev->bitmap == NULL)
4878                 return -ENOENT; /* cannot remove what isn't there */
4879         err = 0;
4880         if (mddev->pers) {
4881                 mddev->pers->quiesce(mddev, 1);
4882                 if (fd >= 0)
4883                         err = bitmap_create(mddev);
4884                 if (fd < 0 || err) {
4885                         bitmap_destroy(mddev);
4886                         fd = -1; /* make sure to put the file */
4887                 }
4888                 mddev->pers->quiesce(mddev, 0);
4889         }
4890         if (fd < 0) {
4891                 if (mddev->bitmap_file) {
4892                         restore_bitmap_write_access(mddev->bitmap_file);
4893                         fput(mddev->bitmap_file);
4894                 }
4895                 mddev->bitmap_file = NULL;
4896         }
4897
4898         return err;
4899 }
4900
4901 /*
4902  * set_array_info is used in two different ways.
4903  * The original usage is when creating a new array.
4904  * In this usage, raid_disks is > 0 and it, together with
4905  *  level, size, not_persistent, layout and chunksize, determines the
4906  *  shape of the array.
4907  *  This will always create an array with a type-0.90.0 superblock.
4908  * The newer usage is when assembling an array.
4909  *  In this case raid_disks will be 0, and the major_version field is
4910  *  used to determine which style of superblocks are to be found on the
4911  *  devices.  The minor and patch _version numbers are also kept in case
4912  *  the superblock handler wishes to interpret them.
4913  */
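/*
 * Illustrative sketch (not part of the driver): a userspace creator
 * would fill an mdu_array_info_t and issue SET_ARRAY_INFO on an open
 * md device node, roughly:
 *
 *     mdu_array_info_t info = { 0 };
 *     info.level = 5;                 (RAID5)
 *     info.raid_disks = 3;
 *     info.size = 1048576;            (per-device size in KB, assumed)
 *     info.chunk_size = 65536;        (in bytes)
 *     ioctl(md_fd, SET_ARRAY_INFO, &info);
 *
 * 'md_fd' and the field values are assumptions for illustration; in
 * practice mdadm drives this interface.
 */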
4914 static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
4915 {
4916
4917         if (info->raid_disks == 0) {
4918                 /* just setting version number for superblock loading */
4919                 if (info->major_version < 0 ||
4920                     info->major_version >= ARRAY_SIZE(super_types) ||
4921                     super_types[info->major_version].name == NULL) {
4922                         /* maybe try to auto-load a module? */
4923                         printk(KERN_INFO 
4924                                 "md: superblock version %d not known\n",
4925                                 info->major_version);
4926                         return -EINVAL;
4927                 }
4928                 mddev->major_version = info->major_version;
4929                 mddev->minor_version = info->minor_version;
4930                 mddev->patch_version = info->patch_version;
4931                 mddev->persistent = !info->not_persistent;
4932                 return 0;
4933         }
4934         mddev->major_version = MD_MAJOR_VERSION;
4935         mddev->minor_version = MD_MINOR_VERSION;
4936         mddev->patch_version = MD_PATCHLEVEL_VERSION;
4937         mddev->ctime         = get_seconds();
4938
4939         mddev->level         = info->level;
4940         mddev->clevel[0]     = 0;
4941         mddev->dev_sectors   = 2 * (sector_t)info->size;
4942         mddev->raid_disks    = info->raid_disks;
4943         /* don't set md_minor, it is determined by which /dev/md* was
4944          * opened
4945          */
4946         if (info->state & (1<<MD_SB_CLEAN))
4947                 mddev->recovery_cp = MaxSector;
4948         else
4949                 mddev->recovery_cp = 0;
4950         mddev->persistent    = !info->not_persistent;
4951         mddev->external      = 0;
4952
4953         mddev->layout        = info->layout;
4954         mddev->chunk_size    = info->chunk_size;
4955
4956         mddev->max_disks     = MD_SB_DISKS;
4957
4958         if (mddev->persistent)
4959                 mddev->flags         = 0;
4960         set_bit(MD_CHANGE_DEVS, &mddev->flags);
4961
4962         mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
4963         mddev->bitmap_offset = 0;
4964
4965         mddev->reshape_position = MaxSector;
4966
4967         /*
4968          * Generate a 128 bit UUID
4969          */
4970         get_random_bytes(mddev->uuid, 16);
4971
4972         mddev->new_level = mddev->level;
4973         mddev->new_chunk = mddev->chunk_size;
4974         mddev->new_layout = mddev->layout;
4975         mddev->delta_disks = 0;
4976
4977         return 0;
4978 }
4979
4980 static int update_size(mddev_t *mddev, sector_t num_sectors)
4981 {
4982         mdk_rdev_t *rdev;
4983         int rv;
4984         int fit = (num_sectors == 0);
4985
4986         if (mddev->pers->resize == NULL)
4987                 return -EINVAL;
4988         /* The "num_sectors" is the number of sectors of each device that
4989          * is used.  This can only make sense for arrays with redundancy.
4990          * linear and raid0 always use whatever space is available. We can only
4991          * consider changing this number if no resync or reconstruction is
4992          * happening, and if the new size is acceptable. It must fit before the
4993          * sb_start or, if that is <data_offset, it must fit before the size
4994          * of each device.  If num_sectors is zero, we find the largest size
4995          * that fits.
4997          */
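        /* Worked example (assumed numbers): with members offering
         * 200, 180 and 220 GB of usable space and num_sectors == 0,
         * the loop below settles on the smallest, 180 GB, while an
         * explicit request above 180 GB would return -ENOSPC.
         */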
4998         if (mddev->sync_thread)
4999                 return -EBUSY;
5000         if (mddev->bitmap)
5001                 /* Sorry, cannot grow a bitmap yet, just remove it,
5002                  * grow, and re-add.
5003                  */
5004                 return -EBUSY;
5005         list_for_each_entry(rdev, &mddev->disks, same_set) {
5006                 sector_t avail = rdev->sectors;
5007
5008                 if (fit && (num_sectors == 0 || num_sectors > avail))
5009                         num_sectors = avail;
5010                 if (avail < num_sectors)
5011                         return -ENOSPC;
5012         }
5013         rv = mddev->pers->resize(mddev, num_sectors);
5014         if (!rv) {
5015                 struct block_device *bdev;
5016
5017                 bdev = bdget_disk(mddev->gendisk, 0);
5018                 if (bdev) {
5019                         mutex_lock(&bdev->bd_inode->i_mutex);
5020                         i_size_write(bdev->bd_inode,
5021                                      (loff_t)mddev->array_sectors << 9);
5022                         mutex_unlock(&bdev->bd_inode->i_mutex);
5023                         bdput(bdev);
5024                 }
5025         }
5026         return rv;
5027 }
5028
5029 static int update_raid_disks(mddev_t *mddev, int raid_disks)
5030 {
5031         int rv;
5032         /* change the number of raid disks */
5033         if (mddev->pers->check_reshape == NULL)
5034                 return -EINVAL;
5035         if (raid_disks <= 0 ||
5036             raid_disks >= mddev->max_disks)
5037                 return -EINVAL;
5038         if (mddev->sync_thread || mddev->reshape_position != MaxSector)
5039                 return -EBUSY;
5040         mddev->delta_disks = raid_disks - mddev->raid_disks;
5041
5042         rv = mddev->pers->check_reshape(mddev);
5043         return rv;
5044 }
5045
5046
5047 /*
5048  * update_array_info is used to change the configuration of an
5049  * on-line array.
5050  * The version, ctime, level, size, raid_disks, not_persistent, layout
5051  * and chunk_size fields in the info are checked against the array.
5052  * Any differences that cannot be handled will cause an error.
5053  * Normally, only one change can be managed at a time.
5054  */
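/*
 * Illustrative sketch (assumption, not driver code): to add an
 * internal bitmap to a running array, userspace re-submits the
 * current info with only the bitmap bit flipped, e.g.:
 *
 *     mdu_array_info_t info;
 *     ioctl(md_fd, GET_ARRAY_INFO, &info);
 *     info.state |= (1 << MD_SB_BITMAP_PRESENT);
 *     ioctl(md_fd, SET_ARRAY_INFO, &info);
 *
 * Changing a second field in the same call would trip the
 * "only one change" check below and return -EINVAL.
 */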
5055 static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
5056 {
5057         int rv = 0;
5058         int cnt = 0;
5059         int state = 0;
5060
5061         /* calculate expected state, ignoring low bits */
5062         if (mddev->bitmap && mddev->bitmap_offset)
5063                 state |= (1 << MD_SB_BITMAP_PRESENT);
5064
5065         if (mddev->major_version != info->major_version ||
5066             mddev->minor_version != info->minor_version ||
5067 /*          mddev->patch_version != info->patch_version || */
5068             mddev->ctime         != info->ctime         ||
5069             mddev->level         != info->level         ||
5070 /*          mddev->layout        != info->layout        || */
5071             !mddev->persistent   != info->not_persistent||
5072             mddev->chunk_size    != info->chunk_size    ||
5073             /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
5074             ((state^info->state) & 0xfffffe00)
5075                 )
5076                 return -EINVAL;
5077         /* Check there is only one change */
5078         if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
5079                 cnt++;
5080         if (mddev->raid_disks != info->raid_disks)
5081                 cnt++;
5082         if (mddev->layout != info->layout)
5083                 cnt++;
5084         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
5085                 cnt++;
5086         if (cnt == 0)
5087                 return 0;
5088         if (cnt > 1)
5089                 return -EINVAL;
5090
5091         if (mddev->layout != info->layout) {
5092                 /* Change layout
5093                  * we don't need to do anything at the md level, the
5094                  * personality will take care of it all.
5095                  */
5096                 if (mddev->pers->reconfig == NULL)
5097                         return -EINVAL;
5098                 else
5099                         return mddev->pers->reconfig(mddev, info->layout, -1);
5100         }
5101         if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
5102                 rv = update_size(mddev, (sector_t)info->size * 2);
5103
5104         if (mddev->raid_disks    != info->raid_disks)
5105                 rv = update_raid_disks(mddev, info->raid_disks);
5106
5107         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
5108                 if (mddev->pers->quiesce == NULL)
5109                         return -EINVAL;
5110                 if (mddev->recovery || mddev->sync_thread)
5111                         return -EBUSY;
5112                 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
5113                         /* add the bitmap */
5114                         if (mddev->bitmap)
5115                                 return -EEXIST;
5116                         if (mddev->default_bitmap_offset == 0)
5117                                 return -EINVAL;
5118                         mddev->bitmap_offset = mddev->default_bitmap_offset;
5119                         mddev->pers->quiesce(mddev, 1);
5120                         rv = bitmap_create(mddev);
5121                         if (rv)
5122                                 bitmap_destroy(mddev);
5123                         mddev->pers->quiesce(mddev, 0);
5124                 } else {
5125                         /* remove the bitmap */
5126                         if (!mddev->bitmap)
5127                                 return -ENOENT;
5128                         if (mddev->bitmap->file)
5129                                 return -EINVAL;
5130                         mddev->pers->quiesce(mddev, 1);
5131                         bitmap_destroy(mddev);
5132                         mddev->pers->quiesce(mddev, 0);
5133                         mddev->bitmap_offset = 0;
5134                 }
5135         }
5136         md_update_sb(mddev, 1);
5137         return rv;
5138 }
5139
5140 static int set_disk_faulty(mddev_t *mddev, dev_t dev)
5141 {
5142         mdk_rdev_t *rdev;
5143
5144         if (mddev->pers == NULL)
5145                 return -ENODEV;
5146
5147         rdev = find_rdev(mddev, dev);
5148         if (!rdev)
5149                 return -ENODEV;
5150
5151         md_error(mddev, rdev);
5152         return 0;
5153 }
5154
5155 /*
5156  * We have a problem here: there is no easy way to give a CHS
5157  * virtual geometry. We currently pretend that we have a 2-head,
5158  * 4-sector geometry (with a BIG number of cylinders...). This
5159  * drives dosfs just mad... ;-)
5160  */
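/*
 * Example with assumed numbers: a 1 GiB array has 2097152 sectors;
 * with 2 heads * 4 sectors = 8 sectors per cylinder this reports
 * 262144 cylinders (get_capacity() / 8, as computed below).
 */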
5161 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
5162 {
5163         mddev_t *mddev = bdev->bd_disk->private_data;
5164
5165         geo->heads = 2;
5166         geo->sectors = 4;
5167         geo->cylinders = get_capacity(mddev->gendisk) / 8;
5168         return 0;
5169 }
5170
5171 static int md_ioctl(struct block_device *bdev, fmode_t mode,
5172                         unsigned int cmd, unsigned long arg)
5173 {
5174         int err = 0;
5175         void __user *argp = (void __user *)arg;
5176         mddev_t *mddev = NULL;
5177
5178         if (!capable(CAP_SYS_ADMIN))
5179                 return -EACCES;
5180
5181         /*
5182          * Commands dealing with the RAID driver but not any
5183          * particular array:
5184          */
5185         switch (cmd)
5186         {
5187                 case RAID_VERSION:
5188                         err = get_version(argp);
5189                         goto done;
5190
5191                 case PRINT_RAID_DEBUG:
5192                         err = 0;
5193                         md_print_devices();
5194                         goto done;
5195
5196 #ifndef MODULE
5197                 case RAID_AUTORUN:
5198                         err = 0;
5199                         autostart_arrays(arg);
5200                         goto done;
5201 #endif
5202                 default:;
5203         }
5204
5205         /*
5206          * Commands creating/starting a new array:
5207          */
5208
5209         mddev = bdev->bd_disk->private_data;
5210
5211         if (!mddev) {
5212                 BUG();
5213                 goto abort;
5214         }
5215
5216         err = mddev_lock(mddev);
5217         if (err) {
5218                 printk(KERN_INFO 
5219                         "md: ioctl lock interrupted, reason %d, cmd %d\n",
5220                         err, cmd);
5221                 goto abort;
5222         }
5223
5224         switch (cmd)
5225         {
5226                 case SET_ARRAY_INFO:
5227                         {
5228                                 mdu_array_info_t info;
5229                                 if (!arg)
5230                                         memset(&info, 0, sizeof(info));
5231                                 else if (copy_from_user(&info, argp, sizeof(info))) {
5232                                         err = -EFAULT;
5233                                         goto abort_unlock;
5234                                 }
5235                                 if (mddev->pers) {
5236                                         err = update_array_info(mddev, &info);
5237                                         if (err) {
5238                                                 printk(KERN_WARNING "md: couldn't update"
5239                                                        " array info. %d\n", err);
5240                                                 goto abort_unlock;
5241                                         }
5242                                         goto done_unlock;
5243                                 }
5244                                 if (!list_empty(&mddev->disks)) {
5245                                         printk(KERN_WARNING
5246                                                "md: array %s already has disks!\n",
5247                                                mdname(mddev));
5248                                         err = -EBUSY;
5249                                         goto abort_unlock;
5250                                 }
5251                                 if (mddev->raid_disks) {
5252                                         printk(KERN_WARNING
5253                                                "md: array %s already initialised!\n",
5254                                                mdname(mddev));
5255                                         err = -EBUSY;
5256                                         goto abort_unlock;
5257                                 }
5258                                 err = set_array_info(mddev, &info);
5259                                 if (err) {
5260                                         printk(KERN_WARNING "md: couldn't set"
5261                                                " array info. %d\n", err);
5262                                         goto abort_unlock;
5263                                 }
5264                         }
5265                         goto done_unlock;
5266
5267                 default:;
5268         }
5269
5270         /*
5271          * Commands querying/configuring an existing array:
5272          */
5273         /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
5274          * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
5275         if ((!mddev->raid_disks && !mddev->external)
5276             && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
5277             && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
5278             && cmd != GET_BITMAP_FILE) {
5279                 err = -ENODEV;
5280                 goto abort_unlock;
5281         }
5282
5283         /*
5284          * Commands even a read-only array can execute:
5285          */
5286         switch (cmd)
5287         {
5288                 case GET_ARRAY_INFO:
5289                         err = get_array_info(mddev, argp);
5290                         goto done_unlock;
5291
5292                 case GET_BITMAP_FILE:
5293                         err = get_bitmap_file(mddev, argp);
5294                         goto done_unlock;
5295
5296                 case GET_DISK_INFO:
5297                         err = get_disk_info(mddev, argp);
5298                         goto done_unlock;
5299
5300                 case RESTART_ARRAY_RW:
5301                         err = restart_array(mddev);
5302                         goto done_unlock;
5303
5304                 case STOP_ARRAY:
5305                         err = do_md_stop(mddev, 0, 1);
5306                         goto done_unlock;
5307
5308                 case STOP_ARRAY_RO:
5309                         err = do_md_stop(mddev, 1, 1);
5310                         goto done_unlock;
5311
5312         }
5313
5314         /*
5315          * The remaining ioctls are changing the state of the
5316          * superblock, so we do not allow them on read-only arrays.
5317          * However non-MD ioctls (e.g. get-size) will still come through
5318          * here and hit the 'default' below, so only disallow
5319          * 'md' ioctls, and switch to rw mode if started auto-readonly.
5320          */
5321         if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) {
5322                 if (mddev->ro == 2) {
5323                         mddev->ro = 0;
5324                         sysfs_notify_dirent(mddev->sysfs_state);
5325                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5326                         md_wakeup_thread(mddev->thread);
5327                 } else {
5328                         err = -EROFS;
5329                         goto abort_unlock;
5330                 }
5331         }
5332
5333         switch (cmd)
5334         {
5335                 case ADD_NEW_DISK:
5336                 {
5337                         mdu_disk_info_t info;
5338                         if (copy_from_user(&info, argp, sizeof(info)))
5339                                 err = -EFAULT;
5340                         else
5341                                 err = add_new_disk(mddev, &info);
5342                         goto done_unlock;
5343                 }
5344
5345                 case HOT_REMOVE_DISK:
5346                         err = hot_remove_disk(mddev, new_decode_dev(arg));
5347                         goto done_unlock;
5348
5349                 case HOT_ADD_DISK:
5350                         err = hot_add_disk(mddev, new_decode_dev(arg));
5351                         goto done_unlock;
5352
5353                 case SET_DISK_FAULTY:
5354                         err = set_disk_faulty(mddev, new_decode_dev(arg));
5355                         goto done_unlock;
5356
5357                 case RUN_ARRAY:
5358                         err = do_md_run(mddev);
5359                         goto done_unlock;
5360
5361                 case SET_BITMAP_FILE:
5362                         err = set_bitmap_file(mddev, (int)arg);
5363                         goto done_unlock;
5364
5365                 default:
5366                         err = -EINVAL;
5367                         goto abort_unlock;
5368         }
5369
5370 done_unlock:
5371 abort_unlock:
5372         if (mddev->hold_active == UNTIL_IOCTL &&
5373             err != -EINVAL)
5374                 mddev->hold_active = 0;
5375         mddev_unlock(mddev);
5376
5377         return err;
5378 done:
5379         if (err)
5380                 MD_BUG();
5381 abort:
5382         return err;
5383 }
5384
5385 static int md_open(struct block_device *bdev, fmode_t mode)
5386 {
5387         /*
5388          * Succeed if we can lock the mddev, which confirms that
5389          * it isn't being stopped right now.
5390          */
5391         mddev_t *mddev = mddev_find(bdev->bd_dev);
5392         int err;
5393
5394         if (mddev->gendisk != bdev->bd_disk) {
5395                 /* we are racing with mddev_put which is discarding this
5396                  * bd_disk.
5397                  */
5398                 mddev_put(mddev);
5399                 /* Wait until bdev->bd_disk is definitely gone */
5400                 flush_scheduled_work();
5401                 /* Then retry the open from the top */
5402                 return -ERESTARTSYS;
5403         }
5404         BUG_ON(mddev != bdev->bd_disk->private_data);
5405
5406         if ((err = mutex_lock_interruptible_nested(&mddev->reconfig_mutex, 1)))
5407                 goto out;
5408
5409         err = 0;
5410         atomic_inc(&mddev->openers);
5411         mddev_unlock(mddev);
5412
5413         check_disk_change(bdev);
5414  out:
5415         return err;
5416 }
5417
5418 static int md_release(struct gendisk *disk, fmode_t mode)
5419 {
5420         mddev_t *mddev = disk->private_data;
5421
5422         BUG_ON(!mddev);
5423         atomic_dec(&mddev->openers);
5424         mddev_put(mddev);
5425
5426         return 0;
5427 }
5428
5429 static int md_media_changed(struct gendisk *disk)
5430 {
5431         mddev_t *mddev = disk->private_data;
5432
5433         return mddev->changed;
5434 }
5435
5436 static int md_revalidate(struct gendisk *disk)
5437 {
5438         mddev_t *mddev = disk->private_data;
5439
5440         mddev->changed = 0;
5441         return 0;
5442 }
5443 static struct block_device_operations md_fops =
5444 {
5445         .owner          = THIS_MODULE,
5446         .open           = md_open,
5447         .release        = md_release,
5448         .locked_ioctl   = md_ioctl,
5449         .getgeo         = md_getgeo,
5450         .media_changed  = md_media_changed,
5451         .revalidate_disk= md_revalidate,
5452 };
5453
5454 static int md_thread(void * arg)
5455 {
5456         mdk_thread_t *thread = arg;
5457
5458         /*
5459          * md_thread is a 'system-thread', its priority should be very
5460          * high. We avoid resource deadlocks individually in each
5461          * raid personality. (RAID5 does preallocation) We also use RR and
5462          * the very same RT priority as kswapd, thus we will never get
5463          * into a priority inversion deadlock.
5464          *
5465          * We definitely have to have equal or higher priority than
5466          * bdflush, otherwise bdflush will deadlock if there are too
5467          * many dirty RAID5 blocks.
5468          */
5469
5470         allow_signal(SIGKILL);
5471         while (!kthread_should_stop()) {
5472
5473                 /* We need to wait INTERRUPTIBLE so that
5474                  * we don't add to the load-average.
5475                  * That means we need to be sure no signals are
5476                  * pending
5477                  */
5478                 if (signal_pending(current))
5479                         flush_signals(current);
5480
5481                 wait_event_interruptible_timeout
5482                         (thread->wqueue,
5483                          test_bit(THREAD_WAKEUP, &thread->flags)
5484                          || kthread_should_stop(),
5485                          thread->timeout);
5486
5487                 clear_bit(THREAD_WAKEUP, &thread->flags);
5488
5489                 thread->run(thread->mddev);
5490         }
5491
5492         return 0;
5493 }
5494
5495 void md_wakeup_thread(mdk_thread_t *thread)
5496 {
5497         if (thread) {
5498                 dprintk("md: waking up MD thread %s.\n", thread->tsk->comm);
5499                 set_bit(THREAD_WAKEUP, &thread->flags);
5500                 wake_up(&thread->wqueue);
5501         }
5502 }
5503
5504 mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
5505                                  const char *name)
5506 {
5507         mdk_thread_t *thread;
5508
5509         thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL);
5510         if (!thread)
5511                 return NULL;
5512
5513         init_waitqueue_head(&thread->wqueue);
5514
5515         thread->run = run;
5516         thread->mddev = mddev;
5517         thread->timeout = MAX_SCHEDULE_TIMEOUT;
5518         thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev));
5519         if (IS_ERR(thread->tsk)) {
5520                 kfree(thread);
5521                 return NULL;
5522         }
5523         return thread;
5524 }
5525
5526 void md_unregister_thread(mdk_thread_t *thread)
5527 {
5528         if (!thread)
5529                 return;
5530         dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
5531
5532         kthread_stop(thread->tsk);
5533         kfree(thread);
5534 }
5535
5536 void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
5537 {
5538         if (!mddev) {
5539                 MD_BUG();
5540                 return;
5541         }
5542
5543         if (!rdev || test_bit(Faulty, &rdev->flags))
5544                 return;
5545
5546         if (mddev->external)
5547                 set_bit(Blocked, &rdev->flags);
5548 /*
5549         dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
5550                 mdname(mddev),
5551                 MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
5552                 __builtin_return_address(0),__builtin_return_address(1),
5553                 __builtin_return_address(2),__builtin_return_address(3));
5554 */
5555         if (!mddev->pers)
5556                 return;
5557         if (!mddev->pers->error_handler)
5558                 return;
5559         mddev->pers->error_handler(mddev,rdev);
5560         if (mddev->degraded)
5561                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
5562         set_bit(StateChanged, &rdev->flags);
5563         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5564         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5565         md_wakeup_thread(mddev->thread);
5566         md_new_event_inintr(mddev);
5567 }
5568
5569 /* seq_file implementation for /proc/mdstat */
5570
5571 static void status_unused(struct seq_file *seq)
5572 {
5573         int i = 0;
5574         mdk_rdev_t *rdev;
5575
5576         seq_printf(seq, "unused devices: ");
5577
5578         list_for_each_entry(rdev, &pending_raid_disks, same_set) {
5579                 char b[BDEVNAME_SIZE];
5580                 i++;
5581                 seq_printf(seq, "%s ",
5582                               bdevname(rdev->bdev,b));
5583         }
5584         if (!i)
5585                 seq_printf(seq, "<none>");
5586
5587         seq_printf(seq, "\n");
5588 }
5589
5590
5591 static void status_resync(struct seq_file *seq, mddev_t * mddev)
5592 {
5593         sector_t max_blocks, resync, res;
5594         unsigned long dt, db, rt;
5595         int scale;
5596         unsigned int per_milli;
5597
5598         resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2;
5599
5600         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
5601                 max_blocks = mddev->resync_max_sectors >> 1;
5602         else
5603                 max_blocks = mddev->dev_sectors / 2;
5604
5605         /*
5606          * Should not happen.
5607          */
5608         if (!max_blocks) {
5609                 MD_BUG();
5610                 return;
5611         }
5612         /* Pick 'scale' such that (resync>>scale)*1000 will fit
5613          * in a sector_t, and (max_blocks>>scale) will fit in a
5614          * u32, as those are the requirements for sector_div.
5615          * Thus 'scale' must be at least 10
5616          */
5617         scale = 10;
5618         if (sizeof(sector_t) > sizeof(unsigned long)) {
5619                 while (max_blocks/2 > (1ULL<<(scale+32)))
5620                         scale++;
5621         }
5622         res = (resync>>scale)*1000;
5623         sector_div(res, (u32)((max_blocks>>scale)+1));
5624
5625         per_milli = res;
5626         {
5627                 int i, x = per_milli/50, y = 20-x;
5628                 seq_printf(seq, "[");
5629                 for (i = 0; i < x; i++)
5630                         seq_printf(seq, "=");
5631                 seq_printf(seq, ">");
5632                 for (i = 0; i < y; i++)
5633                         seq_printf(seq, ".");
5634                 seq_printf(seq, "] ");
5635         }
5636         seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
5637                    (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
5638                     "reshape" :
5639                     (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
5640                      "check" :
5641                      (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
5642                       "resync" : "recovery"))),
5643                    per_milli/10, per_milli % 10,
5644                    (unsigned long long) resync,
5645                    (unsigned long long) max_blocks);
5646
5647         /*
5648          * We do not want to overflow, so the order of operands and
5649          * the * 100 / 100 trick are important. We do a +1 to be
5650          * safe against division by zero. We only estimate anyway.
5651          *
5652          * dt: time from mark until now
5653          * db: blocks written from mark until now
5654          * rt: remaining time
5655          */
5656         dt = ((jiffies - mddev->resync_mark) / HZ);
5657         if (!dt) dt++;
5658         db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
5659                 - mddev->resync_mark_cnt;
5660         rt = (dt * ((unsigned long)(max_blocks-resync) / (db/2/100+1)))/100;
5661
5662         seq_printf(seq, " finish=%lu.%lumin", rt / 60, (rt % 60)/6);
5663
5664         seq_printf(seq, " speed=%ldK/sec", db/2/dt);
5665 }
5666
5667 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
5668 {
5669         struct list_head *tmp;
5670         loff_t l = *pos;
5671         mddev_t *mddev;
5672
5673         if (l >= 0x10000)
5674                 return NULL;
5675         if (!l--)
5676                 /* header */
5677                 return (void*)1;
5678
5679         spin_lock(&all_mddevs_lock);
5680         list_for_each(tmp,&all_mddevs)
5681                 if (!l--) {
5682                         mddev = list_entry(tmp, mddev_t, all_mddevs);
5683                         mddev_get(mddev);
5684                         spin_unlock(&all_mddevs_lock);
5685                         return mddev;
5686                 }
5687         spin_unlock(&all_mddevs_lock);
5688         if (!l--)
5689                 return (void*)2;/* tail */
5690         return NULL;
5691 }
5692
5693 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
5694 {
5695         struct list_head *tmp;
5696         mddev_t *next_mddev, *mddev = v;
5697         
5698         ++*pos;
5699         if (v == (void*)2)
5700                 return NULL;
5701
5702         spin_lock(&all_mddevs_lock);
5703         if (v == (void*)1)
5704                 tmp = all_mddevs.next;
5705         else
5706                 tmp = mddev->all_mddevs.next;
5707         if (tmp != &all_mddevs)
5708                 next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs));
5709         else {
5710                 next_mddev = (void*)2;
5711                 *pos = 0x10000;
5712         }               
5713         spin_unlock(&all_mddevs_lock);
5714
5715         if (v != (void*)1)
5716                 mddev_put(mddev);
5717         return next_mddev;
5718
5719 }
5720
5721 static void md_seq_stop(struct seq_file *seq, void *v)
5722 {
5723         mddev_t *mddev = v;
5724
5725         if (mddev && v != (void*)1 && v != (void*)2)
5726                 mddev_put(mddev);
5727 }
5728
5729 struct mdstat_info {
5730         int event;
5731 };
5732
5733 static int md_seq_show(struct seq_file *seq, void *v)
5734 {
5735         mddev_t *mddev = v;
5736         sector_t sectors;
5737         mdk_rdev_t *rdev;
5738         struct mdstat_info *mi = seq->private;
5739         struct bitmap *bitmap;
5740
5741         if (v == (void*)1) {
5742                 struct mdk_personality *pers;
5743                 seq_printf(seq, "Personalities : ");
5744                 spin_lock(&pers_lock);
5745                 list_for_each_entry(pers, &pers_list, list)
5746                         seq_printf(seq, "[%s] ", pers->name);
5747
5748                 spin_unlock(&pers_lock);
5749                 seq_printf(seq, "\n");
5750                 mi->event = atomic_read(&md_event_count);
5751                 return 0;
5752         }
5753         if (v == (void*)2) {
5754                 status_unused(seq);
5755                 return 0;
5756         }
5757
5758         if (mddev_lock(mddev) < 0)
5759                 return -EINTR;
5760
5761         if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
5762                 seq_printf(seq, "%s : %sactive", mdname(mddev),
5763                                                 mddev->pers ? "" : "in");
5764                 if (mddev->pers) {
5765                         if (mddev->ro==1)
5766                                 seq_printf(seq, " (read-only)");
5767                         if (mddev->ro==2)
5768                                 seq_printf(seq, " (auto-read-only)");
5769                         seq_printf(seq, " %s", mddev->pers->name);
5770                 }
5771
5772                 sectors = 0;
5773                 list_for_each_entry(rdev, &mddev->disks, same_set) {
5774                         char b[BDEVNAME_SIZE];
5775                         seq_printf(seq, " %s[%d]",
5776                                 bdevname(rdev->bdev,b), rdev->desc_nr);
5777                         if (test_bit(WriteMostly, &rdev->flags))
5778                                 seq_printf(seq, "(W)");
5779                         if (test_bit(Faulty, &rdev->flags)) {
5780                                 seq_printf(seq, "(F)");
5781                                 continue;
5782                         } else if (rdev->raid_disk < 0)
5783                                 seq_printf(seq, "(S)"); /* spare */
5784                         sectors += rdev->sectors;
5785                 }
5786
5787                 if (!list_empty(&mddev->disks)) {
5788                         if (mddev->pers)
5789                                 seq_printf(seq, "\n      %llu blocks",
5790                                            (unsigned long long)
5791                                            mddev->array_sectors / 2);
5792                         else
5793                                 seq_printf(seq, "\n      %llu blocks",
5794                                            (unsigned long long)sectors / 2);
5795                 }
5796                 if (mddev->persistent) {
5797                         if (mddev->major_version != 0 ||
5798                             mddev->minor_version != 90) {
5799                                 seq_printf(seq," super %d.%d",
5800                                            mddev->major_version,
5801                                            mddev->minor_version);
5802                         }
5803                 } else if (mddev->external)
5804                         seq_printf(seq, " super external:%s",
5805                                    mddev->metadata_type);
5806                 else
5807                         seq_printf(seq, " super non-persistent");
5808
5809                 if (mddev->pers) {
5810                         mddev->pers->status(seq, mddev);
5811                         seq_printf(seq, "\n      ");
5812                         if (mddev->pers->sync_request) {
5813                                 if (mddev->curr_resync > 2) {
5814                                         status_resync(seq, mddev);
5815                                         seq_printf(seq, "\n      ");
5816                                 } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
5817                                         seq_printf(seq, "\tresync=DELAYED\n      ");
5818                                 else if (mddev->recovery_cp < MaxSector)
5819                                         seq_printf(seq, "\tresync=PENDING\n      ");
5820                         }
5821                 } else
5822                         seq_printf(seq, "\n       ");
5823
5824                 if ((bitmap = mddev->bitmap)) {
5825                         unsigned long chunk_kb;
5826                         unsigned long flags;
5827                         spin_lock_irqsave(&bitmap->lock, flags);
5828                         chunk_kb = bitmap->chunksize >> 10;
5829                         seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
5830                                 "%lu%s chunk",
5831                                 bitmap->pages - bitmap->missing_pages,
5832                                 bitmap->pages,
5833                                 (bitmap->pages - bitmap->missing_pages)
5834                                         << (PAGE_SHIFT - 10),
5835                                 chunk_kb ? chunk_kb : bitmap->chunksize,
5836                                 chunk_kb ? "KB" : "B");
5837                         if (bitmap->file) {
5838                                 seq_printf(seq, ", file: ");
5839                                 seq_path(seq, &bitmap->file->f_path, " \t\n");
5840                         }
5841
5842                         seq_printf(seq, "\n");
5843                         spin_unlock_irqrestore(&bitmap->lock, flags);
5844                 }
5845
5846                 seq_printf(seq, "\n");
5847         }
5848         mddev_unlock(mddev);
5849
5850         return 0;
5851 }
5852
5853 static struct seq_operations md_seq_ops = {
5854         .start  = md_seq_start,
5855         .next   = md_seq_next,
5856         .stop   = md_seq_stop,
5857         .show   = md_seq_show,
5858 };
5859
5860 static int md_seq_open(struct inode *inode, struct file *file)
5861 {
5862         int error;
5863         struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL);
5864         if (mi == NULL)
5865                 return -ENOMEM;
5866
5867         error = seq_open(file, &md_seq_ops);
5868         if (error)
5869                 kfree(mi);
5870         else {
5871                 struct seq_file *p = file->private_data;
5872                 p->private = mi;
5873                 mi->event = atomic_read(&md_event_count);
5874         }
5875         return error;
5876 }
5877
5878 static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
5879 {
5880         struct seq_file *m = filp->private_data;
5881         struct mdstat_info *mi = m->private;
5882         int mask;
5883
5884         poll_wait(filp, &md_event_waiters, wait);
5885
5886         /* always allow read */
5887         mask = POLLIN | POLLRDNORM;
5888
5889         if (mi->event != atomic_read(&md_event_count))
5890                 mask |= POLLERR | POLLPRI;
5891         return mask;
5892 }
5893
5894 static const struct file_operations md_seq_fops = {
5895         .owner          = THIS_MODULE,
5896         .open           = md_seq_open,
5897         .read           = seq_read,
5898         .llseek         = seq_lseek,
5899         .release        = seq_release_private,
5900         .poll           = mdstat_poll,
5901 };
5902
5903 int register_md_personality(struct mdk_personality *p)
5904 {
5905         spin_lock(&pers_lock);
5906         list_add_tail(&p->list, &pers_list);
5907         printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
5908         spin_unlock(&pers_lock);
5909         return 0;
5910 }
5911
5912 int unregister_md_personality(struct mdk_personality *p)
5913 {
5914         printk(KERN_INFO "md: %s personality unregistered\n", p->name);
5915         spin_lock(&pers_lock);
5916         list_del_init(&p->list);
5917         spin_unlock(&pers_lock);
5918         return 0;
5919 }
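/*
 * Editorial sketch: how a personality module would plug into the two
 * registration hooks above.  The fields shown are the minimum wired up
 * here; the name "example" and level -42 are purely hypothetical.
 */
#if 0
static struct mdk_personality example_personality = {
        .name           = "example",
        .level          = -42,
        .owner          = THIS_MODULE,
        /* .make_request, .run, .stop, .status, ... as required */
};

static int __init example_init(void)
{
        return register_md_personality(&example_personality);
}

static void __exit example_exit(void)
{
        unregister_md_personality(&example_personality);
}

module_init(example_init);
module_exit(example_exit);
#endif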
5920
5921 static int is_mddev_idle(mddev_t *mddev, int init)
5922 {
5923         mdk_rdev_t * rdev;
5924         int idle;
5925         int curr_events;
5926
5927         idle = 1;
5928         rcu_read_lock();
5929         rdev_for_each_rcu(rdev, mddev) {
5930                 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
5931                 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
5932                               (int)part_stat_read(&disk->part0, sectors[1]) -
5933                               atomic_read(&disk->sync_io);
5934                 /* sync IO will cause sync_io to increase before the disk_stats
5935                  * as sync_io is counted when a request starts, and
5936                  * disk_stats is counted when it completes.
5937                  * So resync activity will cause curr_events to be smaller than
5938                  * when there was no such activity.
5939                  * non-sync IO will cause disk_stats to increase without
5940                  * increasing sync_io so curr_events will (eventually)
5941                  * be larger than it was before.  Once it becomes
5942                  * substantially larger, the test below will cause
5943                  * the array to appear non-idle, and resync will slow
5944                  * down.
5945                  * If there is a lot of outstanding resync activity when
5946                  * we set last_events to curr_events, then all that activity
5947                  * completing might cause the array to appear non-idle
5948                  * and resync will be slowed down even though there might
5949                  * not have been non-resync activity.  This will only
5950                  * happen once though.  'last_events' will soon reflect
5951                  * the state where there is little or no outstanding
5952                  * resync requests, and further resync activity will
5953                  * always make curr_events less than last_events.
5954                  *
5955                  */
5956                 if (init || curr_events - rdev->last_events > 64) {
5957                         rdev->last_events = curr_events;
5958                         idle = 0;
5959                 }
5960         }
5961         rcu_read_unlock();
5962         return idle;
5963 }
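/*
 * Editorial worked example of the heuristic above (hypothetical numbers):
 * if an application completes 100 sectors of I/O on a member disk, the
 * part_stat sectors[] counters grow by 100 while sync_io is untouched,
 * so curr_events rises 100 above rdev->last_events; being past the
 * 64-sector slack, the array reads as non-idle and resync throttles back
 * toward speed_limit_min.  Pure resync I/O moves both counters roughly
 * together and stays inside the slack.
 */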
5964
5965 void md_done_sync(mddev_t *mddev, int blocks, int ok)
5966 {
5967         /* another "blocks" (512byte) blocks have been synced */
5968         atomic_sub(blocks, &mddev->recovery_active);
5969         wake_up(&mddev->recovery_wait);
5970         if (!ok) {
5971                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5972                 md_wakeup_thread(mddev->thread);
5973                 /* stop recovery, signal do_sync ... */
5974         }
5975 }
5976
5977
5978 /* md_write_start(mddev, bi)
5979  * If we need to update some array metadata (e.g. 'active' flag
5980  * in superblock) before writing, schedule a superblock update
5981  * and wait for it to complete.
5982  */
5983 void md_write_start(mddev_t *mddev, struct bio *bi)
5984 {
5985         int did_change = 0;
5986         if (bio_data_dir(bi) != WRITE)
5987                 return;
5988
5989         BUG_ON(mddev->ro == 1);
5990         if (mddev->ro == 2) {
5991                 /* need to switch to read/write */
5992                 mddev->ro = 0;
5993                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5994                 md_wakeup_thread(mddev->thread);
5995                 md_wakeup_thread(mddev->sync_thread);
5996                 did_change = 1;
5997         }
5998         atomic_inc(&mddev->writes_pending);
5999         if (mddev->safemode == 1)
6000                 mddev->safemode = 0;
6001         if (mddev->in_sync) {
6002                 spin_lock_irq(&mddev->write_lock);
6003                 if (mddev->in_sync) {
6004                         mddev->in_sync = 0;
6005                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
6006                         md_wakeup_thread(mddev->thread);
6007                         did_change = 1;
6008                 }
6009                 spin_unlock_irq(&mddev->write_lock);
6010         }
6011         if (did_change)
6012                 sysfs_notify_dirent(mddev->sysfs_state);
6013         wait_event(mddev->sb_wait,
6014                    !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
6015                    !test_bit(MD_CHANGE_PENDING, &mddev->flags));
6016 }
6017
6018 void md_write_end(mddev_t *mddev)
6019 {
6020         if (atomic_dec_and_test(&mddev->writes_pending)) {
6021                 if (mddev->safemode == 2)
6022                         md_wakeup_thread(mddev->thread);
6023                 else if (mddev->safemode_delay)
6024                         mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
6025         }
6026 }
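/*
 * Editorial sketch of the intended pairing (hypothetical personality
 * code, heavily simplified): md_write_start() brackets the submission
 * side, md_write_end() runs once the write completes.
 */
#if 0
static int example_make_request(struct request_queue *q, struct bio *bio)
{
        mddev_t *mddev = q->queuedata;

        /* bumps writes_pending; for a clean array this blocks until the
         * 'active' superblock update has been written out */
        md_write_start(mddev, bio);
        /* ... map the bio and submit it to the member devices ... */
        return 0;
}

static void example_write_done(mddev_t *mddev)
{
        /* completion side: drops writes_pending and, at zero, arms the
         * safemode timer so the array can be marked clean again */
        md_write_end(mddev);
}
#endif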
6027
6028 /* md_allow_write(mddev)
6029  * Calling this ensures that the array is marked 'active' so that writes
6030  * may proceed without blocking.  It is important to call this before
6031  * attempting a GFP_KERNEL allocation while holding the mddev lock.
6032  * Must be called with mddev_lock held.
6033  *
6034  * In the ->external case MD_CHANGE_CLEAN cannot be cleared until mddev->lock
6035  * is dropped, so return -EAGAIN after notifying userspace.
6036  */
6037 int md_allow_write(mddev_t *mddev)
6038 {
6039         if (!mddev->pers)
6040                 return 0;
6041         if (mddev->ro)
6042                 return 0;
6043         if (!mddev->pers->sync_request)
6044                 return 0;
6045
6046         spin_lock_irq(&mddev->write_lock);
6047         if (mddev->in_sync) {
6048                 mddev->in_sync = 0;
6049                 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
6050                 if (mddev->safemode_delay &&
6051                     mddev->safemode == 0)
6052                         mddev->safemode = 1;
6053                 spin_unlock_irq(&mddev->write_lock);
6054                 md_update_sb(mddev, 0);
6055                 sysfs_notify_dirent(mddev->sysfs_state);
6056         } else
6057                 spin_unlock_irq(&mddev->write_lock);
6058
6059         if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
6060                 return -EAGAIN;
6061         else
6062                 return 0;
6063 }
6064 EXPORT_SYMBOL_GPL(md_allow_write);
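/*
 * Editorial sketch of the calling pattern described above (a fragment;
 * the caller already holds mddev_lock, and err/size/new are hypothetical
 * names):
 */
#if 0
        err = md_allow_write(mddev);
        if (err)        /* -EAGAIN: external metadata, userspace must act */
                return err;
        /* array is now 'active', so writes issued by memory reclaim will
         * not block in md_write_start() while we hold the mddev lock */
        new = kmalloc(size, GFP_KERNEL);
#endif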
6065
6066 #define SYNC_MARKS      10
6067 #define SYNC_MARK_STEP  (3*HZ)
6068 void md_do_sync(mddev_t *mddev)
6069 {
6070         mddev_t *mddev2;
6071         unsigned int currspeed = 0,
6072                  window;
6073         sector_t max_sectors,j, io_sectors;
6074         unsigned long mark[SYNC_MARKS];
6075         sector_t mark_cnt[SYNC_MARKS];
6076         int last_mark,m;
6077         struct list_head *tmp;
6078         sector_t last_check;
6079         int skipped = 0;
6080         mdk_rdev_t *rdev;
6081         char *desc;
6082
6083         /* just in case thread restarts... */
6084         if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
6085                 return;
6086         if (mddev->ro) /* never try to sync a read-only array */
6087                 return;
6088
6089         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
6090                 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
6091                         desc = "data-check";
6092                 else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
6093                         desc = "requested-resync";
6094                 else
6095                         desc = "resync";
6096         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
6097                 desc = "reshape";
6098         else
6099                 desc = "recovery";
6100
6101         /* we overload curr_resync somewhat here.
6102          * 0 == not engaged in resync at all
6103          * 2 == checking that there is no conflict with another sync
6104          * 1 == like 2, but have yielded to allow conflicting resync to
6105          *              commence
6106          * other == active in resync - this many blocks
6107          *
6108          * Before starting a resync we must have set curr_resync to
6109          * 2, and then checked that every "conflicting" array has curr_resync
6110          * less than ours.  When we find one that is the same or higher
6111          * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
6112          * to 1 if we choose to yield (based arbitrarily on the address of the mddev structure).
6113          * This will mean we have to start checking from the beginning again.
6114          *
6115          */
6116
6117         do {
6118                 mddev->curr_resync = 2;
6119
6120         try_again:
6121                 if (kthread_should_stop()) {
6122                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6123                         goto skip;
6124                 }
6125                 for_each_mddev(mddev2, tmp) {
6126                         if (mddev2 == mddev)
6127                                 continue;
6128                         if (!mddev->parallel_resync
6129                         &&  mddev2->curr_resync
6130                         &&  match_mddev_units(mddev, mddev2)) {
6131                                 DEFINE_WAIT(wq);
6132                                 if (mddev < mddev2 && mddev->curr_resync == 2) {
6133                                         /* arbitrarily yield */
6134                                         mddev->curr_resync = 1;
6135                                         wake_up(&resync_wait);
6136                                 }
6137                                 if (mddev > mddev2 && mddev->curr_resync == 1)
6138                                         /* no need to wait here, we can wait the next
6139                                          * time 'round when curr_resync == 2
6140                                          */
6141                                         continue;
6142                                 /* We need to wait 'interruptible' so as not to
6143                                  * contribute to the load average, and not to
6144                                  * be caught by 'softlockup'
6145                                  */
6146                                 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
6147                                 if (!kthread_should_stop() &&
6148                                     mddev2->curr_resync >= mddev->curr_resync) {
6149                                         printk(KERN_INFO "md: delaying %s of %s"
6150                                                " until %s has finished (they"
6151                                                " share one or more physical units)\n",
6152                                                desc, mdname(mddev), mdname(mddev2));
6153                                         mddev_put(mddev2);
6154                                         if (signal_pending(current))
6155                                                 flush_signals(current);
6156                                         schedule();
6157                                         finish_wait(&resync_wait, &wq);
6158                                         goto try_again;
6159                                 }
6160                                 finish_wait(&resync_wait, &wq);
6161                         }
6162                 }
6163         } while (mddev->curr_resync < 2);
6164
6165         j = 0;
6166         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
6167                 /* resync follows the size requested by the personality,
6168                  * which defaults to physical size, but can be virtual size
6169                  */
6170                 max_sectors = mddev->resync_max_sectors;
6171                 mddev->resync_mismatches = 0;
6172                 /* we don't use the checkpoint if there's a bitmap */
6173                 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
6174                         j = mddev->resync_min;
6175                 else if (!mddev->bitmap)
6176                         j = mddev->recovery_cp;
6177
6178         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
6179                 max_sectors = mddev->dev_sectors;
6180         else {
6181                 /* recovery follows the physical size of devices */
6182                 max_sectors = mddev->dev_sectors;
6183                 j = MaxSector;
6184                 list_for_each_entry(rdev, &mddev->disks, same_set)
6185                         if (rdev->raid_disk >= 0 &&
6186                             !test_bit(Faulty, &rdev->flags) &&
6187                             !test_bit(In_sync, &rdev->flags) &&
6188                             rdev->recovery_offset < j)
6189                                 j = rdev->recovery_offset;
6190         }
6191
6192         printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
6193         printk(KERN_INFO "md: minimum _guaranteed_  speed:"
6194                 " %d KB/sec/disk.\n", speed_min(mddev));
6195         printk(KERN_INFO "md: using maximum available idle IO bandwidth "
6196                "(but not more than %d KB/sec) for %s.\n",
6197                speed_max(mddev), desc);
6198
6199         is_mddev_idle(mddev, 1); /* this initializes IO event counters */
6200
6201         io_sectors = 0;
6202         for (m = 0; m < SYNC_MARKS; m++) {
6203                 mark[m] = jiffies;
6204                 mark_cnt[m] = io_sectors;
6205         }
6206         last_mark = 0;
6207         mddev->resync_mark = mark[last_mark];
6208         mddev->resync_mark_cnt = mark_cnt[last_mark];
6209
6210         /*
6211          * Tune reconstruction:
6212          */
6213         window = 32*(PAGE_SIZE/512);
6214         printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
6215                 window/2,(unsigned long long) max_sectors/2);
6216
6217         atomic_set(&mddev->recovery_active, 0);
6218         last_check = 0;
6219
6220         if (j>2) {
6221                 printk(KERN_INFO 
6222                        "md: resuming %s of %s from checkpoint.\n",
6223                        desc, mdname(mddev));
6224                 mddev->curr_resync = j;
6225         }
6226
6227         while (j < max_sectors) {
6228                 sector_t sectors;
6229
6230                 skipped = 0;
6231                 if (j >= mddev->resync_max) {
6232                         sysfs_notify(&mddev->kobj, NULL, "sync_completed");
6233                         wait_event(mddev->recovery_wait,
6234                                    mddev->resync_max > j
6235                                    || kthread_should_stop());
6236                 }
6237                 if (kthread_should_stop())
6238                         goto interrupted;
6239
6240                 if (mddev->curr_resync > mddev->curr_resync_completed &&
6241                     (mddev->curr_resync - mddev->curr_resync_completed)
6242                     > (max_sectors >> 4)) {
6243                         /* time to update curr_resync_completed */
6244                         blk_unplug(mddev->queue);
6245                         wait_event(mddev->recovery_wait,
6246                                    atomic_read(&mddev->recovery_active) == 0);
6247                         mddev->curr_resync_completed =
6248                                 mddev->curr_resync;
6249                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
6250                 }
6251                 sectors = mddev->pers->sync_request(mddev, j, &skipped,
6252                                                   currspeed < speed_min(mddev));
6253                 if (sectors == 0) {
6254                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6255                         goto out;
6256                 }
6257
6258                 if (!skipped) { /* actual IO requested */
6259                         io_sectors += sectors;
6260                         atomic_add(sectors, &mddev->recovery_active);
6261                 }
6262
6263                 j += sectors;
6264                 if (j>1) mddev->curr_resync = j;
6265                 mddev->curr_mark_cnt = io_sectors;
6266                 if (last_check == 0)
6267                         /* this is the earliest that the rebuild will be
6268                          * visible in /proc/mdstat
6269                          */
6270                         md_new_event(mddev);
6271
6272                 if (last_check + window > io_sectors || j == max_sectors)
6273                         continue;
6274
6275                 last_check = io_sectors;
6276
6277                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
6278                         break;
6279
6280         repeat:
6281                 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
6282                         /* step marks */
6283                         int next = (last_mark+1) % SYNC_MARKS;
6284
6285                         mddev->resync_mark = mark[next];
6286                         mddev->resync_mark_cnt = mark_cnt[next];
6287                         mark[next] = jiffies;
6288                         mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
6289                         last_mark = next;
6290                 }
6291
6292
6293                 if (kthread_should_stop())
6294                         goto interrupted;
6295
6296
6297                 /*
6298                  * this loop exits only when we are slower than
6299                  * the 'hard' speed limit, or the system was IO-idle for
6300                  * a jiffy.
6301                  * the system might be non-idle CPU-wise, but we only care
6302                  * about not overloading the IO subsystem. (things like an
6303                  * e2fsck being done on the RAID array should execute fast)
6304                  */
6305                 blk_unplug(mddev->queue);
6306                 cond_resched();
6307
6308                 currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
6309                         /((jiffies-mddev->resync_mark)/HZ +1) +1;
6310
6311                 if (currspeed > speed_min(mddev)) {
6312                         if ((currspeed > speed_max(mddev)) ||
6313                                         !is_mddev_idle(mddev, 0)) {
6314                                 msleep(500);
6315                                 goto repeat;
6316                         }
6317                 }
6318         }
6319         printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc);
6320         /*
6321          * this also signals 'finished resyncing' to md_stop
6322          */
6323  out:
6324         blk_unplug(mddev->queue);
6325
6326         wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
6327
6328         /* tell personality that we are finished */
6329         mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
6330
6331         if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
6332             mddev->curr_resync > 2) {
6333                 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
6334                         if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
6335                                 if (mddev->curr_resync >= mddev->recovery_cp) {
6336                                         printk(KERN_INFO
6337                                                "md: checkpointing %s of %s.\n",
6338                                                desc, mdname(mddev));
6339                                         mddev->recovery_cp = mddev->curr_resync;
6340                                 }
6341                         } else
6342                                 mddev->recovery_cp = MaxSector;
6343                 } else {
6344                         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
6345                                 mddev->curr_resync = MaxSector;
6346                         list_for_each_entry(rdev, &mddev->disks, same_set)
6347                                 if (rdev->raid_disk >= 0 &&
6348                                     !test_bit(Faulty, &rdev->flags) &&
6349                                     !test_bit(In_sync, &rdev->flags) &&
6350                                     rdev->recovery_offset < mddev->curr_resync)
6351                                         rdev->recovery_offset = mddev->curr_resync;
6352                 }
6353         }
6354         set_bit(MD_CHANGE_DEVS, &mddev->flags);
6355
6356  skip:
6357         mddev->curr_resync = 0;
6358         mddev->resync_min = 0;
6359         mddev->resync_max = MaxSector;
6360         sysfs_notify(&mddev->kobj, NULL, "sync_completed");
6361         wake_up(&resync_wait);
6362         set_bit(MD_RECOVERY_DONE, &mddev->recovery);
6363         md_wakeup_thread(mddev->thread);
6364         return;
6365
6366  interrupted:
6367         /*
6368          * got a signal, exit.
6369          */
6370         printk(KERN_INFO
6371                "md: md_do_sync() got signal ... exiting\n");
6372         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6373         goto out;
6374
6375 }
6376 EXPORT_SYMBOL_GPL(md_do_sync);
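/*
 * Editorial illustration (userspace; md0 is just an example device): the
 * speed_min()/speed_max() bounds consulted by the throttle loop above can
 * be steered per array through the sysfs files named in the comment at
 * the top of this file.
 */
#if 0
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/block/md0/md/sync_speed_max", "w");

        if (!f)
                return 1;
        fprintf(f, "50000\n");  /* cap resync at 50000 KB/sec per device */
        return fclose(f) ? 1 : 0;
}
#endif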
6377
6378
6379 static int remove_and_add_spares(mddev_t *mddev)
6380 {
6381         mdk_rdev_t *rdev;
6382         int spares = 0;
6383
6384         mddev->curr_resync_completed = 0;
6385
6386         list_for_each_entry(rdev, &mddev->disks, same_set)
6387                 if (rdev->raid_disk >= 0 &&
6388                     !test_bit(Blocked, &rdev->flags) &&
6389                     (test_bit(Faulty, &rdev->flags) ||
6390                      ! test_bit(In_sync, &rdev->flags)) &&
6391                     atomic_read(&rdev->nr_pending)==0) {
6392                         if (mddev->pers->hot_remove_disk(
6393                                     mddev, rdev->raid_disk)==0) {
6394                                 char nm[20];
6395                                 sprintf(nm,"rd%d", rdev->raid_disk);
6396                                 sysfs_remove_link(&mddev->kobj, nm);
6397                                 rdev->raid_disk = -1;
6398                         }
6399                 }
6400
6401         if (mddev->degraded && ! mddev->ro && !mddev->recovery_disabled) {
6402                 list_for_each_entry(rdev, &mddev->disks, same_set) {
6403                         if (rdev->raid_disk >= 0 &&
6404                             !test_bit(In_sync, &rdev->flags) &&
6405                             !test_bit(Blocked, &rdev->flags))
6406                                 spares++;
6407                         if (rdev->raid_disk < 0
6408                             && !test_bit(Faulty, &rdev->flags)) {
6409                                 rdev->recovery_offset = 0;
6410                                 if (mddev->pers->
6411                                     hot_add_disk(mddev, rdev) == 0) {
6412                                         char nm[20];
6413                                         sprintf(nm, "rd%d", rdev->raid_disk);
6414                                         if (sysfs_create_link(&mddev->kobj,
6415                                                               &rdev->kobj, nm))
6416                                                 printk(KERN_WARNING
6417                                                        "md: cannot register "
6418                                                        "%s for %s\n",
6419                                                        nm, mdname(mddev));
6420                                         spares++;
6421                                         md_new_event(mddev);
6422                                 } else
6423                                         break;
6424                         }
6425                 }
6426         }
6427         return spares;
6428 }
6429 /*
6430  * This routine is regularly called by all per-raid-array threads to
6431  * deal with generic issues like resync and super-block update.
6432  * Raid personalities that don't have a thread (linear/raid0) do not
6433  * need this as they never do any recovery or update the superblock.
6434  *
6435  * It does not do any resync itself, but rather "forks" off other threads
6436  * to do that as needed.
6437  * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
6438  * "->recovery" and create a thread at ->sync_thread.
6439  * When the thread finishes it sets MD_RECOVERY_DONE
6440  * and wakes up this thread, which will reap it and finish up.
6441  * This thread also removes any faulty devices (with nr_pending == 0).
6442  *
6443  * The overall approach is:
6444  *  1/ if the superblock needs updating, update it.
6445  *  2/ If a recovery thread is running, don't do anything else.
6446  *  3/ If recovery has finished, clean up, possibly marking spares active.
6447  *  4/ If there are any faulty devices, remove them.
6448  *  5/ If the array is degraded, try to add spare devices.
6449  *  6/ If array has spares or is not in-sync, start a resync thread.
6450  */
6451 void md_check_recovery(mddev_t *mddev)
6452 {
6453         mdk_rdev_t *rdev;
6454
6455
6456         if (mddev->bitmap)
6457                 bitmap_daemon_work(mddev->bitmap);
6458
6459         if (mddev->ro)
6460                 return;
6461
6462         if (signal_pending(current)) {
6463                 if (mddev->pers->sync_request && !mddev->external) {
6464                         printk(KERN_INFO "md: %s in immediate safe mode\n",
6465                                mdname(mddev));
6466                         mddev->safemode = 2;
6467                 }
6468                 flush_signals(current);
6469         }
6470
6471         if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
6472                 return;
6473         if ( ! (
6474                 (mddev->flags && !mddev->external) ||
6475                 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
6476                 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
6477                 (mddev->external == 0 && mddev->safemode == 1) ||
6478                 (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
6479                  && !mddev->in_sync && mddev->recovery_cp == MaxSector)
6480                 ))
6481                 return;
6482
6483         if (mddev_trylock(mddev)) {
6484                 int spares = 0;
6485
6486                 if (mddev->ro) {
6487                         /* Only thing we do on a ro array is remove
6488                          * failed devices.
6489                          */
6490                         remove_and_add_spares(mddev);
6491                         clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6492                         goto unlock;
6493                 }
6494
6495                 if (!mddev->external) {
6496                         int did_change = 0;
6497                         spin_lock_irq(&mddev->write_lock);
6498                         if (mddev->safemode &&
6499                             !atomic_read(&mddev->writes_pending) &&
6500                             !mddev->in_sync &&
6501                             mddev->recovery_cp == MaxSector) {
6502                                 mddev->in_sync = 1;
6503                                 did_change = 1;
6504                                 if (mddev->persistent)
6505                                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
6506                         }
6507                         if (mddev->safemode == 1)
6508                                 mddev->safemode = 0;
6509                         spin_unlock_irq(&mddev->write_lock);
6510                         if (did_change)
6511                                 sysfs_notify_dirent(mddev->sysfs_state);
6512                 }
6513
6514                 if (mddev->flags)
6515                         md_update_sb(mddev, 0);
6516
6517                 list_for_each_entry(rdev, &mddev->disks, same_set)
6518                         if (test_and_clear_bit(StateChanged, &rdev->flags))
6519                                 sysfs_notify_dirent(rdev->sysfs_state);
6520
6521
6522                 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
6523                     !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
6524                         /* resync/recovery still happening */
6525                         clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6526                         goto unlock;
6527                 }
6528                 if (mddev->sync_thread) {
6529                         /* resync has finished, collect result */
6530                         md_unregister_thread(mddev->sync_thread);
6531                         mddev->sync_thread = NULL;
6532                         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
6533                             !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
6534                                 /* success...*/
6535                                 /* activate any spares */
6536                                 if (mddev->pers->spare_active(mddev))
6537                                         sysfs_notify(&mddev->kobj, NULL,
6538                                                      "degraded");
6539                         }
6540                         md_update_sb(mddev, 1);
6541
6542                         /* if array is no longer degraded, then any saved_raid_disk
6543                          * information must be scrapped
6544                          */
6545                         if (!mddev->degraded)
6546                                 list_for_each_entry(rdev, &mddev->disks, same_set)
6547                                         rdev->saved_raid_disk = -1;
6548
6549                         mddev->recovery = 0;
6550                         /* flag recovery needed just to double check */
6551                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6552                         sysfs_notify_dirent(mddev->sysfs_action);
6553                         md_new_event(mddev);
6554                         goto unlock;
6555                 }
6556                 /* Set RUNNING before clearing NEEDED to avoid
6557                  * any transients in the value of "sync_action".
6558                  */
6559                 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
6560                 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6561                 /* Clear some bits that don't mean anything, but
6562                  * might be left set
6563                  */
6564                 clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
6565                 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
6566
6567                 if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
6568                         goto unlock;
6569                 /* no recovery is running.
6570                  * remove any failed drives, then
6571                  * add spares if possible.
6572                  * Spares are also removed and re-added, to allow
6573                  * the personality to fail the re-add.
6574                  */
6575
6576                 if (mddev->reshape_position != MaxSector) {
6577                         if (mddev->pers->check_reshape(mddev) != 0)
6578                                 /* Cannot proceed */
6579                                 goto unlock;
6580                         set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
6581                         clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
6582                 } else if ((spares = remove_and_add_spares(mddev))) {
6583                         clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
6584                         clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
6585                         clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
6586                         set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
6587                 } else if (mddev->recovery_cp < MaxSector) {
6588                         set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
6589                         clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
6590                 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
6591                         /* nothing to be done ... */
6592                         goto unlock;
6593
6594                 if (mddev->pers->sync_request) {
6595                         if (spares && mddev->bitmap && ! mddev->bitmap->file) {
6596                                 /* We are adding a device or devices to an array
6597                                  * which has the bitmap stored on all devices.
6598                                  * So make sure all bitmap pages get written
6599                                  */
6600                                 bitmap_write_all(mddev->bitmap);
6601                         }
6602                         mddev->sync_thread = md_register_thread(md_do_sync,
6603                                                                 mddev,
6604                                                                 "%s_resync");
6605                         if (!mddev->sync_thread) {
6606                                 printk(KERN_ERR "%s: could not start resync"
6607                                         " thread...\n", 
6608                                         mdname(mddev));
6609                                 /* leave the spares where they are, it shouldn't hurt */
6610                                 mddev->recovery = 0;
6611                         } else
6612                                 md_wakeup_thread(mddev->sync_thread);
6613                         sysfs_notify_dirent(mddev->sysfs_action);
6614                         md_new_event(mddev);
6615                 }
6616         unlock:
6617                 if (!mddev->sync_thread) {
6618                         clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
6619                         if (test_and_clear_bit(MD_RECOVERY_RECOVER,
6620                                                &mddev->recovery))
6621                                 if (mddev->sysfs_action)
6622                                         sysfs_notify_dirent(mddev->sysfs_action);
6623                 }
6624                 mddev_unlock(mddev);
6625         }
6626 }
6627
6628 void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
6629 {
6630         sysfs_notify_dirent(rdev->sysfs_state);
6631         wait_event_timeout(rdev->blocked_wait,
6632                            !test_bit(Blocked, &rdev->flags),
6633                            msecs_to_jiffies(5000));
6634         rdev_dec_pending(rdev, mddev);
6635 }
6636 EXPORT_SYMBOL(md_wait_for_blocked_rdev);
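/*
 * Editorial sketch of the expected use (a fragment, simplified from how a
 * personality's write path handles a Blocked device): the caller must
 * already hold a reference, since the function drops nr_pending on return.
 */
#if 0
        if (test_bit(Blocked, &rdev->flags)) {
                atomic_inc(&rdev->nr_pending);
                /* release per-stripe locks here, then wait (up to 5s) for
                 * userspace to acknowledge or clear the blocked state */
                md_wait_for_blocked_rdev(rdev, mddev);
        }
#endif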
6637
6638 static int md_notify_reboot(struct notifier_block *this,
6639                             unsigned long code, void *x)
6640 {
6641         struct list_head *tmp;
6642         mddev_t *mddev;
6643
6644         if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {
6645
6646                 printk(KERN_INFO "md: stopping all md devices.\n");
6647
6648                 for_each_mddev(mddev, tmp)
6649                         if (mddev_trylock(mddev)) {
6650                                 /* Force a switch to readonly even if the array
6651                                  * appears to still be in use.  Hence
6652                                  * the '100'.
6653                                  */
6654                                 do_md_stop(mddev, 1, 100);
6655                                 mddev_unlock(mddev);
6656                         }
6657                 /*
6658                  * certain more exotic SCSI devices are known to be
6659                  * volatile wrt too early system reboots. While the
6660                  * right place to handle this issue is the given
6661                  * driver, we do want to have a safe RAID driver ...
6662                  */
6663                 mdelay(1000*1);
6664         }
6665         return NOTIFY_DONE;
6666 }
6667
6668 static struct notifier_block md_notifier = {
6669         .notifier_call  = md_notify_reboot,
6670         .next           = NULL,
6671         .priority       = INT_MAX, /* before any real devices */
6672 };
6673
6674 static void md_geninit(void)
6675 {
6676         dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
6677
6678         proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
6679 }
6680
6681 static int __init md_init(void)
6682 {
6683         if (register_blkdev(MD_MAJOR, "md"))
6684                 return -1;
6685         if ((mdp_major=register_blkdev(0, "mdp"))<=0) {
6686                 unregister_blkdev(MD_MAJOR, "md");
6687                 return -1;
6688         }
6689         blk_register_region(MKDEV(MD_MAJOR, 0), 1UL<<MINORBITS, THIS_MODULE,
6690                             md_probe, NULL, NULL);
6691         blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
6692                             md_probe, NULL, NULL);
6693
6694         register_reboot_notifier(&md_notifier);
6695         raid_table_header = register_sysctl_table(raid_root_table);
6696
6697         md_geninit();
6698         return 0;
6699 }
6700
6701
6702 #ifndef MODULE
6703
6704 /*
6705  * Searches all registered partitions for autorun RAID arrays
6706  * at boot time.
6707  */
6708
6709 static LIST_HEAD(all_detected_devices);
6710 struct detected_devices_node {
6711         struct list_head list;
6712         dev_t dev;
6713 };
6714
6715 void md_autodetect_dev(dev_t dev)
6716 {
6717         struct detected_devices_node *node_detected_dev;
6718
6719         node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
6720         if (node_detected_dev) {
6721                 node_detected_dev->dev = dev;
6722                 list_add_tail(&node_detected_dev->list, &all_detected_devices);
6723         } else {
6724                 printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
6725                         ", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
6726         }
6727 }
6728
6729
6730 static void autostart_arrays(int part)
6731 {
6732         mdk_rdev_t *rdev;
6733         struct detected_devices_node *node_detected_dev;
6734         dev_t dev;
6735         int i_scanned, i_passed;
6736
6737         i_scanned = 0;
6738         i_passed = 0;
6739
6740         printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
6741
6742         while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
6743                 i_scanned++;
6744                 node_detected_dev = list_entry(all_detected_devices.next,
6745                                         struct detected_devices_node, list);
6746                 list_del(&node_detected_dev->list);
6747                 dev = node_detected_dev->dev;
6748                 kfree(node_detected_dev);
6749                 rdev = md_import_device(dev,0, 90);
6750                 if (IS_ERR(rdev))
6751                         continue;
6752
6753                 if (test_bit(Faulty, &rdev->flags)) {
6754                         MD_BUG();
6755                         continue;
6756                 }
6757                 set_bit(AutoDetected, &rdev->flags);
6758                 list_add(&rdev->same_set, &pending_raid_disks);
6759                 i_passed++;
6760         }
6761
6762         printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
6763                                                 i_scanned, i_passed);
6764
6765         autorun_devices(part);
6766 }
6767
6768 #endif /* !MODULE */
6769
6770 static __exit void md_exit(void)
6771 {
6772         mddev_t *mddev;
6773         struct list_head *tmp;
6774
6775         blk_unregister_region(MKDEV(MD_MAJOR,0), 1U << MINORBITS);
6776         blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);
6777
6778         unregister_blkdev(MD_MAJOR,"md");
6779         unregister_blkdev(mdp_major, "mdp");
6780         unregister_reboot_notifier(&md_notifier);
6781         unregister_sysctl_table(raid_table_header);
6782         remove_proc_entry("mdstat", NULL);
6783         for_each_mddev(mddev, tmp) {
6784                 export_array(mddev);
6785                 mddev->hold_active = 0;
6786         }
6787 }
6788
6789 subsys_initcall(md_init);
6790 module_exit(md_exit)
6791
6792 static int get_ro(char *buffer, struct kernel_param *kp)
6793 {
6794         return sprintf(buffer, "%d", start_readonly);
6795 }
6796 static int set_ro(const char *val, struct kernel_param *kp)
6797 {
6798         char *e;
6799         int num = simple_strtoul(val, &e, 10);
6800         if (*val && (*e == '\0' || *e == '\n')) {
6801                 start_readonly = num;
6802                 return 0;
6803         }
6804         return -EINVAL;
6805 }
6806
6807 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
6808 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
6809
6810 module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
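/*
 * Editorial note (paths assume the usual md_mod module name): these knobs
 * surface under /sys/module/md_mod/parameters/ once the driver is loaded;
 * e.g. start_ro=1 (also settable at boot as md_mod.start_ro=1) makes
 * freshly assembled arrays come up auto-read-only until the first write.
 */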
6811
6812 EXPORT_SYMBOL(register_md_personality);
6813 EXPORT_SYMBOL(unregister_md_personality);
6814 EXPORT_SYMBOL(md_error);
6815 EXPORT_SYMBOL(md_done_sync);
6816 EXPORT_SYMBOL(md_write_start);
6817 EXPORT_SYMBOL(md_write_end);
6818 EXPORT_SYMBOL(md_register_thread);
6819 EXPORT_SYMBOL(md_unregister_thread);
6820 EXPORT_SYMBOL(md_wakeup_thread);
6821 EXPORT_SYMBOL(md_check_recovery);
6822 MODULE_LICENSE("GPL");
6823 MODULE_ALIAS("md");
6824 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);