/*
   md.c : Multiple Devices driver for Linux
          Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/smp_lock.h>
#include <linux/buffer_head.h> /* for invalidate_bdev */
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include <linux/slab.h>
#include "md.h"
#include "bitmap.h"

#define DEBUG 0
#define dprintk(x...) ((void)(DEBUG && printk(x)))


#ifndef MODULE
static void autostart_arrays(int part);
#endif

static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

static void md_print_devices(void);

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);

#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }

/*
 * Default number of read corrections we'll attempt on an rdev
 * before ejecting it from the array. We divide the read error
 * count by 2 for every hour elapsed between read errors.
 */
#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * You can change it via /proc/sys/dev/raid/speed_limit_{min,max}
 * or /sys/block/mdX/md/sync_speed_{min,max}.
 */

static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(mddev_t *mddev)
{
        return mddev->sync_speed_min ?
                mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(mddev_t *mddev)
{
        return mddev->sync_speed_max ?
                mddev->sync_speed_max : sysctl_speed_limit_max;
}

static struct ctl_table_header *raid_table_header;

static ctl_table raid_table[] = {
        {
                .procname       = "speed_limit_min",
                .data           = &sysctl_speed_limit_min,
                .maxlen         = sizeof(int),
                .mode           = S_IRUGO|S_IWUSR,
                .proc_handler   = proc_dointvec,
        },
        {
                .procname       = "speed_limit_max",
                .data           = &sysctl_speed_limit_max,
                .maxlen         = sizeof(int),
                .mode           = S_IRUGO|S_IWUSR,
                .proc_handler   = proc_dointvec,
        },
        { }
};

static ctl_table raid_dir_table[] = {
        {
                .procname       = "raid",
                .maxlen         = 0,
                .mode           = S_IRUGO|S_IXUGO,
                .child          = raid_table,
        },
        { }
};

static ctl_table raid_root_table[] = {
        {
                .procname       = "dev",
                .maxlen         = 0,
                .mode           = 0555,
                .child          = raid_dir_table,
        },
        {  }
};

static const struct block_device_operations md_fops;

static int start_readonly;

/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(mddev_t *mddev)
{
        atomic_inc(&md_event_count);
        wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);

/* Alternate version that can be called from interrupts
 * when calling sysfs_notify isn't needed.
 */
static void md_new_event_inintr(mddev_t *mddev)
{
        atomic_inc(&md_event_count);
        wake_up(&md_event_waiters);
}

/*
 * Enables iteration over all existing md arrays.
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);


/*
 * Iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while still owning a
 * reference to the current mddev must mddev_put it.
 */
#define for_each_mddev(mddev,tmp)                                       \
                                                                        \
        for (({ spin_lock(&all_mddevs_lock);                            \
                tmp = all_mddevs.next;                                  \
                mddev = NULL;});                                        \
             ({ if (tmp != &all_mddevs)                                 \
                        mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
                spin_unlock(&all_mddevs_lock);                          \
                if (mddev) mddev_put(mddev);                            \
                mddev = list_entry(tmp, mddev_t, all_mddevs);           \
                tmp != &all_mddevs;});                                  \
             ({ spin_lock(&all_mddevs_lock);                            \
                tmp = tmp->next;})                                      \
                )
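
/* Illustrative use (a sketch, not code from this driver): the macro takes
 * a reference on each mddev as it walks the list, so breaking out early
 * leaves the caller owning a reference to the current mddev:
 *
 *        mddev_t *mddev;
 *        struct list_head *tmp;
 *
 *        for_each_mddev(mddev, tmp)
 *                if (is_interesting(mddev))  // is_interesting() is hypothetical
 *                        break;              // still owns a ref; mddev_put() when done
 */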


/* Rather than calling directly into the personality make_request function,
 * IO requests come here first so that we can check if the device is
 * being suspended pending a reconfiguration.
 * We hold a refcount over the call to ->make_request.  By the time that
 * call has finished, the bio has been linked into some internal structure
 * and so is visible to ->quiesce(), so we don't need the refcount any more.
 */
static int md_make_request(struct request_queue *q, struct bio *bio)
{
        const int rw = bio_data_dir(bio);
        mddev_t *mddev = q->queuedata;
        int rv;
        int cpu;

        if (mddev == NULL || mddev->pers == NULL) {
                bio_io_error(bio);
                return 0;
        }
        rcu_read_lock();
        if (mddev->suspended || mddev->barrier) {
                DEFINE_WAIT(__wait);
                for (;;) {
                        prepare_to_wait(&mddev->sb_wait, &__wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (!mddev->suspended && !mddev->barrier)
                                break;
                        rcu_read_unlock();
                        schedule();
                        rcu_read_lock();
                }
                finish_wait(&mddev->sb_wait, &__wait);
        }
        atomic_inc(&mddev->active_io);
        rcu_read_unlock();

        rv = mddev->pers->make_request(mddev, bio);

        cpu = part_stat_lock();
        part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
        part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
                      bio_sectors(bio));
        part_stat_unlock();

        if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
                wake_up(&mddev->sb_wait);

        return rv;
}

/* mddev_suspend makes sure no new requests are submitted
 * to the device, and that any requests that have been submitted
 * are completely handled.
 * Once ->stop is called and completes, the module will be completely
 * unused.
 */
static void mddev_suspend(mddev_t *mddev)
{
        BUG_ON(mddev->suspended);
        mddev->suspended = 1;
        synchronize_rcu();
        wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
        mddev->pers->quiesce(mddev, 1);
}

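/* mddev_resume undoes mddev_suspend: clearing ->suspended and waking
 * ->sb_wait releases any threads parked in the wait loop at the top of
 * md_make_request, and ->quiesce(mddev, 0) lets the personality start
 * handling requests again.
 */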
static void mddev_resume(mddev_t *mddev)
{
        mddev->suspended = 0;
        wake_up(&mddev->sb_wait);
        mddev->pers->quiesce(mddev, 0);
}

int mddev_congested(mddev_t *mddev, int bits)
{
        if (mddev->barrier)
                return 1;
        return mddev->suspended;
}
EXPORT_SYMBOL(mddev_congested);

/*
 * Generic barrier handling for md
 */

#define POST_REQUEST_BARRIER ((void*)1)

static void md_end_barrier(struct bio *bio, int err)
{
        mdk_rdev_t *rdev = bio->bi_private;
        mddev_t *mddev = rdev->mddev;
        if (err == -EOPNOTSUPP && mddev->barrier != POST_REQUEST_BARRIER)
                set_bit(BIO_EOPNOTSUPP, &mddev->barrier->bi_flags);

        rdev_dec_pending(rdev, mddev);

        if (atomic_dec_and_test(&mddev->flush_pending)) {
                if (mddev->barrier == POST_REQUEST_BARRIER) {
                        /* This was a post-request barrier */
                        mddev->barrier = NULL;
                        wake_up(&mddev->sb_wait);
                } else
                        /* The pre-request barrier has finished */
                        schedule_work(&mddev->barrier_work);
        }
        bio_put(bio);
}

static void submit_barriers(mddev_t *mddev)
{
        mdk_rdev_t *rdev;

        rcu_read_lock();
        list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
                if (rdev->raid_disk >= 0 &&
                    !test_bit(Faulty, &rdev->flags)) {
                        /* Take two references, one is dropped
                         * when request finishes, one after
                         * we reclaim rcu_read_lock
                         */
                        struct bio *bi;
                        atomic_inc(&rdev->nr_pending);
                        atomic_inc(&rdev->nr_pending);
                        rcu_read_unlock();
                        bi = bio_alloc(GFP_KERNEL, 0);
                        bi->bi_end_io = md_end_barrier;
                        bi->bi_private = rdev;
                        bi->bi_bdev = rdev->bdev;
                        atomic_inc(&mddev->flush_pending);
                        submit_bio(WRITE_BARRIER, bi);
                        rcu_read_lock();
                        rdev_dec_pending(rdev, mddev);
                }
        rcu_read_unlock();
}

static void md_submit_barrier(struct work_struct *ws)
{
        mddev_t *mddev = container_of(ws, mddev_t, barrier_work);
        struct bio *bio = mddev->barrier;

        atomic_set(&mddev->flush_pending, 1);

        if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags))
                bio_endio(bio, -EOPNOTSUPP);
        else if (bio->bi_size == 0)
                /* an empty barrier - all done */
                bio_endio(bio, 0);
        else {
                bio->bi_rw &= ~REQ_HARDBARRIER;
                if (mddev->pers->make_request(mddev, bio))
                        generic_make_request(bio);
                mddev->barrier = POST_REQUEST_BARRIER;
                submit_barriers(mddev);
        }
        if (atomic_dec_and_test(&mddev->flush_pending)) {
                mddev->barrier = NULL;
                wake_up(&mddev->sb_wait);
        }
}

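/* Flow of a barrier request through the code above: md_barrier_request()
 * below claims ->barrier and flushes every rdev (the pre-request barrier);
 * once those complete, md_end_barrier() schedules md_submit_barrier(),
 * which issues the payload and then a second round of flushes tagged
 * POST_REQUEST_BARRIER before ->barrier is finally cleared.
 */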
void md_barrier_request(mddev_t *mddev, struct bio *bio)
{
        spin_lock_irq(&mddev->write_lock);
        wait_event_lock_irq(mddev->sb_wait,
                            !mddev->barrier,
                            mddev->write_lock, /*nothing*/);
        mddev->barrier = bio;
        spin_unlock_irq(&mddev->write_lock);

        atomic_set(&mddev->flush_pending, 1);
        INIT_WORK(&mddev->barrier_work, md_submit_barrier);

        submit_barriers(mddev);

        if (atomic_dec_and_test(&mddev->flush_pending))
                schedule_work(&mddev->barrier_work);
}
EXPORT_SYMBOL(md_barrier_request);

static inline mddev_t *mddev_get(mddev_t *mddev)
{
        atomic_inc(&mddev->active);
        return mddev;
}

static void mddev_delayed_delete(struct work_struct *ws);

static void mddev_put(mddev_t *mddev)
{
        if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
                return;
        if (!mddev->raid_disks && list_empty(&mddev->disks) &&
            mddev->ctime == 0 && !mddev->hold_active) {
                /* Array is not configured at all, and not held active,
                 * so destroy it */
                list_del(&mddev->all_mddevs);
                if (mddev->gendisk) {
                        /* we did a probe so need to clean up.
                         * Call schedule_work inside the spinlock
                         * so that flush_scheduled_work() after
                         * mddev_find will succeed in waiting for the
                         * work to be done.
                         */
                        INIT_WORK(&mddev->del_work, mddev_delayed_delete);
                        schedule_work(&mddev->del_work);
                } else
                        kfree(mddev);
        }
        spin_unlock(&all_mddevs_lock);
}

static void mddev_init(mddev_t *mddev)
{
        mutex_init(&mddev->open_mutex);
        mutex_init(&mddev->reconfig_mutex);
        mutex_init(&mddev->bitmap_info.mutex);
        INIT_LIST_HEAD(&mddev->disks);
        INIT_LIST_HEAD(&mddev->all_mddevs);
        init_timer(&mddev->safemode_timer);
        atomic_set(&mddev->active, 1);
        atomic_set(&mddev->openers, 0);
        atomic_set(&mddev->active_io, 0);
        spin_lock_init(&mddev->write_lock);
        atomic_set(&mddev->flush_pending, 0);
        init_waitqueue_head(&mddev->sb_wait);
        init_waitqueue_head(&mddev->recovery_wait);
        mddev->reshape_position = MaxSector;
        mddev->resync_min = 0;
        mddev->resync_max = MaxSector;
        mddev->level = LEVEL_NONE;
}

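/* Find the mddev for a given unit, creating one if none exists yet.
 * The new mddev is allocated outside all_mddevs_lock and the lookup is
 * then retried (the "goto retry" below), so a racing creator is noticed
 * and the spare allocation is simply freed.
 */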
static mddev_t * mddev_find(dev_t unit)
{
        mddev_t *mddev, *new = NULL;

 retry:
        spin_lock(&all_mddevs_lock);

        if (unit) {
                list_for_each_entry(mddev, &all_mddevs, all_mddevs)
                        if (mddev->unit == unit) {
                                mddev_get(mddev);
                                spin_unlock(&all_mddevs_lock);
                                kfree(new);
                                return mddev;
                        }

                if (new) {
                        list_add(&new->all_mddevs, &all_mddevs);
                        spin_unlock(&all_mddevs_lock);
                        new->hold_active = UNTIL_IOCTL;
                        return new;
                }
        } else if (new) {
                /* find an unused unit number */
                static int next_minor = 512;
                int start = next_minor;
                int is_free = 0;
                int dev = 0;
                while (!is_free) {
                        dev = MKDEV(MD_MAJOR, next_minor);
                        next_minor++;
                        if (next_minor > MINORMASK)
                                next_minor = 0;
                        if (next_minor == start) {
                                /* Oh dear, all in use. */
                                spin_unlock(&all_mddevs_lock);
                                kfree(new);
                                return NULL;
                        }

                        is_free = 1;
                        list_for_each_entry(mddev, &all_mddevs, all_mddevs)
                                if (mddev->unit == dev) {
                                        is_free = 0;
                                        break;
                                }
                }
                new->unit = dev;
                new->md_minor = MINOR(dev);
                new->hold_active = UNTIL_STOP;
                list_add(&new->all_mddevs, &all_mddevs);
                spin_unlock(&all_mddevs_lock);
                return new;
        }
        spin_unlock(&all_mddevs_lock);

        new = kzalloc(sizeof(*new), GFP_KERNEL);
        if (!new)
                return NULL;

        new->unit = unit;
        if (MAJOR(unit) == MD_MAJOR)
                new->md_minor = MINOR(unit);
        else
                new->md_minor = MINOR(unit) >> MdpMinorShift;

        mddev_init(new);

        goto retry;
}

static inline int mddev_lock(mddev_t * mddev)
{
        return mutex_lock_interruptible(&mddev->reconfig_mutex);
}

static inline int mddev_is_locked(mddev_t *mddev)
{
        return mutex_is_locked(&mddev->reconfig_mutex);
}

static inline int mddev_trylock(mddev_t * mddev)
{
        return mutex_trylock(&mddev->reconfig_mutex);
}

static struct attribute_group md_redundancy_group;

static void mddev_unlock(mddev_t * mddev)
{
        if (mddev->to_remove) {
                /* These cannot be removed under reconfig_mutex as
                 * an access to the files will try to take reconfig_mutex
                 * while holding the file unremovable, which leads to
                 * a deadlock.
                 * So hold open_mutex instead - we are allowed to take
                 * it while holding reconfig_mutex, and md_run can
                 * use it to wait for the remove to complete.
                 */
                struct attribute_group *to_remove = mddev->to_remove;
                mddev->to_remove = NULL;
                mutex_lock(&mddev->open_mutex);
                mutex_unlock(&mddev->reconfig_mutex);

                if (to_remove != &md_redundancy_group)
                        sysfs_remove_group(&mddev->kobj, to_remove);
                if (mddev->pers == NULL ||
                    mddev->pers->sync_request == NULL) {
                        sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
                        if (mddev->sysfs_action)
                                sysfs_put(mddev->sysfs_action);
                        mddev->sysfs_action = NULL;
                }
                mutex_unlock(&mddev->open_mutex);
        } else
                mutex_unlock(&mddev->reconfig_mutex);

        md_wakeup_thread(mddev->thread);
}

static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
{
        mdk_rdev_t *rdev;

        list_for_each_entry(rdev, &mddev->disks, same_set)
                if (rdev->desc_nr == nr)
                        return rdev;

        return NULL;
}

static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
{
        mdk_rdev_t *rdev;

        list_for_each_entry(rdev, &mddev->disks, same_set)
                if (rdev->bdev->bd_dev == dev)
                        return rdev;

        return NULL;
}

static struct mdk_personality *find_pers(int level, char *clevel)
{
        struct mdk_personality *pers;
        list_for_each_entry(pers, &pers_list, list) {
                if (level != LEVEL_NONE && pers->level == level)
                        return pers;
                if (strcmp(pers->name, clevel)==0)
                        return pers;
        }
        return NULL;
}

/* return the offset of the super block in 512byte sectors */
static inline sector_t calc_dev_sboffset(struct block_device *bdev)
{
        sector_t num_sectors = bdev->bd_inode->i_size / 512;
        return MD_NEW_SIZE_SECTORS(num_sectors);
}
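
/* Worked example (assuming the usual 0.90 layout from md_p.h, where
 * MD_RESERVED_SECTORS is 128, i.e. 64K): for a 1000000-sector device,
 * MD_NEW_SIZE_SECTORS rounds down to a 128-sector boundary (999936) and
 * steps back one reserved block, so the superblock starts at sector 999808.
 */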

static int alloc_disk_sb(mdk_rdev_t * rdev)
{
        if (rdev->sb_page)
                MD_BUG();

        rdev->sb_page = alloc_page(GFP_KERNEL);
        if (!rdev->sb_page) {
                printk(KERN_ALERT "md: out of memory.\n");
                return -ENOMEM;
        }

        return 0;
}

static void free_disk_sb(mdk_rdev_t * rdev)
{
        if (rdev->sb_page) {
                put_page(rdev->sb_page);
                rdev->sb_loaded = 0;
                rdev->sb_page = NULL;
                rdev->sb_start = 0;
                rdev->sectors = 0;
        }
}


static void super_written(struct bio *bio, int error)
{
        mdk_rdev_t *rdev = bio->bi_private;
        mddev_t *mddev = rdev->mddev;

        if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
                printk("md: super_written gets error=%d, uptodate=%d\n",
                       error, test_bit(BIO_UPTODATE, &bio->bi_flags));
                WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
                md_error(mddev, rdev);
        }

        if (atomic_dec_and_test(&mddev->pending_writes))
                wake_up(&mddev->sb_wait);
        bio_put(bio);
}

static void super_written_barrier(struct bio *bio, int error)
{
        struct bio *bio2 = bio->bi_private;
        mdk_rdev_t *rdev = bio2->bi_private;
        mddev_t *mddev = rdev->mddev;

        if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
            error == -EOPNOTSUPP) {
                unsigned long flags;
                /* barriers don't appear to be supported :-( */
                set_bit(BarriersNotsupp, &rdev->flags);
                mddev->barriers_work = 0;
                spin_lock_irqsave(&mddev->write_lock, flags);
                bio2->bi_next = mddev->biolist;
                mddev->biolist = bio2;
                spin_unlock_irqrestore(&mddev->write_lock, flags);
                wake_up(&mddev->sb_wait);
                bio_put(bio);
        } else {
                bio_put(bio2);
                bio->bi_private = rdev;
                super_written(bio, error);
        }
}

void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
                   sector_t sector, int size, struct page *page)
{
        /* write first size bytes of page to sector of rdev
         * Increment mddev->pending_writes before returning
         * and decrement it on completion, waking up sb_wait
         * if zero is reached.
         * If an error occurred, call md_error
         *
         * As we might need to resubmit the request if REQ_HARDBARRIER
         * causes ENOTSUPP, we allocate a spare bio...
         */
        struct bio *bio = bio_alloc(GFP_NOIO, 1);
        int rw = REQ_WRITE | REQ_SYNC | REQ_UNPLUG;

        bio->bi_bdev = rdev->bdev;
        bio->bi_sector = sector;
        bio_add_page(bio, page, size, 0);
        bio->bi_private = rdev;
        bio->bi_end_io = super_written;
        bio->bi_rw = rw;

        atomic_inc(&mddev->pending_writes);
        if (!test_bit(BarriersNotsupp, &rdev->flags)) {
                struct bio *rbio;
                rw |= REQ_HARDBARRIER;
                rbio = bio_clone(bio, GFP_NOIO);
                rbio->bi_private = bio;
                rbio->bi_end_io = super_written_barrier;
                submit_bio(rw, rbio);
        } else
                submit_bio(rw, bio);
}
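
/* Typical pairing (as in the superblock update path): md_super_write() is
 * called once per rdev to queue the writes, then md_super_wait() below
 * waits for pending_writes to drain, resubmitting any bios whose barrier
 * was rejected with -EOPNOTSUPP.
 */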

void md_super_wait(mddev_t *mddev)
{
        /* wait for all superblock writes that were scheduled to complete.
         * if any had to be retried (due to BARRIER problems), retry them
         */
        DEFINE_WAIT(wq);
        for(;;) {
                prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
                if (atomic_read(&mddev->pending_writes)==0)
                        break;
                while (mddev->biolist) {
                        struct bio *bio;
                        spin_lock_irq(&mddev->write_lock);
                        bio = mddev->biolist;
                        mddev->biolist = bio->bi_next;
                        bio->bi_next = NULL;
                        spin_unlock_irq(&mddev->write_lock);
                        submit_bio(bio->bi_rw, bio);
                }
                schedule();
        }
        finish_wait(&mddev->sb_wait, &wq);
}

static void bi_complete(struct bio *bio, int error)
{
        complete((struct completion*)bio->bi_private);
}

int sync_page_io(struct block_device *bdev, sector_t sector, int size,
                   struct page *page, int rw)
{
        struct bio *bio = bio_alloc(GFP_NOIO, 1);
        struct completion event;
        int ret;

        rw |= REQ_SYNC | REQ_UNPLUG;

        bio->bi_bdev = bdev;
        bio->bi_sector = sector;
        bio_add_page(bio, page, size, 0);
        init_completion(&event);
        bio->bi_private = &event;
        bio->bi_end_io = bi_complete;
        submit_bio(rw, bio);
        wait_for_completion(&event);

        ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
        bio_put(bio);
        return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);

static int read_disk_sb(mdk_rdev_t * rdev, int size)
{
        char b[BDEVNAME_SIZE];
        if (!rdev->sb_page) {
                MD_BUG();
                return -EINVAL;
        }
        if (rdev->sb_loaded)
                return 0;


        if (!sync_page_io(rdev->bdev, rdev->sb_start, size, rdev->sb_page, READ))
                goto fail;
        rdev->sb_loaded = 1;
        return 0;

fail:
        printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
                bdevname(rdev->bdev,b));
        return -EINVAL;
}

static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
        return  sb1->set_uuid0 == sb2->set_uuid0 &&
                sb1->set_uuid1 == sb2->set_uuid1 &&
                sb1->set_uuid2 == sb2->set_uuid2 &&
                sb1->set_uuid3 == sb2->set_uuid3;
}

static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
        int ret;
        mdp_super_t *tmp1, *tmp2;

        tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
        tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

        if (!tmp1 || !tmp2) {
                ret = 0;
                printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
                goto abort;
        }

        *tmp1 = *sb1;
        *tmp2 = *sb2;

        /*
         * nr_disks is not constant
         */
        tmp1->nr_disks = 0;
        tmp2->nr_disks = 0;

        ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
abort:
        kfree(tmp1);
        kfree(tmp2);
        return ret;
}


static u32 md_csum_fold(u32 csum)
{
        csum = (csum & 0xffff) + (csum >> 16);
        return (csum & 0xffff) + (csum >> 16);
}
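
/* Example: md_csum_fold(0x12345678) computes 0x1234 + 0x5678 = 0x68ac in
 * the first round; no carry remains, so 0x68ac is returned.  The second
 * round handles sums such as 0xffff0001, where the first add carries:
 * 0xffff + 0x0001 = 0x10000, then 0x0000 + 0x1 = 0x0001.
 */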

static unsigned int calc_sb_csum(mdp_super_t * sb)
{
        u64 newcsum = 0;
        u32 *sb32 = (u32*)sb;
        int i;
        unsigned int disk_csum, csum;

        disk_csum = sb->sb_csum;
        sb->sb_csum = 0;

        for (i = 0; i < MD_SB_BYTES/4 ; i++)
                newcsum += sb32[i];
        csum = (newcsum & 0xffffffff) + (newcsum>>32);


#ifdef CONFIG_ALPHA
        /* This used to use csum_partial, which was wrong for several
         * reasons including that different results are returned on
         * different architectures.  It isn't critical that we get exactly
         * the same return value as before (we always csum_fold before
         * testing, and that removes any differences).  However as we
         * know that csum_partial always returned a 16bit value on
         * alphas, do a fold to maximise conformity to previous behaviour.
         */
        sb->sb_csum = md_csum_fold(disk_csum);
#else
        sb->sb_csum = disk_csum;
#endif
        return csum;
}


/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *     -EINVAL superblock incompatible or invalid
 *     -othererror e.g. -EIO
 *
 *   int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
 *      Verify that dev is acceptable into mddev.
 *       The first time, mddev->raid_disks will be 0, and data from
 *       dev should be merged in.  Subsequent calls check that dev
 *       is new enough.  Return 0 or -EINVAL
 *
 *   void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
 *     Update the superblock for rdev with data in mddev
 *     This does not write to disc.
 *
 */

struct super_type  {
        char                *name;
        struct module       *owner;
        int                 (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev,
                                          int minor_version);
        int                 (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
        void                (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
        unsigned long long  (*rdev_size_change)(mdk_rdev_t *rdev,
                                                sector_t num_sectors);
};
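
/* These handlers are gathered into a super_types[] table further down in
 * this file, indexed by mddev->major_version (0 for the 0.90 format, 1
 * for the version-1 format).
 */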

/*
 * Check that the given mddev has no bitmap.
 *
 * This function is called from the run method of all personalities that do not
 * support bitmaps. It prints an error message and returns non-zero if mddev
 * has a bitmap. Otherwise, it returns 0.
 *
 */
int md_check_no_bitmap(mddev_t *mddev)
{
        if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
                return 0;
        printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
                mdname(mddev), mddev->pers->name);
        return 1;
}
EXPORT_SYMBOL(md_check_no_bitmap);

/*
 * load_super for 0.90.0
 */
static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
        char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
        mdp_super_t *sb;
        int ret;

        /*
         * Calculate the position of the superblock (512byte sectors),
         * it's at the end of the disk.
         *
         * It also happens to be a multiple of 4Kb.
         */
        rdev->sb_start = calc_dev_sboffset(rdev->bdev);

        ret = read_disk_sb(rdev, MD_SB_BYTES);
        if (ret) return ret;

        ret = -EINVAL;

        bdevname(rdev->bdev, b);
        sb = (mdp_super_t*)page_address(rdev->sb_page);

        if (sb->md_magic != MD_SB_MAGIC) {
                printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
                       b);
                goto abort;
        }

        if (sb->major_version != 0 ||
            sb->minor_version < 90 ||
            sb->minor_version > 91) {
                printk(KERN_WARNING "Bad version number %d.%d on %s\n",
                        sb->major_version, sb->minor_version,
                        b);
                goto abort;
        }

        if (sb->raid_disks <= 0)
                goto abort;

        if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
                printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
                        b);
                goto abort;
        }

        rdev->preferred_minor = sb->md_minor;
        rdev->data_offset = 0;
        rdev->sb_size = MD_SB_BYTES;

        if (sb->level == LEVEL_MULTIPATH)
                rdev->desc_nr = -1;
        else
                rdev->desc_nr = sb->this_disk.number;

        if (!refdev) {
                ret = 1;
        } else {
                __u64 ev1, ev2;
                mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
                if (!uuid_equal(refsb, sb)) {
                        printk(KERN_WARNING "md: %s has different UUID to %s\n",
                                b, bdevname(refdev->bdev,b2));
                        goto abort;
                }
                if (!sb_equal(refsb, sb)) {
                        printk(KERN_WARNING "md: %s has same UUID"
                               " but different superblock to %s\n",
                               b, bdevname(refdev->bdev, b2));
                        goto abort;
                }
                ev1 = md_event(sb);
                ev2 = md_event(refsb);
                if (ev1 > ev2)
                        ret = 1;
                else
                        ret = 0;
        }
        rdev->sectors = rdev->sb_start;

        if (rdev->sectors < sb->size * 2 && sb->level > 1)
                /* "this cannot possibly happen" ... */
                ret = -EINVAL;

 abort:
        return ret;
}

/*
 * validate_super for 0.90.0
 */
static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
        mdp_disk_t *desc;
        mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
        __u64 ev1 = md_event(sb);

        rdev->raid_disk = -1;
        clear_bit(Faulty, &rdev->flags);
        clear_bit(In_sync, &rdev->flags);
        clear_bit(WriteMostly, &rdev->flags);
        clear_bit(BarriersNotsupp, &rdev->flags);

        if (mddev->raid_disks == 0) {
                mddev->major_version = 0;
                mddev->minor_version = sb->minor_version;
                mddev->patch_version = sb->patch_version;
                mddev->external = 0;
                mddev->chunk_sectors = sb->chunk_size >> 9;
                mddev->ctime = sb->ctime;
                mddev->utime = sb->utime;
                mddev->level = sb->level;
                mddev->clevel[0] = 0;
                mddev->layout = sb->layout;
                mddev->raid_disks = sb->raid_disks;
                mddev->dev_sectors = sb->size * 2;
                mddev->events = ev1;
                mddev->bitmap_info.offset = 0;
                mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;

                if (mddev->minor_version >= 91) {
                        mddev->reshape_position = sb->reshape_position;
                        mddev->delta_disks = sb->delta_disks;
                        mddev->new_level = sb->new_level;
                        mddev->new_layout = sb->new_layout;
                        mddev->new_chunk_sectors = sb->new_chunk >> 9;
                } else {
                        mddev->reshape_position = MaxSector;
                        mddev->delta_disks = 0;
                        mddev->new_level = mddev->level;
                        mddev->new_layout = mddev->layout;
                        mddev->new_chunk_sectors = mddev->chunk_sectors;
                }

                if (sb->state & (1<<MD_SB_CLEAN))
                        mddev->recovery_cp = MaxSector;
                else {
                        if (sb->events_hi == sb->cp_events_hi &&
                                sb->events_lo == sb->cp_events_lo) {
                                mddev->recovery_cp = sb->recovery_cp;
                        } else
                                mddev->recovery_cp = 0;
                }

                memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
                memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
                memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
                memcpy(mddev->uuid+12,&sb->set_uuid3, 4);

                mddev->max_disks = MD_SB_DISKS;

                if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
                    mddev->bitmap_info.file == NULL)
                        mddev->bitmap_info.offset =
                                mddev->bitmap_info.default_offset;

        } else if (mddev->pers == NULL) {
                /* Insist on a good event counter while assembling, except
                 * for spares (which don't need an event count) */
                ++ev1;
                if (sb->disks[rdev->desc_nr].state & (
                            (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
                        if (ev1 < mddev->events)
                                return -EINVAL;
        } else if (mddev->bitmap) {
                /* if adding to array with a bitmap, then we can accept an
                 * older device ... but not too old.
                 */
                if (ev1 < mddev->bitmap->events_cleared)
                        return 0;
        } else {
                if (ev1 < mddev->events)
                        /* just a hot-add of a new device, leave raid_disk at -1 */
                        return 0;
        }

        if (mddev->level != LEVEL_MULTIPATH) {
                desc = sb->disks + rdev->desc_nr;

                if (desc->state & (1<<MD_DISK_FAULTY))
                        set_bit(Faulty, &rdev->flags);
                else if (desc->state & (1<<MD_DISK_SYNC) /* &&
                            desc->raid_disk < mddev->raid_disks */) {
                        set_bit(In_sync, &rdev->flags);
                        rdev->raid_disk = desc->raid_disk;
                } else if (desc->state & (1<<MD_DISK_ACTIVE)) {
                        /* active but not in sync implies recovery up to
                         * reshape position.  We don't know exactly where
                         * that is, so set to zero for now */
                        if (mddev->minor_version >= 91) {
                                rdev->recovery_offset = 0;
                                rdev->raid_disk = desc->raid_disk;
                        }
                }
                if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
                        set_bit(WriteMostly, &rdev->flags);
        } else /* MULTIPATH are always insync */
                set_bit(In_sync, &rdev->flags);
        return 0;
}

/*
 * sync_super for 0.90.0
 */
static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
        mdp_super_t *sb;
        mdk_rdev_t *rdev2;
        int next_spare = mddev->raid_disks;


        /* make rdev->sb match mddev data..
         *
         * 1/ zero out disks
         * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
         * 3/ any empty disks < next_spare become removed
         *
         * disks[0] gets initialised to REMOVED because
         * we cannot be sure from other fields if it has
         * been initialised or not.
         */
        int i;
        int active=0, working=0,failed=0,spare=0,nr_disks=0;

        rdev->sb_size = MD_SB_BYTES;

        sb = (mdp_super_t*)page_address(rdev->sb_page);

        memset(sb, 0, sizeof(*sb));

        sb->md_magic = MD_SB_MAGIC;
        sb->major_version = mddev->major_version;
        sb->patch_version = mddev->patch_version;
        sb->gvalid_words  = 0; /* ignored */
        memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
        memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
        memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
        memcpy(&sb->set_uuid3, mddev->uuid+12,4);

        sb->ctime = mddev->ctime;
        sb->level = mddev->level;
        sb->size = mddev->dev_sectors / 2;
        sb->raid_disks = mddev->raid_disks;
        sb->md_minor = mddev->md_minor;
        sb->not_persistent = 0;
        sb->utime = mddev->utime;
        sb->state = 0;
        sb->events_hi = (mddev->events>>32);
        sb->events_lo = (u32)mddev->events;

        if (mddev->reshape_position == MaxSector)
                sb->minor_version = 90;
        else {
                sb->minor_version = 91;
                sb->reshape_position = mddev->reshape_position;
                sb->new_level = mddev->new_level;
                sb->delta_disks = mddev->delta_disks;
                sb->new_layout = mddev->new_layout;
                sb->new_chunk = mddev->new_chunk_sectors << 9;
        }
        mddev->minor_version = sb->minor_version;
        if (mddev->in_sync)
        {
                sb->recovery_cp = mddev->recovery_cp;
                sb->cp_events_hi = (mddev->events>>32);
                sb->cp_events_lo = (u32)mddev->events;
                if (mddev->recovery_cp == MaxSector)
                        sb->state = (1<< MD_SB_CLEAN);
        } else
                sb->recovery_cp = 0;

        sb->layout = mddev->layout;
        sb->chunk_size = mddev->chunk_sectors << 9;

        if (mddev->bitmap && mddev->bitmap_info.file == NULL)
                sb->state |= (1<<MD_SB_BITMAP_PRESENT);

        sb->disks[0].state = (1<<MD_DISK_REMOVED);
        list_for_each_entry(rdev2, &mddev->disks, same_set) {
                mdp_disk_t *d;
                int desc_nr;
                int is_active = test_bit(In_sync, &rdev2->flags);

                if (rdev2->raid_disk >= 0 &&
                    sb->minor_version >= 91)
                        /* we have nowhere to store the recovery_offset,
                         * but if it is not below the reshape_position,
                         * we can piggy-back on that.
                         */
                        is_active = 1;
                if (rdev2->raid_disk < 0 ||
                    test_bit(Faulty, &rdev2->flags))
                        is_active = 0;
                if (is_active)
                        desc_nr = rdev2->raid_disk;
                else
                        desc_nr = next_spare++;
                rdev2->desc_nr = desc_nr;
                d = &sb->disks[rdev2->desc_nr];
                nr_disks++;
                d->number = rdev2->desc_nr;
                d->major = MAJOR(rdev2->bdev->bd_dev);
                d->minor = MINOR(rdev2->bdev->bd_dev);
                if (is_active)
                        d->raid_disk = rdev2->raid_disk;
                else
                        d->raid_disk = rdev2->desc_nr; /* compatibility */
                if (test_bit(Faulty, &rdev2->flags))
                        d->state = (1<<MD_DISK_FAULTY);
                else if (is_active) {
                        d->state = (1<<MD_DISK_ACTIVE);
                        if (test_bit(In_sync, &rdev2->flags))
                                d->state |= (1<<MD_DISK_SYNC);
                        active++;
                        working++;
                } else {
                        d->state = 0;
                        spare++;
                        working++;
                }
                if (test_bit(WriteMostly, &rdev2->flags))
                        d->state |= (1<<MD_DISK_WRITEMOSTLY);
        }
        /* now set the "removed" and "faulty" bits on any missing devices */
        for (i=0 ; i < mddev->raid_disks ; i++) {
                mdp_disk_t *d = &sb->disks[i];
                if (d->state == 0 && d->number == 0) {
                        d->number = i;
                        d->raid_disk = i;
                        d->state = (1<<MD_DISK_REMOVED);
                        d->state |= (1<<MD_DISK_FAULTY);
                        failed++;
                }
        }
        sb->nr_disks = nr_disks;
        sb->active_disks = active;
        sb->working_disks = working;
        sb->failed_disks = failed;
        sb->spare_disks = spare;

        sb->this_disk = sb->disks[rdev->desc_nr];
        sb->sb_csum = calc_sb_csum(sb);
}

/*
 * rdev_size_change for 0.90.0
 */
static unsigned long long
super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
{
        if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
                return 0; /* component must fit device */
        if (rdev->mddev->bitmap_info.offset)
                return 0; /* can't move bitmap */
        rdev->sb_start = calc_dev_sboffset(rdev->bdev);
        if (!num_sectors || num_sectors > rdev->sb_start)
                num_sectors = rdev->sb_start;
        md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
                       rdev->sb_page);
        md_super_wait(rdev->mddev);
        return num_sectors / 2; /* kB for sysfs */
}


/*
 * version 1 superblock
 */

static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
{
        __le32 disk_csum;
        u32 csum;
        unsigned long long newcsum;
        int size = 256 + le32_to_cpu(sb->max_dev)*2;
        __le32 *isuper = (__le32*)sb;
        int i;

        disk_csum = sb->sb_csum;
        sb->sb_csum = 0;
        newcsum = 0;
        for (i=0; size>=4; size -= 4 )
                newcsum += le32_to_cpu(*isuper++);

        if (size == 2)
                newcsum += le16_to_cpu(*(__le16*) isuper);

        csum = (newcsum & 0xffffffff) + (newcsum >> 32);
        sb->sb_csum = disk_csum;
        return cpu_to_le32(csum);
}
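
/* The checksummed region is 256 bytes of fixed header plus two bytes per
 * entry in dev_roles[], hence size = 256 + max_dev*2 above; a trailing
 * 16-bit word left over from an odd number of roles is folded in by the
 * le16 case.
 */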

static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
        struct mdp_superblock_1 *sb;
        int ret;
        sector_t sb_start;
        char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
        int bmask;

        /*
         * Calculate the position of the superblock in 512byte sectors.
         * It is always aligned to a 4K boundary and
         * depending on minor_version, it can be:
         * 0: At least 8K, but less than 12K, from end of device
         * 1: At start of device
         * 2: 4K from start of device.
         */
        switch(minor_version) {
        case 0:
                sb_start = rdev->bdev->bd_inode->i_size >> 9;
                sb_start -= 8*2;
                sb_start &= ~(sector_t)(4*2-1);
                break;
        case 1:
                sb_start = 0;
                break;
        case 2:
                sb_start = 8;
                break;
        default:
                return -EINVAL;
        }
        rdev->sb_start = sb_start;

        /* superblock is rarely larger than 1K, but it can be larger,
         * and it is safe to read 4k, so we do that
         */
        ret = read_disk_sb(rdev, 4096);
        if (ret) return ret;


        sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

        if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
            sb->major_version != cpu_to_le32(1) ||
            le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
            le64_to_cpu(sb->super_offset) != rdev->sb_start ||
            (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
                return -EINVAL;

        if (calc_sb_1_csum(sb) != sb->sb_csum) {
                printk("md: invalid superblock checksum on %s\n",
                        bdevname(rdev->bdev,b));
                return -EINVAL;
        }
        if (le64_to_cpu(sb->data_size) < 10) {
                printk("md: data_size too small on %s\n",
                       bdevname(rdev->bdev,b));
                return -EINVAL;
        }

        rdev->preferred_minor = 0xffff;
        rdev->data_offset = le64_to_cpu(sb->data_offset);
        atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));

        rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
        bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
        if (rdev->sb_size & bmask)
                rdev->sb_size = (rdev->sb_size | bmask) + 1;

        if (minor_version
            && rdev->data_offset < sb_start + (rdev->sb_size/512))
                return -EINVAL;

        if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
                rdev->desc_nr = -1;
        else
                rdev->desc_nr = le32_to_cpu(sb->dev_number);

        if (!refdev) {
                ret = 1;
        } else {
                __u64 ev1, ev2;
                struct mdp_superblock_1 *refsb =
                        (struct mdp_superblock_1*)page_address(refdev->sb_page);

                if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
                    sb->level != refsb->level ||
                    sb->layout != refsb->layout ||
                    sb->chunksize != refsb->chunksize) {
                        printk(KERN_WARNING "md: %s has strangely different"
                                " superblock to %s\n",
                                bdevname(rdev->bdev,b),
                                bdevname(refdev->bdev,b2));
                        return -EINVAL;
                }
                ev1 = le64_to_cpu(sb->events);
                ev2 = le64_to_cpu(refsb->events);

                if (ev1 > ev2)
                        ret = 1;
                else
                        ret = 0;
        }
        if (minor_version)
                rdev->sectors = (rdev->bdev->bd_inode->i_size >> 9) -
                        le64_to_cpu(sb->data_offset);
        else
                rdev->sectors = rdev->sb_start;
        if (rdev->sectors < le64_to_cpu(sb->data_size))
                return -EINVAL;
        rdev->sectors = le64_to_cpu(sb->data_size);
        if (le64_to_cpu(sb->size) > rdev->sectors)
                return -EINVAL;
        return ret;
}

static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
        struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
        __u64 ev1 = le64_to_cpu(sb->events);

        rdev->raid_disk = -1;
        clear_bit(Faulty, &rdev->flags);
        clear_bit(In_sync, &rdev->flags);
        clear_bit(WriteMostly, &rdev->flags);
        clear_bit(BarriersNotsupp, &rdev->flags);

        if (mddev->raid_disks == 0) {
                mddev->major_version = 1;
                mddev->patch_version = 0;
                mddev->external = 0;
                mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
                mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
                mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
                mddev->level = le32_to_cpu(sb->level);
                mddev->clevel[0] = 0;
                mddev->layout = le32_to_cpu(sb->layout);
                mddev->raid_disks = le32_to_cpu(sb->raid_disks);
                mddev->dev_sectors = le64_to_cpu(sb->size);
                mddev->events = ev1;
                mddev->bitmap_info.offset = 0;
                mddev->bitmap_info.default_offset = 1024 >> 9;

                mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
                memcpy(mddev->uuid, sb->set_uuid, 16);

                mddev->max_disks =  (4096-256)/2;

                if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
                    mddev->bitmap_info.file == NULL )
                        mddev->bitmap_info.offset =
                                (__s32)le32_to_cpu(sb->bitmap_offset);

                if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
                        mddev->reshape_position = le64_to_cpu(sb->reshape_position);
                        mddev->delta_disks = le32_to_cpu(sb->delta_disks);
                        mddev->new_level = le32_to_cpu(sb->new_level);
                        mddev->new_layout = le32_to_cpu(sb->new_layout);
                        mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
                } else {
                        mddev->reshape_position = MaxSector;
                        mddev->delta_disks = 0;
                        mddev->new_level = mddev->level;
                        mddev->new_layout = mddev->layout;
                        mddev->new_chunk_sectors = mddev->chunk_sectors;
                }

        } else if (mddev->pers == NULL) {
1477                 /* Insist on a good event counter while assembling, except
1478                  * for spares (which don't need an event count) */
1479                 ++ev1;
1480                 if (rdev->desc_nr >= 0 &&
1481                     rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
1482                     le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < 0xfffe)
1483                         if (ev1 < mddev->events)
1484                                 return -EINVAL;
1485         } else if (mddev->bitmap) {
1486                 /* If adding to array with a bitmap, then we can accept an
1487                  * older device, but not too old.
1488                  */
1489                 if (ev1 < mddev->bitmap->events_cleared)
1490                         return 0;
1491         } else {
1492                 if (ev1 < mddev->events)
1493                         /* just a hot-add of a new device, leave raid_disk at -1 */
1494                         return 0;
1495         }
1496         if (mddev->level != LEVEL_MULTIPATH) {
1497                 int role;
1498                 if (rdev->desc_nr < 0 ||
1499                     rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
1500                         role = 0xffff;
1501                         rdev->desc_nr = -1;
1502                 } else
1503                         role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
1504                 switch(role) {
1505                 case 0xffff: /* spare */
1506                         break;
1507                 case 0xfffe: /* faulty */
1508                         set_bit(Faulty, &rdev->flags);
1509                         break;
1510                 default:
1511                         if ((le32_to_cpu(sb->feature_map) &
1512                              MD_FEATURE_RECOVERY_OFFSET))
1513                                 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
1514                         else
1515                                 set_bit(In_sync, &rdev->flags);
1516                         rdev->raid_disk = role;
1517                         break;
1518                 }
1519                 if (sb->devflags & WriteMostly1)
1520                         set_bit(WriteMostly, &rdev->flags);
1521         } else /* MULTIPATH are always insync */
1522                 set_bit(In_sync, &rdev->flags);
1523
1524         return 0;
1525 }
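/*
 * Editor's illustrative sketch, not part of the driver: the role words
 * decoded in super_1_validate() reserve two values - 0xffff marks a
 * spare and 0xfffe a faulty device; anything else is the slot the device
 * fills in the array.  sb1_role_is_active() is a hypothetical helper
 * naming that convention.
 */
static inline int sb1_role_is_active(__u16 role)
{
        return role != 0xffff && role != 0xfffe;
}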
1526
1527 static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1528 {
1529         struct mdp_superblock_1 *sb;
1530         mdk_rdev_t *rdev2;
1531         int max_dev, i;
1532         /* make rdev->sb match mddev and rdev data. */
1533
1534         sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
1535
1536         sb->feature_map = 0;
1537         sb->pad0 = 0;
1538         sb->recovery_offset = cpu_to_le64(0);
1539         memset(sb->pad1, 0, sizeof(sb->pad1));
1540         memset(sb->pad2, 0, sizeof(sb->pad2));
1541         memset(sb->pad3, 0, sizeof(sb->pad3));
1542
1543         sb->utime = cpu_to_le64((__u64)mddev->utime);
1544         sb->events = cpu_to_le64(mddev->events);
1545         if (mddev->in_sync)
1546                 sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
1547         else
1548                 sb->resync_offset = cpu_to_le64(0);
1549
1550         sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
1551
1552         sb->raid_disks = cpu_to_le32(mddev->raid_disks);
1553         sb->size = cpu_to_le64(mddev->dev_sectors);
1554         sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
1555         sb->level = cpu_to_le32(mddev->level);
1556         sb->layout = cpu_to_le32(mddev->layout);
1557
1558         if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
1559                 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
1560                 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
1561         }
1562
1563         if (rdev->raid_disk >= 0 &&
1564             !test_bit(In_sync, &rdev->flags)) {
1565                 sb->feature_map |=
1566                         cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
1567                 sb->recovery_offset =
1568                         cpu_to_le64(rdev->recovery_offset);
1569         }
1570
1571         if (mddev->reshape_position != MaxSector) {
1572                 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
1573                 sb->reshape_position = cpu_to_le64(mddev->reshape_position);
1574                 sb->new_layout = cpu_to_le32(mddev->new_layout);
1575                 sb->delta_disks = cpu_to_le32(mddev->delta_disks);
1576                 sb->new_level = cpu_to_le32(mddev->new_level);
1577                 sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
1578         }
1579
1580         max_dev = 0;
1581         list_for_each_entry(rdev2, &mddev->disks, same_set)
1582                 if (rdev2->desc_nr+1 > max_dev)
1583                         max_dev = rdev2->desc_nr+1;
1584
1585         if (max_dev > le32_to_cpu(sb->max_dev)) {
1586                 int bmask;
1587                 sb->max_dev = cpu_to_le32(max_dev);
1588                 rdev->sb_size = max_dev * 2 + 256;
1589                 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1590                 if (rdev->sb_size & bmask)
1591                         rdev->sb_size = (rdev->sb_size | bmask) + 1;
1592         }
1593         for (i=0; i<max_dev;i++)
1594                 sb->dev_roles[i] = cpu_to_le16(0xfffe);
1595         
1596         list_for_each_entry(rdev2, &mddev->disks, same_set) {
1597                 i = rdev2->desc_nr;
1598                 if (test_bit(Faulty, &rdev2->flags))
1599                         sb->dev_roles[i] = cpu_to_le16(0xfffe);
1600                 else if (test_bit(In_sync, &rdev2->flags))
1601                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1602                 else if (rdev2->raid_disk >= 0)
1603                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1604                 else
1605                         sb->dev_roles[i] = cpu_to_le16(0xffff);
1606         }
1607
1608         sb->sb_csum = calc_sb_1_csum(sb);
1609 }
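/*
 * Editor's illustrative sketch, not part of the driver: the bmask
 * arithmetic in super_1_sync() is the usual round-up-to-a-power-of-two
 * idiom.  round_up_blocksize() is a hypothetical stand-alone version;
 * 'blocksize' must be a power of two.  E.g. a 1030-byte superblock on a
 * 512-byte-block queue rounds up to 1536.
 */
static inline int round_up_blocksize(int size, int blocksize)
{
        int bmask = blocksize - 1;      /* e.g. 512 -> 0x1ff */

        if (size & bmask)               /* not already aligned */
                size = (size | bmask) + 1;
        return size;
}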
1610
1611 static unsigned long long
1612 super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
1613 {
1614         struct mdp_superblock_1 *sb;
1615         sector_t max_sectors;
1616         if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
1617                 return 0; /* component must fit device */
1618         if (rdev->sb_start < rdev->data_offset) {
1619                 /* minor versions 1 and 2; superblock before data */
1620                 max_sectors = rdev->bdev->bd_inode->i_size >> 9;
1621                 max_sectors -= rdev->data_offset;
1622                 if (!num_sectors || num_sectors > max_sectors)
1623                         num_sectors = max_sectors;
1624         } else if (rdev->mddev->bitmap_info.offset) {
1625                 /* minor version 0 with bitmap we can't move */
1626                 return 0;
1627         } else {
1628                 /* minor version 0; superblock after data */
1629                 sector_t sb_start;
1630                 sb_start = (rdev->bdev->bd_inode->i_size >> 9) - 8*2;
1631                 sb_start &= ~(sector_t)(4*2 - 1);
1632                 max_sectors = rdev->sectors + sb_start - rdev->sb_start;
1633                 if (!num_sectors || num_sectors > max_sectors)
1634                         num_sectors = max_sectors;
1635                 rdev->sb_start = sb_start;
1636         }
1637         sb = (struct mdp_superblock_1 *) page_address(rdev->sb_page);
1638         sb->data_size = cpu_to_le64(num_sectors);
1639         sb->super_offset = cpu_to_le64(rdev->sb_start);
1640         sb->sb_csum = calc_sb_1_csum(sb);
1641         md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
1642                        rdev->sb_page);
1643         md_super_wait(rdev->mddev);
1644         return num_sectors / 2; /* kB for sysfs */
1645 }
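/*
 * Editor's illustrative sketch, not part of the driver: for minor
 * version 0 the superblock sits near the end of the device - back off
 * 16 sectors (8K) from the end, then align down to an 8-sector (4K)
 * boundary, which is what the "- 8*2" and "& ~(4*2 - 1)" above compute.
 * sb0_start_of() is a hypothetical helper.
 */
static inline sector_t sb0_start_of(sector_t dev_sectors)
{
        sector_t sb_start = dev_sectors - 8*2;  /* leave 8K at the end */

        sb_start &= ~(sector_t)(4*2 - 1);       /* align down to 4K */
        return sb_start;
}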
1646
1647 static struct super_type super_types[] = {
1648         [0] = {
1649                 .name   = "0.90.0",
1650                 .owner  = THIS_MODULE,
1651                 .load_super         = super_90_load,
1652                 .validate_super     = super_90_validate,
1653                 .sync_super         = super_90_sync,
1654                 .rdev_size_change   = super_90_rdev_size_change,
1655         },
1656         [1] = {
1657                 .name   = "md-1",
1658                 .owner  = THIS_MODULE,
1659                 .load_super         = super_1_load,
1660                 .validate_super     = super_1_validate,
1661                 .sync_super         = super_1_sync,
1662                 .rdev_size_change   = super_1_rdev_size_change,
1663         },
1664 };
1665
1666 static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
1667 {
1668         mdk_rdev_t *rdev, *rdev2;
1669
1670         rcu_read_lock();
1671         rdev_for_each_rcu(rdev, mddev1)
1672                 rdev_for_each_rcu(rdev2, mddev2)
1673                         if (rdev->bdev->bd_contains ==
1674                             rdev2->bdev->bd_contains) {
1675                                 rcu_read_unlock();
1676                                 return 1;
1677                         }
1678         rcu_read_unlock();
1679         return 0;
1680 }
1681
1682 static LIST_HEAD(pending_raid_disks);
1683
1684 /*
1685  * Try to register data integrity profile for an mddev
1686  *
1687  * This is called when an array is started and after a disk has been kicked
1688  * from the array. It only succeeds if all working and active component devices
1689  * are integrity capable with matching profiles.
1690  */
1691 int md_integrity_register(mddev_t *mddev)
1692 {
1693         mdk_rdev_t *rdev, *reference = NULL;
1694
1695         if (list_empty(&mddev->disks))
1696                 return 0; /* nothing to do */
1697         if (blk_get_integrity(mddev->gendisk))
1698                 return 0; /* already registered */
1699         list_for_each_entry(rdev, &mddev->disks, same_set) {
1700                 /* skip spares and non-functional disks */
1701                 if (test_bit(Faulty, &rdev->flags))
1702                         continue;
1703                 if (rdev->raid_disk < 0)
1704                         continue;
1705                 /*
1706                  * If at least one rdev is not integrity capable, we cannot
1707                  * enable data integrity for the md device.
1708                  */
1709                 if (!bdev_get_integrity(rdev->bdev))
1710                         return -EINVAL;
1711                 if (!reference) {
1712                         /* Use the first rdev as the reference */
1713                         reference = rdev;
1714                         continue;
1715                 }
1716                 /* does this rdev's profile match the reference profile? */
1717                 if (blk_integrity_compare(reference->bdev->bd_disk,
1718                                 rdev->bdev->bd_disk) < 0)
1719                         return -EINVAL;
1720         }
1721         /*
1722          * All component devices are integrity capable and have matching
1723          * profiles, register the common profile for the md device.
1724          */
1725         if (blk_integrity_register(mddev->gendisk,
1726                         bdev_get_integrity(reference->bdev)) != 0) {
1727                 printk(KERN_ERR "md: failed to register integrity for %s\n",
1728                         mdname(mddev));
1729                 return -EINVAL;
1730         }
1731         printk(KERN_NOTICE "md: data integrity on %s enabled\n",
1732                 mdname(mddev));
1733         return 0;
1734 }
1735 EXPORT_SYMBOL(md_integrity_register);
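/*
 * Editor's illustrative sketch, not part of the driver:
 * md_integrity_register() uses the "first working member is the
 * reference" pattern - every later member must compare equal to it or
 * registration fails.  A hypothetical reduction over an int array shows
 * the same shape:
 */
static inline int all_match_first(const int *profile, int n)
{
        int i;

        for (i = 1; i < n; i++)         /* profile[0] is the reference */
                if (profile[i] != profile[0])
                        return -1;      /* mismatch: cannot register */
        return 0;
}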
1736
1737 /* Disable data integrity if non-capable/non-matching disk is being added */
1738 void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
1739 {
1740         struct blk_integrity *bi_rdev = bdev_get_integrity(rdev->bdev);
1741         struct blk_integrity *bi_mddev = blk_get_integrity(mddev->gendisk);
1742
1743         if (!bi_mddev) /* nothing to do */
1744                 return;
1745         if (rdev->raid_disk < 0) /* skip spares */
1746                 return;
1747         if (bi_rdev && blk_integrity_compare(mddev->gendisk,
1748                                              rdev->bdev->bd_disk) >= 0)
1749                 return;
1750         printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev));
1751         blk_integrity_unregister(mddev->gendisk);
1752 }
1753 EXPORT_SYMBOL(md_integrity_add_rdev);
1754
1755 static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
1756 {
1757         char b[BDEVNAME_SIZE];
1758         struct kobject *ko;
1759         char *s;
1760         int err;
1761
1762         if (rdev->mddev) {
1763                 MD_BUG();
1764                 return -EINVAL;
1765         }
1766
1767         /* prevent duplicates */
1768         if (find_rdev(mddev, rdev->bdev->bd_dev))
1769                 return -EEXIST;
1770
1771         /* make sure rdev->sectors exceeds mddev->dev_sectors */
1772         if (rdev->sectors && (mddev->dev_sectors == 0 ||
1773                         rdev->sectors < mddev->dev_sectors)) {
1774                 if (mddev->pers) {
1775                         /* Cannot change size, so fail
1776                          * If mddev->level <= 0, then we don't care
1777                          * about aligning sizes (e.g. linear)
1778                          */
1779                         if (mddev->level > 0)
1780                                 return -ENOSPC;
1781                 } else
1782                         mddev->dev_sectors = rdev->sectors;
1783         }
1784
1785         /* Verify rdev->desc_nr is unique.
1786          * If it is -1, assign a free number, else
1787          * check that the number is not in use
1788          */
1789         if (rdev->desc_nr < 0) {
1790                 int choice = 0;
1791                 if (mddev->pers) choice = mddev->raid_disks;
1792                 while (find_rdev_nr(mddev, choice))
1793                         choice++;
1794                 rdev->desc_nr = choice;
1795         } else {
1796                 if (find_rdev_nr(mddev, rdev->desc_nr))
1797                         return -EBUSY;
1798         }
1799         if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
1800                 printk(KERN_WARNING "md: %s: array is limited to %d devices\n",
1801                        mdname(mddev), mddev->max_disks);
1802                 return -EBUSY;
1803         }
1804         bdevname(rdev->bdev,b);
1805         while ( (s=strchr(b, '/')) != NULL)
1806                 *s = '!';
1807
1808         rdev->mddev = mddev;
1809         printk(KERN_INFO "md: bind<%s>\n", b);
1810
1811         if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
1812                 goto fail;
1813
1814         ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
1815         if ((err = sysfs_create_link(&rdev->kobj, ko, "block"))) {
1816                 kobject_del(&rdev->kobj);
1817                 goto fail;
1818         }
1819         rdev->sysfs_state = sysfs_get_dirent(rdev->kobj.sd, NULL, "state");
1820
1821         list_add_rcu(&rdev->same_set, &mddev->disks);
1822         bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk);
1823
1824         /* May as well allow recovery to be retried once */
1825         mddev->recovery_disabled = 0;
1826
1827         return 0;
1828
1829  fail:
1830         printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
1831                b, mdname(mddev));
1832         return err;
1833 }
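/*
 * Editor's illustrative sketch, not part of the driver: the desc_nr
 * assignment in bind_rdev_to_array() is a linear scan for the lowest
 * unused number, started past the active slots when the array is
 * running so hot-added devices land after the configured members.
 * first_free_nr() is a hypothetical stand-in with an is_used()
 * predicate supplied by the caller.
 */
static inline int first_free_nr(int start, int (*is_used)(int nr))
{
        int choice = start;

        while (is_used(choice))
                choice++;
        return choice;
}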
1834
1835 static void md_delayed_delete(struct work_struct *ws)
1836 {
1837         mdk_rdev_t *rdev = container_of(ws, mdk_rdev_t, del_work);
1838         kobject_del(&rdev->kobj);
1839         kobject_put(&rdev->kobj);
1840 }
1841
1842 static void unbind_rdev_from_array(mdk_rdev_t * rdev)
1843 {
1844         char b[BDEVNAME_SIZE];
1845         if (!rdev->mddev) {
1846                 MD_BUG();
1847                 return;
1848         }
1849         bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk);
1850         list_del_rcu(&rdev->same_set);
1851         printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
1852         rdev->mddev = NULL;
1853         sysfs_remove_link(&rdev->kobj, "block");
1854         sysfs_put(rdev->sysfs_state);
1855         rdev->sysfs_state = NULL;
1856         /* We need to delay this, otherwise we can deadlock when
1857          * writing 'remove' to "dev/state".  We also need
1858          * to delay it due to rcu usage.
1859          */
1860         synchronize_rcu();
1861         INIT_WORK(&rdev->del_work, md_delayed_delete);
1862         kobject_get(&rdev->kobj);
1863         schedule_work(&rdev->del_work);
1864 }
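/*
 * Editor's note, not part of the driver: unbind_rdev_from_array() cannot
 * kobject_del() synchronously - a process writing "remove" to the
 * dev/state file would deadlock trying to delete the sysfs entry it is
 * executing in - so the teardown is pinned and deferred:
 *
 *      synchronize_rcu();              wait out RCU list readers
 *      INIT_WORK(&work, delete_fn);    package the teardown
 *      kobject_get(&kobj);             hold a ref until the work runs
 *      schedule_work(&work);           delete_fn deletes and drops the ref
 */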
1865
1866 /*
1867  * prevent the device from being mounted, repartitioned or
1868  * otherwise reused by a RAID array (or any other kernel
1869  * subsystem), by bd_claiming the device.
1870  */
1871 static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared)
1872 {
1873         int err = 0;
1874         struct block_device *bdev;
1875         char b[BDEVNAME_SIZE];
1876
1877         bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
1878         if (IS_ERR(bdev)) {
1879                 printk(KERN_ERR "md: could not open %s.\n",
1880                         __bdevname(dev, b));
1881                 return PTR_ERR(bdev);
1882         }
1883         err = bd_claim(bdev, shared ? (mdk_rdev_t *)lock_rdev : rdev);
1884         if (err) {
1885                 printk(KERN_ERR "md: could not bd_claim %s.\n",
1886                         bdevname(bdev, b));
1887                 blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
1888                 return err;
1889         }
1890         if (!shared)
1891                 set_bit(AllReserved, &rdev->flags);
1892         rdev->bdev = bdev;
1893         return err;
1894 }
1895
1896 static void unlock_rdev(mdk_rdev_t *rdev)
1897 {
1898         struct block_device *bdev = rdev->bdev;
1899         rdev->bdev = NULL;
1900         if (!bdev)
1901                 MD_BUG();
1902         bd_release(bdev);
1903         blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
1904 }
1905
1906 void md_autodetect_dev(dev_t dev);
1907
1908 static void export_rdev(mdk_rdev_t * rdev)
1909 {
1910         char b[BDEVNAME_SIZE];
1911         printk(KERN_INFO "md: export_rdev(%s)\n",
1912                 bdevname(rdev->bdev,b));
1913         if (rdev->mddev)
1914                 MD_BUG();
1915         free_disk_sb(rdev);
1916 #ifndef MODULE
1917         if (test_bit(AutoDetected, &rdev->flags))
1918                 md_autodetect_dev(rdev->bdev->bd_dev);
1919 #endif
1920         unlock_rdev(rdev);
1921         kobject_put(&rdev->kobj);
1922 }
1923
1924 static void kick_rdev_from_array(mdk_rdev_t * rdev)
1925 {
1926         unbind_rdev_from_array(rdev);
1927         export_rdev(rdev);
1928 }
1929
1930 static void export_array(mddev_t *mddev)
1931 {
1932         mdk_rdev_t *rdev, *tmp;
1933
1934         rdev_for_each(rdev, tmp, mddev) {
1935                 if (!rdev->mddev) {
1936                         MD_BUG();
1937                         continue;
1938                 }
1939                 kick_rdev_from_array(rdev);
1940         }
1941         if (!list_empty(&mddev->disks))
1942                 MD_BUG();
1943         mddev->raid_disks = 0;
1944         mddev->major_version = 0;
1945 }
1946
1947 static void print_desc(mdp_disk_t *desc)
1948 {
1949         printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
1950                 desc->major,desc->minor,desc->raid_disk,desc->state);
1951 }
1952
1953 static void print_sb_90(mdp_super_t *sb)
1954 {
1955         int i;
1956
1957         printk(KERN_INFO 
1958                 "md:  SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
1959                 sb->major_version, sb->minor_version, sb->patch_version,
1960                 sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
1961                 sb->ctime);
1962         printk(KERN_INFO "md:     L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
1963                 sb->level, sb->size, sb->nr_disks, sb->raid_disks,
1964                 sb->md_minor, sb->layout, sb->chunk_size);
1965         printk(KERN_INFO "md:     UT:%08x ST:%d AD:%d WD:%d"
1966                 " FD:%d SD:%d CSUM:%08x E:%08lx\n",
1967                 sb->utime, sb->state, sb->active_disks, sb->working_disks,
1968                 sb->failed_disks, sb->spare_disks,
1969                 sb->sb_csum, (unsigned long)sb->events_lo);
1970
1971         printk(KERN_INFO);
1972         for (i = 0; i < MD_SB_DISKS; i++) {
1973                 mdp_disk_t *desc;
1974
1975                 desc = sb->disks + i;
1976                 if (desc->number || desc->major || desc->minor ||
1977                     desc->raid_disk || (desc->state && (desc->state != 4))) {
1978                         printk("     D %2d: ", i);
1979                         print_desc(desc);
1980                 }
1981         }
1982         printk(KERN_INFO "md:     THIS: ");
1983         print_desc(&sb->this_disk);
1984 }
1985
1986 static void print_sb_1(struct mdp_superblock_1 *sb)
1987 {
1988         __u8 *uuid;
1989
1990         uuid = sb->set_uuid;
1991         printk(KERN_INFO
1992                "md:  SB: (V:%u) (F:0x%08x) Array-ID:<%pU>\n"
1993                "md:    Name: \"%s\" CT:%llu\n",
1994                 le32_to_cpu(sb->major_version),
1995                 le32_to_cpu(sb->feature_map),
1996                 uuid,
1997                 sb->set_name,
1998                 (unsigned long long)le64_to_cpu(sb->ctime)
1999                        & MD_SUPERBLOCK_1_TIME_SEC_MASK);
2000
2001         uuid = sb->device_uuid;
2002         printk(KERN_INFO
2003                "md:       L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu"
2004                         " RO:%llu\n"
2005                "md:     Dev:%08x UUID: %pU\n"
2006                "md:       (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n"
2007                "md:         (MaxDev:%u) \n",
2008                 le32_to_cpu(sb->level),
2009                 (unsigned long long)le64_to_cpu(sb->size),
2010                 le32_to_cpu(sb->raid_disks),
2011                 le32_to_cpu(sb->layout),
2012                 le32_to_cpu(sb->chunksize),
2013                 (unsigned long long)le64_to_cpu(sb->data_offset),
2014                 (unsigned long long)le64_to_cpu(sb->data_size),
2015                 (unsigned long long)le64_to_cpu(sb->super_offset),
2016                 (unsigned long long)le64_to_cpu(sb->recovery_offset),
2017                 le32_to_cpu(sb->dev_number),
2018                 uuid,
2019                 sb->devflags,
2020                 (unsigned long long)le64_to_cpu(sb->utime) & MD_SUPERBLOCK_1_TIME_SEC_MASK,
2021                 (unsigned long long)le64_to_cpu(sb->events),
2022                 (unsigned long long)le64_to_cpu(sb->resync_offset),
2023                 le32_to_cpu(sb->sb_csum),
2024                 le32_to_cpu(sb->max_dev)
2025                 );
2026 }
2027
2028 static void print_rdev(mdk_rdev_t *rdev, int major_version)
2029 {
2030         char b[BDEVNAME_SIZE];
2031         printk(KERN_INFO "md: rdev %s, Sect:%08llu F:%d S:%d DN:%u\n",
2032                 bdevname(rdev->bdev, b), (unsigned long long)rdev->sectors,
2033                 test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
2034                 rdev->desc_nr);
2035         if (rdev->sb_loaded) {
2036                 printk(KERN_INFO "md: rdev superblock (MJ:%d):\n", major_version);
2037                 switch (major_version) {
2038                 case 0:
2039                         print_sb_90((mdp_super_t*)page_address(rdev->sb_page));
2040                         break;
2041                 case 1:
2042                         print_sb_1((struct mdp_superblock_1 *)page_address(rdev->sb_page));
2043                         break;
2044                 }
2045         } else
2046                 printk(KERN_INFO "md: no rdev superblock!\n");
2047 }
2048
2049 static void md_print_devices(void)
2050 {
2051         struct list_head *tmp;
2052         mdk_rdev_t *rdev;
2053         mddev_t *mddev;
2054         char b[BDEVNAME_SIZE];
2055
2056         printk("\n");
2057         printk("md:     **********************************\n");
2058         printk("md:     * <COMPLETE RAID STATE PRINTOUT> *\n");
2059         printk("md:     **********************************\n");
2060         for_each_mddev(mddev, tmp) {
2061
2062                 if (mddev->bitmap)
2063                         bitmap_print_sb(mddev->bitmap);
2064                 else
2065                         printk("%s: ", mdname(mddev));
2066                 list_for_each_entry(rdev, &mddev->disks, same_set)
2067                         printk("<%s>", bdevname(rdev->bdev,b));
2068                 printk("\n");
2069
2070                 list_for_each_entry(rdev, &mddev->disks, same_set)
2071                         print_rdev(rdev, mddev->major_version);
2072         }
2073         printk("md:     **********************************\n");
2074         printk("\n");
2075 }
2076
2077
2078 static void sync_sbs(mddev_t * mddev, int nospares)
2079 {
2080         /* Update each superblock (in-memory image), but
2081          * if we are allowed to, skip spares which already
2082          * have the right event counter, or have one earlier
2083          * (which would mean they aren't being marked as dirty
2084          * with the rest of the array)
2085          */
2086         mdk_rdev_t *rdev;
2087
2088         /* First make sure individual recovery_offsets are correct */
2089         list_for_each_entry(rdev, &mddev->disks, same_set) {
2090                 if (rdev->raid_disk >= 0 &&
2091                     mddev->delta_disks >= 0 &&
2092                     !test_bit(In_sync, &rdev->flags) &&
2093                     mddev->curr_resync_completed > rdev->recovery_offset)
2094                                 rdev->recovery_offset = mddev->curr_resync_completed;
2095
2096         }       
2097         list_for_each_entry(rdev, &mddev->disks, same_set) {
2098                 if (rdev->sb_events == mddev->events ||
2099                     (nospares &&
2100                      rdev->raid_disk < 0 &&
2101                      rdev->sb_events+1 == mddev->events)) {
2102                         /* Don't update this superblock */
2103                         rdev->sb_loaded = 2;
2104                 } else {
2105                         super_types[mddev->major_version].
2106                                 sync_super(mddev, rdev);
2107                         rdev->sb_loaded = 1;
2108                 }
2109         }
2110 }
2111
2112 static void md_update_sb(mddev_t * mddev, int force_change)
2113 {
2114         mdk_rdev_t *rdev;
2115         int sync_req;
2116         int nospares = 0;
2117
2118         mddev->utime = get_seconds();
2119         if (mddev->external)
2120                 return;
2121 repeat:
2122         spin_lock_irq(&mddev->write_lock);
2123
2124         set_bit(MD_CHANGE_PENDING, &mddev->flags);
2125         if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
2126                 force_change = 1;
2127         if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
2128                 /* just a clean <-> dirty transition; possibly leave spares
2129                  * alone, though if the event count isn't the right even/odd
2130                  * we will have to do the spares after all
2131                  */
2132                 nospares = 1;
2133         if (force_change)
2134                 nospares = 0;
2135         if (mddev->degraded)
2136                 /* If the array is degraded, then skipping spares is both
2137                  * dangerous and fairly pointless.
2138                  * Dangerous because a device that was removed from the array
2139          * might have an event_count that still looks up-to-date,
2140          * so it can be re-added without a resync.
2141          * Pointless because if there are any spares to skip,
2142          * then a recovery will happen, the array will soon no longer
2143          * be degraded, and the spare can go back to sleep.
2144                  */
2145                 nospares = 0;
2146
2147         sync_req = mddev->in_sync;
2148
2149         /* If this is just a dirty<->clean transition, and the array is clean
2150          * and 'events' is odd, we can roll back to the previous clean state */
2151         if (nospares
2152             && (mddev->in_sync && mddev->recovery_cp == MaxSector)
2153             && mddev->can_decrease_events
2154             && mddev->events != 1) {
2155                 mddev->events--;
2156                 mddev->can_decrease_events = 0;
2157         } else {
2158                 /* otherwise we have to go forward and ... */
2159                 mddev->events++;
2160                 mddev->can_decrease_events = nospares;
2161         }
2162
2163         if (!mddev->events) {
2164                 /*
2165                  * oops, this 64-bit counter should never wrap.
2166                  * Either we are in the year ~1 trillion A.D., assuming
2167                  * 1 reboot per second, or we have a bug:
2168                  */
2169                 MD_BUG();
2170                 mddev->events--;
2171         }
2172
2173         /*
2174          * do not write anything to disk if using
2175          * nonpersistent superblocks
2176          */
2177         if (!mddev->persistent) {
2178                 if (!mddev->external)
2179                         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
2180
2181                 spin_unlock_irq(&mddev->write_lock);
2182                 wake_up(&mddev->sb_wait);
2183                 return;
2184         }
2185         sync_sbs(mddev, nospares);
2186         spin_unlock_irq(&mddev->write_lock);
2187
2188         dprintk(KERN_INFO 
2189                 "md: updating %s RAID superblock on device (in sync %d)\n",
2190                 mdname(mddev),mddev->in_sync);
2191
2192         bitmap_update_sb(mddev->bitmap);
2193         list_for_each_entry(rdev, &mddev->disks, same_set) {
2194                 char b[BDEVNAME_SIZE];
2195                 dprintk(KERN_INFO "md: ");
2196                 if (rdev->sb_loaded != 1)
2197                         continue; /* no noise on spare devices */
2198                 if (test_bit(Faulty, &rdev->flags))
2199                         dprintk("(skipping faulty ");
2200
2201                 dprintk("%s ", bdevname(rdev->bdev,b));
2202                 if (!test_bit(Faulty, &rdev->flags)) {
2203                         md_super_write(mddev,rdev,
2204                                        rdev->sb_start, rdev->sb_size,
2205                                        rdev->sb_page);
2206                         dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
2207                                 bdevname(rdev->bdev,b),
2208                                 (unsigned long long)rdev->sb_start);
2209                         rdev->sb_events = mddev->events;
2210
2211                 } else
2212                         dprintk(")\n");
2213                 if (mddev->level == LEVEL_MULTIPATH)
2214                         /* only need to write one superblock... */
2215                         break;
2216         }
2217         md_super_wait(mddev);
2218         /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
2219
2220         spin_lock_irq(&mddev->write_lock);
2221         if (mddev->in_sync != sync_req ||
2222             test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
2223                 /* have to write it out again */
2224                 spin_unlock_irq(&mddev->write_lock);
2225                 goto repeat;
2226         }
2227         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
2228         spin_unlock_irq(&mddev->write_lock);
2229         wake_up(&mddev->sb_wait);
2230         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
2231                 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
2232
2233 }
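/*
 * Editor's illustrative sketch, not part of the driver: the event-count
 * step in md_update_sb() above.  A pure clean <-> dirty transition may
 * roll the counter back to the value the spares already hold on disk
 * (so they need no rewrite); anything else must move it forward.
 * next_events() is a hypothetical helper over the same state (the
 * in_sync/recovery_cp checks are elided for brevity).
 */
static inline u64 next_events(u64 events, int nospares, int can_decrease)
{
        if (nospares && can_decrease && events != 1)
                return events - 1;      /* back to what spares hold */
        return events + 1;              /* normal monotonic bump */
}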
2234
2235 /* Words written to sysfs files may or may not be \n terminated.
2236  * We want to accept either case. For this we use cmd_match.
2237  */
2238 static int cmd_match(const char *cmd, const char *str)
2239 {
2240         /* See if cmd, written into a sysfs file, matches
2241          * str.  They must either be the same, or cmd can
2242          * have a trailing newline
2243          */
2244         while (*cmd && *str && *cmd == *str) {
2245                 cmd++;
2246                 str++;
2247         }
2248         if (*cmd == '\n')
2249                 cmd++;
2250         if (*str || *cmd)
2251                 return 0;
2252         return 1;
2253 }
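/*
 * Editor's illustrative usage, not part of the driver: cmd_match()
 * treats one trailing newline in the sysfs input as insignificant, so:
 *
 *      cmd_match("faulty\n", "faulty")  -> 1
 *      cmd_match("faulty",   "faulty")  -> 1
 *      cmd_match("fault",    "faulty")  -> 0   (prefixes don't match)
 */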
2254
2255 struct rdev_sysfs_entry {
2256         struct attribute attr;
2257         ssize_t (*show)(mdk_rdev_t *, char *);
2258         ssize_t (*store)(mdk_rdev_t *, const char *, size_t);
2259 };
2260
2261 static ssize_t
2262 state_show(mdk_rdev_t *rdev, char *page)
2263 {
2264         char *sep = "";
2265         size_t len = 0;
2266
2267         if (test_bit(Faulty, &rdev->flags)) {
2268                 len+= sprintf(page+len, "%sfaulty",sep);
2269                 sep = ",";
2270         }
2271         if (test_bit(In_sync, &rdev->flags)) {
2272                 len += sprintf(page+len, "%sin_sync",sep);
2273                 sep = ",";
2274         }
2275         if (test_bit(WriteMostly, &rdev->flags)) {
2276                 len += sprintf(page+len, "%swrite_mostly",sep);
2277                 sep = ",";
2278         }
2279         if (test_bit(Blocked, &rdev->flags)) {
2280                 len += sprintf(page+len, "%sblocked", sep);
2281                 sep = ",";
2282         }
2283         if (!test_bit(Faulty, &rdev->flags) &&
2284             !test_bit(In_sync, &rdev->flags)) {
2285                 len += sprintf(page+len, "%sspare", sep);
2286                 sep = ",";
2287         }
2288         return len+sprintf(page+len, "\n");
2289 }
2290
2291 static ssize_t
2292 state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2293 {
2294         /* can write
2295          *  faulty  - simulates an error
2296          *  remove  - disconnects the device
2297          *  writemostly - sets write_mostly
2298          *  -writemostly - clears write_mostly
2299          *  blocked - sets the Blocked flag
2300          *  -blocked - clears the Blocked flag
2301          *  insync - sets In_sync provided the device isn't active
2302          */
2303         int err = -EINVAL;
2304         if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
2305                 md_error(rdev->mddev, rdev);
2306                 err = 0;
2307         } else if (cmd_match(buf, "remove")) {
2308                 if (rdev->raid_disk >= 0)
2309                         err = -EBUSY;
2310                 else {
2311                         mddev_t *mddev = rdev->mddev;
2312                         kick_rdev_from_array(rdev);
2313                         if (mddev->pers)
2314                                 md_update_sb(mddev, 1);
2315                         md_new_event(mddev);
2316                         err = 0;
2317                 }
2318         } else if (cmd_match(buf, "writemostly")) {
2319                 set_bit(WriteMostly, &rdev->flags);
2320                 err = 0;
2321         } else if (cmd_match(buf, "-writemostly")) {
2322                 clear_bit(WriteMostly, &rdev->flags);
2323                 err = 0;
2324         } else if (cmd_match(buf, "blocked")) {
2325                 set_bit(Blocked, &rdev->flags);
2326                 err = 0;
2327         } else if (cmd_match(buf, "-blocked")) {
2328                 clear_bit(Blocked, &rdev->flags);
2329                 wake_up(&rdev->blocked_wait);
2330                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2331                 md_wakeup_thread(rdev->mddev->thread);
2332
2333                 err = 0;
2334         } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
2335                 set_bit(In_sync, &rdev->flags);
2336                 err = 0;
2337         }
2338         if (!err && rdev->sysfs_state)
2339                 sysfs_notify_dirent(rdev->sysfs_state);
2340         return err ? err : len;
2341 }
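/*
 * Editor's illustrative usage, not part of the driver: the commands
 * above arrive as writes to the per-device state file, e.g.
 * (hypothetical device names):
 *
 *      echo faulty > /sys/block/md0/md/dev-sda1/state
 *      echo remove > /sys/block/md0/md/dev-sda1/state
 */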
2342 static struct rdev_sysfs_entry rdev_state =
2343 __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
2344
2345 static ssize_t
2346 errors_show(mdk_rdev_t *rdev, char *page)
2347 {
2348         return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
2349 }
2350
2351 static ssize_t
2352 errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2353 {
2354         char *e;
2355         unsigned long n = simple_strtoul(buf, &e, 10);
2356         if (*buf && (*e == 0 || *e == '\n')) {
2357                 atomic_set(&rdev->corrected_errors, n);
2358                 return len;
2359         }
2360         return -EINVAL;
2361 }
2362 static struct rdev_sysfs_entry rdev_errors =
2363 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
2364
2365 static ssize_t
2366 slot_show(mdk_rdev_t *rdev, char *page)
2367 {
2368         if (rdev->raid_disk < 0)
2369                 return sprintf(page, "none\n");
2370         else
2371                 return sprintf(page, "%d\n", rdev->raid_disk);
2372 }
2373
2374 static ssize_t
2375 slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2376 {
2377         char *e;
2378         int err;
2379         char nm[20];
2380         int slot = simple_strtoul(buf, &e, 10);
2381         if (strncmp(buf, "none", 4)==0)
2382                 slot = -1;
2383         else if (e==buf || (*e && *e!= '\n'))
2384                 return -EINVAL;
2385         if (rdev->mddev->pers && slot == -1) {
2386                 /* Setting 'slot' on an active array requires also
2387                  * updating the 'rd%d' link, and communicating
2388                  * with the personality with ->hot_*_disk.
2389                  * For now we only support removing
2390                  * failed/spare devices.  This normally happens automatically,
2391                  * but not when the metadata is externally managed.
2392                  */
2393                 if (rdev->raid_disk == -1)
2394                         return -EEXIST;
2395                 /* personality does all needed checks */
2396                 if (rdev->mddev->pers->hot_add_disk == NULL)
2397                         return -EINVAL;
2398                 err = rdev->mddev->pers->
2399                         hot_remove_disk(rdev->mddev, rdev->raid_disk);
2400                 if (err)
2401                         return err;
2402                 sprintf(nm, "rd%d", rdev->raid_disk);
2403                 sysfs_remove_link(&rdev->mddev->kobj, nm);
2404                 rdev->raid_disk = -1;
2405                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2406                 md_wakeup_thread(rdev->mddev->thread);
2407         } else if (rdev->mddev->pers) {
2408                 mdk_rdev_t *rdev2;
2409                 /* Activating a spare, or possibly reactivating
2410                  * if we ever get bitmaps working here.
2411                  */
2412
2413                 if (rdev->raid_disk != -1)
2414                         return -EBUSY;
2415
2416                 if (rdev->mddev->pers->hot_add_disk == NULL)
2417                         return -EINVAL;
2418
2419                 list_for_each_entry(rdev2, &rdev->mddev->disks, same_set)
2420                         if (rdev2->raid_disk == slot)
2421                                 return -EEXIST;
2422
2423                 rdev->raid_disk = slot;
2424                 if (test_bit(In_sync, &rdev->flags))
2425                         rdev->saved_raid_disk = slot;
2426                 else
2427                         rdev->saved_raid_disk = -1;
2428                 err = rdev->mddev->pers->
2429                         hot_add_disk(rdev->mddev, rdev);
2430                 if (err) {
2431                         rdev->raid_disk = -1;
2432                         return err;
2433                 } else
2434                         sysfs_notify_dirent(rdev->sysfs_state);
2435                 sprintf(nm, "rd%d", rdev->raid_disk);
2436                 if (sysfs_create_link(&rdev->mddev->kobj, &rdev->kobj, nm))
2437                         printk(KERN_WARNING
2438                                "md: cannot register "
2439                                "%s for %s\n",
2440                                nm, mdname(rdev->mddev));
2441
2442                 /* don't wake anyone up, leave that to userspace. */
2443         } else {
2444                 if (slot >= rdev->mddev->raid_disks)
2445                         return -ENOSPC;
2446                 rdev->raid_disk = slot;
2447                 /* assume it is working */
2448                 clear_bit(Faulty, &rdev->flags);
2449                 clear_bit(WriteMostly, &rdev->flags);
2450                 set_bit(In_sync, &rdev->flags);
2451                 sysfs_notify_dirent(rdev->sysfs_state);
2452         }
2453         return len;
2454 }
2455
2456
2457 static struct rdev_sysfs_entry rdev_slot =
2458 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
2459
2460 static ssize_t
2461 offset_show(mdk_rdev_t *rdev, char *page)
2462 {
2463         return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
2464 }
2465
2466 static ssize_t
2467 offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2468 {
2469         char *e;
2470         unsigned long long offset = simple_strtoull(buf, &e, 10);
2471         if (e==buf || (*e && *e != '\n'))
2472                 return -EINVAL;
2473         if (rdev->mddev->pers && rdev->raid_disk >= 0)
2474                 return -EBUSY;
2475         if (rdev->sectors && rdev->mddev->external)
2476                 /* Must set offset before size, so overlap checks
2477                  * can be sane */
2478                 return -EBUSY;
2479         rdev->data_offset = offset;
2480         return len;
2481 }
2482
2483 static struct rdev_sysfs_entry rdev_offset =
2484 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
2485
2486 static ssize_t
2487 rdev_size_show(mdk_rdev_t *rdev, char *page)
2488 {
2489         return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
2490 }
2491
2492 static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
2493 {
2494         /* check if two start/length pairs overlap */
2495         if (s1+l1 <= s2)
2496                 return 0;
2497         if (s2+l2 <= s1)
2498                 return 0;
2499         return 1;
2500 }
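/*
 * Editor's illustrative usage, not part of the driver: overlaps() is
 * the standard half-open interval test - two ranges are disjoint only
 * when one ends at or before the other begins:
 *
 *      overlaps(0, 100, 100, 50) -> 0  (ranges merely touch)
 *      overlaps(0, 100,  99, 50) -> 1  (one sector shared)
 */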
2501
2502 static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
2503 {
2504         unsigned long long blocks;
2505         sector_t new;
2506
2507         if (strict_strtoull(buf, 10, &blocks) < 0)
2508                 return -EINVAL;
2509
2510         if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
2511                 return -EINVAL; /* sector conversion overflow */
2512
2513         new = blocks * 2;
2514         if (new != blocks * 2)
2515                 return -EINVAL; /* unsigned long long to sector_t overflow */
2516
2517         *sectors = new;
2518         return 0;
2519 }
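/*
 * Editor's note, not part of the driver: the two guards in
 * strict_blocks_to_sectors() catch both overflow points in the
 * 1K-block to 512-byte-sector doubling - the top bit of the 64-bit
 * value (so blocks*2 cannot wrap) and the narrowing assignment into
 * sector_t on 32-bit builds.  E.g. "1024" yields 2048 sectors, while
 * any value with bit 63 set is rejected with -EINVAL.
 */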
2520
2521 static ssize_t
2522 rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2523 {
2524         mddev_t *my_mddev = rdev->mddev;
2525         sector_t oldsectors = rdev->sectors;
2526         sector_t sectors;
2527
2528         if (strict_blocks_to_sectors(buf, &sectors) < 0)
2529                 return -EINVAL;
2530         if (my_mddev->pers && rdev->raid_disk >= 0) {
2531                 if (my_mddev->persistent) {
2532                         sectors = super_types[my_mddev->major_version].
2533                                 rdev_size_change(rdev, sectors);
2534                         if (!sectors)
2535                                 return -EBUSY;
2536                 } else if (!sectors)
2537                         sectors = (rdev->bdev->bd_inode->i_size >> 9) -
2538                                 rdev->data_offset;
2539         }
2540         if (sectors < my_mddev->dev_sectors)
2541                 return -EINVAL; /* component must fit device */
2542
2543         rdev->sectors = sectors;
2544         if (sectors > oldsectors && my_mddev->external) {
2545                 /* need to check that all other rdevs with the same ->bdev
2546                  * do not overlap.  We need to unlock the mddev to avoid
2547                  * a deadlock.  We have already changed rdev->sectors, and if
2548                  * we have to change it back, we will have the lock again.
2549                  */
2550                 mddev_t *mddev;
2551                 int overlap = 0;
2552                 struct list_head *tmp;
2553
2554                 mddev_unlock(my_mddev);
2555                 for_each_mddev(mddev, tmp) {
2556                         mdk_rdev_t *rdev2;
2557
2558                         mddev_lock(mddev);
2559                         list_for_each_entry(rdev2, &mddev->disks, same_set)
2560                                 if (test_bit(AllReserved, &rdev2->flags) ||
2561                                     (rdev->bdev == rdev2->bdev &&
2562                                      rdev != rdev2 &&
2563                                      overlaps(rdev->data_offset, rdev->sectors,
2564                                               rdev2->data_offset,
2565                                               rdev2->sectors))) {
2566                                         overlap = 1;
2567                                         break;
2568                                 }
2569                         mddev_unlock(mddev);
2570                         if (overlap) {
2571                                 mddev_put(mddev);
2572                                 break;
2573                         }
2574                 }
2575                 mddev_lock(my_mddev);
2576                 if (overlap) {
2577                         /* Someone else could have slipped in a size
2578                          * change here, but doing so is just silly.
2579                          * We put oldsectors back because we *know* it is
2580                          * safe, and trust userspace not to race with
2581                          * itself
2582                          */
2583                         rdev->sectors = oldsectors;
2584                         return -EBUSY;
2585                 }
2586         }
2587         return len;
2588 }
2589
2590 static struct rdev_sysfs_entry rdev_size =
2591 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
2592
2593
2594 static ssize_t recovery_start_show(mdk_rdev_t *rdev, char *page)
2595 {
2596         unsigned long long recovery_start = rdev->recovery_offset;
2597
2598         if (test_bit(In_sync, &rdev->flags) ||
2599             recovery_start == MaxSector)
2600                 return sprintf(page, "none\n");
2601
2602         return sprintf(page, "%llu\n", recovery_start);
2603 }
2604
2605 static ssize_t recovery_start_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2606 {
2607         unsigned long long recovery_start;
2608
2609         if (cmd_match(buf, "none"))
2610                 recovery_start = MaxSector;
2611         else if (strict_strtoull(buf, 10, &recovery_start))
2612                 return -EINVAL;
2613
2614         if (rdev->mddev->pers &&
2615             rdev->raid_disk >= 0)
2616                 return -EBUSY;
2617
2618         rdev->recovery_offset = recovery_start;
2619         if (recovery_start == MaxSector)
2620                 set_bit(In_sync, &rdev->flags);
2621         else
2622                 clear_bit(In_sync, &rdev->flags);
2623         return len;
2624 }
2625
2626 static struct rdev_sysfs_entry rdev_recovery_start =
2627 __ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
2628
2629 static struct attribute *rdev_default_attrs[] = {
2630         &rdev_state.attr,
2631         &rdev_errors.attr,
2632         &rdev_slot.attr,
2633         &rdev_offset.attr,
2634         &rdev_size.attr,
2635         &rdev_recovery_start.attr,
2636         NULL,
2637 };
2638 static ssize_t
2639 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
2640 {
2641         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
2642         mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
2643         mddev_t *mddev = rdev->mddev;
2644         ssize_t rv;
2645
2646         if (!entry->show)
2647                 return -EIO;
2648
2649         rv = mddev ? mddev_lock(mddev) : -EBUSY;
2650         if (!rv) {
2651                 if (rdev->mddev == NULL)
2652                         rv = -EBUSY;
2653                 else
2654                         rv = entry->show(rdev, page);
2655                 mddev_unlock(mddev);
2656         }
2657         return rv;
2658 }
2659
2660 static ssize_t
2661 rdev_attr_store(struct kobject *kobj, struct attribute *attr,
2662               const char *page, size_t length)
2663 {
2664         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
2665         mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
2666         ssize_t rv;
2667         mddev_t *mddev = rdev->mddev;
2668
2669         if (!entry->store)
2670                 return -EIO;
2671         if (!capable(CAP_SYS_ADMIN))
2672                 return -EACCES;
2673         rv = mddev ? mddev_lock(mddev) : -EBUSY;
2674         if (!rv) {
2675                 if (rdev->mddev == NULL)
2676                         rv = -EBUSY;
2677                 else
2678                         rv = entry->store(rdev, page, length);
2679                 mddev_unlock(mddev);
2680         }
2681         return rv;
2682 }
2683
2684 static void rdev_free(struct kobject *ko)
2685 {
2686         mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
2687         kfree(rdev);
2688 }
2689 static const struct sysfs_ops rdev_sysfs_ops = {
2690         .show           = rdev_attr_show,
2691         .store          = rdev_attr_store,
2692 };
2693 static struct kobj_type rdev_ktype = {
2694         .release        = rdev_free,
2695         .sysfs_ops      = &rdev_sysfs_ops,
2696         .default_attrs  = rdev_default_attrs,
2697 };
2698
2699 /*
2700  * Import a device. If 'super_format' >= 0, then sanity check the superblock
2701  *
2702  * mark the device faulty if:
2703  *
2704  *   - the device is nonexistent (zero size)
2705  *   - the device has no valid superblock
2706  *
2707  * a faulty rdev _never_ has rdev->sb set.
2708  */
2709 static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
2710 {
2711         char b[BDEVNAME_SIZE];
2712         int err;
2713         mdk_rdev_t *rdev;
2714         sector_t size;
2715
2716         rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
2717         if (!rdev) {
2718                 printk(KERN_ERR "md: could not alloc mem for new device!\n");
2719                 return ERR_PTR(-ENOMEM);
2720         }
2721
2722         if ((err = alloc_disk_sb(rdev)))
2723                 goto abort_free;
2724
2725         err = lock_rdev(rdev, newdev, super_format == -2);
2726         if (err)
2727                 goto abort_free;
2728
2729         kobject_init(&rdev->kobj, &rdev_ktype);
2730
2731         rdev->desc_nr = -1;
2732         rdev->saved_raid_disk = -1;
2733         rdev->raid_disk = -1;
2734         rdev->flags = 0;
2735         rdev->data_offset = 0;
2736         rdev->sb_events = 0;
2737         rdev->last_read_error.tv_sec  = 0;
2738         rdev->last_read_error.tv_nsec = 0;
2739         atomic_set(&rdev->nr_pending, 0);
2740         atomic_set(&rdev->read_errors, 0);
2741         atomic_set(&rdev->corrected_errors, 0);
2742
2743         size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
2744         if (!size) {
2745                 printk(KERN_WARNING 
2746                         "md: %s has zero or unknown size, marking faulty!\n",
2747                         bdevname(rdev->bdev,b));
2748                 err = -EINVAL;
2749                 goto abort_free;
2750         }
2751
2752         if (super_format >= 0) {
2753                 err = super_types[super_format].
2754                         load_super(rdev, NULL, super_minor);
2755                 if (err == -EINVAL) {
2756                         printk(KERN_WARNING
2757                                 "md: %s does not have a valid v%d.%d "
2758                                "superblock, not importing!\n",
2759                                 bdevname(rdev->bdev,b),
2760                                super_format, super_minor);
2761                         goto abort_free;
2762                 }
2763                 if (err < 0) {
2764                         printk(KERN_WARNING 
2765                                 "md: could not read %s's sb, not importing!\n",
2766                                 bdevname(rdev->bdev,b));
2767                         goto abort_free;
2768                 }
2769         }
2770
2771         INIT_LIST_HEAD(&rdev->same_set);
2772         init_waitqueue_head(&rdev->blocked_wait);
2773
2774         return rdev;
2775
2776 abort_free:
2777         if (rdev->sb_page) {
2778                 if (rdev->bdev)
2779                         unlock_rdev(rdev);
2780                 free_disk_sb(rdev);
2781         }
2782         kfree(rdev);
2783         return ERR_PTR(err);
2784 }
2785
2786 /*
2787  * Check a full RAID array for plausibility
2788  */
2789
2790
2791 static void analyze_sbs(mddev_t * mddev)
2792 {
2793         int i;
2794         mdk_rdev_t *rdev, *freshest, *tmp;
2795         char b[BDEVNAME_SIZE];
2796
2797         freshest = NULL;
2798         rdev_for_each(rdev, tmp, mddev)
2799                 switch (super_types[mddev->major_version].
2800                         load_super(rdev, freshest, mddev->minor_version)) {
2801                 case 1:
2802                         freshest = rdev;
2803                         break;
2804                 case 0:
2805                         break;
2806                 default:
2807                         printk(KERN_ERR
2808                                 "md: fatal superblock inconsistency in %s"
2809                                 " -- removing from array\n", 
2810                                 bdevname(rdev->bdev,b));
2811                         kick_rdev_from_array(rdev);
2812                 }
2813
2814
2815         super_types[mddev->major_version].
2816                 validate_super(mddev, freshest);
2817
2818         i = 0;
2819         rdev_for_each(rdev, tmp, mddev) {
2820                 if (mddev->max_disks &&
2821                     (rdev->desc_nr >= mddev->max_disks ||
2822                      i > mddev->max_disks)) {
2823                         printk(KERN_WARNING
2824                                "md: %s: %s: only %d devices permitted\n",
2825                                mdname(mddev), bdevname(rdev->bdev, b),
2826                                mddev->max_disks);
2827                         kick_rdev_from_array(rdev);
2828                         continue;
2829                 }
2830                 if (rdev != freshest)
2831                         if (super_types[mddev->major_version].
2832                             validate_super(mddev, rdev)) {
2833                                 printk(KERN_WARNING "md: kicking non-fresh %s"
2834                                         " from array!\n",
2835                                         bdevname(rdev->bdev,b));
2836                                 kick_rdev_from_array(rdev);
2837                                 continue;
2838                         }
2839                 if (mddev->level == LEVEL_MULTIPATH) {
2840                         rdev->desc_nr = i++;
2841                         rdev->raid_disk = rdev->desc_nr;
2842                         set_bit(In_sync, &rdev->flags);
2843                 } else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) {
2844                         rdev->raid_disk = -1;
2845                         clear_bit(In_sync, &rdev->flags);
2846                 }
2847         }
2848 }
2849
2850 /* Read a fixed-point number.
2851  * Numbers in sysfs attributes should be in "standard" units where
2852  * possible, so time should be in seconds.
2853  * However, we internally use a much smaller unit such as
2854  * milliseconds or jiffies.
2855  * This function takes a decimal number with a possible fractional
2856  * component, and produces an integer which is the result of
2857  * multiplying that number by 10^'scale',
2858  * all without any floating-point arithmetic.
2859  */
2860 int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
2861 {
2862         unsigned long result = 0;
2863         long decimals = -1;
2864         while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
2865                 if (*cp == '.')
2866                         decimals = 0;
2867                 else if (decimals < scale) {
2868                         unsigned int value;
2869                         value = *cp - '0';
2870                         result = result * 10 + value;
2871                         if (decimals >= 0)
2872                                 decimals++;
2873                 }
2874                 cp++;
2875         }
2876         if (*cp == '\n')
2877                 cp++;
2878         if (*cp)
2879                 return -EINVAL;
2880         if (decimals < 0)
2881                 decimals = 0;
2882         while (decimals < scale) {
2883                 result *= 10;
2884                 decimals++;
2885         }
2886         *res = result;
2887         return 0;
2888 }
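
/* Editor's note -- illustrative examples, not in the original source; the
 * values follow directly from the loop above:
 *
 *      unsigned long v;
 *      strict_strtoul_scaled("1.5\n", &v, 3);   // v == 1500
 *      strict_strtoul_scaled("2", &v, 3);       // v == 2000 (no fraction)
 *      strict_strtoul_scaled("0.0015", &v, 3);  // v == 1 (digits past 'scale' dropped)
 *      strict_strtoul_scaled("1.2.3", &v, 3);   // -EINVAL (second '.' rejected)
 */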
2889
2890
2891 static void md_safemode_timeout(unsigned long data);
2892
2893 static ssize_t
2894 safe_delay_show(mddev_t *mddev, char *page)
2895 {
2896         int msec = (mddev->safemode_delay*1000)/HZ;
2897         return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
2898 }
2899 static ssize_t
2900 safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
2901 {
2902         unsigned long msec;
2903
2904         if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
2905                 return -EINVAL;
2906         if (msec == 0)
2907                 mddev->safemode_delay = 0;
2908         else {
2909                 unsigned long old_delay = mddev->safemode_delay;
2910                 mddev->safemode_delay = (msec*HZ)/1000;
2911                 if (mddev->safemode_delay == 0)
2912                         mddev->safemode_delay = 1;
2913                 if (mddev->safemode_delay < old_delay)
2914                         md_safemode_timeout((unsigned long)mddev);
2915         }
2916         return len;
2917 }
2918 static struct md_sysfs_entry md_safe_delay =
2919 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR, safe_delay_show, safe_delay_store);
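
/* Editor's note -- example usage (paths assume an array at /sys/block/md0):
 * safe_mode_delay takes seconds with an optional fractional part parsed to
 * millisecond precision, e.g.
 *
 *      echo 0.201 > /sys/block/md0/md/safe_mode_delay
 *
 * stores msec = 201, i.e. safemode_delay = (201*HZ)/1000 ticks; a non-zero
 * delay that would round down to 0 ticks is bumped up to 1.
 */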
2920
2921 static ssize_t
2922 level_show(mddev_t *mddev, char *page)
2923 {
2924         struct mdk_personality *p = mddev->pers;
2925         if (p)
2926                 return sprintf(page, "%s\n", p->name);
2927         else if (mddev->clevel[0])
2928                 return sprintf(page, "%s\n", mddev->clevel);
2929         else if (mddev->level != LEVEL_NONE)
2930                 return sprintf(page, "%d\n", mddev->level);
2931         else
2932                 return 0;
2933 }
2934
2935 static ssize_t
2936 level_store(mddev_t *mddev, const char *buf, size_t len)
2937 {
2938         char clevel[16];
2939         ssize_t rv = len;
2940         struct mdk_personality *pers;
2941         long level;
2942         void *priv;
2943         mdk_rdev_t *rdev;
2944
2945         if (mddev->pers == NULL) {
2946                 if (len == 0)
2947                         return 0;
2948                 if (len >= sizeof(mddev->clevel))
2949                         return -ENOSPC;
2950                 strncpy(mddev->clevel, buf, len);
2951                 if (mddev->clevel[len-1] == '\n')
2952                         len--;
2953                 mddev->clevel[len] = 0;
2954                 mddev->level = LEVEL_NONE;
2955                 return rv;
2956         }
2957
2958         /* request to change the personality.  Need to ensure:
2959          *  - array is not engaged in resync/recovery/reshape
2960          *  - old personality can be suspended
2961          *  - new personality can take over the array.
2962          */
2963
2964         if (mddev->sync_thread || mddev->reshape_position != MaxSector)
2965                 return -EBUSY;
2966
2967         if (!mddev->pers->quiesce) {
2968                 printk(KERN_WARNING "md: %s: %s does not support online personality change\n",
2969                        mdname(mddev), mddev->pers->name);
2970                 return -EINVAL;
2971         }
2972
2973         /* Now find the new personality */
2974         if (len == 0 || len >= sizeof(clevel))
2975                 return -EINVAL;
2976         strncpy(clevel, buf, len);
2977         if (clevel[len-1] == '\n')
2978                 len--;
2979         clevel[len] = 0;
2980         if (strict_strtol(clevel, 10, &level))
2981                 level = LEVEL_NONE;
2982
2983         if (request_module("md-%s", clevel) != 0)
2984                 request_module("md-level-%s", clevel);
2985         spin_lock(&pers_lock);
2986         pers = find_pers(level, clevel);
2987         if (!pers || !try_module_get(pers->owner)) {
2988                 spin_unlock(&pers_lock);
2989                 printk(KERN_WARNING "md: personality %s not loaded\n", clevel);
2990                 return -EINVAL;
2991         }
2992         spin_unlock(&pers_lock);
2993
2994         if (pers == mddev->pers) {
2995                 /* Nothing to do! */
2996                 module_put(pers->owner);
2997                 return rv;
2998         }
2999         if (!pers->takeover) {
3000                 module_put(pers->owner);
3001                 printk(KERN_WARNING "md: %s: %s does not support personality takeover\n",
3002                        mdname(mddev), clevel);
3003                 return -EINVAL;
3004         }
3005
3006         list_for_each_entry(rdev, &mddev->disks, same_set)
3007                 rdev->new_raid_disk = rdev->raid_disk;
3008
3009         /* ->takeover must set new_* and/or delta_disks
3010          * if it succeeds, and may set them when it fails.
3011          */
3012         priv = pers->takeover(mddev);
3013         if (IS_ERR(priv)) {
3014                 mddev->new_level = mddev->level;
3015                 mddev->new_layout = mddev->layout;
3016                 mddev->new_chunk_sectors = mddev->chunk_sectors;
3017                 mddev->raid_disks -= mddev->delta_disks;
3018                 mddev->delta_disks = 0;
3019                 module_put(pers->owner);
3020                 printk(KERN_WARNING "md: %s: %s would not accept array\n",
3021                        mdname(mddev), clevel);
3022                 return PTR_ERR(priv);
3023         }
3024
3025         /* Looks like we have a winner */
3026         mddev_suspend(mddev);
3027         mddev->pers->stop(mddev);
3028
3029         if (mddev->pers->sync_request == NULL &&
3030             pers->sync_request != NULL) {
3031                 /* need to add the md_redundancy_group */
3032                 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
3033                         printk(KERN_WARNING
3034                                "md: cannot register extra attributes for %s\n",
3035                                mdname(mddev));
3036                 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, NULL, "sync_action");
3037         }
3038         if (mddev->pers->sync_request != NULL &&
3039             pers->sync_request == NULL) {
3040                 /* need to remove the md_redundancy_group */
3041                 if (mddev->to_remove == NULL)
3042                         mddev->to_remove = &md_redundancy_group;
3043         }
3044
3045         if (mddev->pers->sync_request == NULL &&
3046             mddev->external) {
3047                 /* We are converting from a no-redundancy array
3048                  * to a redundancy array and metadata is managed
3049                  * externally so we need to be sure that writes
3050                  * won't block due to a need to transition
3051                  *      clean->dirty
3052                  * until external management is started.
3053                  */
3054                 mddev->in_sync = 0;
3055                 mddev->safemode_delay = 0;
3056                 mddev->safemode = 0;
3057         }
3058
3059         list_for_each_entry(rdev, &mddev->disks, same_set) {
3060                 char nm[20];
3061                 if (rdev->raid_disk < 0)
3062                         continue;
3063                 if (rdev->new_raid_disk >= mddev->raid_disks)
3064                         rdev->new_raid_disk = -1;
3065                 if (rdev->new_raid_disk == rdev->raid_disk)
3066                         continue;
3067                 sprintf(nm, "rd%d", rdev->raid_disk);
3068                 sysfs_remove_link(&mddev->kobj, nm);
3069         }
3070         list_for_each_entry(rdev, &mddev->disks, same_set) {
3071                 if (rdev->raid_disk < 0)
3072                         continue;
3073                 if (rdev->new_raid_disk == rdev->raid_disk)
3074                         continue;
3075                 rdev->raid_disk = rdev->new_raid_disk;
3076                 if (rdev->raid_disk < 0)
3077                         clear_bit(In_sync, &rdev->flags);
3078                 else {
3079                         char nm[20];
3080                         sprintf(nm, "rd%d", rdev->raid_disk);
3081                         if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm))
3082                                 printk(KERN_WARNING "md: cannot register %s for %s after level change\n",
3083                                        nm, mdname(mddev));
3084                 }
3085         }
3086
3087         module_put(mddev->pers->owner);
3088         mddev->pers = pers;
3089         mddev->private = priv;
3090         strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
3091         mddev->level = mddev->new_level;
3092         mddev->layout = mddev->new_layout;
3093         mddev->chunk_sectors = mddev->new_chunk_sectors;
3094         mddev->delta_disks = 0;
3095         if (mddev->pers->sync_request == NULL) {
3096                 /* this is now an array without redundancy, so
3097                  * it must always be in_sync
3098                  */
3099                 mddev->in_sync = 1;
3100                 del_timer_sync(&mddev->safemode_timer);
3101         }
3102         pers->run(mddev);
3103         mddev_resume(mddev);
3104         set_bit(MD_CHANGE_DEVS, &mddev->flags);
3105         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3106         md_wakeup_thread(mddev->thread);
3107         sysfs_notify(&mddev->kobj, NULL, "level");
3108         md_new_event(mddev);
3109         return rv;
3110 }
3111
3112 static struct md_sysfs_entry md_level =
3113 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
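
/* Editor's note -- example: provided the target personality module is
 * loadable and its ->takeover accepts the current geometry,
 *
 *      echo raid5 > /sys/block/md0/md/level
 *
 * converts a running array in place; written before the array starts, the
 * string is merely recorded in mddev->clevel for later assembly.
 */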
3114
3115
3116 static ssize_t
3117 layout_show(mddev_t *mddev, char *page)
3118 {
3119         /* just a number, not meaningful for all levels */
3120         if (mddev->reshape_position != MaxSector &&
3121             mddev->layout != mddev->new_layout)
3122                 return sprintf(page, "%d (%d)\n",
3123                                mddev->new_layout, mddev->layout);
3124         return sprintf(page, "%d\n", mddev->layout);
3125 }
3126
3127 static ssize_t
3128 layout_store(mddev_t *mddev, const char *buf, size_t len)
3129 {
3130         char *e;
3131         unsigned long n = simple_strtoul(buf, &e, 10);
3132
3133         if (!*buf || (*e && *e != '\n'))
3134                 return -EINVAL;
3135
3136         if (mddev->pers) {
3137                 int err;
3138                 if (mddev->pers->check_reshape == NULL)
3139                         return -EBUSY;
3140                 mddev->new_layout = n;
3141                 err = mddev->pers->check_reshape(mddev);
3142                 if (err) {
3143                         mddev->new_layout = mddev->layout;
3144                         return err;
3145                 }
3146         } else {
3147                 mddev->new_layout = n;
3148                 if (mddev->reshape_position == MaxSector)
3149                         mddev->layout = n;
3150         }
3151         return len;
3152 }
3153 static struct md_sysfs_entry md_layout =
3154 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
3155
3156
3157 static ssize_t
3158 raid_disks_show(mddev_t *mddev, char *page)
3159 {
3160         if (mddev->raid_disks == 0)
3161                 return 0;
3162         if (mddev->reshape_position != MaxSector &&
3163             mddev->delta_disks != 0)
3164                 return sprintf(page, "%d (%d)\n", mddev->raid_disks,
3165                                mddev->raid_disks - mddev->delta_disks);
3166         return sprintf(page, "%d\n", mddev->raid_disks);
3167 }
3168
3169 static int update_raid_disks(mddev_t *mddev, int raid_disks);
3170
3171 static ssize_t
3172 raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
3173 {
3174         char *e;
3175         int rv = 0;
3176         unsigned long n = simple_strtoul(buf, &e, 10);
3177
3178         if (!*buf || (*e && *e != '\n'))
3179                 return -EINVAL;
3180
3181         if (mddev->pers)
3182                 rv = update_raid_disks(mddev, n);
3183         else if (mddev->reshape_position != MaxSector) {
3184                 int olddisks = mddev->raid_disks - mddev->delta_disks;
3185                 mddev->delta_disks = n - olddisks;
3186                 mddev->raid_disks = n;
3187         } else
3188                 mddev->raid_disks = n;
3189         return rv ? rv : len;
3190 }
3191 static struct md_sysfs_entry md_raid_disks =
3192 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
3193
3194 static ssize_t
3195 chunk_size_show(mddev_t *mddev, char *page)
3196 {
3197         if (mddev->reshape_position != MaxSector &&
3198             mddev->chunk_sectors != mddev->new_chunk_sectors)
3199                 return sprintf(page, "%d (%d)\n",
3200                                mddev->new_chunk_sectors << 9,
3201                                mddev->chunk_sectors << 9);
3202         return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
3203 }
3204
3205 static ssize_t
3206 chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
3207 {
3208         char *e;
3209         unsigned long n = simple_strtoul(buf, &e, 10);
3210
3211         if (!*buf || (*e && *e != '\n'))
3212                 return -EINVAL;
3213
3214         if (mddev->pers) {
3215                 int err;
3216                 if (mddev->pers->check_reshape == NULL)
3217                         return -EBUSY;
3218                 mddev->new_chunk_sectors = n >> 9;
3219                 err = mddev->pers->check_reshape(mddev);
3220                 if (err) {
3221                         mddev->new_chunk_sectors = mddev->chunk_sectors;
3222                         return err;
3223                 }
3224         } else {
3225                 mddev->new_chunk_sectors = n >> 9;
3226                 if (mddev->reshape_position == MaxSector)
3227                         mddev->chunk_sectors = n >> 9;
3228         }
3229         return len;
3230 }
3231 static struct md_sysfs_entry md_chunk_size =
3232 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
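
/* Editor's note -- example: chunk_size is presented in bytes but stored in
 * 512-byte sectors, so
 *
 *      echo 65536 > /sys/block/md0/md/chunk_size
 *
 * sets new_chunk_sectors to 128; on a running array the new value only
 * sticks if ->check_reshape accepts it.
 */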
3233
3234 static ssize_t
3235 resync_start_show(mddev_t *mddev, char *page)
3236 {
3237         if (mddev->recovery_cp == MaxSector)
3238                 return sprintf(page, "none\n");
3239         return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
3240 }
3241
3242 static ssize_t
3243 resync_start_store(mddev_t *mddev, const char *buf, size_t len)
3244 {
3245         char *e;
3246         unsigned long long n = simple_strtoull(buf, &e, 10);
3247
3248         if (mddev->pers)
3249                 return -EBUSY;
3250         if (cmd_match(buf, "none"))
3251                 n = MaxSector;
3252         else if (!*buf || (*e && *e != '\n'))
3253                 return -EINVAL;
3254
3255         mddev->recovery_cp = n;
3256         return len;
3257 }
3258 static struct md_sysfs_entry md_resync_start =
3259 __ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
3260
3261 /*
3262  * The array state can be:
3263  *
3264  * clear
3265  *     No devices, no size, no level
3266  *     Equivalent to STOP_ARRAY ioctl
3267  * inactive
3268  *     May have some settings, but array is not active
3269  *        all IO results in error
3270  *     When written, doesn't tear down array, but just stops it
3271  * suspended (not supported yet)
3272  *     All IO requests will block. The array can be reconfigured.
3273  *     Writing this, if accepted, will block until array is quiescent
3274  * readonly
3275  *     no resync can happen.  no superblocks get written.
3276  *     write requests fail
3277  * read-auto
3278  *     like readonly, but behaves like 'clean' on a write request.
3279  *
3280  * clean - no pending writes, but otherwise active.
3281  *     When written to inactive array, starts without resync
3282  *     If a write request arrives then
3283  *       if metadata is known, mark 'dirty' and switch to 'active'.
3284  *       if not known, block and switch to write-pending
3285  *     If written to an active array that has pending writes, then fails.
3286  * active
3287  *     fully active: IO and resync can be happening.
3288  *     When written to inactive array, starts with resync
3289  *
3290  * write-pending
3291  *     clean, but writes are blocked waiting for 'active' to be written.
3292  *
3293  * active-idle
3294  *     like active, but no writes have been seen for a while (100msec).
3295  *
3296  */
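
/* Editor's note -- example transitions on an assembled array:
 *
 *      echo readonly > /sys/block/md0/md/array_state   (stop writes)
 *      echo active   > /sys/block/md0/md/array_state   (resume normal I/O)
 *      echo clear    > /sys/block/md0/md/array_state   (tear down; fails
 *                                                       with -EBUSY while
 *                                                       the device is open)
 */
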
3297 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
3298                    write_pending, active_idle, bad_word};
3299 static char *array_states[] = {
3300         "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
3301         "write-pending", "active-idle", NULL };
3302
3303 static int match_word(const char *word, char **list)
3304 {
3305         int n;
3306         for (n=0; list[n]; n++)
3307                 if (cmd_match(word, list[n]))
3308                         break;
3309         return n;
3310 }
3311
3312 static ssize_t
3313 array_state_show(mddev_t *mddev, char *page)
3314 {
3315         enum array_state st = inactive;
3316
3317         if (mddev->pers)
3318                 switch(mddev->ro) {
3319                 case 1:
3320                         st = readonly;
3321                         break;
3322                 case 2:
3323                         st = read_auto;
3324                         break;
3325                 case 0:
3326                         if (mddev->in_sync)
3327                                 st = clean;
3328                         else if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
3329                                 st = write_pending;
3330                         else if (mddev->safemode)
3331                                 st = active_idle;
3332                         else
3333                                 st = active;
3334                 }
3335         else {
3336                 if (list_empty(&mddev->disks) &&
3337                     mddev->raid_disks == 0 &&
3338                     mddev->dev_sectors == 0)
3339                         st = clear;
3340                 else
3341                         st = inactive;
3342         }
3343         return sprintf(page, "%s\n", array_states[st]);
3344 }
3345
3346 static int do_md_stop(mddev_t * mddev, int ro, int is_open);
3347 static int md_set_readonly(mddev_t * mddev, int is_open);
3348 static int do_md_run(mddev_t * mddev);
3349 static int restart_array(mddev_t *mddev);
3350
3351 static ssize_t
3352 array_state_store(mddev_t *mddev, const char *buf, size_t len)
3353 {
3354         int err = -EINVAL;
3355         enum array_state st = match_word(buf, array_states);
3356         switch(st) {
3357         case bad_word:
3358                 break;
3359         case clear:
3360                 /* stopping an active array */
3361                 if (atomic_read(&mddev->openers) > 0)
3362                         return -EBUSY;
3363                 err = do_md_stop(mddev, 0, 0);
3364                 break;
3365         case inactive:
3366                 /* stopping an active array */
3367                 if (mddev->pers) {
3368                         if (atomic_read(&mddev->openers) > 0)
3369                                 return -EBUSY;
3370                         err = do_md_stop(mddev, 2, 0);
3371                 } else
3372                         err = 0; /* already inactive */
3373                 break;
3374         case suspended:
3375                 break; /* not supported yet */
3376         case readonly:
3377                 if (mddev->pers)
3378                         err = md_set_readonly(mddev, 0);
3379                 else {
3380                         mddev->ro = 1;
3381                         set_disk_ro(mddev->gendisk, 1);
3382                         err = do_md_run(mddev);
3383                 }
3384                 break;
3385         case read_auto:
3386                 if (mddev->pers) {
3387                         if (mddev->ro == 0)
3388                                 err = md_set_readonly(mddev, 0);
3389                         else if (mddev->ro == 1)
3390                                 err = restart_array(mddev);
3391                         if (err == 0) {
3392                                 mddev->ro = 2;
3393                                 set_disk_ro(mddev->gendisk, 0);
3394                         }
3395                 } else {
3396                         mddev->ro = 2;
3397                         err = do_md_run(mddev);
3398                 }
3399                 break;
3400         case clean:
3401                 if (mddev->pers) {
3402                         restart_array(mddev);
3403                         spin_lock_irq(&mddev->write_lock);
3404                         if (atomic_read(&mddev->writes_pending) == 0) {
3405                                 if (mddev->in_sync == 0) {
3406                                         mddev->in_sync = 1;
3407                                         if (mddev->safemode == 1)
3408                                                 mddev->safemode = 0;
3409                                         if (mddev->persistent)
3410                                                 set_bit(MD_CHANGE_CLEAN,
3411                                                         &mddev->flags);
3412                                 }
3413                                 err = 0;
3414                         } else
3415                                 err = -EBUSY;
3416                         spin_unlock_irq(&mddev->write_lock);
3417                 } else
3418                         err = -EINVAL;
3419                 break;
3420         case active:
3421                 if (mddev->pers) {
3422                         restart_array(mddev);
3423                         if (mddev->external)
3424                                 clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
3425                         wake_up(&mddev->sb_wait);
3426                         err = 0;
3427                 } else {
3428                         mddev->ro = 0;
3429                         set_disk_ro(mddev->gendisk, 0);
3430                         err = do_md_run(mddev);
3431                 }
3432                 break;
3433         case write_pending:
3434         case active_idle:
3435                 /* these cannot be set */
3436                 break;
3437         }
3438         if (err)
3439                 return err;
3440         else {
3441                 sysfs_notify_dirent(mddev->sysfs_state);
3442                 return len;
3443         }
3444 }
3445 static struct md_sysfs_entry md_array_state =
3446 __ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
3447
3448 static ssize_t
3449 max_corrected_read_errors_show(mddev_t *mddev, char *page)
3450 {
3451         return sprintf(page, "%d\n", atomic_read(&mddev->max_corr_read_errors));
3452 }
3453
3454 static ssize_t
3455 max_corrected_read_errors_store(mddev_t *mddev, const char *buf, size_t len)
3456 {
3457         char *e;
3458         unsigned long n = simple_strtoul(buf, &e, 10);
3459
3460         if (*buf && (*e == 0 || *e == '\n')) {
3461                 atomic_set(&mddev->max_corr_read_errors, n);
3462                 return len;
3463         }
3464         return -EINVAL;
3465 }
3466
3467 static struct md_sysfs_entry max_corr_read_errors =
3468 __ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
3469         max_corrected_read_errors_store);
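
/* Editor's note -- example: raise the per-device corrected-read-error
 * budget above the default MD_DEFAULT_MAX_CORRECTED_READ_ERRORS (20):
 *
 *      echo 50 > /sys/block/md0/md/max_read_errors
 */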
3470
3471 static ssize_t
3472 null_show(mddev_t *mddev, char *page)
3473 {
3474         return -EINVAL;
3475 }
3476
3477 static ssize_t
3478 new_dev_store(mddev_t *mddev, const char *buf, size_t len)
3479 {
3480         /* buf must be %d:%d\n? giving major and minor numbers */
3481         /* The new device is added to the array.
3482          * If the array has a persistent superblock, we read the
3483          * superblock to initialise info and check validity.
3484          * Otherwise, the only checking done is that in bind_rdev_to_array,
3485          * which mainly checks size.
3486          */
3487         char *e;
3488         int major = simple_strtoul(buf, &e, 10);
3489         int minor;
3490         dev_t dev;
3491         mdk_rdev_t *rdev;
3492         int err;
3493
3494         if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
3495                 return -EINVAL;
3496         minor = simple_strtoul(e+1, &e, 10);
3497         if (*e && *e != '\n')
3498                 return -EINVAL;
3499         dev = MKDEV(major, minor);
3500         if (major != MAJOR(dev) ||
3501             minor != MINOR(dev))
3502                 return -EOVERFLOW;
3503
3504
3505         if (mddev->persistent) {
3506                 rdev = md_import_device(dev, mddev->major_version,
3507                                         mddev->minor_version);
3508                 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
3509                         mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
3510                                                        mdk_rdev_t, same_set);
3511                         err = super_types[mddev->major_version]
3512                                 .load_super(rdev, rdev0, mddev->minor_version);
3513                         if (err < 0)
3514                                 goto out;
3515                 }
3516         } else if (mddev->external)
3517                 rdev = md_import_device(dev, -2, -1);
3518         else
3519                 rdev = md_import_device(dev, -1, -1);
3520
3521         if (IS_ERR(rdev))
3522                 return PTR_ERR(rdev);
3523         err = bind_rdev_to_array(rdev, mddev);
3524  out:
3525         if (err)
3526                 export_rdev(rdev);
3527         return err ? err : len;
3528 }
3529
3530 static struct md_sysfs_entry md_new_device =
3531 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
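
/* Editor's note -- example: devices are named by major:minor, so if
 * /dev/sdb is block device 8:16, then
 *
 *      echo 8:16 > /sys/block/md0/md/new_dev
 *
 * imports it (validating its superblock when the array uses persistent
 * metadata) and binds it to the array.
 */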
3532
3533 static ssize_t
3534 bitmap_store(mddev_t *mddev, const char *buf, size_t len)
3535 {
3536         char *end;
3537         unsigned long chunk, end_chunk;
3538
3539         if (!mddev->bitmap)
3540                 goto out;
3541         /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
3542         while (*buf) {
3543                 chunk = end_chunk = simple_strtoul(buf, &end, 0);
3544                 if (buf == end) break;
3545                 if (*end == '-') { /* range */
3546                         buf = end + 1;
3547                         end_chunk = simple_strtoul(buf, &end, 0);
3548                         if (buf == end) break;
3549                 }
3550                 if (*end && !isspace(*end)) break;
3551                 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
3552                 buf = skip_spaces(end);
3553         }
3554         bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
3555 out:
3556         return len;
3557 }
3558
3559 static struct md_sysfs_entry md_bitmap =
3560 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
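
/* Editor's note -- example: chunks can be dirtied singly or as ranges so
 * the next resync rewrites them, e.g.
 *
 *      echo "100 200-205" > /sys/block/md0/md/bitmap_set_bits
 *
 * marks chunk 100 and chunks 200 through 205 dirty, then flushes the
 * bitmap to disk.
 */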
3561
3562 static ssize_t
3563 size_show(mddev_t *mddev, char *page)
3564 {
3565         return sprintf(page, "%llu\n",
3566                 (unsigned long long)mddev->dev_sectors / 2);
3567 }
3568
3569 static int update_size(mddev_t *mddev, sector_t num_sectors);
3570
3571 static ssize_t
3572 size_store(mddev_t *mddev, const char *buf, size_t len)
3573 {
3574         /* If array is inactive, we can reduce the component size, but
3575          * not increase it (except from 0).
3576          * If array is active, we can try an on-line resize
3577          */
3578         sector_t sectors;
3579         int err = strict_blocks_to_sectors(buf, &sectors);
3580
3581         if (err < 0)
3582                 return err;
3583         if (mddev->pers) {
3584                 err = update_size(mddev, sectors);
3585                 md_update_sb(mddev, 1);
3586         } else {
3587                 if (mddev->dev_sectors == 0 ||
3588                     mddev->dev_sectors > sectors)
3589                         mddev->dev_sectors = sectors;
3590                 else
3591                         err = -ENOSPC;
3592         }
3593         return err ? err : len;
3594 }
3595
3596 static struct md_sysfs_entry md_size =
3597 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
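
/* Editor's note -- example: component_size reads and writes 1K blocks
 * (dev_sectors / 2).  On an inactive array,
 *
 *      echo 1048576 > /sys/block/md0/md/component_size
 *
 * limits each member to 1 GiB; growing a non-zero size is only attempted
 * (via update_size) while the array is running.
 */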
3598
3599
3600 /* Metadata version.
3601  * This is one of
3602  *   'none' for arrays with no metadata (good luck...)
3603  *   'external' for arrays with externally managed metadata,
3604  * or N.M for internally known formats
3605  */
3606 static ssize_t
3607 metadata_show(mddev_t *mddev, char *page)
3608 {
3609         if (mddev->persistent)
3610                 return sprintf(page, "%d.%d\n",
3611                                mddev->major_version, mddev->minor_version);
3612         else if (mddev->external)
3613                 return sprintf(page, "external:%s\n", mddev->metadata_type);
3614         else
3615                 return sprintf(page, "none\n");
3616 }
3617
3618 static ssize_t
3619 metadata_store(mddev_t *mddev, const char *buf, size_t len)
3620 {
3621         int major, minor;
3622         char *e;
3623         /* Changing the details of 'external' metadata is
3624          * always permitted.  Otherwise there must be
3625          * no devices attached to the array.
3626          */
3627         if (mddev->external && strncmp(buf, "external:", 9) == 0)
3628                 ;
3629         else if (!list_empty(&mddev->disks))
3630                 return -EBUSY;
3631
3632         if (cmd_match(buf, "none")) {
3633                 mddev->persistent = 0;
3634                 mddev->external = 0;
3635                 mddev->major_version = 0;
3636                 mddev->minor_version = 90;
3637                 return len;
3638         }
3639         if (strncmp(buf, "external:", 9) == 0) {
3640                 size_t namelen = len-9;
3641                 if (namelen >= sizeof(mddev->metadata_type))
3642                         namelen = sizeof(mddev->metadata_type)-1;
3643                 strncpy(mddev->metadata_type, buf+9, namelen);
3644                 mddev->metadata_type[namelen] = 0;
3645                 if (namelen && mddev->metadata_type[namelen-1] == '\n')
3646                         mddev->metadata_type[--namelen] = 0;
3647                 mddev->persistent = 0;
3648                 mddev->external = 1;
3649                 mddev->major_version = 0;
3650                 mddev->minor_version = 90;
3651                 return len;
3652         }
3653         major = simple_strtoul(buf, &e, 10);
3654         if (e == buf || *e != '.')
3655                 return -EINVAL;
3656         buf = e+1;
3657         minor = simple_strtoul(buf, &e, 10);
3658         if (e == buf || (*e && *e != '\n'))
3659                 return -EINVAL;
3660         if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
3661                 return -ENOENT;
3662         mddev->major_version = major;
3663         mddev->minor_version = minor;
3664         mddev->persistent = 1;
3665         mddev->external = 0;
3666         return len;
3667 }
3668
3669 static struct md_sysfs_entry md_metadata =
3670 __ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
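
/* Editor's note -- examples (only allowed with no devices attached, except
 * for updating an existing "external:" type; "imsm" is just an illustrative
 * metadata name):
 *
 *      echo 1.2 > /sys/block/md0/md/metadata_version
 *      echo none > /sys/block/md0/md/metadata_version
 *      echo external:imsm > /sys/block/md0/md/metadata_version
 */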
3671
3672 static ssize_t
3673 action_show(mddev_t *mddev, char *page)
3674 {
3675         char *type = "idle";
3676         if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
3677                 type = "frozen";
3678         else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3679             (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
3680                 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
3681                         type = "reshape";
3682                 else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3683                         if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
3684                                 type = "resync";
3685                         else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
3686                                 type = "check";
3687                         else
3688                                 type = "repair";
3689                 } else if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
3690                         type = "recover";
3691         }
3692         return sprintf(page, "%s\n", type);
3693 }
3694
3695 static ssize_t
3696 action_store(mddev_t *mddev, const char *page, size_t len)
3697 {
3698         if (!mddev->pers || !mddev->pers->sync_request)
3699                 return -EINVAL;
3700
3701         if (cmd_match(page, "frozen"))
3702                 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3703         else
3704                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3705
3706         if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
3707                 if (mddev->sync_thread) {
3708                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3709                         md_unregister_thread(mddev->sync_thread);
3710                         mddev->sync_thread = NULL;
3711                         mddev->recovery = 0;
3712                 }
3713         } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3714                    test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
3715                 return -EBUSY;
3716         else if (cmd_match(page, "resync"))
3717                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3718         else if (cmd_match(page, "recover")) {
3719                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
3720                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3721         } else if (cmd_match(page, "reshape")) {
3722                 int err;
3723                 if (mddev->pers->start_reshape == NULL)
3724                         return -EINVAL;
3725                 err = mddev->pers->start_reshape(mddev);
3726                 if (err)
3727                         return err;
3728                 sysfs_notify(&mddev->kobj, NULL, "degraded");
3729         } else {
3730                 if (cmd_match(page, "check"))
3731                         set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
3732                 else if (!cmd_match(page, "repair"))
3733                         return -EINVAL;
3734                 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
3735                 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3736         }
3737         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3738         md_wakeup_thread(mddev->thread);
3739         sysfs_notify_dirent(mddev->sysfs_action);
3740         return len;
3741 }
3742
3743 static ssize_t
3744 mismatch_cnt_show(mddev_t *mddev, char *page)
3745 {
3746         return sprintf(page, "%llu\n",
3747                        (unsigned long long) mddev->resync_mismatches);
3748 }
3749
3750 static struct md_sysfs_entry md_scan_mode =
3751 __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
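
/* Editor's note -- example: request a background consistency check, then
 * interrupt it:
 *
 *      echo check > /sys/block/md0/md/sync_action
 *      echo idle  > /sys/block/md0/md/sync_action
 *
 * "repair" takes the same path but without MD_RECOVERY_CHECK, so
 * mismatches are corrected rather than only counted.
 */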
3752
3753
3754 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
3755
3756 static ssize_t
3757 sync_min_show(mddev_t *mddev, char *page)
3758 {
3759         return sprintf(page, "%d (%s)\n", speed_min(mddev),
3760                        mddev->sync_speed_min ? "local": "system");
3761 }
3762
3763 static ssize_t
3764 sync_min_store(mddev_t *mddev, const char *buf, size_t len)
3765 {
3766         int min;
3767         char *e;
3768         if (strncmp(buf, "system", 6) == 0) {
3769                 mddev->sync_speed_min = 0;
3770                 return len;
3771         }
3772         min = simple_strtoul(buf, &e, 10);
3773         if (buf == e || (*e && *e != '\n') || min <= 0)
3774                 return -EINVAL;
3775         mddev->sync_speed_min = min;
3776         return len;
3777 }
3778
3779 static struct md_sysfs_entry md_sync_min =
3780 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
3781
3782 static ssize_t
3783 sync_max_show(mddev_t *mddev, char *page)
3784 {
3785         return sprintf(page, "%d (%s)\n", speed_max(mddev),
3786                        mddev->sync_speed_max ? "local": "system");
3787 }
3788
3789 static ssize_t
3790 sync_max_store(mddev_t *mddev, const char *buf, size_t len)
3791 {
3792         int max;
3793         char *e;
3794         if (strncmp(buf, "system", 6) == 0) {
3795                 mddev->sync_speed_max = 0;
3796                 return len;
3797         }
3798         max = simple_strtoul(buf, &e, 10);
3799         if (buf == e || (*e && *e != '\n') || max <= 0)
3800                 return -EINVAL;
3801         mddev->sync_speed_max = max;
3802         return len;
3803 }
3804
3805 static struct md_sysfs_entry md_sync_max =
3806 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
3807
3808 static ssize_t
3809 degraded_show(mddev_t *mddev, char *page)
3810 {
3811         return sprintf(page, "%d\n", mddev->degraded);
3812 }
3813 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
3814
3815 static ssize_t
3816 sync_force_parallel_show(mddev_t *mddev, char *page)
3817 {
3818         return sprintf(page, "%d\n", mddev->parallel_resync);
3819 }
3820
3821 static ssize_t
3822 sync_force_parallel_store(mddev_t *mddev, const char *buf, size_t len)
3823 {
3824         long n;
3825
3826         if (strict_strtol(buf, 10, &n))
3827                 return -EINVAL;
3828
3829         if (n != 0 && n != 1)
3830                 return -EINVAL;
3831
3832         mddev->parallel_resync = n;
3833
3834         if (mddev->sync_thread)
3835                 wake_up(&resync_wait);
3836
3837         return len;
3838 }
3839
3840 /* force parallel resync, even with shared block devices */
3841 static struct md_sysfs_entry md_sync_force_parallel =
3842 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
3843        sync_force_parallel_show, sync_force_parallel_store);
3844
3845 static ssize_t
3846 sync_speed_show(mddev_t *mddev, char *page)
3847 {
3848         unsigned long resync, dt, db;
3849         if (mddev->curr_resync == 0)
3850                 return sprintf(page, "none\n");
3851         resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
3852         dt = (jiffies - mddev->resync_mark) / HZ;
3853         if (!dt) dt++;
3854         db = resync - mddev->resync_mark_cnt;
3855         return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
3856 }
3857
3858 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
3859
3860 static ssize_t
3861 sync_completed_show(mddev_t *mddev, char *page)
3862 {
3863         unsigned long max_sectors, resync;
3864
3865         if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3866                 return sprintf(page, "none\n");
3867
3868         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
3869                 max_sectors = mddev->resync_max_sectors;
3870         else
3871                 max_sectors = mddev->dev_sectors;
3872
3873         resync = mddev->curr_resync_completed;
3874         return sprintf(page, "%lu / %lu\n", resync, max_sectors);
3875 }
3876
3877 static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
3878
3879 static ssize_t
3880 min_sync_show(mddev_t *mddev, char *page)
3881 {
3882         return sprintf(page, "%llu\n",
3883                        (unsigned long long)mddev->resync_min);
3884 }
3885 static ssize_t
3886 min_sync_store(mddev_t *mddev, const char *buf, size_t len)
3887 {
3888         unsigned long long min;
3889         if (strict_strtoull(buf, 10, &min))
3890                 return -EINVAL;
3891         if (min > mddev->resync_max)
3892                 return -EINVAL;
3893         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3894                 return -EBUSY;
3895
3896         /* Must be a multiple of chunk_size */
3897         if (mddev->chunk_sectors) {
3898                 sector_t temp = min;
3899                 if (sector_div(temp, mddev->chunk_sectors))
3900                         return -EINVAL;
3901         }
3902         mddev->resync_min = min;
3903
3904         return len;
3905 }
3906
3907 static struct md_sysfs_entry md_min_sync =
3908 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
3909
3910 static ssize_t
3911 max_sync_show(mddev_t *mddev, char *page)
3912 {
3913         if (mddev->resync_max == MaxSector)
3914                 return sprintf(page, "max\n");
3915         else
3916                 return sprintf(page, "%llu\n",
3917                                (unsigned long long)mddev->resync_max);
3918 }
3919 static ssize_t
3920 max_sync_store(mddev_t *mddev, const char *buf, size_t len)
3921 {
3922         if (strncmp(buf, "max", 3) == 0)
3923                 mddev->resync_max = MaxSector;
3924         else {
3925                 unsigned long long max;
3926                 if (strict_strtoull(buf, 10, &max))
3927                         return -EINVAL;
3928                 if (max < mddev->resync_min)
3929                         return -EINVAL;
3930                 if (max < mddev->resync_max &&
3931                     mddev->ro == 0 &&
3932                     test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3933                         return -EBUSY;
3934
3935                 /* Must be a multiple of chunk_size */
3936                 if (mddev->chunk_sectors) {
3937                         sector_t temp = max;
3938                         if (sector_div(temp, mddev->chunk_sectors))
3939                                 return -EINVAL;
3940                 }
3941                 mddev->resync_max = max;
3942         }
3943         wake_up(&mddev->recovery_wait);
3944         return len;
3945 }
3946
3947 static struct md_sysfs_entry md_max_sync =
3948 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
3949
3950 static ssize_t
3951 suspend_lo_show(mddev_t *mddev, char *page)
3952 {
3953         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
3954 }
3955
3956 static ssize_t
3957 suspend_lo_store(mddev_t *mddev, const char *buf, size_t len)
3958 {
3959         char *e;
3960         unsigned long long new = simple_strtoull(buf, &e, 10);
3961
3962         if (mddev->pers == NULL ||
3963             mddev->pers->quiesce == NULL)
3964                 return -EINVAL;
3965         if (buf == e || (*e && *e != '\n'))
3966                 return -EINVAL;
3967         if (new >= mddev->suspend_hi ||
3968             (new > mddev->suspend_lo && new < mddev->suspend_hi)) {
3969                 mddev->suspend_lo = new;
3970                 mddev->pers->quiesce(mddev, 2);
3971                 return len;
3972         } else
3973                 return -EINVAL;
3974 }
3975 static struct md_sysfs_entry md_suspend_lo =
3976 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
3977
3978
3979 static ssize_t
3980 suspend_hi_show(mddev_t *mddev, char *page)
3981 {
3982         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
3983 }
3984
3985 static ssize_t
3986 suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
3987 {
3988         char *e;
3989         unsigned long long new = simple_strtoull(buf, &e, 10);
3990
3991         if (mddev->pers == NULL ||
3992             mddev->pers->quiesce == NULL)
3993                 return -EINVAL;
3994         if (buf == e || (*e && *e != '\n'))
3995                 return -EINVAL;
3996         if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) ||
3997             (new > mddev->suspend_lo && new > mddev->suspend_hi)) {
3998                 mddev->suspend_hi = new;
3999                 mddev->pers->quiesce(mddev, 1);
4000                 mddev->pers->quiesce(mddev, 0);
4001                 return len;
4002         } else
4003                 return -EINVAL;
4004 }
4005 static struct md_sysfs_entry md_suspend_hi =
4006 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
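
/* Editor's note -- example: suspend_lo/suspend_hi delimit a sector range
 * the personality must hold I/O away from, e.g.
 *
 *      echo 0      > /sys/block/md0/md/suspend_lo
 *      echo 262144 > /sys/block/md0/md/suspend_hi   (first 128MiB suspended)
 */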
4007
4008 static ssize_t
4009 reshape_position_show(mddev_t *mddev, char *page)
4010 {
4011         if (mddev->reshape_position != MaxSector)
4012                 return sprintf(page, "%llu\n",
4013                                (unsigned long long)mddev->reshape_position);
4014         strcpy(page, "none\n");
4015         return 5;
4016 }
4017
4018 static ssize_t
4019 reshape_position_store(mddev_t *mddev, const char *buf, size_t len)
4020 {
4021         char *e;
4022         unsigned long long new = simple_strtoull(buf, &e, 10);
4023         if (mddev->pers)
4024                 return -EBUSY;
4025         if (buf == e || (*e && *e != '\n'))
4026                 return -EINVAL;
4027         mddev->reshape_position = new;
4028         mddev->delta_disks = 0;
4029         mddev->new_level = mddev->level;
4030         mddev->new_layout = mddev->layout;
4031         mddev->new_chunk_sectors = mddev->chunk_sectors;
4032         return len;
4033 }
4034
4035 static struct md_sysfs_entry md_reshape_position =
4036 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
4037        reshape_position_store);
4038
4039 static ssize_t
4040 array_size_show(mddev_t *mddev, char *page)
4041 {
4042         if (mddev->external_size)
4043                 return sprintf(page, "%llu\n",
4044                                (unsigned long long)mddev->array_sectors/2);
4045         else
4046                 return sprintf(page, "default\n");
4047 }
4048
4049 static ssize_t
4050 array_size_store(mddev_t *mddev, const char *buf, size_t len)
4051 {
4052         sector_t sectors;
4053
4054         if (strncmp(buf, "default", 7) == 0) {
4055                 if (mddev->pers)
4056                         sectors = mddev->pers->size(mddev, 0, 0);
4057                 else
4058                         sectors = mddev->array_sectors;
4059
4060                 mddev->external_size = 0;
4061         } else {
4062                 if (strict_blocks_to_sectors(buf, &sectors) < 0)
4063                         return -EINVAL;
4064                 if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
4065                         return -E2BIG;
4066
4067                 mddev->external_size = 1;
4068         }
4069
4070         mddev->array_sectors = sectors;
4071         set_capacity(mddev->gendisk, mddev->array_sectors);
4072         if (mddev->pers)
4073                 revalidate_disk(mddev->gendisk);
4074
4075         return len;
4076 }
4077
4078 static struct md_sysfs_entry md_array_size =
4079 __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
4080        array_size_store);
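
/* Editor's note -- example:
 *
 *      echo default > /sys/block/md0/md/array_size
 *
 * drops an externally imposed size and returns to the personality's own
 * ->size() calculation.
 */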
4081
4082 static struct attribute *md_default_attrs[] = {
4083         &md_level.attr,
4084         &md_layout.attr,
4085         &md_raid_disks.attr,
4086         &md_chunk_size.attr,
4087         &md_size.attr,
4088         &md_resync_start.attr,
4089         &md_metadata.attr,
4090         &md_new_device.attr,
4091         &md_safe_delay.attr,
4092         &md_array_state.attr,
4093         &md_reshape_position.attr,
4094         &md_array_size.attr,
4095         &max_corr_read_errors.attr,
4096         NULL,
4097 };
4098
4099 static struct attribute *md_redundancy_attrs[] = {
4100         &md_scan_mode.attr,
4101         &md_mismatches.attr,
4102         &md_sync_min.attr,
4103         &md_sync_max.attr,
4104         &md_sync_speed.attr,
4105         &md_sync_force_parallel.attr,
4106         &md_sync_completed.attr,
4107         &md_min_sync.attr,
4108         &md_max_sync.attr,
4109         &md_suspend_lo.attr,
4110         &md_suspend_hi.attr,
4111         &md_bitmap.attr,
4112         &md_degraded.attr,
4113         NULL,
4114 };
4115 static struct attribute_group md_redundancy_group = {
4116         .name = NULL,
4117         .attrs = md_redundancy_attrs,
4118 };
4119
4120
4121 static ssize_t
4122 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
4123 {
4124         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
4125         mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
4126         ssize_t rv;
4127
4128         if (!entry->show)
4129                 return -EIO;
4130         rv = mddev_lock(mddev);
4131         if (!rv) {
4132                 rv = entry->show(mddev, page);
4133                 mddev_unlock(mddev);
4134         }
4135         return rv;
4136 }
4137
4138 static ssize_t
4139 md_attr_store(struct kobject *kobj, struct attribute *attr,
4140               const char *page, size_t length)
4141 {
4142         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
4143         mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
4144         ssize_t rv;
4145
4146         if (!entry->store)
4147                 return -EIO;
4148         if (!capable(CAP_SYS_ADMIN))
4149                 return -EACCES;
4150         rv = mddev_lock(mddev);
4151         if (mddev->hold_active == UNTIL_IOCTL)
4152                 mddev->hold_active = 0;
4153         if (!rv) {
4154                 rv = entry->store(mddev, page, length);
4155                 mddev_unlock(mddev);
4156         }
4157         return rv;
4158 }
4159
4160 static void md_free(struct kobject *ko)
4161 {
4162         mddev_t *mddev = container_of(ko, mddev_t, kobj);
4163
4164         if (mddev->sysfs_state)
4165                 sysfs_put(mddev->sysfs_state);
4166
4167         if (mddev->gendisk) {
4168                 del_gendisk(mddev->gendisk);
4169                 put_disk(mddev->gendisk);
4170         }
4171         if (mddev->queue)
4172                 blk_cleanup_queue(mddev->queue);
4173
4174         kfree(mddev);
4175 }
4176
4177 static const struct sysfs_ops md_sysfs_ops = {
4178         .show   = md_attr_show,
4179         .store  = md_attr_store,
4180 };
4181 static struct kobj_type md_ktype = {
4182         .release        = md_free,
4183         .sysfs_ops      = &md_sysfs_ops,
4184         .default_attrs  = md_default_attrs,
4185 };
4186
4187 int mdp_major = 0;
4188
4189 static void mddev_delayed_delete(struct work_struct *ws)
4190 {
4191         mddev_t *mddev = container_of(ws, mddev_t, del_work);
4192
4193         sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
4194         kobject_del(&mddev->kobj);
4195         kobject_put(&mddev->kobj);
4196 }
4197
4198 static int md_alloc(dev_t dev, char *name)
4199 {
4200         static DEFINE_MUTEX(disks_mutex);
4201         mddev_t *mddev = mddev_find(dev);
4202         struct gendisk *disk;
4203         int partitioned;
4204         int shift;
4205         int unit;
4206         int error;
4207
4208         if (!mddev)
4209                 return -ENODEV;
4210
4211         partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
4212         shift = partitioned ? MdpMinorShift : 0;
4213         unit = MINOR(mddev->unit) >> shift;
4214
4215         /* wait for any previous instance of this device
4216          * to be completely removed (mddev_delayed_delete).
4217          */
4218         flush_scheduled_work();
4219
4220         mutex_lock(&disks_mutex);
4221         error = -EEXIST;
4222         if (mddev->gendisk)
4223                 goto abort;
4224
4225         if (name) {
4226                 /* Need to ensure that 'name' is not a duplicate.
4227                  */
4228                 mddev_t *mddev2;
4229                 spin_lock(&all_mddevs_lock);
4230
4231                 list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
4232                         if (mddev2->gendisk &&
4233                             strcmp(mddev2->gendisk->disk_name, name) == 0) {
4234                                 spin_unlock(&all_mddevs_lock);
4235                                 goto abort;
4236                         }
4237                 spin_unlock(&all_mddevs_lock);
4238         }
4239
4240         error = -ENOMEM;
4241         mddev->queue = blk_alloc_queue(GFP_KERNEL);
4242         if (!mddev->queue)
4243                 goto abort;
4244         mddev->queue->queuedata = mddev;
4245
4246         /* Can be unlocked because the queue is new: no concurrency */
4247         queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, mddev->queue);
4248
4249         blk_queue_make_request(mddev->queue, md_make_request);
4250
4251         disk = alloc_disk(1 << shift);
4252         if (!disk) {
4253                 blk_cleanup_queue(mddev->queue);
4254                 mddev->queue = NULL;
4255                 goto abort;
4256         }
4257         disk->major = MAJOR(mddev->unit);
4258         disk->first_minor = unit << shift;
4259         if (name)
4260                 strcpy(disk->disk_name, name);
4261         else if (partitioned)
4262                 sprintf(disk->disk_name, "md_d%d", unit);
4263         else
4264                 sprintf(disk->disk_name, "md%d", unit);
4265         disk->fops = &md_fops;
4266         disk->private_data = mddev;
4267         disk->queue = mddev->queue;
4268         /* Allow extended partitions.  This makes the
4269          * 'mdp' device redundant, but we can't really
4270          * remove it now.
4271          */
4272         disk->flags |= GENHD_FL_EXT_DEVT;
4273         add_disk(disk);
4274         mddev->gendisk = disk;
4275         error = kobject_init_and_add(&mddev->kobj, &md_ktype,
4276                                      &disk_to_dev(disk)->kobj, "%s", "md");
4277         if (error) {
4278                 /* This isn't possible, but as kobject_init_and_add is marked
4279                  * __must_check, we must do something with the result
4280                  */
4281                 printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
4282                        disk->disk_name);
4283                 error = 0;
4284         }
4285         if (sysfs_create_group(&mddev->kobj, &md_bitmap_group))
4286                 printk(KERN_DEBUG "pointless warning\n");
4287  abort:
4288         mutex_unlock(&disks_mutex);
4289         if (!error) {
4290                 kobject_uevent(&mddev->kobj, KOBJ_ADD);
4291                 mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, NULL, "array_state");
4292         }
4293         mddev_put(mddev);
4294         return error;
4295 }
4296
4297 static struct kobject *md_probe(dev_t dev, int *part, void *data)
4298 {
4299         md_alloc(dev, NULL);
4300         return NULL;
4301 }
4302
4303 static int add_named_array(const char *val, struct kernel_param *kp)
4304 {
4305         /* val must be "md_*" where * is not all digits.
4306          * We allocate an array with a large free minor number, and
4307          * set the name to val.  val must not already be an active name.
4308          */
4309         int len = strlen(val);
4310         char buf[DISK_NAME_LEN];
4311
4312         while (len && val[len-1] == '\n')
4313                 len--;
4314         if (len >= DISK_NAME_LEN)
4315                 return -E2BIG;
4316         strlcpy(buf, val, len+1);
4317         if (strncmp(buf, "md_", 3) != 0)
4318                 return -EINVAL;
4319         return md_alloc(0, buf);
4320 }
4321
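/*
 * Safemode timer: if no writes are pending when it fires, mark the
 * array safe and wake the md thread so the superblock can be updated
 * to 'clean'.
 */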
4322 static void md_safemode_timeout(unsigned long data)
4323 {
4324         mddev_t *mddev = (mddev_t *) data;
4325
4326         if (!atomic_read(&mddev->writes_pending)) {
4327                 mddev->safemode = 1;
4328                 if (mddev->external)
4329                         sysfs_notify_dirent(mddev->sysfs_state);
4330         }
4331         md_wakeup_thread(mddev->thread);
4332 }
4333
4334 static int start_dirty_degraded;
4335
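/*
 * md_run: bring an assembled array on-line.  Analyses the member
 * superblocks, loads and binds the personality, checks for
 * overlapping data/metadata, then starts the personality and arms
 * the safemode timer.
 */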
4336 static int md_run(mddev_t *mddev)
4337 {
4338         int err;
4339         mdk_rdev_t *rdev;
4340         struct mdk_personality *pers;
4341
4342         if (list_empty(&mddev->disks))
4343                 /* cannot run an array with no devices.. */
4344                 return -EINVAL;
4345
4346         if (mddev->pers)
4347                 return -EBUSY;
4348
4349         /* These two calls synchronise us with the
4350          * sysfs_remove_group calls in mddev_unlock,
4351          * so they must have completed.
4352          */
4353         mutex_lock(&mddev->open_mutex);
4354         mutex_unlock(&mddev->open_mutex);
4355
4356         /*
4357          * Analyze all RAID superblock(s)
4358          */
4359         if (!mddev->raid_disks) {
4360                 if (!mddev->persistent)
4361                         return -EINVAL;
4362                 analyze_sbs(mddev);
4363         }
4364
4365         if (mddev->level != LEVEL_NONE)
4366                 request_module("md-level-%d", mddev->level);
4367         else if (mddev->clevel[0])
4368                 request_module("md-%s", mddev->clevel);
4369
4370         /*
4371          * Drop all container device buffers, from now on
4372          * the only valid external interface is through the md
4373          * device.
4374          */
4375         list_for_each_entry(rdev, &mddev->disks, same_set) {
4376                 if (test_bit(Faulty, &rdev->flags))
4377                         continue;
4378                 sync_blockdev(rdev->bdev);
4379                 invalidate_bdev(rdev->bdev);
4380
4381                 /* perform some consistency tests on the device.
4382                  * We don't want the data to overlap the metadata;
4383                  * internal bitmap issues have been handled elsewhere.
4384                  */
4385                 if (rdev->data_offset < rdev->sb_start) {
4386                         if (mddev->dev_sectors &&
4387                             rdev->data_offset + mddev->dev_sectors
4388                             > rdev->sb_start) {
4389                                 printk("md: %s: data overlaps metadata\n",
4390                                        mdname(mddev));
4391                                 return -EINVAL;
4392                         }
4393                 } else {
4394                         if (rdev->sb_start + rdev->sb_size/512
4395                             > rdev->data_offset) {
4396                                 printk("md: %s: metadata overlaps data\n",
4397                                        mdname(mddev));
4398                                 return -EINVAL;
4399                         }
4400                 }
4401                 sysfs_notify_dirent(rdev->sysfs_state);
4402         }
4403
4404         spin_lock(&pers_lock);
4405         pers = find_pers(mddev->level, mddev->clevel);
4406         if (!pers || !try_module_get(pers->owner)) {
4407                 spin_unlock(&pers_lock);
4408                 if (mddev->level != LEVEL_NONE)
4409                         printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
4410                                mddev->level);
4411                 else
4412                         printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
4413                                mddev->clevel);
4414                 return -EINVAL;
4415         }
4416         mddev->pers = pers;
4417         spin_unlock(&pers_lock);
4418         if (mddev->level != pers->level) {
4419                 mddev->level = pers->level;
4420                 mddev->new_level = pers->level;
4421         }
4422         strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
4423
4424         if (mddev->reshape_position != MaxSector &&
4425             pers->start_reshape == NULL) {
4426                 /* This personality cannot handle reshaping... */
4427                 mddev->pers = NULL;
4428                 module_put(pers->owner);
4429                 return -EINVAL;
4430         }
4431
4432         if (pers->sync_request) {
4433                 /* Warn if this is a potentially silly
4434                  * configuration.
4435                  */
4436                 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
4437                 mdk_rdev_t *rdev2;
4438                 int warned = 0;
4439
4440                 list_for_each_entry(rdev, &mddev->disks, same_set)
4441                         list_for_each_entry(rdev2, &mddev->disks, same_set) {
4442                                 if (rdev < rdev2 &&
4443                                     rdev->bdev->bd_contains ==
4444                                     rdev2->bdev->bd_contains) {
4445                                         printk(KERN_WARNING
4446                                                "%s: WARNING: %s appears to be"
4447                                                " on the same physical disk as"
4448                                                " %s.\n",
4449                                                mdname(mddev),
4450                                                bdevname(rdev->bdev,b),
4451                                                bdevname(rdev2->bdev,b2));
4452                                         warned = 1;
4453                                 }
4454                         }
4455
4456                 if (warned)
4457                         printk(KERN_WARNING
4458                                "True protection against single-disk"
4459                                " failure might be compromised.\n");
4460         }
4461
4462         mddev->recovery = 0;
4463         /* may be overridden by the personality */
4464         mddev->resync_max_sectors = mddev->dev_sectors;
4465
4466         mddev->barriers_work = 1;
4467         mddev->ok_start_degraded = start_dirty_degraded;
4468
4469         if (start_readonly && mddev->ro == 0)
4470                 mddev->ro = 2; /* read-only, but switch on first write */
4471
4472         err = mddev->pers->run(mddev);
4473         if (err)
4474                 printk(KERN_ERR "md: pers->run() failed ...\n");
4475         else if (mddev->pers->size(mddev, 0, 0) < mddev->array_sectors) {
4476                 WARN_ONCE(!mddev->external_size, "%s: default size too small,"
4477                           " but 'external_size' not in effect?\n", __func__);
4478                 printk(KERN_ERR
4479                        "md: invalid array_size %llu > default size %llu\n",
4480                        (unsigned long long)mddev->array_sectors / 2,
4481                        (unsigned long long)mddev->pers->size(mddev, 0, 0) / 2);
4482                 err = -EINVAL;
4483                 mddev->pers->stop(mddev);
4484         }
4485         if (err == 0 && mddev->pers->sync_request) {
4486                 err = bitmap_create(mddev);
4487                 if (err) {
4488                         printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
4489                                mdname(mddev), err);
4490                         mddev->pers->stop(mddev);
4491                 }
4492         }
4493         if (err) {
4494                 module_put(mddev->pers->owner);
4495                 mddev->pers = NULL;
4496                 bitmap_destroy(mddev);
4497                 return err;
4498         }
4499         if (mddev->pers->sync_request) {
4500                 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
4501                         printk(KERN_WARNING
4502                                "md: cannot register extra attributes for %s\n",
4503                                mdname(mddev));
4504                 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, NULL, "sync_action");
4505         } else if (mddev->ro == 2) /* auto-readonly not meaningful */
4506                 mddev->ro = 0;
4507
4508         atomic_set(&mddev->writes_pending,0);
4509         atomic_set(&mddev->max_corr_read_errors,
4510                    MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
4511         mddev->safemode = 0;
4512         mddev->safemode_timer.function = md_safemode_timeout;
4513         mddev->safemode_timer.data = (unsigned long) mddev;
4514         mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
4515         mddev->in_sync = 1;
4516
4517         list_for_each_entry(rdev, &mddev->disks, same_set)
4518                 if (rdev->raid_disk >= 0) {
4519                         char nm[20];
4520                         sprintf(nm, "rd%d", rdev->raid_disk);
4521                         if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm))
4522                                 printk("md: cannot register %s for %s\n",
4523                                        nm, mdname(mddev));
4524                 }
4525         
4526         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4527         
4528         if (mddev->flags)
4529                 md_update_sb(mddev, 0);
4530
4531         md_wakeup_thread(mddev->thread);
4532         md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
4533
4534         md_new_event(mddev);
4535         sysfs_notify_dirent(mddev->sysfs_state);
4536         if (mddev->sysfs_action)
4537                 sysfs_notify_dirent(mddev->sysfs_action);
4538         sysfs_notify(&mddev->kobj, NULL, "degraded");
4539         return 0;
4540 }
4541
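/*
 * do_md_run: md_run plus the block-layer follow-up - publish the new
 * capacity on the gendisk and tell userspace about the change.
 */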
4542 static int do_md_run(mddev_t *mddev)
4543 {
4544         int err;
4545
4546         err = md_run(mddev);
4547         if (err)
4548                 goto out;
4549
4550         set_capacity(mddev->gendisk, mddev->array_sectors);
4551         revalidate_disk(mddev->gendisk);
4552         kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
4553 out:
4554         return err;
4555 }
4556
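/*
 * restart_array: switch a running, read-only array back to
 * read-write and kick off any recovery or resync that is needed.
 */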
4557 static int restart_array(mddev_t *mddev)
4558 {
4559         struct gendisk *disk = mddev->gendisk;
4560
4561         /* Complain if it has no devices */
4562         if (list_empty(&mddev->disks))
4563                 return -ENXIO;
4564         if (!mddev->pers)
4565                 return -EINVAL;
4566         if (!mddev->ro)
4567                 return -EBUSY;
4568         mddev->safemode = 0;
4569         mddev->ro = 0;
4570         set_disk_ro(disk, 0);
4571         printk(KERN_INFO "md: %s switched to read-write mode.\n",
4572                 mdname(mddev));
4573         /* Kick recovery or resync if necessary */
4574         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4575         md_wakeup_thread(mddev->thread);
4576         md_wakeup_thread(mddev->sync_thread);
4577         sysfs_notify_dirent(mddev->sysfs_state);
4578         return 0;
4579 }
4580
4581 /* similar to deny_write_access, but accounts for our holding a reference
4582  * to the file ourselves */
4583 static int deny_bitmap_write_access(struct file * file)
4584 {
4585         struct inode *inode = file->f_mapping->host;
4586
4587         spin_lock(&inode->i_lock);
4588         if (atomic_read(&inode->i_writecount) > 1) {
4589                 spin_unlock(&inode->i_lock);
4590                 return -ETXTBSY;
4591         }
4592         atomic_set(&inode->i_writecount, -1);
4593         spin_unlock(&inode->i_lock);
4594
4595         return 0;
4596 }
4597
4598 void restore_bitmap_write_access(struct file *file)
4599 {
4600         struct inode *inode = file->f_mapping->host;
4601
4602         spin_lock(&inode->i_lock);
4603         atomic_set(&inode->i_writecount, 1);
4604         spin_unlock(&inode->i_lock);
4605 }
4606
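/*
 * md_clean: reset all configuration fields of the mddev to their
 * defaults so the device can be reused for a fresh array.
 */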
4607 static void md_clean(mddev_t *mddev)
4608 {
4609         mddev->array_sectors = 0;
4610         mddev->external_size = 0;
4611         mddev->dev_sectors = 0;
4612         mddev->raid_disks = 0;
4613         mddev->recovery_cp = 0;
4614         mddev->resync_min = 0;
4615         mddev->resync_max = MaxSector;
4616         mddev->reshape_position = MaxSector;
4617         mddev->external = 0;
4618         mddev->persistent = 0;
4619         mddev->level = LEVEL_NONE;
4620         mddev->clevel[0] = 0;
4621         mddev->flags = 0;
4622         mddev->ro = 0;
4623         mddev->metadata_type[0] = 0;
4624         mddev->chunk_sectors = 0;
4625         mddev->ctime = mddev->utime = 0;
4626         mddev->layout = 0;
4627         mddev->max_disks = 0;
4628         mddev->events = 0;
4629         mddev->can_decrease_events = 0;
4630         mddev->delta_disks = 0;
4631         mddev->new_level = LEVEL_NONE;
4632         mddev->new_layout = 0;
4633         mddev->new_chunk_sectors = 0;
4634         mddev->curr_resync = 0;
4635         mddev->resync_mismatches = 0;
4636         mddev->suspend_lo = mddev->suspend_hi = 0;
4637         mddev->sync_speed_min = mddev->sync_speed_max = 0;
4638         mddev->recovery = 0;
4639         mddev->in_sync = 0;
4640         mddev->degraded = 0;
4641         mddev->barriers_work = 0;
4642         mddev->safemode = 0;
4643         mddev->bitmap_info.offset = 0;
4644         mddev->bitmap_info.default_offset = 0;
4645         mddev->bitmap_info.chunksize = 0;
4646         mddev->bitmap_info.daemon_sleep = 0;
4647         mddev->bitmap_info.max_write_behind = 0;
4648 }
4649
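/*
 * md_stop_writes: stop the sync thread, flush the bitmap and any
 * pending superblock writes, then mark the array as cleanly shut
 * down on disk.
 */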
4650 static void md_stop_writes(mddev_t *mddev)
4651 {
4652         if (mddev->sync_thread) {
4653                 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4654                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4655                 md_unregister_thread(mddev->sync_thread);
4656                 mddev->sync_thread = NULL;
4657         }
4658
4659         del_timer_sync(&mddev->safemode_timer);
4660
4661         bitmap_flush(mddev);
4662         md_super_wait(mddev);
4663
4664         if (!mddev->in_sync || mddev->flags) {
4665                 /* mark array as shutdown cleanly */
4666                 mddev->in_sync = 1;
4667                 md_update_sb(mddev, 1);
4668         }
4669 }
4670
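/*
 * md_stop: stop writes and shut down the personality; the mddev
 * itself stays around so the array can be reassembled or freed later.
 */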
4671 static void md_stop(mddev_t *mddev)
4672 {
4673         md_stop_writes(mddev);
4674
4675         mddev->pers->stop(mddev);
4676         if (mddev->pers->sync_request && mddev->to_remove == NULL)
4677                 mddev->to_remove = &md_redundancy_group;
4678         module_put(mddev->pers->owner);
4679         mddev->pers = NULL;
4680         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4681 }
4682
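/*
 * md_set_readonly: fail with -EBUSY while anyone else holds the
 * device open; otherwise stop writes and flip the array (and the
 * gendisk) to read-only.
 */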
4683 static int md_set_readonly(mddev_t *mddev, int is_open)
4684 {
4685         int err = 0;
4686         mutex_lock(&mddev->open_mutex);
4687         if (atomic_read(&mddev->openers) > is_open) {
4688                 printk("md: %s still in use.\n", mdname(mddev));
4689                 err = -EBUSY;
4690                 goto out;
4691         }
4692         if (mddev->pers) {
4693                 md_stop_writes(mddev);
4694
4695                 err  = -ENXIO;
4696                 if (mddev->ro==1)
4697                         goto out;
4698                 mddev->ro = 1;
4699                 set_disk_ro(mddev->gendisk, 1);
4700                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4701                 sysfs_notify_dirent(mddev->sysfs_state);
4702                 err = 0;        
4703         }
4704 out:
4705         mutex_unlock(&mddev->open_mutex);
4706         return err;
4707 }
4708
4709 /* mode:
4710  *   0 - completely stop and disassemble array
4711  *   2 - stop but do not disassemble array
4712  */
4713 static int do_md_stop(mddev_t * mddev, int mode, int is_open)
4714 {
4715         int err = 0;
4716         struct gendisk *disk = mddev->gendisk;
4717         mdk_rdev_t *rdev;
4718
4719         mutex_lock(&mddev->open_mutex);
4720         if (atomic_read(&mddev->openers) > is_open) {
4721                 printk("md: %s still in use.\n", mdname(mddev));
4722                 err = -EBUSY;
4723         } else if (mddev->pers) {
4724
4725                 if (mddev->ro)
4726                         set_disk_ro(disk, 0);
4727
4728                 md_stop(mddev);
4729                 mddev->queue->merge_bvec_fn = NULL;
4730                 mddev->queue->unplug_fn = NULL;
4731                 mddev->queue->backing_dev_info.congested_fn = NULL;
4732
4733                 /* tell userspace to handle 'inactive' */
4734                 sysfs_notify_dirent(mddev->sysfs_state);
4735
4736                 list_for_each_entry(rdev, &mddev->disks, same_set)
4737                         if (rdev->raid_disk >= 0) {
4738                                 char nm[20];
4739                                 sprintf(nm, "rd%d", rdev->raid_disk);
4740                                 sysfs_remove_link(&mddev->kobj, nm);
4741                         }
4742
4743                 set_capacity(disk, 0);
4744                 revalidate_disk(disk);
4745
4746                 if (mddev->ro)
4747                         mddev->ro = 0;
4748                 
4749                 err = 0;
4750         }
4751         mutex_unlock(&mddev->open_mutex);
4752         if (err)
4753                 return err;
4754         /*
4755          * Free resources if final stop
4756          */
4757         if (mode == 0) {
4758
4759                 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
4760
4761                 bitmap_destroy(mddev);
4762                 if (mddev->bitmap_info.file) {
4763                         restore_bitmap_write_access(mddev->bitmap_info.file);
4764                         fput(mddev->bitmap_info.file);
4765                         mddev->bitmap_info.file = NULL;
4766                 }
4767                 mddev->bitmap_info.offset = 0;
4768
4769                 export_array(mddev);
4770
4771                 md_clean(mddev);
4772                 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
4773                 if (mddev->hold_active == UNTIL_STOP)
4774                         mddev->hold_active = 0;
4775
4776         }
4777         err = 0;
4778         blk_integrity_unregister(disk);
4779         md_new_event(mddev);
4780         sysfs_notify_dirent(mddev->sysfs_state);
4781         return err;
4782 }
4783
4784 #ifndef MODULE
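/*
 * autorun_array: start a single auto-assembled array, tearing it
 * back down if do_md_run fails.
 */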
4785 static void autorun_array(mddev_t *mddev)
4786 {
4787         mdk_rdev_t *rdev;
4788         int err;
4789
4790         if (list_empty(&mddev->disks))
4791                 return;
4792
4793         printk(KERN_INFO "md: running: ");
4794
4795         list_for_each_entry(rdev, &mddev->disks, same_set) {
4796                 char b[BDEVNAME_SIZE];
4797                 printk("<%s>", bdevname(rdev->bdev,b));
4798         }
4799         printk("\n");
4800
4801         err = do_md_run(mddev);
4802         if (err) {
4803                 printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
4804                 do_md_stop(mddev, 0, 0);
4805         }
4806 }
4807
4808 /*
4809  * Let's try to run arrays based on all disks that have arrived
4810  * so far (those are in pending_raid_disks).
4811  *
4812  * the method: pick the first pending disk, collect all disks with
4813  * the same UUID, remove all from the pending list and put them into
4814  * the 'same_array' list. Then order this list based on superblock
4815  * update time (freshest comes first), kick out 'old' disks and
4816  * compare superblocks. If everything's fine then run it.
4817  *
4818  * If "unit" is allocated, then bump its reference count
4819  */
4820 static void autorun_devices(int part)
4821 {
4822         mdk_rdev_t *rdev0, *rdev, *tmp;
4823         mddev_t *mddev;
4824         char b[BDEVNAME_SIZE];
4825
4826         printk(KERN_INFO "md: autorun ...\n");
4827         while (!list_empty(&pending_raid_disks)) {
4828                 int unit;
4829                 dev_t dev;
4830                 LIST_HEAD(candidates);
4831                 rdev0 = list_entry(pending_raid_disks.next,
4832                                          mdk_rdev_t, same_set);
4833
4834                 printk(KERN_INFO "md: considering %s ...\n",
4835                         bdevname(rdev0->bdev,b));
4836                 INIT_LIST_HEAD(&candidates);
4837                 rdev_for_each_list(rdev, tmp, &pending_raid_disks)
4838                         if (super_90_load(rdev, rdev0, 0) >= 0) {
4839                                 printk(KERN_INFO "md:  adding %s ...\n",
4840                                         bdevname(rdev->bdev,b));
4841                                 list_move(&rdev->same_set, &candidates);
4842                         }
4843                 /*
4844                  * now we have a set of devices, with all of them having
4845                  * mostly sane superblocks. It's time to allocate the
4846                  * mddev.
4847                  */
4848                 if (part) {
4849                         dev = MKDEV(mdp_major,
4850                                     rdev0->preferred_minor << MdpMinorShift);
4851                         unit = MINOR(dev) >> MdpMinorShift;
4852                 } else {
4853                         dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
4854                         unit = MINOR(dev);
4855                 }
4856                 if (rdev0->preferred_minor != unit) {
4857                         printk(KERN_INFO "md: unit number in %s is bad: %d\n",
4858                                bdevname(rdev0->bdev, b), rdev0->preferred_minor);
4859                         break;
4860                 }
4861
4862                 md_probe(dev, NULL, NULL);
4863                 mddev = mddev_find(dev);
4864                 if (!mddev || !mddev->gendisk) {
4865                         if (mddev)
4866                                 mddev_put(mddev);
4867                         printk(KERN_ERR
4868                                 "md: cannot allocate memory for md drive.\n");
4869                         break;
4870                 }
4871                 if (mddev_lock(mddev)) 
4872                         printk(KERN_WARNING "md: %s locked, cannot run\n",
4873                                mdname(mddev));
4874                 else if (mddev->raid_disks || mddev->major_version
4875                          || !list_empty(&mddev->disks)) {
4876                         printk(KERN_WARNING 
4877                                 "md: %s already running, cannot run %s\n",
4878                                 mdname(mddev), bdevname(rdev0->bdev,b));
4879                         mddev_unlock(mddev);
4880                 } else {
4881                         printk(KERN_INFO "md: created %s\n", mdname(mddev));
4882                         mddev->persistent = 1;
4883                         rdev_for_each_list(rdev, tmp, &candidates) {
4884                                 list_del_init(&rdev->same_set);
4885                                 if (bind_rdev_to_array(rdev, mddev))
4886                                         export_rdev(rdev);
4887                         }
4888                         autorun_array(mddev);
4889                         mddev_unlock(mddev);
4890                 }
4891                 /* on success, candidates will be empty; on error
4892                  * it won't be...
4893                  */
4894                 rdev_for_each_list(rdev, tmp, &candidates) {
4895                         list_del_init(&rdev->same_set);
4896                         export_rdev(rdev);
4897                 }
4898                 mddev_put(mddev);
4899         }
4900         printk(KERN_INFO "md: ... autorun DONE.\n");
4901 }
4902 #endif /* !MODULE */
4903
4904 static int get_version(void __user * arg)
4905 {
4906         mdu_version_t ver;
4907
4908         ver.major = MD_MAJOR_VERSION;
4909         ver.minor = MD_MINOR_VERSION;
4910         ver.patchlevel = MD_PATCHLEVEL_VERSION;
4911
4912         if (copy_to_user(arg, &ver, sizeof(ver)))
4913                 return -EFAULT;
4914
4915         return 0;
4916 }
4917
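/*
 * get_array_info: fill an mdu_array_info_t with the array geometry,
 * state and per-disk counts for the GET_ARRAY_INFO ioctl.
 */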
4918 static int get_array_info(mddev_t * mddev, void __user * arg)
4919 {
4920         mdu_array_info_t info;
4921         int nr,working,insync,failed,spare;
4922         mdk_rdev_t *rdev;
4923
4924         nr=working=insync=failed=spare=0;
4925         list_for_each_entry(rdev, &mddev->disks, same_set) {
4926                 nr++;
4927                 if (test_bit(Faulty, &rdev->flags))
4928                         failed++;
4929                 else {
4930                         working++;
4931                         if (test_bit(In_sync, &rdev->flags))
4932                                 insync++;       
4933                         else
4934                                 spare++;
4935                 }
4936         }
4937
4938         info.major_version = mddev->major_version;
4939         info.minor_version = mddev->minor_version;
4940         info.patch_version = MD_PATCHLEVEL_VERSION;
4941         info.ctime         = mddev->ctime;
4942         info.level         = mddev->level;
4943         info.size          = mddev->dev_sectors / 2;
4944         if (info.size != mddev->dev_sectors / 2) /* overflow */
4945                 info.size = -1;
4946         info.nr_disks      = nr;
4947         info.raid_disks    = mddev->raid_disks;
4948         info.md_minor      = mddev->md_minor;
4949         info.not_persistent= !mddev->persistent;
4950
4951         info.utime         = mddev->utime;
4952         info.state         = 0;
4953         if (mddev->in_sync)
4954                 info.state = (1<<MD_SB_CLEAN);
4955         if (mddev->bitmap && mddev->bitmap_info.offset)
4956                 info.state |= (1<<MD_SB_BITMAP_PRESENT);
4957         info.active_disks  = insync;
4958         info.working_disks = working;
4959         info.failed_disks  = failed;
4960         info.spare_disks   = spare;
4961
4962         info.layout        = mddev->layout;
4963         info.chunk_size    = mddev->chunk_sectors << 9;
4964
4965         if (copy_to_user(arg, &info, sizeof(info)))
4966                 return -EFAULT;
4967
4968         return 0;
4969 }
4970
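/*
 * get_bitmap_file: copy the pathname of the file-backed bitmap (or
 * an empty string if none is configured) out to userspace.
 */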
4971 static int get_bitmap_file(mddev_t * mddev, void __user * arg)
4972 {
4973         mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
4974         char *ptr, *buf = NULL;
4975         int err = -ENOMEM;
4976
4977         if (md_allow_write(mddev))
4978                 file = kmalloc(sizeof(*file), GFP_NOIO);
4979         else
4980                 file = kmalloc(sizeof(*file), GFP_KERNEL);
4981
4982         if (!file)
4983                 goto out;
4984
4985         /* bitmap disabled, zero the first byte and copy out */
4986         if (!mddev->bitmap || !mddev->bitmap->file) {
4987                 file->pathname[0] = '\0';
4988                 goto copy_out;
4989         }
4990
4991         buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
4992         if (!buf)
4993                 goto out;
4994
4995         ptr = d_path(&mddev->bitmap->file->f_path, buf, sizeof(file->pathname));
4996         if (IS_ERR(ptr))
4997                 goto out;
4998
4999         strcpy(file->pathname, ptr);
5000
5001 copy_out:
5002         err = 0;
5003         if (copy_to_user(arg, file, sizeof(*file)))
5004                 err = -EFAULT;
5005 out:
5006         kfree(buf);
5007         kfree(file);
5008         return err;
5009 }
5010
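/*
 * get_disk_info: report major/minor, raid role and state flags for
 * the component device whose 'number' userspace asks about.
 */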
5011 static int get_disk_info(mddev_t * mddev, void __user * arg)
5012 {
5013         mdu_disk_info_t info;
5014         mdk_rdev_t *rdev;
5015
5016         if (copy_from_user(&info, arg, sizeof(info)))
5017                 return -EFAULT;
5018
5019         rdev = find_rdev_nr(mddev, info.number);
5020         if (rdev) {
5021                 info.major = MAJOR(rdev->bdev->bd_dev);
5022                 info.minor = MINOR(rdev->bdev->bd_dev);
5023                 info.raid_disk = rdev->raid_disk;
5024                 info.state = 0;
5025                 if (test_bit(Faulty, &rdev->flags))
5026                         info.state |= (1<<MD_DISK_FAULTY);
5027                 else if (test_bit(In_sync, &rdev->flags)) {
5028                         info.state |= (1<<MD_DISK_ACTIVE);
5029                         info.state |= (1<<MD_DISK_SYNC);
5030                 }
5031                 if (test_bit(WriteMostly, &rdev->flags))
5032                         info.state |= (1<<MD_DISK_WRITEMOSTLY);
5033         } else {
5034                 info.major = info.minor = 0;
5035                 info.raid_disk = -1;
5036                 info.state = (1<<MD_DISK_REMOVED);
5037         }
5038
5039         if (copy_to_user(arg, &info, sizeof(info)))
5040                 return -EFAULT;
5041
5042         return 0;
5043 }
5044
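/*
 * add_new_disk: three cases in one ioctl - adding a
 * superblock-bearing device while assembling, hot-adding a spare to
 * a running array, and describing a device for a not-yet-started
 * 0.90 or non-persistent array.
 */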
5045 static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
5046 {
5047         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
5048         mdk_rdev_t *rdev;
5049         dev_t dev = MKDEV(info->major,info->minor);
5050
5051         if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
5052                 return -EOVERFLOW;
5053
5054         if (!mddev->raid_disks) {
5055                 int err;
5056                 /* expecting a device which has a superblock */
5057                 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
5058                 if (IS_ERR(rdev)) {
5059                         printk(KERN_WARNING 
5060                                 "md: md_import_device returned %ld\n",
5061                                 PTR_ERR(rdev));
5062                         return PTR_ERR(rdev);
5063                 }
5064                 if (!list_empty(&mddev->disks)) {
5065                         mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
5066                                                         mdk_rdev_t, same_set);
5067                         err = super_types[mddev->major_version]
5068                                 .load_super(rdev, rdev0, mddev->minor_version);
5069                         if (err < 0) {
5070                                 printk(KERN_WARNING 
5071                                         "md: %s has different UUID to %s\n",
5072                                         bdevname(rdev->bdev,b), 
5073                                         bdevname(rdev0->bdev,b2));
5074                                 export_rdev(rdev);
5075                                 return -EINVAL;
5076                         }
5077                 }
5078                 err = bind_rdev_to_array(rdev, mddev);
5079                 if (err)
5080                         export_rdev(rdev);
5081                 return err;
5082         }
5083
5084         /*
5085          * add_new_disk can be used once the array is assembled
5086          * to add "hot spares".  They must already have a superblock
5087          * written
5088          */
5089         if (mddev->pers) {
5090                 int err;
5091                 if (!mddev->pers->hot_add_disk) {
5092                         printk(KERN_WARNING 
5093                                 "%s: personality does not support diskops!\n",
5094                                mdname(mddev));
5095                         return -EINVAL;
5096                 }
5097                 if (mddev->persistent)
5098                         rdev = md_import_device(dev, mddev->major_version,
5099                                                 mddev->minor_version);
5100                 else
5101                         rdev = md_import_device(dev, -1, -1);
5102                 if (IS_ERR(rdev)) {
5103                         printk(KERN_WARNING 
5104                                 "md: md_import_device returned %ld\n",
5105                                 PTR_ERR(rdev));
5106                         return PTR_ERR(rdev);
5107                 }
5108                 /* set saved_raid_disk if appropriate */
5109                 if (!mddev->persistent) {
5110                         if (info->state & (1<<MD_DISK_SYNC)  &&
5111                             info->raid_disk < mddev->raid_disks)
5112                                 rdev->raid_disk = info->raid_disk;
5113                         else
5114                                 rdev->raid_disk = -1;
5115                 } else
5116                         super_types[mddev->major_version].
5117                                 validate_super(mddev, rdev);
5118                 rdev->saved_raid_disk = rdev->raid_disk;
5119
5120                 clear_bit(In_sync, &rdev->flags); /* just to be sure */
5121                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
5122                         set_bit(WriteMostly, &rdev->flags);
5123                 else
5124                         clear_bit(WriteMostly, &rdev->flags);
5125
5126                 rdev->raid_disk = -1;
5127                 err = bind_rdev_to_array(rdev, mddev);
5128                 if (!err && !mddev->pers->hot_remove_disk) {
5129                         /* If there is hot_add_disk but no hot_remove_disk
5130                          * then added disks are for geometry changes
5131                          * and should be added immediately.
5132                          */
5133                         super_types[mddev->major_version].
5134                                 validate_super(mddev, rdev);
5135                         err = mddev->pers->hot_add_disk(mddev, rdev);
5136                         if (err)
5137                                 unbind_rdev_from_array(rdev);
5138                 }
5139                 if (err)
5140                         export_rdev(rdev);
5141                 else
5142                         sysfs_notify_dirent(rdev->sysfs_state);
5143
5144                 md_update_sb(mddev, 1);
5145                 if (mddev->degraded)
5146                         set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
5147                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5148                 md_wakeup_thread(mddev->thread);
5149                 return err;
5150         }
5151
5152         /* otherwise, add_new_disk is only allowed
5153          * for major_version==0 superblocks
5154          */
5155         if (mddev->major_version != 0) {
5156                 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
5157                        mdname(mddev));
5158                 return -EINVAL;
5159         }
5160
5161         if (!(info->state & (1<<MD_DISK_FAULTY))) {
5162                 int err;
5163                 rdev = md_import_device(dev, -1, 0);
5164                 if (IS_ERR(rdev)) {
5165                         printk(KERN_WARNING 
5166                                 "md: error, md_import_device() returned %ld\n",
5167                                 PTR_ERR(rdev));
5168                         return PTR_ERR(rdev);
5169                 }
5170                 rdev->desc_nr = info->number;
5171                 if (info->raid_disk < mddev->raid_disks)
5172                         rdev->raid_disk = info->raid_disk;
5173                 else
5174                         rdev->raid_disk = -1;
5175
5176                 if (rdev->raid_disk < mddev->raid_disks)
5177                         if (info->state & (1<<MD_DISK_SYNC))
5178                                 set_bit(In_sync, &rdev->flags);
5179
5180                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
5181                         set_bit(WriteMostly, &rdev->flags);
5182
5183                 if (!mddev->persistent) {
5184                         printk(KERN_INFO "md: nonpersistent superblock ...\n");
5185                         rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
5186                 } else 
5187                         rdev->sb_start = calc_dev_sboffset(rdev->bdev);
5188                 rdev->sectors = rdev->sb_start;
5189
5190                 err = bind_rdev_to_array(rdev, mddev);
5191                 if (err) {
5192                         export_rdev(rdev);
5193                         return err;
5194                 }
5195         }
5196
5197         return 0;
5198 }
5199
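/*
 * hot_remove_disk: detach an inactive component device; refuses with
 * -EBUSY while the device still holds an active raid_disk slot.
 */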
5200 static int hot_remove_disk(mddev_t * mddev, dev_t dev)
5201 {
5202         char b[BDEVNAME_SIZE];
5203         mdk_rdev_t *rdev;
5204
5205         rdev = find_rdev(mddev, dev);
5206         if (!rdev)
5207                 return -ENXIO;
5208
5209         if (rdev->raid_disk >= 0)
5210                 goto busy;
5211
5212         kick_rdev_from_array(rdev);
5213         md_update_sb(mddev, 1);
5214         md_new_event(mddev);
5215
5216         return 0;
5217 busy:
5218         printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
5219                 bdevname(rdev->bdev,b), mdname(mddev));
5220         return -EBUSY;
5221 }
5222
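/*
 * hot_add_disk: add a device to a running version-0.90 array as a
 * spare; the recovery thread will pull it in if the array is
 * degraded.
 */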
5223 static int hot_add_disk(mddev_t * mddev, dev_t dev)
5224 {
5225         char b[BDEVNAME_SIZE];
5226         int err;
5227         mdk_rdev_t *rdev;
5228
5229         if (!mddev->pers)
5230                 return -ENODEV;
5231
5232         if (mddev->major_version != 0) {
5233                 printk(KERN_WARNING "%s: HOT_ADD may only be used with"
5234                         " version-0 superblocks.\n",
5235                         mdname(mddev));
5236                 return -EINVAL;
5237         }
5238         if (!mddev->pers->hot_add_disk) {
5239                 printk(KERN_WARNING 
5240                         "%s: personality does not support diskops!\n",
5241                         mdname(mddev));
5242                 return -EINVAL;
5243         }
5244
5245         rdev = md_import_device(dev, -1, 0);
5246         if (IS_ERR(rdev)) {
5247                 printk(KERN_WARNING 
5248                         "md: error, md_import_device() returned %ld\n",
5249                         PTR_ERR(rdev));
5250                 return -EINVAL;
5251         }
5252
5253         if (mddev->persistent)
5254                 rdev->sb_start = calc_dev_sboffset(rdev->bdev);
5255         else
5256                 rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
5257
5258         rdev->sectors = rdev->sb_start;
5259
5260         if (test_bit(Faulty, &rdev->flags)) {
5261                 printk(KERN_WARNING 
5262                         "md: can not hot-add faulty %s disk to %s!\n",
5263                         bdevname(rdev->bdev,b), mdname(mddev));
5264                 err = -EINVAL;
5265                 goto abort_export;
5266         }
5267         clear_bit(In_sync, &rdev->flags);
5268         rdev->desc_nr = -1;
5269         rdev->saved_raid_disk = -1;
5270         err = bind_rdev_to_array(rdev, mddev);
5271         if (err)
5272                 goto abort_export;
5273
5274         /*
5275                  * The rest had better be atomic: disk failures can be
5276                  * noticed in interrupt context ...
5277          */
5278
5279         rdev->raid_disk = -1;
5280
5281         md_update_sb(mddev, 1);
5282
5283         /*
5284          * Kick recovery, maybe this spare has to be added to the
5285          * array immediately.
5286          */
5287         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5288         md_wakeup_thread(mddev->thread);
5289         md_new_event(mddev);
5290         return 0;
5291
5292 abort_export:
5293         export_rdev(rdev);
5294         return err;
5295 }
5296
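/*
 * set_bitmap_file: attach (fd >= 0) or remove (fd < 0) a file-backed
 * write-intent bitmap, quiescing the personality around the change.
 */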
5297 static int set_bitmap_file(mddev_t *mddev, int fd)
5298 {
5299         int err;
5300
5301         if (mddev->pers) {
5302                 if (!mddev->pers->quiesce)
5303                         return -EBUSY;
5304                 if (mddev->recovery || mddev->sync_thread)
5305                         return -EBUSY;
5306                 /* we should be able to change the bitmap.. */
5307         }
5308
5309
5310         if (fd >= 0) {
5311                 if (mddev->bitmap)
5312                         return -EEXIST; /* cannot add when bitmap is present */
5313                 mddev->bitmap_info.file = fget(fd);
5314
5315                 if (mddev->bitmap_info.file == NULL) {
5316                         printk(KERN_ERR "%s: error: failed to get bitmap file\n",
5317                                mdname(mddev));
5318                         return -EBADF;
5319                 }
5320
5321                 err = deny_bitmap_write_access(mddev->bitmap_info.file);
5322                 if (err) {
5323                         printk(KERN_ERR "%s: error: bitmap file is already in use\n",
5324                                mdname(mddev));
5325                         fput(mddev->bitmap_info.file);
5326                         mddev->bitmap_info.file = NULL;
5327                         return err;
5328                 }
5329                 mddev->bitmap_info.offset = 0; /* file overrides offset */
5330         } else if (mddev->bitmap == NULL)
5331                 return -ENOENT; /* cannot remove what isn't there */
5332         err = 0;
5333         if (mddev->pers) {
5334                 mddev->pers->quiesce(mddev, 1);
5335                 if (fd >= 0)
5336                         err = bitmap_create(mddev);
5337                 if (fd < 0 || err) {
5338                         bitmap_destroy(mddev);
5339                         fd = -1; /* make sure to put the file */
5340                 }
5341                 mddev->pers->quiesce(mddev, 0);
5342         }
5343         if (fd < 0) {
5344                 if (mddev->bitmap_info.file) {
5345                         restore_bitmap_write_access(mddev->bitmap_info.file);
5346                         fput(mddev->bitmap_info.file);
5347                 }
5348                 mddev->bitmap_info.file = NULL;
5349         }
5350
5351         return err;
5352 }
5353
5354 /*
5355  * set_array_info is used in two different ways.
5356  * The original usage is when creating a new array:
5357  * in this usage, raid_disks is > 0 and it, together with
5358  *  level, size, not_persistent, layout and chunksize, determines
5359  *  the shape of the array.
5360  *  This will always create an array with a type-0.90.0 superblock.
5361  * The newer usage is when assembling an array:
5362  *  in this case raid_disks will be 0, and the major_version field is
5363  *  used to determine which style of superblocks is to be found on
5364  *  the devices.  The minor and patch _version numbers are also kept
5365  *  in case the superblock handler wishes to interpret them.
5366  */
5367 static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
5368 {
5369
5370         if (info->raid_disks == 0) {
5371                 /* just setting version number for superblock loading */
5372                 if (info->major_version < 0 ||
5373                     info->major_version >= ARRAY_SIZE(super_types) ||
5374                     super_types[info->major_version].name == NULL) {
5375                         /* maybe try to auto-load a module? */
5376                         printk(KERN_INFO 
5377                                 "md: superblock version %d not known\n",
5378                                 info->major_version);
5379                         return -EINVAL;
5380                 }
5381                 mddev->major_version = info->major_version;
5382                 mddev->minor_version = info->minor_version;
5383                 mddev->patch_version = info->patch_version;
5384                 mddev->persistent = !info->not_persistent;
5385                 /* ensure mddev_put doesn't delete this now that there
5386                  * is some minimal configuration.
5387                  */
5388                 mddev->ctime         = get_seconds();
5389                 return 0;
5390         }
5391         mddev->major_version = MD_MAJOR_VERSION;
5392         mddev->minor_version = MD_MINOR_VERSION;
5393         mddev->patch_version = MD_PATCHLEVEL_VERSION;
5394         mddev->ctime         = get_seconds();
5395
5396         mddev->level         = info->level;
5397         mddev->clevel[0]     = 0;
5398         mddev->dev_sectors   = 2 * (sector_t)info->size;
5399         mddev->raid_disks    = info->raid_disks;
5400         /* don't set md_minor, it is determined by which /dev/md* was
5401          * opened
5402          */
5403         if (info->state & (1<<MD_SB_CLEAN))
5404                 mddev->recovery_cp = MaxSector;
5405         else
5406                 mddev->recovery_cp = 0;
5407         mddev->persistent    = ! info->not_persistent;
5408         mddev->external      = 0;
5409
5410         mddev->layout        = info->layout;
5411         mddev->chunk_sectors = info->chunk_size >> 9;
5412
5413         mddev->max_disks     = MD_SB_DISKS;
5414
5415         if (mddev->persistent)
5416                 mddev->flags         = 0;
5417         set_bit(MD_CHANGE_DEVS, &mddev->flags);
5418
5419         mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
5420         mddev->bitmap_info.offset = 0;
5421
5422         mddev->reshape_position = MaxSector;
5423
5424         /*
5425          * Generate a 128 bit UUID
5426          */
5427         get_random_bytes(mddev->uuid, 16);
5428
5429         mddev->new_level = mddev->level;
5430         mddev->new_chunk_sectors = mddev->chunk_sectors;
5431         mddev->new_layout = mddev->layout;
5432         mddev->delta_disks = 0;
5433
5434         return 0;
5435 }
5436
5437 void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors)
5438 {
5439         WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__);
5440
5441         if (mddev->external_size)
5442                 return;
5443
5444         mddev->array_sectors = array_sectors;
5445 }
5446 EXPORT_SYMBOL(md_set_array_sectors);
5447
5448 static int update_size(mddev_t *mddev, sector_t num_sectors)
5449 {
5450         mdk_rdev_t *rdev;
5451         int rv;
5452         int fit = (num_sectors == 0);
5453
5454         if (mddev->pers->resize == NULL)
5455                 return -EINVAL;
5456         /* The "num_sectors" is the number of sectors of each device that
5457          * is used.  This can only make sense for arrays with redundancy.
5458          * linear and raid0 always use whatever space is available. We can only
5459          * consider changing this number if no resync or reconstruction is
5460          * happening, and if the new size is acceptable. It must fit before the
5461          * sb_start or, if that is <data_offset, it must fit before the size
5462          * of each device.  If num_sectors is zero, we find the largest size
5463          * that fits.
5465          */
5466         if (mddev->sync_thread)
5467                 return -EBUSY;
5468         if (mddev->bitmap)
5469                 /* Sorry, cannot grow a bitmap yet, just remove it,
5470                  * grow, and re-add.
5471                  */
5472                 return -EBUSY;
5473         list_for_each_entry(rdev, &mddev->disks, same_set) {
5474                 sector_t avail = rdev->sectors;
5475
5476                 if (fit && (num_sectors == 0 || num_sectors > avail))
5477                         num_sectors = avail;
5478                 if (avail < num_sectors)
5479                         return -ENOSPC;
5480         }
5481         rv = mddev->pers->resize(mddev, num_sectors);
5482         if (!rv)
5483                 revalidate_disk(mddev->gendisk);
5484         return rv;
5485 }
5486
5487 static int update_raid_disks(mddev_t *mddev, int raid_disks)
5488 {
5489         int rv;
5490         /* change the number of raid disks */
5491         if (mddev->pers->check_reshape == NULL)
5492                 return -EINVAL;
5493         if (raid_disks <= 0 ||
5494             (mddev->max_disks && raid_disks >= mddev->max_disks))
5495                 return -EINVAL;
5496         if (mddev->sync_thread || mddev->reshape_position != MaxSector)
5497                 return -EBUSY;
5498         mddev->delta_disks = raid_disks - mddev->raid_disks;
5499
5500         rv = mddev->pers->check_reshape(mddev);
5501         return rv;
5502 }
5503
5504
5505 /*
5506  * update_array_info is used to change the configuration of an
5507  * on-line array.
5508  * The version, ctime, level, size, raid_disks, not_persistent, layout and chunk_size
5509  * fields in the info are checked against the array.
5510  * Any differences that cannot be handled will cause an error.
5511  * Normally, only one change can be managed at a time.
5512  */
5513 static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
5514 {
5515         int rv = 0;
5516         int cnt = 0;
5517         int state = 0;
5518
5519         /* calculate expected state, ignoring low bits */
5520         if (mddev->bitmap && mddev->bitmap_info.offset)
5521                 state |= (1 << MD_SB_BITMAP_PRESENT);
5522
5523         if (mddev->major_version != info->major_version ||
5524             mddev->minor_version != info->minor_version ||
5525 /*          mddev->patch_version != info->patch_version || */
5526             mddev->ctime         != info->ctime         ||
5527             mddev->level         != info->level         ||
5528 /*          mddev->layout        != info->layout        || */
5529             !mddev->persistent   != info->not_persistent||
5530             mddev->chunk_sectors != info->chunk_size >> 9 ||
5531             /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
5532             ((state^info->state) & 0xfffffe00)
5533                 )
5534                 return -EINVAL;
5535         /* Check there is only one change */
5536         if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
5537                 cnt++;
5538         if (mddev->raid_disks != info->raid_disks)
5539                 cnt++;
5540         if (mddev->layout != info->layout)
5541                 cnt++;
5542         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
5543                 cnt++;
5544         if (cnt == 0)
5545                 return 0;
5546         if (cnt > 1)
5547                 return -EINVAL;
5548
5549         if (mddev->layout != info->layout) {
5550                 /* Change layout
5551                  * we don't need to do anything at the md level, the
5552                  * personality will take care of it all.
5553                  */
5554                 if (mddev->pers->check_reshape == NULL)
5555                         return -EINVAL;
5556                 else {
5557                         mddev->new_layout = info->layout;
5558                         rv = mddev->pers->check_reshape(mddev);
5559                         if (rv)
5560                                 mddev->new_layout = mddev->layout;
5561                         return rv;
5562                 }
5563         }
5564         if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
5565                 rv = update_size(mddev, (sector_t)info->size * 2);
5566
5567         if (mddev->raid_disks    != info->raid_disks)
5568                 rv = update_raid_disks(mddev, info->raid_disks);
5569
5570         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
5571                 if (mddev->pers->quiesce == NULL)
5572                         return -EINVAL;
5573                 if (mddev->recovery || mddev->sync_thread)
5574                         return -EBUSY;
5575                 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
5576                         /* add the bitmap */
5577                         if (mddev->bitmap)
5578                                 return -EEXIST;
5579                         if (mddev->bitmap_info.default_offset == 0)
5580                                 return -EINVAL;
5581                         mddev->bitmap_info.offset =
5582                                 mddev->bitmap_info.default_offset;
5583                         mddev->pers->quiesce(mddev, 1);
5584                         rv = bitmap_create(mddev);
5585                         if (rv)
5586                                 bitmap_destroy(mddev);
5587                         mddev->pers->quiesce(mddev, 0);
5588                 } else {
5589                         /* remove the bitmap */
5590                         if (!mddev->bitmap)
5591                                 return -ENOENT;
5592                         if (mddev->bitmap->file)
5593                                 return -EINVAL;
5594                         mddev->pers->quiesce(mddev, 1);
5595                         bitmap_destroy(mddev);
5596                         mddev->pers->quiesce(mddev, 0);
5597                         mddev->bitmap_info.offset = 0;
5598                 }
5599         }
5600         md_update_sb(mddev, 1);
5601         return rv;
5602 }
5603
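/*
 * set_disk_faulty: mark a component device as failed via md_error,
 * so it is handled exactly like a device reporting a media error.
 */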
5604 static int set_disk_faulty(mddev_t *mddev, dev_t dev)
5605 {
5606         mdk_rdev_t *rdev;
5607
5608         if (mddev->pers == NULL)
5609                 return -ENODEV;
5610
5611         rdev = find_rdev(mddev, dev);
5612         if (!rdev)
5613                 return -ENODEV;
5614
5615         md_error(mddev, rdev);
5616         return 0;
5617 }
5618
5619 /*
5620  * We have a problem here : there is no easy way to give a CHS
5621  * virtual geometry. We currently pretend that we have a 2 heads
5622  * 4 sectors (with a BIG number of cylinders...). This drives
5623  * dosfs just mad... ;-)
5624  */
5625 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
5626 {
5627         mddev_t *mddev = bdev->bd_disk->private_data;
5628
5629         geo->heads = 2;
5630         geo->sectors = 4;
5631         geo->cylinders = mddev->array_sectors / 8;
5632         return 0;
5633 }
5634
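/*
 * md_ioctl: block-device ioctl entry point.  Driver-global commands
 * are handled before taking the mddev lock; everything that touches
 * a specific array runs under mddev_lock.
 */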
5635 static int md_ioctl(struct block_device *bdev, fmode_t mode,
5636                         unsigned int cmd, unsigned long arg)
5637 {
5638         int err = 0;
5639         void __user *argp = (void __user *)arg;
5640         mddev_t *mddev = NULL;
5641         int ro;
5642
5643         if (!capable(CAP_SYS_ADMIN))
5644                 return -EACCES;
5645
5646         /*
5647          * Commands dealing with the RAID driver but not any
5648          * particular array:
5649          */
5650         switch (cmd)
5651         {
5652                 case RAID_VERSION:
5653                         err = get_version(argp);
5654                         goto done;
5655
5656                 case PRINT_RAID_DEBUG:
5657                         err = 0;
5658                         md_print_devices();
5659                         goto done;
5660
5661 #ifndef MODULE
5662                 case RAID_AUTORUN:
5663                         err = 0;
5664                         autostart_arrays(arg);
5665                         goto done;
5666 #endif
5667                 default:;
5668         }
5669
5670         /*
5671          * Commands creating/starting a new array:
5672          */
5673
5674         mddev = bdev->bd_disk->private_data;
5675
5676         if (!mddev) {
5677                 BUG();
5678                 goto abort;
5679         }
5680
5681         err = mddev_lock(mddev);
5682         if (err) {
5683                 printk(KERN_INFO 
5684                         "md: ioctl lock interrupted, reason %d, cmd %d\n",
5685                         err, cmd);
5686                 goto abort;
5687         }
5688
5689         switch (cmd)
5690         {
5691                 case SET_ARRAY_INFO:
5692                         {
5693                                 mdu_array_info_t info;
5694                                 if (!arg)
5695                                         memset(&info, 0, sizeof(info));
5696                                 else if (copy_from_user(&info, argp, sizeof(info))) {
5697                                         err = -EFAULT;
5698                                         goto abort_unlock;
5699                                 }
5700                                 if (mddev->pers) {
5701                                         err = update_array_info(mddev, &info);
5702                                         if (err) {
5703                                                 printk(KERN_WARNING "md: couldn't update"
5704                                                        " array info. %d\n", err);
5705                                                 goto abort_unlock;
5706                                         }
5707                                         goto done_unlock;
5708                                 }
5709                                 if (!list_empty(&mddev->disks)) {
5710                                         printk(KERN_WARNING
5711                                                "md: array %s already has disks!\n",
5712                                                mdname(mddev));
5713                                         err = -EBUSY;
5714                                         goto abort_unlock;
5715                                 }
5716                                 if (mddev->raid_disks) {
5717                                         printk(KERN_WARNING
5718                                                "md: array %s already initialised!\n",
5719                                                mdname(mddev));
5720                                         err = -EBUSY;
5721                                         goto abort_unlock;
5722                                 }
5723                                 err = set_array_info(mddev, &info);
5724                                 if (err) {
5725                                         printk(KERN_WARNING "md: couldn't set"
5726                                                " array info. %d\n", err);
5727                                         goto abort_unlock;
5728                                 }
5729                         }
5730                         goto done_unlock;
5731
5732                 default:;
5733         }
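        /*
         * Illustration (hypothetical userspace sketch): an assembly
         * tool in the mold of mdadm would describe a new array here
         * before adding disks and starting it; a NULL argument is taken
         * as an all-zero mdu_array_info_t (see above).  The field
         * values below are made-up examples, with level 1 meaning
         * RAID1:
         *
         *	mdu_array_info_t info = {
         *		.major_version	= 0,
         *		.minor_version	= 90,
         *		.level		= 1,
         *		.raid_disks	= 2,
         *	};
         *	ioctl(fd, SET_ARRAY_INFO, &info);
         */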
5734
5735         /*
5736          * Commands querying/configuring an existing array:
5737          */
5738         /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
5739          * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
5740         if ((!mddev->raid_disks && !mddev->external)
5741             && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
5742             && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
5743             && cmd != GET_BITMAP_FILE) {
5744                 err = -ENODEV;
5745                 goto abort_unlock;
5746         }
5747
5748         /*
5749          * Commands even a read-only array can execute:
5750          */
5751         switch (cmd)
5752         {
5753                 case GET_ARRAY_INFO:
5754                         err = get_array_info(mddev, argp);
5755                         goto done_unlock;
5756
5757                 case GET_BITMAP_FILE:
5758                         err = get_bitmap_file(mddev, argp);
5759                         goto done_unlock;
5760
5761                 case GET_DISK_INFO:
5762                         err = get_disk_info(mddev, argp);
5763                         goto done_unlock;
5764
5765                 case RESTART_ARRAY_RW:
5766                         err = restart_array(mddev);
5767                         goto done_unlock;
5768
5769                 case STOP_ARRAY:
5770                         err = do_md_stop(mddev, 0, 1);
5771                         goto done_unlock;
5772
5773                 case STOP_ARRAY_RO:
5774                         err = md_set_readonly(mddev, 1);
5775                         goto done_unlock;
5776
5777                 case BLKROSET:
5778                         if (get_user(ro, (int __user *)(arg))) {
5779                                 err = -EFAULT;
5780                                 goto done_unlock;
5781                         }
5782                         err = -EINVAL;
5783
5784                         /* if the bdev is going readonly, the value of mddev->ro
5785                          * does not matter: no writes are coming
5786                          */
5787                         if (ro)
5788                                 goto done_unlock;
5789
5790                         /* are we already prepared for writes? */
5791                         if (mddev->ro != 1)
5792                                 goto done_unlock;
5793
5794                         /* transitioning to read-auto need only happen for
5795                          * arrays that call md_write_start
5796                          */
5797                         if (mddev->pers) {
5798                                 err = restart_array(mddev);
5799                                 if (err == 0) {
5800                                         mddev->ro = 2;
5801                                         set_disk_ro(mddev->gendisk, 0);
5802                                 }
5803                         }
5804                         goto done_unlock;
5805         }
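        /*
         * For the BLKROSET logic above: mddev->ro is 0 for read-write,
         * 1 for read-only, and 2 for "read-auto" (kept read-only until
         * the first write request arrives).  So "mddev->ro != 1" means
         * writes are already possible, and a successful restart_array()
         * deliberately lands in read-auto (ro = 2) rather than full
         * read-write.
         */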
5806
5807         /*
5808          * The remaining ioctls are changing the state of the
5809          * superblock, so we do not allow them on read-only arrays.
5810          * However non-MD ioctls (e.g. get-size) will still come through
5811          * here and hit the 'default' below, so only disallow
5812          * 'md' ioctls, and switch to rw mode if started auto-readonly.
5813          */
5814         if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) {
5815                 if (mddev->ro == 2) {
5816                         mddev->ro = 0;
5817                         sysfs_notify_dirent(mddev->sysfs_state);
5818                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5819                         md_wakeup_thread(mddev->thread);
5820                 } else {
5821                         err = -EROFS;
5822                         goto abort_unlock;
5823                 }
5824         }
5825
5826         switch (cmd)
5827         {
5828                 case ADD_NEW_DISK:
5829                 {
5830                         mdu_disk_info_t info;
5831                         if (copy_from_user(&info, argp, sizeof(info)))
5832                                 err = -EFAULT;
5833                         else
5834                                 err = add_new_disk(mddev, &info);
5835                         goto done_unlock;
5836                 }
5837
5838                 case HOT_REMOVE_DISK:
5839                         err = hot_remove_disk(mddev, new_decode_dev(arg));
5840                         goto done_unlock;
5841
5842                 case HOT_ADD_DISK:
5843                         err = hot_add_disk(mddev, new_decode_dev(arg));
5844                         goto done_unlock;
5845
5846                 case SET_DISK_FAULTY:
5847                         err = set_disk_faulty(mddev, new_decode_dev(arg));
5848                         goto done_unlock;
5849
5850                 case RUN_ARRAY:
5851                         err = do_md_run(mddev);
5852                         goto done_unlock;
5853
5854                 case SET_BITMAP_FILE:
5855                         err = set_bitmap_file(mddev, (int)arg);
5856                         goto done_unlock;
5857
5858                 default:
5859                         err = -EINVAL;
5860                         goto abort_unlock;
5861         }
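        /*
         * Illustration (hypothetical userspace sketch): unlike the
         * pointer-based commands, HOT_ADD_DISK, HOT_REMOVE_DISK and
         * SET_DISK_FAULTY carry an encoded device number directly in
         * 'arg' (decoded above via new_decode_dev()).  Hot-adding, say,
         * /dev/sdb to an array might look like:
         *
         *	#include <sys/sysmacros.h>
         *
         *	ioctl(md_fd, HOT_ADD_DISK, (unsigned long)makedev(8, 16));
         */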
5862
5863 done_unlock:
5864 abort_unlock:
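        /* A transient mddev (hold_active == UNTIL_IOCTL, e.g. one
         * created implicitly by opening a not-yet-assembled device
         * node) is pinned only until some recognised ioctl arrives;
         * -EINVAL means the command was not meaningful for md, so the
         * hold is kept in that case.
         */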
5865         if (mddev->hold_active == UNTIL_IOCTL &&
5866             err != -EINVAL)
5867                 mddev->hold_active = 0;
5868         mddev_unlock(mddev);
5869
5870         return err;
5871 done:
5872         if (err)
5873                 MD_BUG();
5874 abort:
5875         return err;
5876 }
5877 #ifdef CONFIG_COMPAT
5878 static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
5879                     unsigned int cmd, unsigned long arg)
5880 {
5881         switch (cmd) {
5882         case HOT_REMOVE_DISK:
5883         case HOT_ADD_DISK:
5884         case SET_DISK_FAULTY:
5885         case SET_BITMAP_FILE:
5886                 /* These take an integer arg; do not convert */
5887                 break;
5888         default:
5889                 arg = (unsigned long)compat_ptr(arg);
5890                 break;
5891         }
5892
5893         return md_ioctl(bdev, mode, cmd, arg);
5894 }
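/*
 * compat_ptr() turns a 32-bit user-space pointer into a proper
 * void __user * for the 64-bit kernel (a plain zero-extension on most
 * architectures), so the conversion is applied only to commands whose
 * argument actually is a pointer; the four commands singled out above
 * pass a plain integer in 'arg' and must go through unchanged.
 */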
5895 #endif /* CONFIG_COMPAT */
5896
5897 static int md_open(struct block_device *bdev, fmode_t mode)
5898 {
5899         /*
5900          * Succeed if we can lock the mddev, which confirms that
5901          * it isn't being stopped right now.
5902          */
5903         mddev_t *mddev = mddev_find(bdev->bd_dev);
5904         int err;
5905
5906         lock_kernel();
5907         if (mddev->gendisk != bdev->bd_disk) {
5908                 /* we are racing with mddev_put, which is discarding this
5909                  * bd_disk.
5910                  */
5911                 mddev_put(mddev);
5912                 /* Wait until bdev->bd_disk is definitely gone */
5913                 flush_scheduled_work();
5914                 /* Then retry the open from the top */
5915                 unlock_kernel();
5916                 return -ERESTARTSYS;
5917         }
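        /*
         * (A sketch of the race being handled: mddev_put() frees an
         * unused mddev from a work item via schedule_work(), so
         * flush_scheduled_work() above waits until the old bd_disk is
         * really gone; the block-device open path treats -ERESTARTSYS
         * from ->open as "lost a race with device teardown, retry from
         * the top".)
         */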
5918         BUG_ON(mddev != bdev->bd_disk->private_data);
5919
5920         if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
5921                 goto out;
5922
5923         err = 0;
5924         atomic_inc(&mddev->openers);