md/raid1,10: Remove use-after-free bug in make_request.
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index d1d6891263469f5c84be025c004e411ed93f2171..d7a8468ddeabc493284d5acc2f81529f9e638461 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -5,7 +5,7 @@
  *
  * RAID-10 support for md.
  *
- * Base on code in raid1.c.  See raid1.c for futher copyright information.
+ * Base on code in raid1.c.  See raid1.c for further copyright information.
  *
  *
  * This program is free software; you can redistribute it and/or modify
@@ -22,6 +22,7 @@
 #include <linux/delay.h>
 #include <linux/blkdev.h>
 #include <linux/seq_file.h>
+#include <linux/ratelimit.h>
 #include "md.h"
 #include "raid10.h"
 #include "raid0.h"
  */
 #define        NR_RAID10_BIOS 256
 
-static void unplug_slaves(mddev_t *mddev);
-
 static void allow_barrier(conf_t *conf);
 static void lower_barrier(conf_t *conf);
 
 static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
 {
        conf_t *conf = data;
-       r10bio_t *r10_bio;
        int size = offsetof(struct r10bio_s, devs[conf->copies]);
 
        /* allocate a r10bio with room for raid_disks entries in the bios array */
-       r10_bio = kzalloc(size, gfp_flags);
-       if (!r10_bio && conf->mddev)
-               unplug_slaves(conf->mddev);
-
-       return r10_bio;
+       return kzalloc(size, gfp_flags);
 }
 
 static void r10bio_pool_free(void *r10_bio, void *data)
@@ -106,10 +100,8 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
        int nalloc;
 
        r10_bio = r10bio_pool_alloc(gfp_flags, conf);
-       if (!r10_bio) {
-               unplug_slaves(conf->mddev);
+       if (!r10_bio)
                return NULL;
-       }
 
        if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
                nalloc = conf->copies; /* resync */
@@ -120,7 +112,7 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
         * Allocate bios.
         */
        for (j = nalloc ; j-- ; ) {
-               bio = bio_alloc(gfp_flags, RESYNC_PAGES);
+               bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
                if (!bio)
                        goto out_free_bio;
                r10_bio->devs[j].bio = bio;
@@ -132,7 +124,14 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
        for (j = 0 ; j < nalloc; j++) {
                bio = r10_bio->devs[j].bio;
                for (i = 0; i < RESYNC_PAGES; i++) {
-                       page = alloc_page(gfp_flags);
+                       if (j == 1 && !test_bit(MD_RECOVERY_SYNC,
+                                               &conf->mddev->recovery)) {
+                               /* we can share bv_page's during recovery */
+                               struct bio *rbio = r10_bio->devs[0].bio;
+                               page = rbio->bi_io_vec[i].bv_page;
+                               get_page(page);
+                       } else
+                               page = alloc_page(gfp_flags);
                        if (unlikely(!page))
                                goto out_free_pages;
 
@@ -182,7 +181,7 @@ static void put_all_bios(conf_t *conf, r10bio_t *r10_bio)
 
        for (i = 0; i < conf->copies; i++) {
                struct bio **bio = & r10_bio->devs[i].bio;
-               if (*bio && *bio != IO_BLOCKED)
+               if (!BIO_SPECIAL(*bio))
                        bio_put(*bio);
                *bio = NULL;
        }
@@ -192,12 +191,6 @@ static void free_r10bio(r10bio_t *r10_bio)
 {
        conf_t *conf = r10_bio->mddev->private;
 
-       /*
-        * Wake up any possible resync thread that waits for the device
-        * to go idle.
-        */
-       allow_barrier(conf);
-
        put_all_bios(conf, r10_bio);
        mempool_free(r10_bio, conf->r10bio_pool);
 }
@@ -236,9 +229,27 @@ static void reschedule_retry(r10bio_t *r10_bio)
 static void raid_end_bio_io(r10bio_t *r10_bio)
 {
        struct bio *bio = r10_bio->master_bio;
+       int done;
+       conf_t *conf = r10_bio->mddev->private;
 
-       bio_endio(bio,
-               test_bit(R10BIO_Uptodate, &r10_bio->state) ? 0 : -EIO);
+       if (bio->bi_phys_segments) {
+               unsigned long flags;
+               spin_lock_irqsave(&conf->device_lock, flags);
+               bio->bi_phys_segments--;
+               done = (bio->bi_phys_segments == 0);
+               spin_unlock_irqrestore(&conf->device_lock, flags);
+       } else
+               done = 1;
+       if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
+               clear_bit(BIO_UPTODATE, &bio->bi_flags);
+       if (done) {
+               bio_endio(bio, 0);
+               /*
+                * Wake up any possible resync thread that waits for the device
+                * to go idle.
+                */
+               allow_barrier(conf);
+       }
        free_r10bio(r10_bio);
 }
 
@@ -253,6 +264,26 @@ static inline void update_head_pos(int slot, r10bio_t *r10_bio)
                r10_bio->devs[slot].addr + (r10_bio->sectors);
 }
 
+/*
+ * Find the disk number which triggered the given bio
+ */
+static int find_bio_disk(conf_t *conf, r10bio_t *r10_bio,
+                        struct bio *bio, int *slotp)
+{
+       int slot;
+
+       for (slot = 0; slot < conf->copies; slot++)
+               if (r10_bio->devs[slot].bio == bio)
+                       break;
+
+       BUG_ON(slot == conf->copies);
+       update_head_pos(slot, r10_bio);
+
+       if (slotp)
+               *slotp = slot;
+       return r10_bio->devs[slot].devnum;
+}
+
 static void raid10_end_read_request(struct bio *bio, int error)
 {
        int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -280,41 +311,66 @@ static void raid10_end_read_request(struct bio *bio, int error)
                 */
                set_bit(R10BIO_Uptodate, &r10_bio->state);
                raid_end_bio_io(r10_bio);
+               rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
        } else {
                /*
-                * oops, read error:
+                * oops, read error - keep the refcount on the rdev
                 */
                char b[BDEVNAME_SIZE];
-               if (printk_ratelimit())
-                       printk(KERN_ERR "md/raid10:%s: %s: rescheduling sector %llu\n",
-                              mdname(conf->mddev),
-                              bdevname(conf->mirrors[dev].rdev->bdev,b), (unsigned long long)r10_bio->sector);
+               printk_ratelimited(KERN_ERR
+                                  "md/raid10:%s: %s: rescheduling sector %llu\n",
+                                  mdname(conf->mddev),
+                                  bdevname(conf->mirrors[dev].rdev->bdev, b),
+                                  (unsigned long long)r10_bio->sector);
+               set_bit(R10BIO_ReadError, &r10_bio->state);
                reschedule_retry(r10_bio);
        }
+}
 
-       rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
+static void close_write(r10bio_t *r10_bio)
+{
+       /* clear the bitmap if all writes complete successfully */
+       bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
+                       r10_bio->sectors,
+                       !test_bit(R10BIO_Degraded, &r10_bio->state),
+                       0);
+       md_write_end(r10_bio->mddev);
+}
+
+static void one_write_done(r10bio_t *r10_bio)
+{
+       if (atomic_dec_and_test(&r10_bio->remaining)) {
+               if (test_bit(R10BIO_WriteError, &r10_bio->state))
+                       reschedule_retry(r10_bio);
+               else {
+                       close_write(r10_bio);
+                       if (test_bit(R10BIO_MadeGood, &r10_bio->state))
+                               reschedule_retry(r10_bio);
+                       else
+                               raid_end_bio_io(r10_bio);
+               }
+       }
 }
 
 static void raid10_end_write_request(struct bio *bio, int error)
 {
        int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        r10bio_t *r10_bio = bio->bi_private;
-       int slot, dev;
+       int dev;
+       int dec_rdev = 1;
        conf_t *conf = r10_bio->mddev->private;
+       int slot;
 
-       for (slot = 0; slot < conf->copies; slot++)
-               if (r10_bio->devs[slot].bio == bio)
-                       break;
-       dev = r10_bio->devs[slot].devnum;
+       dev = find_bio_disk(conf, r10_bio, bio, &slot);
 
        /*
         * this branch is our 'one mirror IO has finished' event handler:
         */
        if (!uptodate) {
-               md_error(r10_bio->mddev, conf->mirrors[dev].rdev);
-               /* an I/O failed, we can't clear the bitmap */
-               set_bit(R10BIO_Degraded, &r10_bio->state);
-       } else
+               set_bit(WriteErrorSeen, &conf->mirrors[dev].rdev->flags);
+               set_bit(R10BIO_WriteError, &r10_bio->state);
+               dec_rdev = 0;
+       } else {
                /*
                 * Set R10BIO_Uptodate in our master bio, so that
                 * we will return a good error code to the higher
@@ -324,39 +380,44 @@ static void raid10_end_write_request(struct bio *bio, int error)
                 * user-side. So if something waits for IO, then it will
                 * wait for the 'master' bio.
                 */
+               sector_t first_bad;
+               int bad_sectors;
+
                set_bit(R10BIO_Uptodate, &r10_bio->state);
 
-       update_head_pos(slot, r10_bio);
+               /* Maybe we can clear some bad blocks. */
+               if (is_badblock(conf->mirrors[dev].rdev,
+                               r10_bio->devs[slot].addr,
+                               r10_bio->sectors,
+                               &first_bad, &bad_sectors)) {
+                       bio_put(bio);
+                       r10_bio->devs[slot].bio = IO_MADE_GOOD;
+                       dec_rdev = 0;
+                       set_bit(R10BIO_MadeGood, &r10_bio->state);
+               }
+       }
 
        /*
         * Let's see if all mirrored write operations have finished
         * already.
         */
-       if (atomic_dec_and_test(&r10_bio->remaining)) {
-               /* clear the bitmap if all writes complete successfully */
-               bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
-                               r10_bio->sectors,
-                               !test_bit(R10BIO_Degraded, &r10_bio->state),
-                               0);
-               md_write_end(r10_bio->mddev);
-               raid_end_bio_io(r10_bio);
-       }
-
-       rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
+       one_write_done(r10_bio);
+       if (dec_rdev)
+               rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
 }
 
 
 /*
  * RAID10 layout manager
- * Aswell as the chunksize and raid_disks count, there are two
+ * As well as the chunksize and raid_disks count, there are two
  * parameters: near_copies and far_copies.
  * near_copies * far_copies must be <= raid_disks.
  * Normally one of these will be 1.
  * If both are 1, we get raid0.
  * If near_copies == raid_disks, we get raid1.
  *
- * Chunks are layed out in raid0 style with near_copies copies of the
+ * Chunks are laid out in raid0 style with near_copies copies of the
  * first chunk, followed by near_copies copies of the next chunk and
  * so on.
  * If far_copies > 1, then after 1/far_copies of the array has been assigned
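The layout comment above compresses a lot of geometry. As a rough illustration, here is a toy model of the 'near' layout only; this is not the kernel's raid10_find_phys(), and it ignores that function's handling of wrap-around at the end of a stripe:

/* Toy model of the 'near' layout: near_copies consecutive copies of
 * each chunk, striped raid0-style across raid_disks devices.
 * Placement of copy c of logical chunk k (both 0-based).
 */
static void near_layout(int k, int c, int near_copies, int raid_disks,
                        int *disk, int *stripe)
{
        int v = k * near_copies;        /* index of the first copy */

        *disk = (v + c) % raid_disks;   /* copies on consecutive disks */
        *stripe = v / raid_disks;       /* raid0-style stripe number */
}

With raid_disks = 4 and near_copies = 2 this gives the familiar RAID-1+0 arrangement: chunk 0 on disks 0 and 1, chunk 1 on disks 2 and 3, chunk 2 back on disks 0 and 1 in the next stripe, and so on.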
@@ -494,16 +555,25 @@ static int raid10_mergeable_bvec(struct request_queue *q,
  * FIXME: possibly should rethink readbalancing and do it differently
  * depending on near_copies / far_copies geometry.
  */
-static int read_balance(conf_t *conf, r10bio_t *r10_bio)
+static int read_balance(conf_t *conf, r10bio_t *r10_bio, int *max_sectors)
 {
        const sector_t this_sector = r10_bio->sector;
-       int disk, slot, nslot;
-       const int sectors = r10_bio->sectors;
-       sector_t new_distance, current_distance;
+       int disk, slot;
+       int sectors = r10_bio->sectors;
+       int best_good_sectors;
+       sector_t new_distance, best_dist;
        mdk_rdev_t *rdev;
+       int do_balance;
+       int best_slot;
 
        raid10_find_phys(conf, r10_bio);
        rcu_read_lock();
+retry:
+       sectors = r10_bio->sectors;
+       best_slot = -1;
+       best_dist = MaxSector;
+       best_good_sectors = 0;
+       do_balance = 1;
        /*
         * Check if we can balance. We can balance on the whole
         * device if no resync is going on (recovery is ok), or below
@@ -511,123 +581,100 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio)
         * above the resync window.
         */
        if (conf->mddev->recovery_cp < MaxSector
-           && (this_sector + sectors >= conf->next_resync)) {
-               /* make sure that disk is operational */
-               slot = 0;
-               disk = r10_bio->devs[slot].devnum;
+           && (this_sector + sectors >= conf->next_resync))
+               do_balance = 0;
 
-               while ((rdev = rcu_dereference(conf->mirrors[disk].rdev)) == NULL ||
-                      r10_bio->devs[slot].bio == IO_BLOCKED ||
-                      !test_bit(In_sync, &rdev->flags)) {
-                       slot++;
-                       if (slot == conf->copies) {
-                               slot = 0;
-                               disk = -1;
-                               break;
-                       }
-                       disk = r10_bio->devs[slot].devnum;
-               }
-               goto rb_out;
-       }
+       for (slot = 0; slot < conf->copies ; slot++) {
+               sector_t first_bad;
+               int bad_sectors;
+               sector_t dev_sector;
 
-
-       /* make sure the disk is operational */
-       slot = 0;
-       disk = r10_bio->devs[slot].devnum;
-       while ((rdev=rcu_dereference(conf->mirrors[disk].rdev)) == NULL ||
-              r10_bio->devs[slot].bio == IO_BLOCKED ||
-              !test_bit(In_sync, &rdev->flags)) {
-               slot ++;
-               if (slot == conf->copies) {
-                       disk = -1;
-                       goto rb_out;
-               }
+               if (r10_bio->devs[slot].bio == IO_BLOCKED)
+                       continue;
                disk = r10_bio->devs[slot].devnum;
-       }
-
-
-       current_distance = abs(r10_bio->devs[slot].addr -
-                              conf->mirrors[disk].head_position);
-
-       /* Find the disk whose head is closest,
-        * or - for far > 1 - find the closest to partition beginning */
-
-       for (nslot = slot; nslot < conf->copies; nslot++) {
-               int ndisk = r10_bio->devs[nslot].devnum;
-
+               rdev = rcu_dereference(conf->mirrors[disk].rdev);
+               if (rdev == NULL)
+                       continue;
+               if (!test_bit(In_sync, &rdev->flags))
+                       continue;
 
-               if ((rdev=rcu_dereference(conf->mirrors[ndisk].rdev)) == NULL ||
-                   r10_bio->devs[nslot].bio == IO_BLOCKED ||
-                   !test_bit(In_sync, &rdev->flags))
+               dev_sector = r10_bio->devs[slot].addr;
+               if (is_badblock(rdev, dev_sector, sectors,
+                               &first_bad, &bad_sectors)) {
+                       if (best_dist < MaxSector)
+                               /* Already have a better slot */
+                               continue;
+                       if (first_bad <= dev_sector) {
+                               /* Cannot read here.  If this is the
+                                * 'primary' device, then we must not read
+                                * beyond 'bad_sectors' from another device.
+                                */
+                               bad_sectors -= (dev_sector - first_bad);
+                               if (!do_balance && sectors > bad_sectors)
+                                       sectors = bad_sectors;
+                               if (best_good_sectors > sectors)
+                                       best_good_sectors = sectors;
+                       } else {
+                               sector_t good_sectors =
+                                       first_bad - dev_sector;
+                               if (good_sectors > best_good_sectors) {
+                                       best_good_sectors = good_sectors;
+                                       best_slot = slot;
+                               }
+                               if (!do_balance)
+                                       /* Must read from here */
+                                       break;
+                       }
                        continue;
+               } else
+                       best_good_sectors = sectors;
+
+               if (!do_balance)
+                       break;
 
                /* This optimisation is debatable, and completely destroys
                 * sequential read speed for 'far copies' arrays.  So only
                 * keep it for 'near' arrays, and review those later.
                 */
-               if (conf->near_copies > 1 && !atomic_read(&rdev->nr_pending)) {
-                       disk = ndisk;
-                       slot = nslot;
+               if (conf->near_copies > 1 && !atomic_read(&rdev->nr_pending))
                        break;
-               }
 
                /* for far > 1 always use the lowest address */
                if (conf->far_copies > 1)
-                       new_distance = r10_bio->devs[nslot].addr;
+                       new_distance = r10_bio->devs[slot].addr;
                else
-                       new_distance = abs(r10_bio->devs[nslot].addr -
-                                          conf->mirrors[ndisk].head_position);
-               if (new_distance < current_distance) {
-                       current_distance = new_distance;
-                       disk = ndisk;
-                       slot = nslot;
+                       new_distance = abs(r10_bio->devs[slot].addr -
+                                          conf->mirrors[disk].head_position);
+               if (new_distance < best_dist) {
+                       best_dist = new_distance;
+                       best_slot = slot;
                }
        }
+       if (slot == conf->copies)
+               slot = best_slot;
 
-rb_out:
-       r10_bio->read_slot = slot;
-/*     conf->next_seq_sect = this_sector + sectors;*/
-
-       if (disk >= 0 && (rdev=rcu_dereference(conf->mirrors[disk].rdev))!= NULL)
-               atomic_inc(&conf->mirrors[disk].rdev->nr_pending);
-       else
+       if (slot >= 0) {
+               disk = r10_bio->devs[slot].devnum;
+               rdev = rcu_dereference(conf->mirrors[disk].rdev);
+               if (!rdev)
+                       goto retry;
+               atomic_inc(&rdev->nr_pending);
+               if (test_bit(Faulty, &rdev->flags)) {
+                       /* Cannot risk returning a device that failed
+                        * before we inc'ed nr_pending
+                        */
+                       rdev_dec_pending(rdev, conf->mddev);
+                       goto retry;
+               }
+               r10_bio->read_slot = slot;
+       } else
                disk = -1;
        rcu_read_unlock();
+       *max_sectors = best_good_sectors;
 
        return disk;
 }
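The retry above ("Cannot risk returning a device that failed before we inc'ed nr_pending") is an instance of a take-reference-then-recheck pattern. A minimal userspace sketch with C11 atomics; the struct and names are hypothetical stand-ins, not the kernel's rdev accounting:

#include <stdatomic.h>
#include <stdbool.h>

struct dev {
        atomic_int  nr_pending;   /* in-flight IO against this device */
        atomic_bool faulty;       /* set asynchronously on failure */
};

/* Try to pin 'd' for IO: take a reference first, then re-check the
 * failure flag.  If the device failed in the window between the check
 * that selected it and the reference, drop the reference and let the
 * caller retry with a different device.
 */
static bool pin_device(struct dev *d)
{
        atomic_fetch_add(&d->nr_pending, 1);
        if (atomic_load(&d->faulty)) {
                atomic_fetch_sub(&d->nr_pending, 1); /* undo, caller retries */
                return false;
        }
        return true;
}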
 
-static void unplug_slaves(mddev_t *mddev)
-{
-       conf_t *conf = mddev->private;
-       int i;
-
-       rcu_read_lock();
-       for (i=0; i < conf->raid_disks; i++) {
-               mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
-               if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
-                       struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
-
-                       atomic_inc(&rdev->nr_pending);
-                       rcu_read_unlock();
-
-                       blk_unplug(r_queue);
-
-                       rdev_dec_pending(rdev, mddev);
-                       rcu_read_lock();
-               }
-       }
-       rcu_read_unlock();
-}
-
-static void raid10_unplug(struct request_queue *q)
-{
-       mddev_t *mddev = q->queuedata;
-
-       unplug_slaves(q->queuedata);
-       md_wakeup_thread(mddev->thread);
-}
-
 static int raid10_congested(void *data, int bits)
 {
        mddev_t *mddev = data;
@@ -649,20 +696,16 @@ static int raid10_congested(void *data, int bits)
        return ret;
 }
 
-static int flush_pending_writes(conf_t *conf)
+static void flush_pending_writes(conf_t *conf)
 {
        /* Any writes that have been queued but are awaiting
         * bitmap updates get flushed here.
-        * We return 1 if any requests were actually submitted.
         */
-       int rv = 0;
-
        spin_lock_irq(&conf->device_lock);
 
        if (conf->pending_bio_list.head) {
                struct bio *bio;
                bio = bio_list_get(&conf->pending_bio_list);
-               blk_remove_plug(conf->mddev->queue);
                spin_unlock_irq(&conf->device_lock);
                /* flush any pending bitmap writes to disk
                 * before proceeding w/ I/O */
@@ -674,11 +717,10 @@ static int flush_pending_writes(conf_t *conf)
                        generic_make_request(bio);
                        bio = next;
                }
-               rv = 1;
        } else
                spin_unlock_irq(&conf->device_lock);
-       return rv;
 }
+
 /* Barriers....
  * Sometimes we need to suspend IO while we do something else,
  * either some resync/recovery, or reconfigure the array.
@@ -708,17 +750,15 @@ static void raise_barrier(conf_t *conf, int force)
 
        /* Wait until no block IO is waiting (unless 'force') */
        wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
-                           conf->resync_lock,
-                           raid10_unplug(conf->mddev->queue));
+                           conf->resync_lock, );
 
        /* block any new IO from starting */
        conf->barrier++;
 
-       /* No wait for all pending IO to complete */
+       /* Now wait for all pending IO to complete */
        wait_event_lock_irq(conf->wait_barrier,
                            !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
-                           conf->resync_lock,
-                           raid10_unplug(conf->mddev->queue));
+                           conf->resync_lock, );
 
        spin_unlock_irq(&conf->resync_lock);
 }
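raise_barrier()/wait_barrier() implement a two-counter exclusion protocol between normal IO and resync. A hypothetical userspace analogue using POSIX threads is sketched below; it is illustrative only, and omits the kernel version's nesting under resync_lock and the RESYNC_DEPTH bound on stacked barriers:

#include <pthread.h>

struct barrier_ctl {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        int barrier;            /* raised barriers (resync active) */
        int nr_pending;         /* in-flight normal IO */
};

#define BARRIER_CTL_INIT { PTHREAD_MUTEX_INITIALIZER, \
                           PTHREAD_COND_INITIALIZER, 0, 0 }

/* Normal IO: wait until no barrier is raised, then count ourselves in. */
static void ctl_wait_barrier(struct barrier_ctl *c)
{
        pthread_mutex_lock(&c->lock);
        while (c->barrier)
                pthread_cond_wait(&c->cond, &c->lock);
        c->nr_pending++;
        pthread_mutex_unlock(&c->lock);
}

/* Normal IO completion: drop our count and wake a waiting resync. */
static void ctl_allow_barrier(struct barrier_ctl *c)
{
        pthread_mutex_lock(&c->lock);
        c->nr_pending--;
        pthread_cond_broadcast(&c->cond);
        pthread_mutex_unlock(&c->lock);
}

/* Resync: block new IO from starting, then wait for pending IO to drain. */
static void ctl_raise_barrier(struct barrier_ctl *c)
{
        pthread_mutex_lock(&c->lock);
        c->barrier++;
        while (c->nr_pending)
                pthread_cond_wait(&c->cond, &c->lock);
        pthread_mutex_unlock(&c->lock);
}

static void ctl_lower_barrier(struct barrier_ctl *c)
{
        pthread_mutex_lock(&c->lock);
        c->barrier--;
        pthread_cond_broadcast(&c->cond);
        pthread_mutex_unlock(&c->lock);
}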
@@ -739,7 +779,7 @@ static void wait_barrier(conf_t *conf)
                conf->nr_waiting++;
                wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
                                    conf->resync_lock,
-                                   raid10_unplug(conf->mddev->queue));
+                                   );
                conf->nr_waiting--;
        }
        conf->nr_pending++;
@@ -775,8 +815,8 @@ static void freeze_array(conf_t *conf)
        wait_event_lock_irq(conf->wait_barrier,
                            conf->nr_pending == conf->nr_queued+1,
                            conf->resync_lock,
-                           ({ flush_pending_writes(conf);
-                              raid10_unplug(conf->mddev->queue); }));
+                           flush_pending_writes(conf));
+
        spin_unlock_irq(&conf->resync_lock);
 }
 
@@ -799,13 +839,16 @@ static int make_request(mddev_t *mddev, struct bio * bio)
        int i;
        int chunk_sects = conf->chunk_mask + 1;
        const int rw = bio_data_dir(bio);
-       const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
-       struct bio_list bl;
+       const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
+       const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
        unsigned long flags;
        mdk_rdev_t *blocked_rdev;
+       int plugged;
+       int sectors_handled;
+       int max_sectors;
 
-       if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
-               md_barrier_request(mddev, bio);
+       if (unlikely(bio->bi_rw & REQ_FLUSH)) {
+               md_flush_request(mddev, bio);
                return 0;
        }
 
@@ -877,19 +920,35 @@ static int make_request(mddev_t *mddev, struct bio * bio)
        r10_bio->sector = bio->bi_sector;
        r10_bio->state = 0;
 
+       /* We might need to issue multiple reads to different
+        * devices if there are bad blocks around, so we keep
+        * track of the number of reads in bio->bi_phys_segments.
+        * If this is 0, there is only one r10_bio and no locking
+        * will be needed when the request completes.  If it is
+        * non-zero, then it is the number of not-completed requests.
+        */
+       bio->bi_phys_segments = 0;
+       clear_bit(BIO_SEG_VALID, &bio->bi_flags);
+
        if (rw == READ) {
                /*
                 * read balancing logic:
                 */
-               int disk = read_balance(conf, r10_bio);
-               int slot = r10_bio->read_slot;
+               int disk;
+               int slot;
+
+read_again:
+               disk = read_balance(conf, r10_bio, &max_sectors);
+               slot = r10_bio->read_slot;
                if (disk < 0) {
                        raid_end_bio_io(r10_bio);
                        return 0;
                }
                mirror = conf->mirrors + disk;
 
-               read_bio = bio_clone(bio, GFP_NOIO);
+               read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+               md_trim_bio(read_bio, r10_bio->sector - bio->bi_sector,
+                           max_sectors);
 
                r10_bio->devs[slot].bio = read_bio;
 
@@ -897,10 +956,40 @@ static int make_request(mddev_t *mddev, struct bio * bio)
                        mirror->rdev->data_offset;
                read_bio->bi_bdev = mirror->rdev->bdev;
                read_bio->bi_end_io = raid10_end_read_request;
-               read_bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
+               read_bio->bi_rw = READ | do_sync;
                read_bio->bi_private = r10_bio;
 
-               generic_make_request(read_bio);
+               if (max_sectors < r10_bio->sectors) {
+                       /* Could not read all from this device, so we will
+                        * need another r10_bio.
+                        */
+                       sectors_handled = (r10_bio->sector + max_sectors
+                                          - bio->bi_sector);
+                       r10_bio->sectors = max_sectors;
+                       spin_lock_irq(&conf->device_lock);
+                       if (bio->bi_phys_segments == 0)
+                               bio->bi_phys_segments = 2;
+                       else
+                               bio->bi_phys_segments++;
+                       spin_unlock_irq(&conf->device_lock);
+                       /* Cannot call generic_make_request directly
+                        * as that will be queued in __generic_make_request
+                        * and subsequent mempool_alloc might block
+                        * waiting for it.  so hand bio over to raid10d.
+                        */
+                       reschedule_retry(r10_bio);
+
+                       r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
+
+                       r10_bio->master_bio = bio;
+                       r10_bio->sectors = ((bio->bi_size >> 9)
+                                           - sectors_handled);
+                       r10_bio->state = 0;
+                       r10_bio->mddev = mddev;
+                       r10_bio->sector = bio->bi_sector + sectors_handled;
+                       goto read_again;
+               } else
+                       generic_make_request(read_bio);
                return 0;
        }
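The read path above splits a request that cannot be fully served by one device, using the otherwise-unused bi_phys_segments field of the master bio to count outstanding sub-requests. A sketch of the splitting arithmetic follows; split_read() is a hypothetical helper, not kernel code, and sector_t is a stand-in typedef:

typedef unsigned long long sector_t;    /* stand-in for the kernel type */

/* Given a master request of 'total' sectors at 'start', and a
 * sub-request at 'cur' that can only cover 'max_sectors', compute how
 * much of the master bio is handled so far and where the next
 * sub-request begins.  Mirrors the sectors_handled computation in
 * make_request() above.
 */
static void split_read(sector_t start, sector_t cur, int total,
                       int max_sectors, int *handled,
                       sector_t *next_start, int *next_len)
{
        *handled = (int)(cur + max_sectors - start); /* sectors_handled */
        *next_start = start + *handled;              /* next r10_bio->sector */
        *next_len = total - *handled;                /* next r10_bio->sectors */
}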
 
@@ -910,11 +999,22 @@ static int make_request(mddev_t *mddev, struct bio * bio)
        /* first select target devices under rcu_lock and
         * inc refcount on their rdev.  Record them by setting
         * bios[x] to bio
+        * If there are known/acknowledged bad blocks on any device
+        * on which we have seen a write error, we want to avoid
+        * writing to those blocks.  This potentially requires several
+        * writes to write around the bad blocks.  Each set of writes
+        * gets its own r10_bio with a set of bios attached.  The number
+        * of r10_bios is recorded in bio->bi_phys_segments just as with
+        * the read case.
         */
+       plugged = mddev_check_plugged(mddev);
+
        raid10_find_phys(conf, r10_bio);
- retry_write:
+retry_write:
        blocked_rdev = NULL;
        rcu_read_lock();
+       max_sectors = r10_bio->sectors;
+
        for (i = 0;  i < conf->copies; i++) {
                int d = r10_bio->devs[i].devnum;
                mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[d].rdev);
@@ -923,13 +1023,55 @@ static int make_request(mddev_t *mddev, struct bio * bio)
                        blocked_rdev = rdev;
                        break;
                }
-               if (rdev && !test_bit(Faulty, &rdev->flags)) {
-                       atomic_inc(&rdev->nr_pending);
-                       r10_bio->devs[i].bio = bio;
-               } else {
-                       r10_bio->devs[i].bio = NULL;
+               r10_bio->devs[i].bio = NULL;
+               if (!rdev || test_bit(Faulty, &rdev->flags)) {
                        set_bit(R10BIO_Degraded, &r10_bio->state);
+                       continue;
+               }
+               if (test_bit(WriteErrorSeen, &rdev->flags)) {
+                       sector_t first_bad;
+                       sector_t dev_sector = r10_bio->devs[i].addr;
+                       int bad_sectors;
+                       int is_bad;
+
+                       is_bad = is_badblock(rdev, dev_sector,
+                                            max_sectors,
+                                            &first_bad, &bad_sectors);
+                       if (is_bad < 0) {
+                               /* Mustn't write here until the bad block
+                                * is acknowledged
+                                */
+                               atomic_inc(&rdev->nr_pending);
+                               set_bit(BlockedBadBlocks, &rdev->flags);
+                               blocked_rdev = rdev;
+                               break;
+                       }
+                       if (is_bad && first_bad <= dev_sector) {
+                               /* Cannot write here at all */
+                               bad_sectors -= (dev_sector - first_bad);
+                               if (bad_sectors < max_sectors)
+                                       /* Mustn't write more than bad_sectors
+                                        * to other devices yet
+                                        */
+                                       max_sectors = bad_sectors;
+                               /* We don't set R10BIO_Degraded as that
+                                * only applies if the disk is missing,
+                                * so it might be re-added, and we want to
+                                * know to recover this chunk.
+                                * In this case the device is here, and the
+                                * fact that this chunk is not in-sync is
+                                * recorded in the bad block log.
+                                */
+                               continue;
+                       }
+                       if (is_bad) {
+                               int good_sectors = first_bad - dev_sector;
+                               if (good_sectors < max_sectors)
+                                       max_sectors = good_sectors;
+                       }
                }
+               r10_bio->devs[i].bio = bio;
+               atomic_inc(&rdev->nr_pending);
        }
        rcu_read_unlock();
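The bad-block clipping above has two cases worth a worked example. Below is a hypothetical helper condensing them; it is not kernel code, and uses unsigned long long in place of sector_t:

/* Clip a write of 'max_sectors' at 'dev_sector' against a known bad
 * range [first_bad, first_bad + bad_sectors), as the retry_write loop
 * above does.
 */
static int clip_write_len(unsigned long long dev_sector, int max_sectors,
                          unsigned long long first_bad, int bad_sectors)
{
        if (first_bad <= dev_sector) {
                /* Request starts inside the bad range: this copy cannot
                 * be written at all, and no other copy may be written
                 * beyond the end of the range.
                 */
                int remaining = bad_sectors - (int)(dev_sector - first_bad);
                return remaining < max_sectors ? remaining : max_sectors;
        }
        /* Bad range starts later: only the good prefix is writable. */
        if ((unsigned long long)max_sectors > first_bad - dev_sector)
                max_sectors = (int)(first_bad - dev_sector);
        return max_sectors;
}

For example, a 64-sector write at sector 100 against a bad range starting at sector 120 is clipped to 20 sectors; the remaining 44 go around the retry_write loop as a second r10_bio.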
 
@@ -949,48 +1091,73 @@ static int make_request(mddev_t *mddev, struct bio * bio)
                goto retry_write;
        }
 
-       atomic_set(&r10_bio->remaining, 0);
+       if (max_sectors < r10_bio->sectors) {
+               /* We are splitting this into multiple parts, so
+                * we need to prepare for allocating another r10_bio.
+                */
+               r10_bio->sectors = max_sectors;
+               spin_lock_irq(&conf->device_lock);
+               if (bio->bi_phys_segments == 0)
+                       bio->bi_phys_segments = 2;
+               else
+                       bio->bi_phys_segments++;
+               spin_unlock_irq(&conf->device_lock);
+       }
+       sectors_handled = r10_bio->sector + max_sectors - bio->bi_sector;
+
+       atomic_set(&r10_bio->remaining, 1);
+       bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
 
-       bio_list_init(&bl);
        for (i = 0; i < conf->copies; i++) {
                struct bio *mbio;
                int d = r10_bio->devs[i].devnum;
                if (!r10_bio->devs[i].bio)
                        continue;
 
-               mbio = bio_clone(bio, GFP_NOIO);
+               mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+               md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
+                           max_sectors);
                r10_bio->devs[i].bio = mbio;
 
-               mbio->bi_sector = r10_bio->devs[i].addr+
-                       conf->mirrors[d].rdev->data_offset;
+               mbio->bi_sector = (r10_bio->devs[i].addr+
+                                  conf->mirrors[d].rdev->data_offset);
                mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
                mbio->bi_end_io = raid10_end_write_request;
-               mbio->bi_rw = WRITE | (do_sync << BIO_RW_SYNCIO);
+               mbio->bi_rw = WRITE | do_sync | do_fua;
                mbio->bi_private = r10_bio;
 
                atomic_inc(&r10_bio->remaining);
-               bio_list_add(&bl, mbio);
+               spin_lock_irqsave(&conf->device_lock, flags);
+               bio_list_add(&conf->pending_bio_list, mbio);
+               spin_unlock_irqrestore(&conf->device_lock, flags);
        }
 
-       if (unlikely(!atomic_read(&r10_bio->remaining))) {
-               /* the array is dead */
-               md_write_end(mddev);
-               raid_end_bio_io(r10_bio);
-               return 0;
-       }
+       /* Don't remove the bias on 'remaining' (one_write_done) until
+        * after checking if we need to go around again.
+        */
 
-       bitmap_startwrite(mddev->bitmap, bio->bi_sector, r10_bio->sectors, 0);
-       spin_lock_irqsave(&conf->device_lock, flags);
-       bio_list_merge(&conf->pending_bio_list, &bl);
-       blk_plug_device(mddev->queue);
-       spin_unlock_irqrestore(&conf->device_lock, flags);
+       if (sectors_handled < (bio->bi_size >> 9)) {
+               one_write_done(r10_bio);
+               /* We need another r10_bio.  It has already been counted
+                * in bio->bi_phys_segments.
+                */
+               r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
+
+               r10_bio->master_bio = bio;
+               r10_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
+
+               r10_bio->mddev = mddev;
+               r10_bio->sector = bio->bi_sector + sectors_handled;
+               r10_bio->state = 0;
+               goto retry_write;
+       }
+       one_write_done(r10_bio);
 
        /* In case raid10d snuck in to freeze_array */
        wake_up(&conf->wait_barrier);
 
-       if (do_sync)
+       if (do_sync || !mddev->bitmap || !plugged)
                md_wakeup_thread(mddev->thread);
-
        return 0;
 }
 
@@ -1018,6 +1185,30 @@ static void status(struct seq_file *seq, mddev_t *mddev)
        seq_printf(seq, "]");
 }
 
+/* check if there are enough drives for
+ * every block to appear on at least one.
+ * Don't consider the device numbered 'ignore'
+ * as we might be about to remove it.
+ */
+static int enough(conf_t *conf, int ignore)
+{
+       int first = 0;
+
+       do {
+               int n = conf->copies;
+               int cnt = 0;
+               while (n--) {
+                       if (conf->mirrors[first].rdev &&
+                           first != ignore)
+                               cnt++;
+                       first = (first+1) % conf->raid_disks;
+               }
+               if (cnt == 0)
+                       return 0;
+       } while (first != 0);
+       return 1;
+}
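The walk in enough() relies on the copies of any block living on conf->copies consecutive devices. A standalone sketch of the same loop, substituting a hypothetical 'present' array for the rdev pointers:

/* Returns 1 if every group of 'copies' consecutive devices still has
 * at least one working member, 0 otherwise.  'present[i]' is nonzero
 * when device i is working (a stand-in for mirrors[i].rdev != NULL).
 */
static int enough_demo(const int *present, int raid_disks, int copies)
{
        int first = 0;
        do {
                int n = copies, cnt = 0;
                while (n--) {
                        if (present[first])
                                cnt++;
                        first = (first + 1) % raid_disks;
                }
                if (cnt == 0)
                        return 0;
        } while (first != 0);
        return 1;
}

With raid_disks = 4 and copies = 2 the loop checks groups {0,1} and {2,3}; losing both disks of either group means some blocks have no surviving copy, so enough() returns 0.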
+
 static void error(mddev_t *mddev, mdk_rdev_t *rdev)
 {
        char b[BDEVNAME_SIZE];
@@ -1030,13 +1221,9 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
         * else mark the drive as failed
         */
        if (test_bit(In_sync, &rdev->flags)
-           && conf->raid_disks-mddev->degraded == 1)
+           && !enough(conf, rdev->raid_disk))
                /*
                 * Don't fail the drive, just return an IO error.
-                * The test should really be more sophisticated than
-                * "working_disks == 1", but it isn't critical, and
-                * can wait until we do more sophisticated "is the drive
-                * really dead" tests...
                 */
                return;
        if (test_and_clear_bit(In_sync, &rdev->flags)) {
@@ -1049,10 +1236,12 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
                 */
                set_bit(MD_RECOVERY_INTR, &mddev->recovery);
        }
+       set_bit(Blocked, &rdev->flags);
        set_bit(Faulty, &rdev->flags);
        set_bit(MD_CHANGE_DEVS, &mddev->flags);
-       printk(KERN_ALERT "md/raid10:%s: Disk failure on %s, disabling device.\n"
-              KERN_ALERT "md/raid10:%s: Operation continuing on %d devices.\n",
+       printk(KERN_ALERT
+              "md/raid10:%s: Disk failure on %s, disabling device.\n"
+              "md/raid10:%s: Operation continuing on %d devices.\n",
               mdname(mddev), bdevname(rdev->bdev, b),
               mdname(mddev), conf->raid_disks - mddev->degraded);
 }
@@ -1090,32 +1279,13 @@ static void close_sync(conf_t *conf)
        conf->r10buf_pool = NULL;
 }
 
-/* check if there are enough drives for
- * every block to appear on atleast one
- */
-static int enough(conf_t *conf)
-{
-       int first = 0;
-
-       do {
-               int n = conf->copies;
-               int cnt = 0;
-               while (n--) {
-                       if (conf->mirrors[first].rdev)
-                               cnt++;
-                       first = (first+1) % conf->raid_disks;
-               }
-               if (cnt == 0)
-                       return 0;
-       } while (first != 0);
-       return 1;
-}
-
 static int raid10_spare_active(mddev_t *mddev)
 {
        int i;
        conf_t *conf = mddev->private;
        mirror_info_t *tmp;
+       int count = 0;
+       unsigned long flags;
 
        /*
         * Find all non-in_sync disks within the RAID10 configuration
@@ -1126,15 +1296,16 @@ static int raid10_spare_active(mddev_t *mddev)
                if (tmp->rdev
                    && !test_bit(Faulty, &tmp->rdev->flags)
                    && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
-                       unsigned long flags;
-                       spin_lock_irqsave(&conf->device_lock, flags);
-                       mddev->degraded--;
-                       spin_unlock_irqrestore(&conf->device_lock, flags);
+                       count++;
+                       sysfs_notify_dirent(tmp->rdev->sysfs_state);
                }
        }
+       spin_lock_irqsave(&conf->device_lock, flags);
+       mddev->degraded -= count;
+       spin_unlock_irqrestore(&conf->device_lock, flags);
 
        print_conf(conf);
-       return 0;
+       return count;
 }
 
 
@@ -1143,7 +1314,6 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
        conf_t *conf = mddev->private;
        int err = -EEXIST;
        int mirror;
-       mirror_info_t *p;
        int first = 0;
        int last = conf->raid_disks - 1;
 
@@ -1152,44 +1322,47 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
                 * very different from resync
                 */
                return -EBUSY;
-       if (!enough(conf))
+       if (!enough(conf, -1))
                return -EINVAL;
 
        if (rdev->raid_disk >= 0)
                first = last = rdev->raid_disk;
 
-       if (rdev->saved_raid_disk >= 0 &&
-           rdev->saved_raid_disk >= first &&
+       if (rdev->saved_raid_disk >= first &&
            conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
                mirror = rdev->saved_raid_disk;
        else
                mirror = first;
-       for ( ; mirror <= last ; mirror++)
-               if ( !(p=conf->mirrors+mirror)->rdev) {
-
-                       disk_stack_limits(mddev->gendisk, rdev->bdev,
-                                         rdev->data_offset << 9);
-                       /* as we don't honour merge_bvec_fn, we must
-                        * never risk violating it, so limit
-                        * ->max_segments to one lying with a single
-                        * page, as a one page request is never in
-                        * violation.
-                        */
-                       if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
-                               blk_queue_max_segments(mddev->queue, 1);
-                               blk_queue_segment_boundary(mddev->queue,
-                                                          PAGE_CACHE_SIZE - 1);
-                       }
+       for ( ; mirror <= last ; mirror++) {
+               mirror_info_t *p = &conf->mirrors[mirror];
+               if (p->recovery_disabled == mddev->recovery_disabled)
+                       continue;
+               if (p->rdev)
+                       continue;
 
-                       p->head_position = 0;
-                       rdev->raid_disk = mirror;
-                       err = 0;
-                       if (rdev->saved_raid_disk != mirror)
-                               conf->fullsync = 1;
-                       rcu_assign_pointer(p->rdev, rdev);
-                       break;
+               disk_stack_limits(mddev->gendisk, rdev->bdev,
+                                 rdev->data_offset << 9);
+               /* as we don't honour merge_bvec_fn, we must
+                * never risk violating it, so limit
+                * ->max_segments to one lying with a single
+                * page, as a one page request is never in
+                * violation.
+                */
+               if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
+                       blk_queue_max_segments(mddev->queue, 1);
+                       blk_queue_segment_boundary(mddev->queue,
+                                                  PAGE_CACHE_SIZE - 1);
                }
 
+               p->head_position = 0;
+               rdev->raid_disk = mirror;
+               err = 0;
+               if (rdev->saved_raid_disk != mirror)
+                       conf->fullsync = 1;
+               rcu_assign_pointer(p->rdev, rdev);
+               break;
+       }
+
        md_integrity_add_rdev(rdev, mddev);
        print_conf(conf);
        return err;
@@ -1214,7 +1387,8 @@ static int raid10_remove_disk(mddev_t *mddev, int number)
                 * is not possible.
                 */
                if (!test_bit(Faulty, &rdev->flags) &&
-                   enough(conf)) {
+                   mddev->recovery_disabled != p->recovery_disabled &&
+                   enough(conf, -1)) {
                        err = -EBUSY;
                        goto abort;
                }
@@ -1226,7 +1400,7 @@ static int raid10_remove_disk(mddev_t *mddev, int number)
                        p->rdev = rdev;
                        goto abort;
                }
-               md_integrity_register(mddev);
+               err = md_integrity_register(mddev);
        }
 abort:
 
@@ -1239,24 +1413,18 @@ static void end_sync_read(struct bio *bio, int error)
 {
        r10bio_t *r10_bio = bio->bi_private;
        conf_t *conf = r10_bio->mddev->private;
-       int i,d;
+       int d;
 
-       for (i=0; i<conf->copies; i++)
-               if (r10_bio->devs[i].bio == bio)
-                       break;
-       BUG_ON(i == conf->copies);
-       update_head_pos(i, r10_bio);
-       d = r10_bio->devs[i].devnum;
+       d = find_bio_disk(conf, r10_bio, bio, NULL);
 
        if (test_bit(BIO_UPTODATE, &bio->bi_flags))
                set_bit(R10BIO_Uptodate, &r10_bio->state);
-       else {
+       else
+               /* The write handler will notice the lack of
+                * R10BIO_Uptodate and record any errors etc
+                */
                atomic_add(r10_bio->sectors,
                           &conf->mirrors[d].rdev->corrected_errors);
-               if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
-                       md_error(r10_bio->mddev,
-                                conf->mirrors[d].rdev);
-       }
 
        /* for reconstruct, we always reschedule after a read.
         * for resync, only after all reads
@@ -1271,40 +1439,60 @@ static void end_sync_read(struct bio *bio, int error)
        }
 }
 
-static void end_sync_write(struct bio *bio, int error)
+static void end_sync_request(r10bio_t *r10_bio)
 {
-       int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-       r10bio_t *r10_bio = bio->bi_private;
        mddev_t *mddev = r10_bio->mddev;
-       conf_t *conf = mddev->private;
-       int i,d;
-
-       for (i = 0; i < conf->copies; i++)
-               if (r10_bio->devs[i].bio == bio)
-                       break;
-       d = r10_bio->devs[i].devnum;
 
-       if (!uptodate)
-               md_error(mddev, conf->mirrors[d].rdev);
-
-       update_head_pos(i, r10_bio);
-
-       rdev_dec_pending(conf->mirrors[d].rdev, mddev);
        while (atomic_dec_and_test(&r10_bio->remaining)) {
                if (r10_bio->master_bio == NULL) {
                        /* the primary of several recovery bios */
                        sector_t s = r10_bio->sectors;
-                       put_buf(r10_bio);
+                       if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
+                           test_bit(R10BIO_WriteError, &r10_bio->state))
+                               reschedule_retry(r10_bio);
+                       else
+                               put_buf(r10_bio);
                        md_done_sync(mddev, s, 1);
                        break;
                } else {
                        r10bio_t *r10_bio2 = (r10bio_t *)r10_bio->master_bio;
-                       put_buf(r10_bio);
+                       if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
+                           test_bit(R10BIO_WriteError, &r10_bio->state))
+                               reschedule_retry(r10_bio);
+                       else
+                               put_buf(r10_bio);
                        r10_bio = r10_bio2;
                }
        }
 }
 
+static void end_sync_write(struct bio *bio, int error)
+{
+       int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
+       r10bio_t *r10_bio = bio->bi_private;
+       mddev_t *mddev = r10_bio->mddev;
+       conf_t *conf = mddev->private;
+       int d;
+       sector_t first_bad;
+       int bad_sectors;
+       int slot;
+
+       d = find_bio_disk(conf, r10_bio, bio, &slot);
+
+       if (!uptodate) {
+               set_bit(WriteErrorSeen, &conf->mirrors[d].rdev->flags);
+               set_bit(R10BIO_WriteError, &r10_bio->state);
+       } else if (is_badblock(conf->mirrors[d].rdev,
+                            r10_bio->devs[slot].addr,
+                            r10_bio->sectors,
+                            &first_bad, &bad_sectors))
+               set_bit(R10BIO_MadeGood, &r10_bio->state);
+
+       rdev_dec_pending(conf->mirrors[d].rdev, mddev);
+
+       end_sync_request(r10_bio);
+}
+
 /*
 * Note: sync and recover are handled very differently for raid10
  * This code is for resync.
@@ -1364,11 +1552,12 @@ static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio)
                        if (j == vcnt)
                                continue;
                        mddev->resync_mismatches += r10_bio->sectors;
+                       if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
+                               /* Don't fix anything. */
+                               continue;
                }
-               if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
-                       /* Don't fix anything. */
-                       continue;
-               /* Ok, we need to write this bio
+               /* Ok, we need to write this bio, either to correct an
+                * inconsistency or to correct an unreadable block.
                 * First we need to fixup bv_offset, bv_len and
                 * bi_vecs, as the read request might have corrupted these
                 */
@@ -1420,32 +1609,107 @@ done:
  * The second for writing.
  *
  */
+static void fix_recovery_read_error(r10bio_t *r10_bio)
+{
+       /* We got a read error during recovery.
+        * We repeat the read in smaller page-sized sections.
+        * If a read succeeds, write it to the new device or record
+        * a bad block if we cannot.
+        * If a read fails, record a bad block on both old and
+        * new devices.
+        */
+       mddev_t *mddev = r10_bio->mddev;
+       conf_t *conf = mddev->private;
+       struct bio *bio = r10_bio->devs[0].bio;
+       sector_t sect = 0;
+       int sectors = r10_bio->sectors;
+       int idx = 0;
+       int dr = r10_bio->devs[0].devnum;
+       int dw = r10_bio->devs[1].devnum;
+
+       while (sectors) {
+               int s = sectors;
+               mdk_rdev_t *rdev;
+               sector_t addr;
+               int ok;
+
+               if (s > (PAGE_SIZE>>9))
+                       s = PAGE_SIZE >> 9;
+
+               rdev = conf->mirrors[dr].rdev;
+               addr = r10_bio->devs[0].addr + sect;
+               ok = sync_page_io(rdev,
+                                 addr,
+                                 s << 9,
+                                 bio->bi_io_vec[idx].bv_page,
+                                 READ, false);
+               if (ok) {
+                       rdev = conf->mirrors[dw].rdev;
+                       addr = r10_bio->devs[1].addr + sect;
+                       ok = sync_page_io(rdev,
+                                         addr,
+                                         s << 9,
+                                         bio->bi_io_vec[idx].bv_page,
+                                         WRITE, false);
+                       if (!ok)
+                               set_bit(WriteErrorSeen, &rdev->flags);
+               }
+               if (!ok) {
+                       /* We don't worry if we cannot set a bad block -
+                        * it really is bad so there is no loss in not
+                        * recording it yet
+                        */
+                       rdev_set_badblocks(rdev, addr, s, 0);
+
+                       if (rdev != conf->mirrors[dw].rdev) {
+                               /* need bad block on destination too */
+                               mdk_rdev_t *rdev2 = conf->mirrors[dw].rdev;
+                               addr = r10_bio->devs[1].addr + sect;
+                               ok = rdev_set_badblocks(rdev2, addr, s, 0);
+                               if (!ok) {
+                                       /* just abort the recovery */
+                                       printk(KERN_NOTICE
+                                              "md/raid10:%s: recovery aborted"
+                                              " due to read error\n",
+                                              mdname(mddev));
+
+                                       conf->mirrors[dw].recovery_disabled
+                                               = mddev->recovery_disabled;
+                                       set_bit(MD_RECOVERY_INTR,
+                                               &mddev->recovery);
+                                       break;
+                               }
+                       }
+               }
+
+               sectors -= s;
+               sect += s;
+               idx++;
+       }
+}
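The retry loop above works through the failed range one bio_vec page at a time. A minimal sketch of that chunking, assuming a 4 KiB page (so PAGE_SIZE >> 9 = 8 sectors); the helper name is hypothetical:

#define SECTOR_SHIFT   9
#define PAGE_SECTORS   (4096 >> SECTOR_SHIFT)  /* assumes 4 KiB pages */

/* Walk 'sectors' in page-sized steps, as fix_recovery_read_error()
 * does, returning how many page-sized IOs (and hence bio_vec index
 * increments) the loop performs.
 */
static int recovery_chunks(int sectors)
{
        int idx = 0;

        while (sectors) {
                int s = sectors;

                if (s > PAGE_SECTORS)
                        s = PAGE_SECTORS;
                sectors -= s;
                idx++;
        }
        return idx;
}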
 
 static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio)
 {
        conf_t *conf = mddev->private;
-       int i, d;
-       struct bio *bio, *wbio;
+       int d;
+       struct bio *wbio;
 
+       if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
+               fix_recovery_read_error(r10_bio);
+               end_sync_request(r10_bio);
+               return;
+       }
 
-       /* move the pages across to the second bio
+       /*
+        * share the pages with the first bio
         * and submit the write request
         */
-       bio = r10_bio->devs[0].bio;
        wbio = r10_bio->devs[1].bio;
-       for (i=0; i < wbio->bi_vcnt; i++) {
-               struct page *p = bio->bi_io_vec[i].bv_page;
-               bio->bi_io_vec[i].bv_page = wbio->bi_io_vec[i].bv_page;
-               wbio->bi_io_vec[i].bv_page = p;
-       }
        d = r10_bio->devs[1].devnum;
 
        atomic_inc(&conf->mirrors[d].rdev->nr_pending);
        md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
-       if (test_bit(R10BIO_Uptodate, &r10_bio->state))
-               generic_make_request(wbio);
-       else
-               bio_endio(wbio, -EIO);
+       generic_make_request(wbio);
 }
 
 
@@ -1486,6 +1750,26 @@ static void check_decay_read_errors(mddev_t *mddev, mdk_rdev_t *rdev)
                atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
 }
 
+static int r10_sync_page_io(mdk_rdev_t *rdev, sector_t sector,
+                           int sectors, struct page *page, int rw)
+{
+       sector_t first_bad;
+       int bad_sectors;
+
+       if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
+           && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags)))
+               return -1;
+       if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
+               /* success */
+               return 1;
+       if (rw == WRITE)
+               set_bit(WriteErrorSeen, &rdev->flags);
+       /* need to record an error - either for the block or the device */
+       if (!rdev_set_badblocks(rdev, sector, sectors, 0))
+               md_error(rdev->mddev, rdev);
+       return 0;
+}
+
 /*
  * This is a kernel thread which:
  *
@@ -1502,40 +1786,33 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
        int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
        int d = r10_bio->devs[r10_bio->read_slot].devnum;
 
-       rcu_read_lock();
-       rdev = rcu_dereference(conf->mirrors[d].rdev);
-       if (rdev) { /* If rdev is not NULL */
-               char b[BDEVNAME_SIZE];
-               int cur_read_error_count = 0;
+       /* still own a reference to this rdev, so it cannot
+        * have been cleared recently.
+        */
+       rdev = conf->mirrors[d].rdev;
 
-               bdevname(rdev->bdev, b);
+       if (test_bit(Faulty, &rdev->flags))
+               /* drive has already been failed, just ignore any
+                  more fix_read_error() attempts */
+               return;
 
-               if (test_bit(Faulty, &rdev->flags)) {
-                       rcu_read_unlock();
-                       /* drive has already been failed, just ignore any
-                          more fix_read_error() attempts */
-                       return;
-               }
+       check_decay_read_errors(mddev, rdev);
+       atomic_inc(&rdev->read_errors);
+       if (atomic_read(&rdev->read_errors) > max_read_errors) {
+               char b[BDEVNAME_SIZE];
+               bdevname(rdev->bdev, b);
 
-               check_decay_read_errors(mddev, rdev);
-               atomic_inc(&rdev->read_errors);
-               cur_read_error_count = atomic_read(&rdev->read_errors);
-               if (cur_read_error_count > max_read_errors) {
-                       rcu_read_unlock();
-                       printk(KERN_NOTICE
-                              "md/raid10:%s: %s: Raid device exceeded "
-                              "read_error threshold "
-                              "[cur %d:max %d]\n",
-                              mdname(mddev),
-                              b, cur_read_error_count, max_read_errors);
-                       printk(KERN_NOTICE
-                              "md/raid10:%s: %s: Failing raid "
-                              "device\n", mdname(mddev), b);
-                       md_error(mddev, conf->mirrors[d].rdev);
-                       return;
-               }
+               printk(KERN_NOTICE
+                      "md/raid10:%s: %s: Raid device exceeded "
+                      "read_error threshold [cur %d:max %d]\n",
+                      mdname(mddev), b,
+                      atomic_read(&rdev->read_errors), max_read_errors);
+               printk(KERN_NOTICE
+                      "md/raid10:%s: %s: Failing raid device\n",
+                      mdname(mddev), b);
+               md_error(mddev, conf->mirrors[d].rdev);
+               return;
        }
-       rcu_read_unlock();
 
        while(sectors) {
                int s = sectors;
@@ -1548,17 +1825,22 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
 
                rcu_read_lock();
                do {
+                       sector_t first_bad;
+                       int bad_sectors;
+
                        d = r10_bio->devs[sl].devnum;
                        rdev = rcu_dereference(conf->mirrors[d].rdev);
                        if (rdev &&
-                           test_bit(In_sync, &rdev->flags)) {
+                           test_bit(In_sync, &rdev->flags) &&
+                           is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
+                                       &first_bad, &bad_sectors) == 0) {
                                atomic_inc(&rdev->nr_pending);
                                rcu_read_unlock();
-                               success = sync_page_io(rdev->bdev,
+                               success = sync_page_io(rdev,
                                                       r10_bio->devs[sl].addr +
-                                                      sect + rdev->data_offset,
+                                                      sect,
                                                       s<<9,
-                                                      conf->tmppage, READ);
+                                                      conf->tmppage, READ, false);
                                rdev_dec_pending(rdev, mddev);
                                rcu_read_lock();
                                if (success)
@@ -1571,9 +1853,19 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
                rcu_read_unlock();
 
                if (!success) {
-                       /* Cannot read from anywhere -- bye bye array */
+                       /* Cannot read from anywhere, so mark the block
+                        * as bad on the device the read used, to
+                        * discourage future reads.
+                        */
                        int dn = r10_bio->devs[r10_bio->read_slot].devnum;
-                       md_error(mddev, conf->mirrors[dn].rdev);
+                       rdev = conf->mirrors[dn].rdev;
+
+                       if (!rdev_set_badblocks(
+                                   rdev,
+                                   r10_bio->devs[r10_bio->read_slot].addr
+                                   + sect,
+                                   s, 0))
+                               md_error(mddev, rdev);
                        break;
                }
 
@@ -1588,80 +1880,82 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
                        sl--;
                        d = r10_bio->devs[sl].devnum;
                        rdev = rcu_dereference(conf->mirrors[d].rdev);
-                       if (rdev &&
-                           test_bit(In_sync, &rdev->flags)) {
-                               atomic_inc(&rdev->nr_pending);
-                               rcu_read_unlock();
-                               atomic_add(s, &rdev->corrected_errors);
-                               if (sync_page_io(rdev->bdev,
-                                                r10_bio->devs[sl].addr +
-                                                sect + rdev->data_offset,
-                                                s<<9, conf->tmppage, WRITE)
-                                   == 0) {
-                                       /* Well, this device is dead */
-                                       printk(KERN_NOTICE
-                                              "md/raid10:%s: read correction "
-                                              "write failed"
-                                              " (%d sectors at %llu on %s)\n",
-                                              mdname(mddev), s,
-                                              (unsigned long long)(sect+
-                                              rdev->data_offset),
-                                              bdevname(rdev->bdev, b));
-                                       printk(KERN_NOTICE "md/raid10:%s: %s: failing "
-                                              "drive\n",
-                                              mdname(mddev),
-                                              bdevname(rdev->bdev, b));
-                                       md_error(mddev, rdev);
-                               }
-                               rdev_dec_pending(rdev, mddev);
-                               rcu_read_lock();
+                       if (!rdev ||
+                           !test_bit(In_sync, &rdev->flags))
+                               continue;
+
+                       atomic_inc(&rdev->nr_pending);
+                       rcu_read_unlock();
+                       if (r10_sync_page_io(rdev,
+                                            r10_bio->devs[sl].addr +
+                                            sect,
+                                            s, conf->tmppage, WRITE)
+                           == 0) {
+                               /* Well, this device is dead */
+                               printk(KERN_NOTICE
+                                      "md/raid10:%s: read correction "
+                                      "write failed"
+                                      " (%d sectors at %llu on %s)\n",
+                                      mdname(mddev), s,
+                                      (unsigned long long)(
+                                              sect + rdev->data_offset),
+                                      bdevname(rdev->bdev, b));
+                               printk(KERN_NOTICE "md/raid10:%s: %s: failing "
+                                      "drive\n",
+                                      mdname(mddev),
+                                      bdevname(rdev->bdev, b));
                        }
+                       rdev_dec_pending(rdev, mddev);
+                       rcu_read_lock();
                }
                sl = start;
                while (sl != r10_bio->read_slot) {
+                       char b[BDEVNAME_SIZE];
 
                        if (sl==0)
                                sl = conf->copies;
                        sl--;
                        d = r10_bio->devs[sl].devnum;
                        rdev = rcu_dereference(conf->mirrors[d].rdev);
-                       if (rdev &&
-                           test_bit(In_sync, &rdev->flags)) {
-                               char b[BDEVNAME_SIZE];
-                               atomic_inc(&rdev->nr_pending);
-                               rcu_read_unlock();
-                               if (sync_page_io(rdev->bdev,
-                                                r10_bio->devs[sl].addr +
-                                                sect + rdev->data_offset,
-                                                s<<9, conf->tmppage,
-                                                READ) == 0) {
-                                       /* Well, this device is dead */
-                                       printk(KERN_NOTICE
-                                              "md/raid10:%s: unable to read back "
-                                              "corrected sectors"
-                                              " (%d sectors at %llu on %s)\n",
-                                              mdname(mddev), s,
-                                              (unsigned long long)(sect+
-                                                   rdev->data_offset),
-                                              bdevname(rdev->bdev, b));
-                                       printk(KERN_NOTICE "md/raid10:%s: %s: failing drive\n",
-                                              mdname(mddev),
-                                              bdevname(rdev->bdev, b));
-
-                                       md_error(mddev, rdev);
-                               } else {
-                                       printk(KERN_INFO
-                                              "md/raid10:%s: read error corrected"
-                                              " (%d sectors at %llu on %s)\n",
-                                              mdname(mddev), s,
-                                              (unsigned long long)(sect+
-                                                   rdev->data_offset),
-                                              bdevname(rdev->bdev, b));
-                               }
+                       if (!rdev ||
+                           !test_bit(In_sync, &rdev->flags))
+                               continue;
 
-                               rdev_dec_pending(rdev, mddev);
-                               rcu_read_lock();
+                       atomic_inc(&rdev->nr_pending);
+                       rcu_read_unlock();
+                       switch (r10_sync_page_io(rdev,
+                                            r10_bio->devs[sl].addr +
+                                            sect,
+                                            s, conf->tmppage,
+                                            READ)) {
+                       case 0:
+                               /* Well, this device is dead */
+                               printk(KERN_NOTICE
+                                      "md/raid10:%s: unable to read back "
+                                      "corrected sectors"
+                                      " (%d sectors at %llu on %s)\n",
+                                      mdname(mddev), s,
+                                      (unsigned long long)(
+                                              sect + rdev->data_offset),
+                                      bdevname(rdev->bdev, b));
+                               printk(KERN_NOTICE "md/raid10:%s: %s: failing "
+                                      "drive\n",
+                                      mdname(mddev),
+                                      bdevname(rdev->bdev, b));
+                               break;
+                       case 1:
+                               printk(KERN_INFO
+                                      "md/raid10:%s: read error corrected"
+                                      " (%d sectors at %llu on %s)\n",
+                                      mdname(mddev), s,
+                                      (unsigned long long)(
+                                              sect + rdev->data_offset),
+                                      bdevname(rdev->bdev, b));
+                               atomic_add(s, &rdev->corrected_errors);
                        }
+
+                       rdev_dec_pending(rdev, mddev);
+                       rcu_read_lock();
                }
                rcu_read_unlock();
 
@@ -1670,22 +1964,256 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
        }
 }
 
+static void bi_complete(struct bio *bio, int error)
+{
+       complete((struct completion *)bio->bi_private);
+}
+
+static int submit_bio_wait(int rw, struct bio *bio)
+{
+       struct completion event;
+       rw |= REQ_SYNC;
+
+       init_completion(&event);
+       bio->bi_private = &event;
+       bio->bi_end_io = bi_complete;
+       submit_bio(rw, bio);
+       wait_for_completion(&event);
+
+       return test_bit(BIO_UPTODATE, &bio->bi_flags);
+}
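
bi_complete() and submit_bio_wait() are the stock synchronous-submission idiom: a struct completion is parked in bi_private, the end_io callback completes it, and the caller sleeps until the bio finishes; OR-ing in REQ_SYNC asks the block layer not to hold the request back. A minimal usage sketch, where bdev, sector and page stand in for a target the caller has already validated (allocation-failure handling omitted):

        struct bio *sbio = bio_kmalloc(GFP_NOIO, 1);

        sbio->bi_bdev = bdev;           /* assumed target device */
        sbio->bi_sector = sector;       /* assumed target sector */
        bio_add_page(sbio, page, PAGE_SIZE, 0);
        if (!submit_bio_wait(READ, sbio))       /* sleeps until bi_complete() runs */
                printk(KERN_WARNING "sync read at %llu failed\n",
                       (unsigned long long)sector);
        bio_put(sbio);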
+
+static int narrow_write_error(r10bio_t *r10_bio, int i)
+{
+       struct bio *bio = r10_bio->master_bio;
+       mddev_t *mddev = r10_bio->mddev;
+       conf_t *conf = mddev->private;
+       mdk_rdev_t *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev;
+       /* bio has the data to be written to slot 'i' where
+        * we recently had a write error.
+        * We repeatedly clone the bio and trim down to one block,
+        * then try the write.  Where the write fails we record
+        * a bad block.
+        * It is conceivable that the bio doesn't exactly align with
+        * blocks.  We must handle this.
+        *
+        * We currently own a reference to the rdev.
+        */
+
+       int block_sectors;
+       sector_t sector;
+       int sectors;
+       int sect_to_write = r10_bio->sectors;
+       int ok = 1;
+
+       if (rdev->badblocks.shift < 0)
+               return 0;
+
+       block_sectors = 1 << rdev->badblocks.shift;
+       sector = r10_bio->sector;
+       sectors = ((r10_bio->sector + block_sectors)
+                  & ~(sector_t)(block_sectors - 1))
+               - sector;
+
+       while (sect_to_write) {
+               struct bio *wbio;
+               if (sectors > sect_to_write)
+                       sectors = sect_to_write;
+               /* Write at 'sector' for 'sectors' */
+               wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+               md_trim_bio(wbio, sector - bio->bi_sector, sectors);
+               wbio->bi_sector = (r10_bio->devs[i].addr +
+                                  rdev->data_offset +
+                                  (sector - r10_bio->sector));
+               wbio->bi_bdev = rdev->bdev;
+               if (submit_bio_wait(WRITE, wbio) == 0)
+                       /* Failure! */
+                       ok = rdev_set_badblocks(rdev, sector,
+                                               sectors, 0)
+                               && ok;
+
+               bio_put(wbio);
+               sect_to_write -= sectors;
+               sector += sectors;
+               sectors = block_sectors;
+       }
+       return ok;
+}
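
The chunk arithmetic above trims the first write so that every subsequent chunk starts on a block_sectors boundary (block_sectors = 1 << rdev->badblocks.shift), keeping the recorded ranges aligned with the badblocks granularity. A standalone demonstration of that arithmetic with made-up numbers, runnable as plain C:

#include <stdio.h>

int main(void)
{
        unsigned long long sector = 13; /* hypothetical start of the failed write */
        int block_sectors = 8;          /* hypothetical 1 << badblocks.shift */
        int sect_to_write = 21;         /* hypothetical length of the failed write */
        /* first chunk runs only up to the next block_sectors boundary */
        int sectors = ((sector + block_sectors)
                       & ~(unsigned long long)(block_sectors - 1)) - sector;

        while (sect_to_write) {
                if (sectors > sect_to_write)
                        sectors = sect_to_write;
                printf("write %d sectors at %llu\n", sectors, sector);
                sect_to_write -= sectors;
                sector += sectors;
                sectors = block_sectors;        /* later chunks are block-aligned */
        }
        return 0;       /* prints: 3 at 13, 8 at 16, 8 at 24, 2 at 32 */
}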
+
+static void handle_read_error(mddev_t *mddev, r10bio_t *r10_bio)
+{
+       int slot = r10_bio->read_slot;
+       int mirror = r10_bio->devs[slot].devnum;
+       struct bio *bio;
+       conf_t *conf = mddev->private;
+       mdk_rdev_t *rdev;
+       char b[BDEVNAME_SIZE];
+       unsigned long do_sync;
+       int max_sectors;
+
+       /* we got a read error.  Maybe the drive is bad.  Maybe just
+        * this block, in which case we can fix it.
+        * We freeze all other IO, and try reading the block from
+        * other devices.  When we find one, we re-write the block
+        * and check whether that fixes the read error.
+        * This is all done synchronously while the array is
+        * frozen.
+        */
+       if (mddev->ro == 0) {
+               freeze_array(conf);
+               fix_read_error(conf, mddev, r10_bio);
+               unfreeze_array(conf);
+       }
+       rdev_dec_pending(conf->mirrors[mirror].rdev, mddev);
+
+       bio = r10_bio->devs[slot].bio;
+       bdevname(bio->bi_bdev, b);
+       r10_bio->devs[slot].bio =
+               mddev->ro ? IO_BLOCKED : NULL;
+read_more:
+       mirror = read_balance(conf, r10_bio, &max_sectors);
+       if (mirror == -1) {
+               printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
+                      " read error for block %llu\n",
+                      mdname(mddev), b,
+                      (unsigned long long)r10_bio->sector);
+               raid_end_bio_io(r10_bio);
+               bio_put(bio);
+               return;
+       }
+
+       do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
+       if (bio)
+               bio_put(bio);
+       slot = r10_bio->read_slot;
+       rdev = conf->mirrors[mirror].rdev;
+       printk_ratelimited(
+               KERN_ERR
+               "md/raid10:%s: %s: redirecting"
+               "sector %llu to another mirror\n",
+               mdname(mddev),
+               bdevname(rdev->bdev, b),
+               (unsigned long long)r10_bio->sector);
+       bio = bio_clone_mddev(r10_bio->master_bio,
+                             GFP_NOIO, mddev);
+       md_trim_bio(bio,
+                   r10_bio->sector - bio->bi_sector,
+                   max_sectors);
+       r10_bio->devs[slot].bio = bio;
+       bio->bi_sector = r10_bio->devs[slot].addr
+               + rdev->data_offset;
+       bio->bi_bdev = rdev->bdev;
+       bio->bi_rw = READ | do_sync;
+       bio->bi_private = r10_bio;
+       bio->bi_end_io = raid10_end_read_request;
+       if (max_sectors < r10_bio->sectors) {
+               /* Drat - have to split this up more */
+               struct bio *mbio = r10_bio->master_bio;
+               int sectors_handled =
+                       r10_bio->sector + max_sectors
+                       - mbio->bi_sector;
+               r10_bio->sectors = max_sectors;
+               spin_lock_irq(&conf->device_lock);
+               if (mbio->bi_phys_segments == 0)
+                       mbio->bi_phys_segments = 2;
+               else
+                       mbio->bi_phys_segments++;
+               spin_unlock_irq(&conf->device_lock);
+               generic_make_request(bio);
+               bio = NULL;
+
+               r10_bio = mempool_alloc(conf->r10bio_pool,
+                                       GFP_NOIO);
+               r10_bio->master_bio = mbio;
+               r10_bio->sectors = (mbio->bi_size >> 9)
+                       - sectors_handled;
+               r10_bio->state = 0;
+               set_bit(R10BIO_ReadError,
+                       &r10_bio->state);
+               r10_bio->mddev = mddev;
+               r10_bio->sector = mbio->bi_sector
+                       + sectors_handled;
+
+               goto read_more;
+       } else
+               generic_make_request(bio);
+}
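
When read_balance() can only serve part of the request, handle_read_error() splits it, reusing the master bio's bi_phys_segments field as a count of outstanding r10_bios (0 meaning a single one) so that completion knows when the whole request is done. A standalone trace of that accounting with invented numbers:

#include <stdio.h>

int main(void)
{
        unsigned long long r10_sector = 1000;   /* hypothetical request start */
        int remaining = 64;                     /* hypothetical request length */
        int max_sectors = 16;                   /* hypothetical read_balance() limit */
        int bi_phys_segments = 0;               /* 0 == just one r10_bio */

        while (remaining > max_sectors) {
                /* first split: two outstanding r10_bios (the one just
                 * submitted plus the remainder); afterwards, one more each */
                bi_phys_segments = bi_phys_segments ? bi_phys_segments + 1 : 2;
                printf("submit %d sectors at %llu (segments=%d)\n",
                       max_sectors, r10_sector, bi_phys_segments);
                r10_sector += max_sectors;
                remaining -= max_sectors;
        }
        printf("final r10_bio: %d sectors at %llu\n", remaining, r10_sector);
        return 0;
}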
+
+static void handle_write_completed(conf_t *conf, r10bio_t *r10_bio)
+{
+       /* Some sort of write request has finished and it
+        * succeeded in writing where we thought there was a
+        * bad block.  So forget the bad block.
+        * Or possibly it failed, and we need to record
+        * a bad block.
+        */
+       int m;
+       mdk_rdev_t *rdev;
+
+       if (test_bit(R10BIO_IsSync, &r10_bio->state) ||
+           test_bit(R10BIO_IsRecover, &r10_bio->state)) {
+               for (m = 0; m < conf->copies; m++) {
+                       int dev = r10_bio->devs[m].devnum;
+                       rdev = conf->mirrors[dev].rdev;
+                       if (r10_bio->devs[m].bio == NULL)
+                               continue;
+                       if (test_bit(BIO_UPTODATE,
+                                    &r10_bio->devs[m].bio->bi_flags)) {
+                               rdev_clear_badblocks(
+                                       rdev,
+                                       r10_bio->devs[m].addr,
+                                       r10_bio->sectors);
+                       } else {
+                               if (!rdev_set_badblocks(
+                                           rdev,
+                                           r10_bio->devs[m].addr,
+                                           r10_bio->sectors, 0))
+                                       md_error(conf->mddev, rdev);
+                       }
+               }
+               put_buf(r10_bio);
+       } else {
+               for (m = 0; m < conf->copies; m++) {
+                       int dev = r10_bio->devs[m].devnum;
+                       struct bio *bio = r10_bio->devs[m].bio;
+                       rdev = conf->mirrors[dev].rdev;
+                       if (bio == IO_MADE_GOOD) {
+                               rdev_clear_badblocks(
+                                       rdev,
+                                       r10_bio->devs[m].addr,
+                                       r10_bio->sectors);
+                               rdev_dec_pending(rdev, conf->mddev);
+                       } else if (bio != NULL &&
+                                  !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
+                               if (!narrow_write_error(r10_bio, m)) {
+                                       md_error(conf->mddev, rdev);
+                                       set_bit(R10BIO_Degraded,
+                                               &r10_bio->state);
+                               }
+                               rdev_dec_pending(rdev, conf->mddev);
+                       }
+               }
+               if (test_bit(R10BIO_WriteError,
+                            &r10_bio->state))
+                       close_write(r10_bio);
+               raid_end_bio_io(r10_bio);
+       }
+}
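
Both arms above reduce to the same badblocks bookkeeping. A condensed sketch of the contract, with addr and sectors standing in for r10_bio->devs[m].addr and r10_bio->sectors, and write_ok for the BIO_UPTODATE / IO_MADE_GOOD tests:

        if (write_ok)
                /* a later write succeeded here, so forget the bad block */
                rdev_clear_badblocks(rdev, addr, sectors);
        else if (!rdev_set_badblocks(rdev, addr, sectors, 0))
                /* could not even record the bad block: fail the device */
                md_error(conf->mddev, rdev);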
+
 static void raid10d(mddev_t *mddev)
 {
        r10bio_t *r10_bio;
-       struct bio *bio;
        unsigned long flags;
        conf_t *conf = mddev->private;
        struct list_head *head = &conf->retry_list;
-       int unplug=0;
-       mdk_rdev_t *rdev;
+       struct blk_plug plug;
 
        md_check_recovery(mddev);
 
+       blk_start_plug(&plug);
        for (;;) {
-               char b[BDEVNAME_SIZE];
 
-               unplug += flush_pending_writes(conf);
+               flush_pending_writes(conf);
 
                spin_lock_irqsave(&conf->device_lock, flags);
                if (list_empty(head)) {
@@ -1699,66 +2227,28 @@ static void raid10d(mddev_t *mddev)
 
                mddev = r10_bio->mddev;
                conf = mddev->private;
-               if (test_bit(R10BIO_IsSync, &r10_bio->state)) {
+               if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
+                   test_bit(R10BIO_WriteError, &r10_bio->state))
+                       handle_write_completed(conf, r10_bio);
+               else if (test_bit(R10BIO_IsSync, &r10_bio->state))
                        sync_request_write(mddev, r10_bio);
-                       unplug = 1;
-               } else  if (test_bit(R10BIO_IsRecover, &r10_bio->state)) {
+               else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
                        recovery_request_write(mddev, r10_bio);
-                       unplug = 1;
-               } else {
-                       int mirror;
-                       /* we got a read error. Maybe the drive is bad.  Maybe just
-                        * the block and we can fix it.
-                        * We freeze all other IO, and try reading the block from
-                        * other devices.  When we find one, we re-write
-                        * and check it that fixes the read error.
-                        * This is all done synchronously while the array is
-                        * frozen.
+               else if (test_bit(R10BIO_ReadError, &r10_bio->state))
+                       handle_read_error(mddev, r10_bio);
+               else {
+                       /* just a partial read to be scheduled from a
+                        * separate context
                         */
-                       if (mddev->ro == 0) {
-                               freeze_array(conf);
-                               fix_read_error(conf, mddev, r10_bio);
-                               unfreeze_array(conf);
-                       }
-
-                       bio = r10_bio->devs[r10_bio->read_slot].bio;
-                       r10_bio->devs[r10_bio->read_slot].bio =
-                               mddev->ro ? IO_BLOCKED : NULL;
-                       mirror = read_balance(conf, r10_bio);
-                       if (mirror == -1) {
-                               printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
-                                      " read error for block %llu\n",
-                                      mdname(mddev),
-                                      bdevname(bio->bi_bdev,b),
-                                      (unsigned long long)r10_bio->sector);
-                               raid_end_bio_io(r10_bio);
-                               bio_put(bio);
-                       } else {
-                               const bool do_sync = bio_rw_flagged(r10_bio->master_bio, BIO_RW_SYNCIO);
-                               bio_put(bio);
-                               rdev = conf->mirrors[mirror].rdev;
-                               if (printk_ratelimit())
-                                       printk(KERN_ERR "md/raid10:%s: %s: redirecting sector %llu to"
-                                              " another mirror\n",
-                                              mdname(mddev),
-                                              bdevname(rdev->bdev,b),
-                                              (unsigned long long)r10_bio->sector);
-                               bio = bio_clone(r10_bio->master_bio, GFP_NOIO);
-                               r10_bio->devs[r10_bio->read_slot].bio = bio;
-                               bio->bi_sector = r10_bio->devs[r10_bio->read_slot].addr
-                                       + rdev->data_offset;
-                               bio->bi_bdev = rdev->bdev;
-                               bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
-                               bio->bi_private = r10_bio;
-                               bio->bi_end_io = raid10_end_read_request;
-                               unplug = 1;
-                               generic_make_request(bio);
-                       }
+                       int slot = r10_bio->read_slot;
+                       generic_make_request(r10_bio->devs[slot].bio);
                }
+
                cond_resched();
+               if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
+                       md_check_recovery(mddev);
        }
-       if (unplug)
-               unplug_slaves(mddev);
+       blk_finish_plug(&plug);
 }
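
With the per-device unplug callbacks gone, raid10d leans on the on-stack plugging API instead: bios submitted between blk_start_plug() and blk_finish_plug() are batched per task and flushed when the plug is finished or the thread sleeps. The bare idiom, with pending_bios() as a hypothetical stand-in for the retry/pending lists above:

        struct blk_plug plug;
        struct bio *bio;

        blk_start_plug(&plug);
        while ((bio = pending_bios()) != NULL)  /* hypothetical helper */
                generic_make_request(bio);
        blk_finish_plug(&plug);                 /* flushes everything queued above */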
 
 
@@ -1807,17 +2297,16 @@ static int init_resync(conf_t *conf)
  *
  */
 
-static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
+static sector_t sync_request(mddev_t *mddev, sector_t sector_nr,
+                            int *skipped, int go_faster)
 {
        conf_t *conf = mddev->private;
        r10bio_t *r10_bio;
        struct bio *biolist = NULL, *bio;
        sector_t max_sector, nr_sectors;
-       int disk;
        int i;
        int max_sync;
-       int sync_blocks;
-
+       sector_t sync_blocks;
        sector_t sectors_skipped = 0;
        int chunks_skipped = 0;
 
@@ -1899,111 +2388,163 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
        max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
        if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
                /* recovery... the complicated one */
-               int j, k;
+               int j;
                r10_bio = NULL;
 
-               for (i=0 ; i<conf->raid_disks; i++)
-                       if (conf->mirrors[i].rdev &&
-                           !test_bit(In_sync, &conf->mirrors[i].rdev->flags)) {
-                               int still_degraded = 0;
-                               /* want to reconstruct this device */
-                               r10bio_t *rb2 = r10_bio;
-                               sector_t sect = raid10_find_virt(conf, sector_nr, i);
-                               int must_sync;
-                               /* Unless we are doing a full sync, we only need
-                                * to recover the block if it is set in the bitmap
-                                */
-                               must_sync = bitmap_start_sync(mddev->bitmap, sect,
-                                                             &sync_blocks, 1);
-                               if (sync_blocks < max_sync)
-                                       max_sync = sync_blocks;
-                               if (!must_sync &&
-                                   !conf->fullsync) {
-                                       /* yep, skip the sync_blocks here, but don't assume
-                                        * that there will never be anything to do here
-                                        */
-                                       chunks_skipped = -1;
-                                       continue;
-                               }
+               for (i=0 ; i<conf->raid_disks; i++) {
+                       int still_degraded;
+                       r10bio_t *rb2;
+                       sector_t sect;
+                       int must_sync;
+                       int any_working;
 
-                               r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
-                               raise_barrier(conf, rb2 != NULL);
-                               atomic_set(&r10_bio->remaining, 0);
+                       if (conf->mirrors[i].rdev == NULL ||
+                           test_bit(In_sync, &conf->mirrors[i].rdev->flags))
+                               continue;
 
-                               r10_bio->master_bio = (struct bio*)rb2;
-                               if (rb2)
-                                       atomic_inc(&rb2->remaining);
-                               r10_bio->mddev = mddev;
-                               set_bit(R10BIO_IsRecover, &r10_bio->state);
-                               r10_bio->sector = sect;
+                       still_degraded = 0;
+                       /* want to reconstruct this device */
+                       rb2 = r10_bio;
+                       sect = raid10_find_virt(conf, sector_nr, i);
+                       /* Unless we are doing a full sync, we only need
+                        * to recover the block if it is set in the bitmap
+                        */
+                       must_sync = bitmap_start_sync(mddev->bitmap, sect,
+                                                     &sync_blocks, 1);
+                       if (sync_blocks < max_sync)
+                               max_sync = sync_blocks;
+                       if (!must_sync &&
+                           !conf->fullsync) {
+                               /* yep, skip the sync_blocks here, but don't assume
+                                * that there will never be anything to do here
+                                */
+                               chunks_skipped = -1;
+                               continue;
+                       }
 
-                               raid10_find_phys(conf, r10_bio);
+                       r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
+                       raise_barrier(conf, rb2 != NULL);
+                       atomic_set(&r10_bio->remaining, 0);
 
-                               /* Need to check if the array will still be
-                                * degraded
-                                */
-                               for (j=0; j<conf->raid_disks; j++)
-                                       if (conf->mirrors[j].rdev == NULL ||
-                                           test_bit(Faulty, &conf->mirrors[j].rdev->flags)) {
-                                               still_degraded = 1;
-                                               break;
-                                       }
+                       r10_bio->master_bio = (struct bio*)rb2;
+                       if (rb2)
+                               atomic_inc(&rb2->remaining);
+                       r10_bio->mddev = mddev;
+                       set_bit(R10BIO_IsRecover, &r10_bio->state);
+                       r10_bio->sector = sect;
 
-                               must_sync = bitmap_start_sync(mddev->bitmap, sect,
-                                                             &sync_blocks, still_degraded);
-
-                               for (j=0; j<conf->copies;j++) {
-                                       int d = r10_bio->devs[j].devnum;
-                                       if (conf->mirrors[d].rdev &&
-                                           test_bit(In_sync, &conf->mirrors[d].rdev->flags)) {
-                                               /* This is where we read from */
-                                               bio = r10_bio->devs[0].bio;
-                                               bio->bi_next = biolist;
-                                               biolist = bio;
-                                               bio->bi_private = r10_bio;
-                                               bio->bi_end_io = end_sync_read;
-                                               bio->bi_rw = READ;
-                                               bio->bi_sector = r10_bio->devs[j].addr +
-                                                       conf->mirrors[d].rdev->data_offset;
-                                               bio->bi_bdev = conf->mirrors[d].rdev->bdev;
-                                               atomic_inc(&conf->mirrors[d].rdev->nr_pending);
-                                               atomic_inc(&r10_bio->remaining);
-                                               /* and we write to 'i' */
-
-                                               for (k=0; k<conf->copies; k++)
-                                                       if (r10_bio->devs[k].devnum == i)
-                                                               break;
-                                               BUG_ON(k == conf->copies);
-                                               bio = r10_bio->devs[1].bio;
-                                               bio->bi_next = biolist;
-                                               biolist = bio;
-                                               bio->bi_private = r10_bio;
-                                               bio->bi_end_io = end_sync_write;
-                                               bio->bi_rw = WRITE;
-                                               bio->bi_sector = r10_bio->devs[k].addr +
-                                                       conf->mirrors[i].rdev->data_offset;
-                                               bio->bi_bdev = conf->mirrors[i].rdev->bdev;
-
-                                               r10_bio->devs[0].devnum = d;
-                                               r10_bio->devs[1].devnum = i;
+                       raid10_find_phys(conf, r10_bio);
 
-                                               break;
+                       /* Need to check if the array will still be
+                        * degraded
+                        */
+                       for (j=0; j<conf->raid_disks; j++)
+                               if (conf->mirrors[j].rdev == NULL ||
+                                   test_bit(Faulty, &conf->mirrors[j].rdev->flags)) {
+                                       still_degraded = 1;
+                                       break;
+                               }
+
+                       must_sync = bitmap_start_sync(mddev->bitmap, sect,
+                                                     &sync_blocks, still_degraded);
+
+                       any_working = 0;
+                       for (j=0; j<conf->copies;j++) {
+                               int k;
+                               int d = r10_bio->devs[j].devnum;
+                               sector_t from_addr, to_addr;
+                               mdk_rdev_t *rdev;
+                               sector_t sector, first_bad;
+                               int bad_sectors;
+                               if (!conf->mirrors[d].rdev ||
+                                   !test_bit(In_sync, &conf->mirrors[d].rdev->flags))
+                                       continue;
+                               /* This is where we read from */
+                               any_working = 1;
+                               rdev = conf->mirrors[d].rdev;
+                               sector = r10_bio->devs[j].addr;
+
+                               if (is_badblock(rdev, sector, max_sync,
+                                               &first_bad, &bad_sectors)) {
+                                       if (first_bad > sector)
+                                               max_sync = first_bad - sector;
+                                       else {
+                                               bad_sectors -= (sector
+                                                               - first_bad);
+                                               if (max_sync > bad_sectors)
+                                                       max_sync = bad_sectors;
+                                               continue;
                                        }
                                }
-                               if (j == conf->copies) {
-                                       /* Cannot recover, so abort the recovery */
-                                       put_buf(r10_bio);
-                                       if (rb2)
-                                               atomic_dec(&rb2->remaining);
-                                       r10_bio = rb2;
+                               bio = r10_bio->devs[0].bio;
+                               bio->bi_next = biolist;
+                               biolist = bio;
+                               bio->bi_private = r10_bio;
+                               bio->bi_end_io = end_sync_read;
+                               bio->bi_rw = READ;
+                               from_addr = r10_bio->devs[j].addr;
+                               bio->bi_sector = from_addr +
+                                       conf->mirrors[d].rdev->data_offset;
+                               bio->bi_bdev = conf->mirrors[d].rdev->bdev;
+                               atomic_inc(&conf->mirrors[d].rdev->nr_pending);
+                               atomic_inc(&r10_bio->remaining);
+                               /* and we write to 'i' */
+
+                               for (k=0; k<conf->copies; k++)
+                                       if (r10_bio->devs[k].devnum == i)
+                                               break;
+                               BUG_ON(k == conf->copies);
+                               bio = r10_bio->devs[1].bio;
+                               bio->bi_next = biolist;
+                               biolist = bio;
+                               bio->bi_private = r10_bio;
+                               bio->bi_end_io = end_sync_write;
+                               bio->bi_rw = WRITE;
+                               to_addr = r10_bio->devs[k].addr;
+                               bio->bi_sector = to_addr +
+                                       conf->mirrors[i].rdev->data_offset;
+                               bio->bi_bdev = conf->mirrors[i].rdev->bdev;
+
+                               r10_bio->devs[0].devnum = d;
+                               r10_bio->devs[0].addr = from_addr;
+                               r10_bio->devs[1].devnum = i;
+                               r10_bio->devs[1].addr = to_addr;
+
+                               break;
+                       }
+                       if (j == conf->copies) {
+                               /* Cannot recover, so abort the recovery or
+                                * record a bad block */
+                               put_buf(r10_bio);
+                               if (rb2)
+                                       atomic_dec(&rb2->remaining);
+                               r10_bio = rb2;
+                               if (any_working) {
+                                       /* problem is that there are bad blocks
+                                        * on other device(s)
+                                        */
+                                       int k;
+                                       for (k = 0; k < conf->copies; k++)
+                                               if (r10_bio->devs[k].devnum == i)
+                                                       break;
+                                       if (!rdev_set_badblocks(
+                                                   conf->mirrors[i].rdev,
+                                                   r10_bio->devs[k].addr,
+                                                   max_sync, 0))
+                                               any_working = 0;
+                               }
+                               if (!any_working) {
                                        if (!test_and_set_bit(MD_RECOVERY_INTR,
                                                              &mddev->recovery))
                                                printk(KERN_INFO "md/raid10:%s: insufficient "
                                                       "working devices for recovery.\n",
                                                       mdname(mddev));
-                                       break;
+                                       conf->mirrors[i].recovery_disabled
+                                               = mddev->recovery_disabled;
                                }
+                               break;
                        }
+               }
                if (biolist == NULL) {
                        while (r10_bio) {
                                r10bio_t *rb2 = r10_bio;
@@ -2021,7 +2562,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
 
                if (!bitmap_start_sync(mddev->bitmap, sector_nr,
                                       &sync_blocks, mddev->degraded) &&
-                   !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
+                   !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED,
+                                                &mddev->recovery)) {
                        /* We can skip this block */
                        *skipped = 1;
                        return sync_blocks + sectors_skipped;
@@ -2043,12 +2585,28 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
 
                for (i=0; i<conf->copies; i++) {
                        int d = r10_bio->devs[i].devnum;
+                       sector_t first_bad, sector;
+                       int bad_sectors;
+
                        bio = r10_bio->devs[i].bio;
                        bio->bi_end_io = NULL;
                        clear_bit(BIO_UPTODATE, &bio->bi_flags);
                        if (conf->mirrors[d].rdev == NULL ||
                            test_bit(Faulty, &conf->mirrors[d].rdev->flags))
                                continue;
+                       sector = r10_bio->devs[i].addr;
+                       if (is_badblock(conf->mirrors[d].rdev,
+                                       sector, max_sync,
+                                       &first_bad, &bad_sectors)) {
+                               if (first_bad > sector)
+                                       max_sync = first_bad - sector;
+                               else {
+                                       bad_sectors -= (sector - first_bad);
+                                       if (max_sync > bad_sectors)
+                                               max_sync = bad_sectors;
+                                       continue;
+                               }
+                       }
                        atomic_inc(&conf->mirrors[d].rdev->nr_pending);
                        atomic_inc(&r10_bio->remaining);
                        bio->bi_next = biolist;
@@ -2056,7 +2614,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
                        bio->bi_private = r10_bio;
                        bio->bi_end_io = end_sync_read;
                        bio->bi_rw = READ;
-                       bio->bi_sector = r10_bio->devs[i].addr +
+                       bio->bi_sector = sector +
                                conf->mirrors[d].rdev->data_offset;
                        bio->bi_bdev = conf->mirrors[d].rdev->bdev;
                        count++;
@@ -2066,7 +2624,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
                        for (i=0; i<conf->copies; i++) {
                                int d = r10_bio->devs[i].devnum;
                                if (r10_bio->devs[i].bio->bi_end_io)
-                                       rdev_dec_pending(conf->mirrors[d].rdev, mddev);
+                                       rdev_dec_pending(conf->mirrors[d].rdev,
+                                                        mddev);
                        }
                        put_buf(r10_bio);
                        biolist = NULL;
@@ -2091,26 +2650,27 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
        do {
                struct page *page;
                int len = PAGE_SIZE;
-               disk = 0;
                if (sector_nr + (len>>9) > max_sector)
                        len = (max_sector - sector_nr) << 9;
                if (len == 0)
                        break;
                for (bio= biolist ; bio ; bio=bio->bi_next) {
+                       struct bio *bio2;
                        page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
-                       if (bio_add_page(bio, page, len, 0) == 0) {
-                               /* stop here */
-                               struct bio *bio2;
-                               bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
-                               for (bio2 = biolist; bio2 && bio2 != bio; bio2 = bio2->bi_next) {
-                                       /* remove last page from this bio */
-                                       bio2->bi_vcnt--;
-                                       bio2->bi_size -= len;
-                                       bio2->bi_flags &= ~(1<< BIO_SEG_VALID);
-                               }
-                               goto bio_full;
+                       if (bio_add_page(bio, page, len, 0))
+                               continue;
+
+                       /* stop here */
+                       bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
+                       for (bio2 = biolist;
+                            bio2 && bio2 != bio;
+                            bio2 = bio2->bi_next) {
+                               /* remove last page from this bio */
+                               bio2->bi_vcnt--;
+                               bio2->bi_size -= len;
+                               bio2->bi_flags &= ~(1<< BIO_SEG_VALID);
                        }
-                       disk = i;
+                       goto bio_full;
                }
                nr_sectors += len>>9;
                sector_nr += len>>9;
@@ -2141,7 +2701,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
        return sectors_skipped + nr_sectors;
  giveup:
        /* There is nowhere to write, so all non-sync
-        * drives must be failed, so try the next chunk...
+        * drives must be failed or in resync, or all drives
+        * have a bad block; try the next chunk...
         */
        if (sector_nr + max_sync < max_sector)
                max_sector = sector_nr + max_sync;
@@ -2299,8 +2860,6 @@ static int run(mddev_t *mddev)
        if (!conf)
                goto out;
 
-       mddev->queue->queue_lock = &conf->device_lock;
-
        mddev->thread = conf->thread;
        conf->thread = NULL;
 
@@ -2313,6 +2872,7 @@ static int run(mddev_t *mddev)
                                 (conf->raid_disks / conf->near_copies));
 
        list_for_each_entry(rdev, &mddev->disks, same_set) {
+
                disk_idx = rdev->raid_disk;
                if (disk_idx >= conf->raid_disks
                    || disk_idx < 0)
@@ -2335,7 +2895,7 @@ static int run(mddev_t *mddev)
                disk->head_position = 0;
        }
        /* need to check that every block has at least one working mirror */
-       if (!enough(conf)) {
+       if (!enough(conf, -1)) {
                printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n",
                       mdname(mddev));
                goto out_free_conf;
@@ -2371,7 +2931,6 @@ static int run(mddev_t *mddev)
        md_set_array_sectors(mddev, size);
        mddev->resync_max_sectors = size;
 
-       mddev->queue->unplug_fn = raid10_unplug;
        mddev->queue->backing_dev_info.congested_fn = raid10_congested;
        mddev->queue->backing_dev_info.congested_data = mddev;
 
@@ -2389,17 +2948,20 @@ static int run(mddev_t *mddev)
 
        if (conf->near_copies < conf->raid_disks)
                blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
-       md_integrity_register(mddev);
+
+       if (md_integrity_register(mddev))
+               goto out_free_conf;
+
        return 0;
 
 out_free_conf:
+       md_unregister_thread(mddev->thread);
        if (conf->r10bio_pool)
                mempool_destroy(conf->r10bio_pool);
        safe_put_page(conf->tmppage);
        kfree(conf->mirrors);
        kfree(conf);
        mddev->private = NULL;
-       md_unregister_thread(mddev->thread);
 out:
        return -EIO;
 }
@@ -2458,11 +3020,13 @@ static void *raid10_takeover_raid0(mddev_t *mddev)
        mddev->recovery_cp = MaxSector;
 
        conf = setup_conf(mddev);
-       if (!IS_ERR(conf))
+       if (!IS_ERR(conf)) {
                list_for_each_entry(rdev, &mddev->disks, same_set)
                        if (rdev->raid_disk >= 0)
                                rdev->new_raid_disk = rdev->raid_disk * 2;
-               
+               conf->barrier = 1;
+       }
+
        return conf;
 }
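
For reference, the rdev->raid_disk * 2 mapping above interleaves the raid0 members with empty mirror slots, so each original stripe element becomes the first of a near-copy pair, and conf->barrier = 1 appears intended to keep the new array quiesced until it is resumed. A throwaway illustration of the slot mapping (hypothetical 4-disk raid0):

#include <stdio.h>

int main(void)
{
        int raid_disks = 4;     /* hypothetical raid0 member count */
        int i;

        for (i = 0; i < raid_disks; i++)
                printf("raid0 disk %d -> raid10 slot %d (slot %d left for a mirror)\n",
                       i, 2 * i, 2 * i + 1);
        return 0;
}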