block: make bi_phys_segments an unsigned int instead of short
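
raid5 borrows the bio segment counters to track, per bio, how many
stripes still reference it and (for retried aligned reads) how many
stripes have already been processed.  With bi_hw_segments going away,
both counts are folded into the single 32-bit bi_phys_segments field:
the active count lives in the low 16 bits and the processed count in
the high 16 bits, accessed through the raid5_*_segments() helpers added
below.  The following standalone sketch (illustrative only, not part of
the patch) shows the packing scheme:

	#include <assert.h>

	/* illustrative stand-ins for the raid5_*_segments() helpers */
	static unsigned int pack(unsigned int active, unsigned int done)
	{
		return (done << 16) | (active & 0xffff);
	}

	static unsigned int active_count(unsigned int v)
	{
		return v & 0xffff;
	}

	static unsigned int done_count(unsigned int v)
	{
		return (v >> 16) & 0xffff;
	}

	int main(void)
	{
		unsigned int v = 1;	/* biased count of active stripes */

		v = pack(active_count(v), 5);	/* 5 stripes processed */
		assert(active_count(v) == 1);
		assert(done_count(v) == 5);
		/* merging the halves needs a bitwise OR; a logical ||
		 * would collapse the low half to 0 or 1 */
		return 0;
	}
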
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index b915936..37e5465 100644
 const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
 #endif
 
+/*
+ * We maintain a biased count of active stripes in the bottom 16 bits of
+ * bi_phys_segments, and a count of processed stripes in the upper 16 bits
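+ *
+ * Layout:  bits 31..16 = processed ("hw") count,
+ *          bits 15..0  = active ("phys") count.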
+ */
+static inline int raid5_bi_phys_segments(struct bio *bio)
+{
+       return bio->bi_phys_segments & 0xffff;
+}
+
+static inline int raid5_bi_hw_segments(struct bio *bio)
+{
+       return (bio->bi_phys_segments >> 16) & 0xffff;
+}
+
+static inline int raid5_dec_bi_phys_segments(struct bio *bio)
+{
+       --bio->bi_phys_segments;
+       return raid5_bi_phys_segments(bio);
+}
+
+static inline int raid5_dec_bi_hw_segments(struct bio *bio)
+{
+       unsigned short val = raid5_bi_hw_segments(bio);
+
+       --val;
+       bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio);
+       return val;
+}
+
+static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
+{
+       bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
+}
+
 static inline int raid6_next_disk(int disk, int raid_disks)
 {
        disk++;
@@ -507,7 +541,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
                        while (rbi && rbi->bi_sector <
                                dev->sector + STRIPE_SECTORS) {
                                rbi2 = r5_next_bio(rbi, dev->sector);
-                               if (--rbi->bi_phys_segments == 0) {
+                               if (!raid5_dec_bi_phys_segments(rbi)) {
                                        rbi->bi_next = return_bi;
                                        return_bi = rbi;
                                }
@@ -574,8 +608,7 @@ static void ops_complete_compute5(void *stripe_head_ref)
        release_stripe(sh);
 }
 
-static struct dma_async_tx_descriptor *
-ops_run_compute5(struct stripe_head *sh, unsigned long ops_request)
+static struct dma_async_tx_descriptor *ops_run_compute5(struct stripe_head *sh)
 {
        /* kernel stack size limits the total number of disks */
        int disks = sh->disks;
@@ -605,10 +638,6 @@ ops_run_compute5(struct stripe_head *sh, unsigned long ops_request)
                        ASYNC_TX_XOR_ZERO_DST, NULL,
                        ops_complete_compute5, sh);
 
-       /* ack now if postxor is not set to be run */
-       if (tx && !test_bit(STRIPE_OP_POSTXOR, &ops_request))
-               async_tx_ack(tx);
-
        return tx;
 }
 
@@ -637,7 +666,7 @@ ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
        for (i = disks; i--; ) {
                struct r5dev *dev = &sh->dev[i];
                /* Only process blocks that are known to be uptodate */
-               if (dev->towrite && test_bit(R5_Wantprexor, &dev->flags))
+               if (test_bit(R5_Wantdrain, &dev->flags))
                        xor_srcs[count++] = dev->page;
        }
 
@@ -649,16 +678,10 @@ ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
 }
 
 static struct dma_async_tx_descriptor *
-ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
-                unsigned long ops_request)
+ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
 {
        int disks = sh->disks;
-       int pd_idx = sh->pd_idx, i;
-
-       /* check if prexor is active which means only process blocks
-        * that are part of a read-modify-write (Wantprexor)
-        */
-       int prexor = test_bit(STRIPE_OP_PREXOR, &ops_request);
+       int i;
 
        pr_debug("%s: stripe %llu\n", __func__,
                (unsigned long long)sh->sector);
@@ -666,20 +689,8 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
        for (i = disks; i--; ) {
                struct r5dev *dev = &sh->dev[i];
                struct bio *chosen;
-               int towrite;
 
-               towrite = 0;
-               if (prexor) { /* rmw */
-                       if (dev->towrite &&
-                           test_bit(R5_Wantprexor, &dev->flags))
-                               towrite = 1;
-               } else { /* rcw */
-                       if (i != pd_idx && dev->towrite &&
-                               test_bit(R5_LOCKED, &dev->flags))
-                               towrite = 1;
-               }
-
-               if (towrite) {
+               if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {
                        struct bio *wbi;
 
                        spin_lock(&sh->lock);
@@ -704,18 +715,6 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
 static void ops_complete_postxor(void *stripe_head_ref)
 {
        struct stripe_head *sh = stripe_head_ref;
-
-       pr_debug("%s: stripe %llu\n", __func__,
-               (unsigned long long)sh->sector);
-
-       sh->reconstruct_state = reconstruct_state_result;
-       set_bit(STRIPE_HANDLE, &sh->state);
-       release_stripe(sh);
-}
-
-static void ops_complete_write(void *stripe_head_ref)
-{
-       struct stripe_head *sh = stripe_head_ref;
        int disks = sh->disks, i, pd_idx = sh->pd_idx;
 
        pr_debug("%s: stripe %llu\n", __func__,
@@ -727,14 +726,21 @@ static void ops_complete_write(void *stripe_head_ref)
                        set_bit(R5_UPTODATE, &dev->flags);
        }
 
-       sh->reconstruct_state = reconstruct_state_drain_result;
+       if (sh->reconstruct_state == reconstruct_state_drain_run)
+               sh->reconstruct_state = reconstruct_state_drain_result;
+       else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run)
+               sh->reconstruct_state = reconstruct_state_prexor_drain_result;
+       else {
+               BUG_ON(sh->reconstruct_state != reconstruct_state_run);
+               sh->reconstruct_state = reconstruct_state_result;
+       }
+
        set_bit(STRIPE_HANDLE, &sh->state);
        release_stripe(sh);
 }
 
 static void
-ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
-               unsigned long ops_request)
+ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
 {
        /* kernel stack size limits the total number of disks */
        int disks = sh->disks;
@@ -742,9 +748,8 @@ ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
 
        int count = 0, pd_idx = sh->pd_idx, i;
        struct page *xor_dest;
-       int prexor = test_bit(STRIPE_OP_PREXOR, &ops_request);
+       int prexor = 0;
        unsigned long flags;
-       dma_async_tx_callback callback;
 
        pr_debug("%s: stripe %llu\n", __func__,
                (unsigned long long)sh->sector);
@@ -752,7 +757,8 @@ ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
        /* check if prexor is active which means only process blocks
         * that are part of a read-modify-write (written)
         */
-       if (prexor) {
+       if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
+               prexor = 1;
                xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
                for (i = disks; i--; ) {
                        struct r5dev *dev = &sh->dev[i];
@@ -768,10 +774,6 @@ ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
                }
        }
 
-       /* check whether this postxor is part of a write */
-       callback = test_bit(STRIPE_OP_BIODRAIN, &ops_request) ?
-               ops_complete_write : ops_complete_postxor;
-
        /* 1/ if we prexor'd then the dest is reused as a source
         * 2/ if we did not prexor then we are redoing the parity
         * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
@@ -785,10 +787,10 @@ ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
        if (unlikely(count == 1)) {
                flags &= ~(ASYNC_TX_XOR_DROP_DST | ASYNC_TX_XOR_ZERO_DST);
                tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE,
-                       flags, tx, callback, sh);
+                       flags, tx, ops_complete_postxor, sh);
        } else
                tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
-                       flags, tx, callback, sh);
+                       flags, tx, ops_complete_postxor, sh);
 }
 
 static void ops_complete_check(void *stripe_head_ref)
@@ -840,19 +842,23 @@ static void raid5_run_ops(struct stripe_head *sh, unsigned long ops_request)
                overlap_clear++;
        }
 
-       if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request))
-               tx = ops_run_compute5(sh, ops_request);
+       if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) {
+               tx = ops_run_compute5(sh);
+               /* terminate the chain if postxor is not set to be run */
+               if (tx && !test_bit(STRIPE_OP_POSTXOR, &ops_request))
+                       async_tx_ack(tx);
+       }
 
        if (test_bit(STRIPE_OP_PREXOR, &ops_request))
                tx = ops_run_prexor(sh, tx);
 
        if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
-               tx = ops_run_biodrain(sh, tx, ops_request);
+               tx = ops_run_biodrain(sh, tx);
                overlap_clear++;
        }
 
        if (test_bit(STRIPE_OP_POSTXOR, &ops_request))
-               ops_run_postxor(sh, tx, ops_request);
+               ops_run_postxor(sh, tx);
 
        if (test_bit(STRIPE_OP_CHECK, &ops_request))
                ops_run_check(sh);
@@ -939,14 +945,16 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
        struct stripe_head *osh, *nsh;
        LIST_HEAD(newstripes);
        struct disk_info *ndisks;
-       int err = 0;
+       int err;
        struct kmem_cache *sc;
        int i;
 
        if (newsize <= conf->pool_size)
                return 0; /* never bother to shrink */
 
-       md_allow_write(conf->mddev);
+       err = md_allow_write(conf->mddev);
+       if (err)
+               return err;
 
        /* Step 1 */
        sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
@@ -1646,7 +1654,7 @@ static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2)
 }
 
 static void
-handle_write_operations5(struct stripe_head *sh, struct stripe_head_state *s,
+schedule_reconstruction5(struct stripe_head *sh, struct stripe_head_state *s,
                         int rcw, int expand)
 {
        int i, pd_idx = sh->pd_idx, disks = sh->disks;
@@ -1669,6 +1677,7 @@ handle_write_operations5(struct stripe_head *sh, struct stripe_head_state *s,
 
                        if (dev->towrite) {
                                set_bit(R5_LOCKED, &dev->flags);
+                               set_bit(R5_Wantdrain, &dev->flags);
                                if (!expand)
                                        clear_bit(R5_UPTODATE, &dev->flags);
                                s->locked++;
@@ -1681,7 +1690,7 @@ handle_write_operations5(struct stripe_head *sh, struct stripe_head_state *s,
                BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
                        test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
 
-               sh->reconstruct_state = reconstruct_state_drain_run;
+               sh->reconstruct_state = reconstruct_state_prexor_drain_run;
                set_bit(STRIPE_OP_PREXOR, &s->ops_request);
                set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
                set_bit(STRIPE_OP_POSTXOR, &s->ops_request);
@@ -1691,15 +1700,10 @@ handle_write_operations5(struct stripe_head *sh, struct stripe_head_state *s,
                        if (i == pd_idx)
                                continue;
 
-                       /* For a read-modify write there may be blocks that are
-                        * locked for reading while others are ready to be
-                        * written so we distinguish these blocks by the
-                        * R5_Wantprexor bit
-                        */
                        if (dev->towrite &&
                            (test_bit(R5_UPTODATE, &dev->flags) ||
-                           test_bit(R5_Wantcompute, &dev->flags))) {
-                               set_bit(R5_Wantprexor, &dev->flags);
+                            test_bit(R5_Wantcompute, &dev->flags))) {
+                               set_bit(R5_Wantdrain, &dev->flags);
                                set_bit(R5_LOCKED, &dev->flags);
                                clear_bit(R5_UPTODATE, &dev->flags);
                                s->locked++;
@@ -1755,7 +1759,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
        if (*bip)
                bi->bi_next = *bip;
        *bip = bi;
-       bi->bi_phys_segments ++;
+       bi->bi_phys_segments++;
        spin_unlock_irq(&conf->device_lock);
        spin_unlock(&sh->lock);
 
@@ -1815,7 +1819,7 @@ static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int disks)
 }
 
 static void
-handle_requests_to_failed_array(raid5_conf_t *conf, struct stripe_head *sh,
+handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
                                struct stripe_head_state *s, int disks,
                                struct bio **return_bi)
 {
@@ -1849,7 +1853,7 @@ handle_requests_to_failed_array(raid5_conf_t *conf, struct stripe_head *sh,
                        sh->dev[i].sector + STRIPE_SECTORS) {
                        struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
                        clear_bit(BIO_UPTODATE, &bi->bi_flags);
-                       if (--bi->bi_phys_segments == 0) {
+                       if (!raid5_dec_bi_phys_segments(bi)) {
                                md_write_end(conf->mddev);
                                bi->bi_next = *return_bi;
                                *return_bi = bi;
@@ -1864,7 +1868,7 @@ handle_requests_to_failed_array(raid5_conf_t *conf, struct stripe_head *sh,
                       sh->dev[i].sector + STRIPE_SECTORS) {
                        struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
                        clear_bit(BIO_UPTODATE, &bi->bi_flags);
-                       if (--bi->bi_phys_segments == 0) {
+                       if (!raid5_dec_bi_phys_segments(bi)) {
                                md_write_end(conf->mddev);
                                bi->bi_next = *return_bi;
                                *return_bi = bi;
@@ -1888,7 +1892,7 @@ handle_requests_to_failed_array(raid5_conf_t *conf, struct stripe_head *sh,
                                struct bio *nextbi =
                                        r5_next_bio(bi, sh->dev[i].sector);
                                clear_bit(BIO_UPTODATE, &bi->bi_flags);
-                               if (--bi->bi_phys_segments == 0) {
+                               if (!raid5_dec_bi_phys_segments(bi)) {
                                        bi->bi_next = *return_bi;
                                        *return_bi = bi;
                                }
@@ -1906,23 +1910,28 @@ handle_requests_to_failed_array(raid5_conf_t *conf, struct stripe_head *sh,
                        md_wakeup_thread(conf->mddev->thread);
 }
 
-/* __handle_issuing_new_read_requests5 - returns 0 if there are no more disks
- * to process
+/* fetch_block5 - checks the given member device to see if its data needs
+ * to be read or computed to satisfy a request.
+ *
+ * Returns 1 when no more member devices need to be checked, otherwise returns
+ * 0 to tell the loop in handle_stripe_fill5 to continue
  */
-static int __handle_issuing_new_read_requests5(struct stripe_head *sh,
-                       struct stripe_head_state *s, int disk_idx, int disks)
+static int fetch_block5(struct stripe_head *sh, struct stripe_head_state *s,
+                       int disk_idx, int disks)
 {
        struct r5dev *dev = &sh->dev[disk_idx];
        struct r5dev *failed_dev = &sh->dev[s->failed_num];
 
        /* is the data in this block needed, and can we get it? */
        if (!test_bit(R5_LOCKED, &dev->flags) &&
-           !test_bit(R5_UPTODATE, &dev->flags) && (dev->toread ||
-           (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
-            s->syncing || s->expanding || (s->failed &&
-            (failed_dev->toread || (failed_dev->towrite &&
-            !test_bit(R5_OVERWRITE, &failed_dev->flags)
-            ))))) {
+           !test_bit(R5_UPTODATE, &dev->flags) &&
+           (dev->toread ||
+            (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
+            s->syncing || s->expanding ||
+            (s->failed &&
+             (failed_dev->toread ||
+              (failed_dev->towrite &&
+               !test_bit(R5_OVERWRITE, &failed_dev->flags)))))) {
                /* We would like to get this block, possibly by computing it,
                 * otherwise read it if the backing disk is insync
                 */
@@ -1940,7 +1949,7 @@ static int __handle_issuing_new_read_requests5(struct stripe_head *sh,
                         * subsequent operation.
                         */
                        s->uptodate++;
-                       return 0; /* uptodate + compute == disks */
+                       return 1; /* uptodate + compute == disks */
                } else if (test_bit(R5_Insync, &dev->flags)) {
                        set_bit(R5_LOCKED, &dev->flags);
                        set_bit(R5_Wantread, &dev->flags);
@@ -1950,10 +1959,13 @@ static int __handle_issuing_new_read_requests5(struct stripe_head *sh,
                }
        }
 
-       return ~0;
+       return 0;
 }
 
-static void handle_issuing_new_read_requests5(struct stripe_head *sh,
+/**
+ * handle_stripe_fill5 - read or compute data to satisfy pending requests.
+ */
+static void handle_stripe_fill5(struct stripe_head *sh,
                        struct stripe_head_state *s, int disks)
 {
        int i;
@@ -1963,16 +1975,14 @@ static void handle_issuing_new_read_requests5(struct stripe_head *sh,
         * midst of changing due to a write
         */
        if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
-           !sh->reconstruct_state) {
+           !sh->reconstruct_state)
                for (i = disks; i--; )
-                       if (__handle_issuing_new_read_requests5(
-                               sh, s, i, disks) == 0)
+                       if (fetch_block5(sh, s, i, disks))
                                break;
-       }
        set_bit(STRIPE_HANDLE, &sh->state);
 }
 
-static void handle_issuing_new_read_requests6(struct stripe_head *sh,
+static void handle_stripe_fill6(struct stripe_head *sh,
                        struct stripe_head_state *s, struct r6_state *r6s,
                        int disks)
 {
@@ -2031,12 +2041,12 @@ static void handle_issuing_new_read_requests6(struct stripe_head *sh,
 }
 
 
-/* handle_completed_write_requests
+/* handle_stripe_clean_event
  * any written block on an uptodate or failed drive can be returned.
  * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
  * never LOCKED, so we don't need to test 'failed' directly.
  */
-static void handle_completed_write_requests(raid5_conf_t *conf,
+static void handle_stripe_clean_event(raid5_conf_t *conf,
        struct stripe_head *sh, int disks, struct bio **return_bi)
 {
        int i;
@@ -2057,7 +2067,7 @@ static void handle_completed_write_requests(raid5_conf_t *conf,
                                while (wbi && wbi->bi_sector <
                                        dev->sector + STRIPE_SECTORS) {
                                        wbi2 = r5_next_bio(wbi, dev->sector);
-                                       if (--wbi->bi_phys_segments == 0) {
+                                       if (!raid5_dec_bi_phys_segments(wbi)) {
                                                md_write_end(conf->mddev);
                                                wbi->bi_next = *return_bi;
                                                *return_bi = wbi;
@@ -2081,7 +2091,7 @@ static void handle_completed_write_requests(raid5_conf_t *conf,
                        md_wakeup_thread(conf->mddev->thread);
 }
 
-static void handle_issuing_new_write_requests5(raid5_conf_t *conf,
+static void handle_stripe_dirtying5(raid5_conf_t *conf,
                struct stripe_head *sh, struct stripe_head_state *s, int disks)
 {
        int rmw = 0, rcw = 0, i;
@@ -2168,10 +2178,10 @@ static void handle_issuing_new_write_requests5(raid5_conf_t *conf,
        if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
            (s->locked == 0 && (rcw == 0 || rmw == 0) &&
            !test_bit(STRIPE_BIT_DELAY, &sh->state)))
-               handle_write_operations5(sh, s, rcw == 0, 0);
+               schedule_reconstruction5(sh, s, rcw == 0, 0);
 }
 
-static void handle_issuing_new_write_requests6(raid5_conf_t *conf,
+static void handle_stripe_dirtying6(raid5_conf_t *conf,
                struct stripe_head *sh, struct stripe_head_state *s,
                struct r6_state *r6s, int disks)
 {
@@ -2531,7 +2541,7 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
  *
  */
 
-static void handle_stripe5(struct stripe_head *sh)
+static bool handle_stripe5(struct stripe_head *sh)
 {
        raid5_conf_t *conf = sh->raid_conf;
        int disks = sh->disks, i;
@@ -2592,10 +2602,10 @@ static void handle_stripe5(struct stripe_head *sh)
                if (dev->written)
                        s.written++;
                rdev = rcu_dereference(conf->disks[i].rdev);
-               if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
+               if (blocked_rdev == NULL &&
+                   rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
                        blocked_rdev = rdev;
                        atomic_inc(&rdev->nr_pending);
-                       break;
                }
                if (!rdev || !test_bit(In_sync, &rdev->flags)) {
                        /* The ReadError flag will just be confusing now */
@@ -2612,8 +2622,14 @@ static void handle_stripe5(struct stripe_head *sh)
        rcu_read_unlock();
 
        if (unlikely(blocked_rdev)) {
-               set_bit(STRIPE_HANDLE, &sh->state);
-               goto unlock;
+               if (s.syncing || s.expanding || s.expanded ||
+                   s.to_write || s.written) {
+                       set_bit(STRIPE_HANDLE, &sh->state);
+                       goto unlock;
+               }
+               /* There is nothing for the blocked_rdev to block */
+               rdev_dec_pending(blocked_rdev, conf->mddev);
+               blocked_rdev = NULL;
        }
 
        if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
@@ -2629,8 +2645,7 @@ static void handle_stripe5(struct stripe_head *sh)
         * need to be failed
         */
        if (s.failed > 1 && s.to_read+s.to_write+s.written)
-               handle_requests_to_failed_array(conf, sh, &s, disks,
-                                               &return_bi);
+               handle_failed_stripe(conf, sh, &s, disks, &return_bi);
        if (s.failed > 1 && s.syncing) {
                md_done_sync(conf->mddev, STRIPE_SECTORS,0);
                clear_bit(STRIPE_SYNCING, &sh->state);
@@ -2646,7 +2661,7 @@ static void handle_stripe5(struct stripe_head *sh)
               !test_bit(R5_LOCKED, &dev->flags) &&
               test_bit(R5_UPTODATE, &dev->flags)) ||
               (s.failed == 1 && s.failed_num == sh->pd_idx)))
-               handle_completed_write_requests(conf, sh, disks, &return_bi);
+               handle_stripe_clean_event(conf, sh, disks, &return_bi);
 
        /* Now we might consider reading some blocks, either to check/generate
         * parity, or to satisfy requests
@@ -2654,17 +2669,17 @@ static void handle_stripe5(struct stripe_head *sh)
         */
        if (s.to_read || s.non_overwrite ||
            (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding)
-               handle_issuing_new_read_requests5(sh, &s, disks);
+               handle_stripe_fill5(sh, &s, disks);
 
        /* Now we check to see if any write operations have recently
         * completed
         */
        prexor = 0;
-       if (sh->reconstruct_state == reconstruct_state_drain_result) {
+       if (sh->reconstruct_state == reconstruct_state_prexor_drain_result)
+               prexor = 1;
+       if (sh->reconstruct_state == reconstruct_state_drain_result ||
+           sh->reconstruct_state == reconstruct_state_prexor_drain_result) {
                sh->reconstruct_state = reconstruct_state_idle;
-               for (i = disks; i--; )
-                       prexor += test_and_clear_bit(R5_Wantprexor,
-                                                    &sh->dev[i].flags);
 
                /* All the 'written' buffers and the parity block are ready to
                 * be written back to disk
@@ -2698,7 +2713,7 @@ static void handle_stripe5(struct stripe_head *sh)
         *    block.
         */
        if (s.to_write && !sh->reconstruct_state && !sh->check_state)
-               handle_issuing_new_write_requests5(conf, sh, &s, disks);
+               handle_stripe_dirtying5(conf, sh, &s, disks);
 
        /* maybe we need to check and possibly fix the parity for this stripe
         * Any reads will already have been scheduled, so we just see if enough
@@ -2742,10 +2757,11 @@ static void handle_stripe5(struct stripe_head *sh)
        if (sh->reconstruct_state == reconstruct_state_result) {
                sh->reconstruct_state = reconstruct_state_idle;
                clear_bit(STRIPE_EXPANDING, &sh->state);
-               for (i = conf->raid_disks; i--; )
+               for (i = conf->raid_disks; i--; ) {
                        set_bit(R5_Wantwrite, &sh->dev[i].flags);
-                       set_bit(R5_LOCKED, &dev->flags);
+                       set_bit(R5_LOCKED, &sh->dev[i].flags);
                        s.locked++;
+               }
        }
 
        if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
@@ -2754,7 +2770,7 @@ static void handle_stripe5(struct stripe_head *sh)
                sh->disks = conf->raid_disks;
                sh->pd_idx = stripe_to_pdidx(sh->sector, conf,
                        conf->raid_disks);
-               handle_write_operations5(sh, &s, 1, 1);
+               schedule_reconstruction5(sh, &s, 1, 1);
        } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
                clear_bit(STRIPE_EXPAND_READY, &sh->state);
                atomic_dec(&conf->reshape_stripes);
@@ -2779,9 +2795,11 @@ static void handle_stripe5(struct stripe_head *sh)
        ops_run_io(sh, &s);
 
        return_io(return_bi);
+
+       return blocked_rdev == NULL;
 }
 
-static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
+static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
 {
        raid6_conf_t *conf = sh->raid_conf;
        int disks = sh->disks;
@@ -2830,7 +2848,7 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
                                copy_data(0, rbi, dev->page, dev->sector);
                                rbi2 = r5_next_bio(rbi, dev->sector);
                                spin_lock_irq(&conf->device_lock);
-                               if (--rbi->bi_phys_segments == 0) {
+                               if (!raid5_dec_bi_phys_segments(rbi)) {
                                        rbi->bi_next = return_bi;
                                        return_bi = rbi;
                                }
@@ -2854,10 +2872,10 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
                if (dev->written)
                        s.written++;
                rdev = rcu_dereference(conf->disks[i].rdev);
-               if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
+               if (blocked_rdev == NULL &&
+                   rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
                        blocked_rdev = rdev;
                        atomic_inc(&rdev->nr_pending);
-                       break;
                }
                if (!rdev || !test_bit(In_sync, &rdev->flags)) {
                        /* The ReadError flag will just be confusing now */
@@ -2875,9 +2893,16 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
        rcu_read_unlock();
 
        if (unlikely(blocked_rdev)) {
-               set_bit(STRIPE_HANDLE, &sh->state);
-               goto unlock;
+               if (s.syncing || s.expanding || s.expanded ||
+                   s.to_write || s.written) {
+                       set_bit(STRIPE_HANDLE, &sh->state);
+                       goto unlock;
+               }
+               /* There is nothing for the blocked_rdev to block */
+               rdev_dec_pending(blocked_rdev, conf->mddev);
+               blocked_rdev = NULL;
        }
+
        pr_debug("locked=%d uptodate=%d to_read=%d"
               " to_write=%d failed=%d failed_num=%d,%d\n",
               s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
@@ -2886,8 +2911,7 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
         * might need to be failed
         */
        if (s.failed > 2 && s.to_read+s.to_write+s.written)
-               handle_requests_to_failed_array(conf, sh, &s, disks,
-                                               &return_bi);
+               handle_failed_stripe(conf, sh, &s, disks, &return_bi);
        if (s.failed > 2 && s.syncing) {
                md_done_sync(conf->mddev, STRIPE_SECTORS,0);
                clear_bit(STRIPE_SYNCING, &sh->state);
@@ -2912,7 +2936,7 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
             ( r6s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
                             && !test_bit(R5_LOCKED, &qdev->flags)
                             && test_bit(R5_UPTODATE, &qdev->flags)))))
-               handle_completed_write_requests(conf, sh, disks, &return_bi);
+               handle_stripe_clean_event(conf, sh, disks, &return_bi);
 
        /* Now we might consider reading some blocks, either to check/generate
         * parity, or to satisfy requests
@@ -2920,11 +2944,11 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
         */
        if (s.to_read || s.non_overwrite || (s.to_write && s.failed) ||
            (s.syncing && (s.uptodate < disks)) || s.expanding)
-               handle_issuing_new_read_requests6(sh, &s, &r6s, disks);
+               handle_stripe_fill6(sh, &s, &r6s, disks);
 
        /* now to consider writing and what else, if anything should be read */
        if (s.to_write)
-               handle_issuing_new_write_requests6(conf, sh, &s, &r6s, disks);
+               handle_stripe_dirtying6(conf, sh, &s, &r6s, disks);
 
        /* maybe we need to check and possibly fix the parity for this stripe
         * Any reads will already have been scheduled, so we just see if enough
@@ -2993,14 +3017,17 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
        ops_run_io(sh, &s);
 
        return_io(return_bi);
+
+       return blocked_rdev == NULL;
 }
 
-static void handle_stripe(struct stripe_head *sh, struct page *tmp_page)
+/* returns true if the stripe was handled */
+static bool handle_stripe(struct stripe_head *sh, struct page *tmp_page)
 {
        if (sh->raid_conf->level == 6)
-               handle_stripe6(sh, tmp_page);
+               return handle_stripe6(sh, tmp_page);
        else
-               handle_stripe5(sh);
+               return handle_stripe5(sh);
 }
 
 
@@ -3099,15 +3126,17 @@ static int raid5_congested(void *data, int bits)
 /* We want read requests to align with chunks where possible,
  * but write requests don't need to.
  */
-static int raid5_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec)
+static int raid5_mergeable_bvec(struct request_queue *q,
+                               struct bvec_merge_data *bvm,
+                               struct bio_vec *biovec)
 {
        mddev_t *mddev = q->queuedata;
-       sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
+       sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
        int max;
        unsigned int chunk_sectors = mddev->chunk_size >> 9;
-       unsigned int bio_sectors = bio->bi_size >> 9;
+       unsigned int bio_sectors = bvm->bi_size >> 9;
 
-       if (bio_data_dir(bio) == WRITE)
+       if ((bvm->bi_rw & 1) == WRITE)
                return biovec->bv_len; /* always allow writes to be mergeable */
 
        max =  (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
@@ -3160,8 +3189,11 @@ static struct bio *remove_bio_from_retry(raid5_conf_t *conf)
        if(bi) {
                conf->retry_read_aligned_list = bi->bi_next;
                bi->bi_next = NULL;
+               /*
+                * this sets the active stripe count to 1 and the processed
+                * stripe count to zero (upper 16 bits)
+                */
                bi->bi_phys_segments = 1; /* biased count of active stripes */
-               bi->bi_hw_segments = 0; /* count of processed stripes */
        }
 
        return bi;
@@ -3211,8 +3243,7 @@ static int bio_fits_rdev(struct bio *bi)
        if ((bi->bi_size>>9) > q->max_sectors)
                return 0;
        blk_recount_segments(q, bi);
-       if (bi->bi_phys_segments > q->max_phys_segments ||
-           bi->bi_hw_segments > q->max_hw_segments)
+       if (bi->bi_phys_segments > q->max_phys_segments)
                return 0;
 
        if (q->merge_bvec_fn)
@@ -3473,7 +3504,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
                        
        }
        spin_lock_irq(&conf->device_lock);
-       remaining = --bi->bi_phys_segments;
+       remaining = raid5_dec_bi_phys_segments(bi);
        spin_unlock_irq(&conf->device_lock);
        if (remaining == 0) {
 
@@ -3566,7 +3597,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
                            j == raid6_next_disk(sh->pd_idx, sh->disks))
                                continue;
                        s = compute_blocknr(sh, j);
-                       if (s < (mddev->array_size<<1)) {
+                       if (s < mddev->array_sectors) {
                                skipped = 1;
                                continue;
                        }
@@ -3716,7 +3747,9 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
        clear_bit(STRIPE_INSYNC, &sh->state);
        spin_unlock(&sh->lock);
 
-       handle_stripe(sh, NULL);
+       /* wait for any blocked device to be handled */
+       while (unlikely(!handle_stripe(sh, NULL)))
+               ;
        release_stripe(sh);
 
        return STRIPE_SECTORS;
@@ -3755,7 +3788,7 @@ static int  retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
                     sector += STRIPE_SECTORS,
                     scnt++) {
 
-               if (scnt < raid_bio->bi_hw_segments)
+               if (scnt < raid5_bi_hw_segments(raid_bio))
                        /* already done this stripe */
                        continue;
 
@@ -3763,7 +3796,7 @@ static int  retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
 
                if (!sh) {
                        /* failed to get a stripe - must wait */
-                       raid_bio->bi_hw_segments = scnt;
+                       raid5_set_bi_hw_segments(raid_bio, scnt);
                        conf->retry_read_aligned = raid_bio;
                        return handled;
                }
@@ -3771,7 +3804,7 @@ static int  retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
                set_bit(R5_ReadError, &sh->dev[dd_idx].flags);
                if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
                        release_stripe(sh);
-                       raid_bio->bi_hw_segments = scnt;
+                       raid5_set_bi_hw_segments(raid_bio, scnt);
                        conf->retry_read_aligned = raid_bio;
                        return handled;
                }
@@ -3781,7 +3814,7 @@ static int  retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
                handled++;
        }
        spin_lock_irq(&conf->device_lock);
-       remaining = --raid_bio->bi_phys_segments;
+       remaining = raid5_dec_bi_phys_segments(raid_bio);
        spin_unlock_irq(&conf->device_lock);
        if (remaining == 0)
                bio_endio(raid_bio, 0);
@@ -3835,10 +3868,8 @@ static void raid5d(mddev_t *mddev)
 
                sh = __get_priority_stripe(conf);
 
-               if (!sh) {
-                       async_tx_issue_pending_all();
+               if (!sh)
                        break;
-               }
                spin_unlock_irq(&conf->device_lock);
                
                handled++;
@@ -3851,6 +3882,7 @@ static void raid5d(mddev_t *mddev)
 
        spin_unlock_irq(&conf->device_lock);
 
+       async_tx_issue_pending_all();
        unplug_slaves(mddev);
 
        pr_debug("--- raid5d inactive\n");
@@ -3871,6 +3903,8 @@ raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
 {
        raid5_conf_t *conf = mddev_to_conf(mddev);
        unsigned long new;
+       int err;
+
        if (len >= PAGE_SIZE)
                return -EINVAL;
        if (!conf)
@@ -3886,7 +3920,9 @@ raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
                else
                        break;
        }
-       md_allow_write(mddev);
+       err = md_allow_write(mddev);
+       if (err)
+               return err;
        while (new > conf->max_nr_stripes) {
                if (grow_one_stripe(conf))
                        conf->max_nr_stripes++;
@@ -4211,7 +4247,7 @@ static int run(mddev_t *mddev)
        mddev->queue->backing_dev_info.congested_data = mddev;
        mddev->queue->backing_dev_info.congested_fn = raid5_congested;
 
-       mddev->array_size =  mddev->size * (conf->previous_raid_disks -
+       mddev->array_sectors = 2 * mddev->size * (conf->previous_raid_disks -
                                            conf->max_degraded);
 
        blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
@@ -4435,8 +4471,9 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors)
        raid5_conf_t *conf = mddev_to_conf(mddev);
 
        sectors &= ~((sector_t)mddev->chunk_size/512 - 1);
-       mddev->array_size = (sectors * (mddev->raid_disks-conf->max_degraded))>>1;
-       set_capacity(mddev->gendisk, mddev->array_size << 1);
+       mddev->array_sectors = sectors * (mddev->raid_disks
+                                         - conf->max_degraded);
+       set_capacity(mddev->gendisk, mddev->array_sectors);
        mddev->changed = 1;
        if (sectors/2  > mddev->size && mddev->recovery_cp == MaxSector) {
                mddev->recovery_cp = mddev->size << 1;
@@ -4458,6 +4495,9 @@ static int raid5_check_reshape(mddev_t *mddev)
                return -EINVAL; /* Cannot shrink array or change level yet */
        if (mddev->delta_disks == 0)
                return 0; /* nothing to do */
+       if (mddev->bitmap)
+               /* Cannot grow a bitmap yet */
+               return -EBUSY;
 
        /* Can only proceed if there are plenty of stripe_heads.
         * We need a minimum of one full stripe, and for sensible progress
@@ -4569,15 +4609,16 @@ static void end_reshape(raid5_conf_t *conf)
        struct block_device *bdev;
 
        if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
-               conf->mddev->array_size = conf->mddev->size *
+               conf->mddev->array_sectors = 2 * conf->mddev->size *
                        (conf->raid_disks - conf->max_degraded);
-               set_capacity(conf->mddev->gendisk, conf->mddev->array_size << 1);
+               set_capacity(conf->mddev->gendisk, conf->mddev->array_sectors);
                conf->mddev->changed = 1;
 
                bdev = bdget_disk(conf->mddev->gendisk, 0);
                if (bdev) {
                        mutex_lock(&bdev->bd_inode->i_mutex);
-                       i_size_write(bdev->bd_inode, (loff_t)conf->mddev->array_size << 10);
+                       i_size_write(bdev->bd_inode,
+                                    (loff_t)conf->mddev->array_sectors << 9);
                        mutex_unlock(&bdev->bd_inode->i_mutex);
                        bdput(bdev);
                }
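
For reference, the bio completion idiom this conversion preserves
(a minimal sketch with an illustrative helper name; conf->device_lock
serializes the decrement exactly as in make_request() and
retry_aligned_read()):

	static void stripe_done(raid5_conf_t *conf, struct bio *bi)
	{
		int remaining;

		/* drop one active-stripe reference under the device lock */
		spin_lock_irq(&conf->device_lock);
		remaining = raid5_dec_bi_phys_segments(bi);
		spin_unlock_irq(&conf->device_lock);

		/* last reference gone: complete the bio */
		if (remaining == 0)
			bio_endio(bi, 0);
	}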