1 /*
2  * raid5.c : Multiple Devices driver for Linux
3  *         Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
4  *         Copyright (C) 1999, 2000 Ingo Molnar
5  *         Copyright (C) 2002, 2003 H. Peter Anvin
6  *
7  * RAID-4/5/6 management functions.
8  * Thanks to Penguin Computing for making the RAID-6 development possible
9  * by donating a test server!
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License as published by
13  * the Free Software Foundation; either version 2, or (at your option)
14  * any later version.
15  *
16  * You should have received a copy of the GNU General Public License
17  * (for example /usr/src/linux/COPYING); if not, write to the Free
18  * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19  */
20
21 /*
22  * BITMAP UNPLUGGING:
23  *
24  * The sequencing for updating the bitmap reliably is a little
25  * subtle (and I got it wrong the first time) so it deserves some
26  * explanation.
27  *
28  * We group bitmap updates into batches.  Each batch has a number.
29  * We may write out several batches at once, but that isn't very important.
30  * conf->bm_write is the number of the last batch successfully written.
31  * conf->bm_flush is the number of the last batch that was closed to
32  *    new additions.
33  * When we discover that we will need to write to any block in a stripe
34  * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
35  * the number of the batch it will be in. This is bm_flush+1.
36  * When we are ready to do a write, if that batch hasn't been written yet,
37  *   we plug the array and queue the stripe for later.
38  * When an unplug happens, we increment bm_flush, thus closing the current
39  *   batch.
40  * When we notice that bm_flush > bm_write, we write out all pending updates
41  * to the bitmap, and advance bm_write to where bm_flush was.
42  * This may occasionally write a bit out twice, but is sure never to
43  * miss any bits.
44  */
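/*
 * Worked example of the sequencing above (illustrative values only):
 *
 *   bm_write == bm_flush == 5     all bitmap updates are on stable storage
 *   add_stripe_bio()              sh->bm_seq = bm_flush + 1 = 6
 *   unplug                        bm_flush = 6, batch 6 is closed
 *   bm_flush > bm_write noticed   pending bitmap updates are written out,
 *                                 then bm_write advances to 6
 *   stripes with bm_seq <= 6      may now safely issue their writes
 */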
45
46 #include <linux/blkdev.h>
47 #include <linux/kthread.h>
48 #include <linux/raid/pq.h>
49 #include <linux/async_tx.h>
50 #include <linux/async.h>
51 #include <linux/seq_file.h>
52 #include <linux/cpu.h>
53 #include "md.h"
54 #include "raid5.h"
55 #include "bitmap.h"
56
57 /*
58  * Stripe cache
59  */
60
61 #define NR_STRIPES              256
62 #define STRIPE_SIZE             PAGE_SIZE
63 #define STRIPE_SHIFT            (PAGE_SHIFT - 9)
64 #define STRIPE_SECTORS          (STRIPE_SIZE>>9)
65 #define IO_THRESHOLD            1
66 #define BYPASS_THRESHOLD        1
67 #define NR_HASH                 (PAGE_SIZE / sizeof(struct hlist_head))
68 #define HASH_MASK               (NR_HASH - 1)
69
70 #define stripe_hash(conf, sect) (&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))
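/*
 * Worked example, assuming a 4 KiB PAGE_SIZE and 64-bit pointers:
 * STRIPE_SHIFT is 12 - 9 = 3, NR_HASH is 4096 / 8 = 512 and HASH_MASK is
 * 511, so a stripe at sector 0x1234 hashes to bucket
 * (0x1234 >> 3) & 511 = 0x46.  Adjacent stripes (STRIPE_SECTORS == 8
 * sectors apart) therefore land in adjacent hash buckets.
 */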
71
72 /* bio's attached to a stripe+device for I/O are linked together in bi_sector
73  * order without overlap.  There may be several bio's per stripe+device, and
74  * a bio could span several devices.
75  * When walking this list for a particular stripe+device, we must never proceed
76  * beyond a bio that extends past this device, as the next bio might no longer
77  * be valid.
78  * This macro is used to determine the 'next' bio in the list, given the sector
79  * of the current stripe+device
80  */
81 #define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? (bio)->bi_next : NULL)
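/*
 * The canonical walk over such a list, as used by ops_run_biofill() and
 * ops_complete_biofill() below ('handle_one' is a hypothetical per-bio
 * action, shown only to illustrate the pattern):
 *
 *      struct bio *rbi;
 *      for (rbi = dev->toread;
 *           rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS;
 *           rbi = r5_next_bio(rbi, dev->sector))
 *              handle_one(rbi);
 *
 * r5_next_bio() returns NULL once the current bio extends beyond this
 * stripe+device, which safely terminates the walk.
 */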
82 /*
83  * The following can be used to debug the driver
84  */
85 #define RAID5_PARANOIA  1
86 #if RAID5_PARANOIA && defined(CONFIG_SMP)
87 # define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
88 #else
89 # define CHECK_DEVLOCK()
90 #endif
91
92 #ifdef DEBUG
93 #define inline
94 #define __inline__
95 #endif
96
97 #define printk_rl(args...) ((void) (printk_ratelimit() && printk(args)))
98
99 /*
100  * We maintain a biased count of active stripes in the bottom 16 bits of
101  * bi_phys_segments, and a count of processed stripes in the upper 16 bits
102  */
103 static inline int raid5_bi_phys_segments(struct bio *bio)
104 {
105         return bio->bi_phys_segments & 0xffff;
106 }
107
108 static inline int raid5_bi_hw_segments(struct bio *bio)
109 {
110         return (bio->bi_phys_segments >> 16) & 0xffff;
111 }
112
113 static inline int raid5_dec_bi_phys_segments(struct bio *bio)
114 {
115         --bio->bi_phys_segments;
116         return raid5_bi_phys_segments(bio);
117 }
118
119 static inline int raid5_dec_bi_hw_segments(struct bio *bio)
120 {
121         unsigned short val = raid5_bi_hw_segments(bio);
122
123         --val;
124         bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio);
125         return val;
126 }
127
128 static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
129 {
130         bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
131 }
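/*
 * Illustration of the packed layout: with bi_phys_segments == 0x00030002
 * a bio holds 2 active-stripe references in the low half and has had 3
 * stripes processed in the high half, so raid5_bi_phys_segments() returns
 * 2 and raid5_bi_hw_segments() returns 3.  raid5_set_bi_hw_segments(bio, 5)
 * must preserve the low half, yielding 0x00050002, which is why the OR
 * above must be bitwise rather than logical.
 */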
132
133 /* Find first data disk in a raid6 stripe */
134 static inline int raid6_d0(struct stripe_head *sh)
135 {
136         if (sh->ddf_layout)
137                 /* ddf always starts from first device */
138                 return 0;
139         /* md starts just after Q block */
140         if (sh->qd_idx == sh->disks - 1)
141                 return 0;
142         else
143                 return sh->qd_idx + 1;
144 }
145 static inline int raid6_next_disk(int disk, int raid_disks)
146 {
147         disk++;
148         return (disk < raid_disks) ? disk : 0;
149 }
150
151 /* When walking through the disks in a raid6 stripe, starting at raid6_d0,
152  * we need to map each disk to a 'slot', where the data disks are slot
153  * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
154  * is raid_disks-1.  This helper does that mapping.
155  */
156 static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
157                              int *count, int syndrome_disks)
158 {
159         int slot = *count;
160
161         if (sh->ddf_layout)
162                 (*count)++;
163         if (idx == sh->pd_idx)
164                 return syndrome_disks;
165         if (idx == sh->qd_idx)
166                 return syndrome_disks + 1;
167         if (!sh->ddf_layout)
168                 (*count)++;
169         return slot;
170 }
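/*
 * Worked example for a 4-device md-layout (non-ddf) raid6 with pd_idx == 2
 * and qd_idx == 3: syndrome_disks is 2 and raid6_d0() returns 0, so
 * walking i = 0, 1, 2, 3 yields slots 0, 1, 2 (P), 3 (Q).  Data blocks
 * occupy slots 0 .. syndrome_disks-1 with P and Q appended after them.
 */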
171
172 static void return_io(struct bio *return_bi)
173 {
174         struct bio *bi = return_bi;
175         while (bi) {
176
177                 return_bi = bi->bi_next;
178                 bi->bi_next = NULL;
179                 bi->bi_size = 0;
180                 bio_endio(bi, 0);
181                 bi = return_bi;
182         }
183 }
184
185 static void print_raid5_conf (raid5_conf_t *conf);
186
187 static int stripe_operations_active(struct stripe_head *sh)
188 {
189         return sh->check_state || sh->reconstruct_state ||
190                test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
191                test_bit(STRIPE_COMPUTE_RUN, &sh->state);
192 }
193
194 static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
195 {
196         if (atomic_dec_and_test(&sh->count)) {
197                 BUG_ON(!list_empty(&sh->lru));
198                 BUG_ON(atomic_read(&conf->active_stripes)==0);
199                 if (test_bit(STRIPE_HANDLE, &sh->state)) {
200                         if (test_bit(STRIPE_DELAYED, &sh->state)) {
201                                 list_add_tail(&sh->lru, &conf->delayed_list);
202                                 blk_plug_device(conf->mddev->queue);
203                         } else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
204                                    sh->bm_seq - conf->seq_write > 0) {
205                                 list_add_tail(&sh->lru, &conf->bitmap_list);
206                                 blk_plug_device(conf->mddev->queue);
207                         } else {
208                                 clear_bit(STRIPE_BIT_DELAY, &sh->state);
209                                 list_add_tail(&sh->lru, &conf->handle_list);
210                         }
211                         md_wakeup_thread(conf->mddev->thread);
212                 } else {
213                         BUG_ON(stripe_operations_active(sh));
214                         if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
215                                 atomic_dec(&conf->preread_active_stripes);
216                                 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
217                                         md_wakeup_thread(conf->mddev->thread);
218                         }
219                         atomic_dec(&conf->active_stripes);
220                         if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
221                                 list_add_tail(&sh->lru, &conf->inactive_list);
222                                 wake_up(&conf->wait_for_stripe);
223                                 if (conf->retry_read_aligned)
224                                         md_wakeup_thread(conf->mddev->thread);
225                         }
226                 }
227         }
228 }
229
230 static void release_stripe(struct stripe_head *sh)
231 {
232         raid5_conf_t *conf = sh->raid_conf;
233         unsigned long flags;
234
235         spin_lock_irqsave(&conf->device_lock, flags);
236         __release_stripe(conf, sh);
237         spin_unlock_irqrestore(&conf->device_lock, flags);
238 }
239
240 static inline void remove_hash(struct stripe_head *sh)
241 {
242         pr_debug("remove_hash(), stripe %llu\n",
243                 (unsigned long long)sh->sector);
244
245         hlist_del_init(&sh->hash);
246 }
247
248 static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
249 {
250         struct hlist_head *hp = stripe_hash(conf, sh->sector);
251
252         pr_debug("insert_hash(), stripe %llu\n",
253                 (unsigned long long)sh->sector);
254
255         CHECK_DEVLOCK();
256         hlist_add_head(&sh->hash, hp);
257 }
258
259
260 /* find an idle stripe, make sure it is unhashed, and return it. */
261 static struct stripe_head *get_free_stripe(raid5_conf_t *conf)
262 {
263         struct stripe_head *sh = NULL;
264         struct list_head *first;
265
266         CHECK_DEVLOCK();
267         if (list_empty(&conf->inactive_list))
268                 goto out;
269         first = conf->inactive_list.next;
270         sh = list_entry(first, struct stripe_head, lru);
271         list_del_init(first);
272         remove_hash(sh);
273         atomic_inc(&conf->active_stripes);
274 out:
275         return sh;
276 }
277
278 static void shrink_buffers(struct stripe_head *sh, int num)
279 {
280         struct page *p;
281         int i;
282
283         for (i=0; i<num ; i++) {
284                 p = sh->dev[i].page;
285                 if (!p)
286                         continue;
287                 sh->dev[i].page = NULL;
288                 put_page(p);
289         }
290 }
291
292 static int grow_buffers(struct stripe_head *sh, int num)
293 {
294         int i;
295
296         for (i=0; i<num; i++) {
297                 struct page *page;
298
299                 if (!(page = alloc_page(GFP_KERNEL))) {
300                         return 1;
301                 }
302                 sh->dev[i].page = page;
303         }
304         return 0;
305 }
306
307 static void raid5_build_block(struct stripe_head *sh, int i, int previous);
308 static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
309                             struct stripe_head *sh);
310
311 static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
312 {
313         raid5_conf_t *conf = sh->raid_conf;
314         int i;
315
316         BUG_ON(atomic_read(&sh->count) != 0);
317         BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
318         BUG_ON(stripe_operations_active(sh));
319
320         CHECK_DEVLOCK();
321         pr_debug("init_stripe called, stripe %llu\n",
322                 (unsigned long long)sh->sector);
323
324         remove_hash(sh);
325
326         sh->generation = conf->generation - previous;
327         sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
328         sh->sector = sector;
329         stripe_set_idx(sector, conf, previous, sh);
330         sh->state = 0;
331
332
333         for (i = sh->disks; i--; ) {
334                 struct r5dev *dev = &sh->dev[i];
335
336                 if (dev->toread || dev->read || dev->towrite || dev->written ||
337                     test_bit(R5_LOCKED, &dev->flags)) {
338                         printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
339                                (unsigned long long)sh->sector, i, dev->toread,
340                                dev->read, dev->towrite, dev->written,
341                                test_bit(R5_LOCKED, &dev->flags));
342                         BUG();
343                 }
344                 dev->flags = 0;
345                 raid5_build_block(sh, i, previous);
346         }
347         insert_hash(conf, sh);
348 }
349
350 static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector,
351                                          short generation)
352 {
353         struct stripe_head *sh;
354         struct hlist_node *hn;
355
356         CHECK_DEVLOCK();
357         pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
358         hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
359                 if (sh->sector == sector && sh->generation == generation)
360                         return sh;
361         pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
362         return NULL;
363 }
364
365 static void unplug_slaves(mddev_t *mddev);
366 static void raid5_unplug_device(struct request_queue *q);
367
368 static struct stripe_head *
369 get_active_stripe(raid5_conf_t *conf, sector_t sector,
370                   int previous, int noblock, int noquiesce)
371 {
372         struct stripe_head *sh;
373
374         pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);
375
376         spin_lock_irq(&conf->device_lock);
377
378         do {
379                 wait_event_lock_irq(conf->wait_for_stripe,
380                                     conf->quiesce == 0 || noquiesce,
381                                     conf->device_lock, /* nothing */);
382                 sh = __find_stripe(conf, sector, conf->generation - previous);
383                 if (!sh) {
384                         if (!conf->inactive_blocked)
385                                 sh = get_free_stripe(conf);
386                         if (noblock && sh == NULL)
387                                 break;
388                         if (!sh) {
389                                 conf->inactive_blocked = 1;
390                                 wait_event_lock_irq(conf->wait_for_stripe,
391                                                     !list_empty(&conf->inactive_list) &&
392                                                     (atomic_read(&conf->active_stripes)
393                                                      < (conf->max_nr_stripes *3/4)
394                                                      || !conf->inactive_blocked),
395                                                     conf->device_lock,
396                                                     raid5_unplug_device(conf->mddev->queue)
397                                         );
398                                 conf->inactive_blocked = 0;
399                         } else
400                                 init_stripe(sh, sector, previous);
401                 } else {
402                         if (atomic_read(&sh->count)) {
403                                 BUG_ON(!list_empty(&sh->lru)
404                                     && !test_bit(STRIPE_EXPANDING, &sh->state));
405                         } else {
406                                 if (!test_bit(STRIPE_HANDLE, &sh->state))
407                                         atomic_inc(&conf->active_stripes);
408                                 if (list_empty(&sh->lru) &&
409                                     !test_bit(STRIPE_EXPANDING, &sh->state))
410                                         BUG();
411                                 list_del_init(&sh->lru);
412                         }
413                 }
414         } while (sh == NULL);
415
416         if (sh)
417                 atomic_inc(&sh->count);
418
419         spin_unlock_irq(&conf->device_lock);
420         return sh;
421 }
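/*
 * Typical caller pattern (a sketch modeled on make_request(); the body of
 * the 'if' stands in for whatever work the caller attaches):
 *
 *      sh = get_active_stripe(conf, new_sector, previous, 0, 0);
 *      if (sh) {
 *              ... attach bio, set STRIPE_HANDLE, etc ...
 *              release_stripe(sh);
 *      }
 *
 * The returned stripe carries an elevated sh->count, so it cannot cycle
 * back onto the inactive list until release_stripe() drops the reference.
 */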
422
423 static void
424 raid5_end_read_request(struct bio *bi, int error);
425 static void
426 raid5_end_write_request(struct bio *bi, int error);
427
428 static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
429 {
430         raid5_conf_t *conf = sh->raid_conf;
431         int i, disks = sh->disks;
432
433         might_sleep();
434
435         for (i = disks; i--; ) {
436                 int rw;
437                 struct bio *bi;
438                 mdk_rdev_t *rdev;
439                 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
440                         rw = WRITE;
441                 else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
442                         rw = READ;
443                 else
444                         continue;
445
446                 bi = &sh->dev[i].req;
447
448                 bi->bi_rw = rw;
449                 if (rw == WRITE)
450                         bi->bi_end_io = raid5_end_write_request;
451                 else
452                         bi->bi_end_io = raid5_end_read_request;
453
454                 rcu_read_lock();
455                 rdev = rcu_dereference(conf->disks[i].rdev);
456                 if (rdev && test_bit(Faulty, &rdev->flags))
457                         rdev = NULL;
458                 if (rdev)
459                         atomic_inc(&rdev->nr_pending);
460                 rcu_read_unlock();
461
462                 if (rdev) {
463                         if (s->syncing || s->expanding || s->expanded)
464                                 md_sync_acct(rdev->bdev, STRIPE_SECTORS);
465
466                         set_bit(STRIPE_IO_STARTED, &sh->state);
467
468                         bi->bi_bdev = rdev->bdev;
469                         pr_debug("%s: for %llu schedule op %ld on disc %d\n",
470                                 __func__, (unsigned long long)sh->sector,
471                                 bi->bi_rw, i);
472                         atomic_inc(&sh->count);
473                         bi->bi_sector = sh->sector + rdev->data_offset;
474                         bi->bi_flags = 1 << BIO_UPTODATE;
475                         bi->bi_vcnt = 1;
476                         bi->bi_max_vecs = 1;
477                         bi->bi_idx = 0;
478                         bi->bi_io_vec = &sh->dev[i].vec;
479                         bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
480                         bi->bi_io_vec[0].bv_offset = 0;
481                         bi->bi_size = STRIPE_SIZE;
482                         bi->bi_next = NULL;
483                         if (rw == WRITE &&
484                             test_bit(R5_ReWrite, &sh->dev[i].flags))
485                                 atomic_add(STRIPE_SECTORS,
486                                         &rdev->corrected_errors);
487                         generic_make_request(bi);
488                 } else {
489                         if (rw == WRITE)
490                                 set_bit(STRIPE_DEGRADED, &sh->state);
491                         pr_debug("skip op %ld on disc %d for sector %llu\n",
492                                 bi->bi_rw, i, (unsigned long long)sh->sector);
493                         clear_bit(R5_LOCKED, &sh->dev[i].flags);
494                         set_bit(STRIPE_HANDLE, &sh->state);
495                 }
496         }
497 }
498
499 static struct dma_async_tx_descriptor *
500 async_copy_data(int frombio, struct bio *bio, struct page *page,
501         sector_t sector, struct dma_async_tx_descriptor *tx)
502 {
503         struct bio_vec *bvl;
504         struct page *bio_page;
505         int i;
506         int page_offset;
507         struct async_submit_ctl submit;
508         enum async_tx_flags flags = 0;
509
510         if (bio->bi_sector >= sector)
511                 page_offset = (signed)(bio->bi_sector - sector) * 512;
512         else
513                 page_offset = (signed)(sector - bio->bi_sector) * -512;
514
515         if (frombio)
516                 flags |= ASYNC_TX_FENCE;
517         init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
518
519         bio_for_each_segment(bvl, bio, i) {
520                 int len = bio_iovec_idx(bio, i)->bv_len;
521                 int clen;
522                 int b_offset = 0;
523
524                 if (page_offset < 0) {
525                         b_offset = -page_offset;
526                         page_offset += b_offset;
527                         len -= b_offset;
528                 }
529
530                 if (len > 0 && page_offset + len > STRIPE_SIZE)
531                         clen = STRIPE_SIZE - page_offset;
532                 else
533                         clen = len;
534
535                 if (clen > 0) {
536                         b_offset += bio_iovec_idx(bio, i)->bv_offset;
537                         bio_page = bio_iovec_idx(bio, i)->bv_page;
538                         if (frombio)
539                                 tx = async_memcpy(page, bio_page, page_offset,
540                                                   b_offset, clen, &submit);
541                         else
542                                 tx = async_memcpy(bio_page, page, b_offset,
543                                                   page_offset, clen, &submit);
544                 }
545                 /* chain the operations */
546                 submit.depend_tx = tx;
547
548                 if (clen < len) /* hit end of page */
549                         break;
550                 page_offset +=  len;
551         }
552
553         return tx;
554 }
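/*
 * Worked example of the offset arithmetic above, assuming 4 KiB pages: for
 * a bio starting 8 sectors before the stripe page (bio->bi_sector ==
 * sector - 8), page_offset starts at -4096.  An 8 KiB first segment is
 * then split as b_offset = 4096, len = 4096, clen = 4096: the half that
 * precedes this stripe is skipped and the remainder is copied to offset 0
 * of the stripe page.
 */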
555
556 static void ops_complete_biofill(void *stripe_head_ref)
557 {
558         struct stripe_head *sh = stripe_head_ref;
559         struct bio *return_bi = NULL;
560         raid5_conf_t *conf = sh->raid_conf;
561         int i;
562
563         pr_debug("%s: stripe %llu\n", __func__,
564                 (unsigned long long)sh->sector);
565
566         /* clear completed biofills */
567         spin_lock_irq(&conf->device_lock);
568         for (i = sh->disks; i--; ) {
569                 struct r5dev *dev = &sh->dev[i];
570
571                 /* acknowledge completion of a biofill operation */
572                 /* and check if we need to reply to a read request,
573                  * new R5_Wantfill requests are held off until
574                  * !STRIPE_BIOFILL_RUN
575                  */
576                 if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
577                         struct bio *rbi, *rbi2;
578
579                         BUG_ON(!dev->read);
580                         rbi = dev->read;
581                         dev->read = NULL;
582                         while (rbi && rbi->bi_sector <
583                                 dev->sector + STRIPE_SECTORS) {
584                                 rbi2 = r5_next_bio(rbi, dev->sector);
585                                 if (!raid5_dec_bi_phys_segments(rbi)) {
586                                         rbi->bi_next = return_bi;
587                                         return_bi = rbi;
588                                 }
589                                 rbi = rbi2;
590                         }
591                 }
592         }
593         spin_unlock_irq(&conf->device_lock);
594         clear_bit(STRIPE_BIOFILL_RUN, &sh->state);
595
596         return_io(return_bi);
597
598         set_bit(STRIPE_HANDLE, &sh->state);
599         release_stripe(sh);
600 }
601
602 static void ops_run_biofill(struct stripe_head *sh)
603 {
604         struct dma_async_tx_descriptor *tx = NULL;
605         raid5_conf_t *conf = sh->raid_conf;
606         struct async_submit_ctl submit;
607         int i;
608
609         pr_debug("%s: stripe %llu\n", __func__,
610                 (unsigned long long)sh->sector);
611
612         for (i = sh->disks; i--; ) {
613                 struct r5dev *dev = &sh->dev[i];
614                 if (test_bit(R5_Wantfill, &dev->flags)) {
615                         struct bio *rbi;
616                         spin_lock_irq(&conf->device_lock);
617                         dev->read = rbi = dev->toread;
618                         dev->toread = NULL;
619                         spin_unlock_irq(&conf->device_lock);
620                         while (rbi && rbi->bi_sector <
621                                 dev->sector + STRIPE_SECTORS) {
622                                 tx = async_copy_data(0, rbi, dev->page,
623                                         dev->sector, tx);
624                                 rbi = r5_next_bio(rbi, dev->sector);
625                         }
626                 }
627         }
628
629         atomic_inc(&sh->count);
630         init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL);
631         async_trigger_callback(&submit);
632 }
633
634 static void mark_target_uptodate(struct stripe_head *sh, int target)
635 {
636         struct r5dev *tgt;
637
638         if (target < 0)
639                 return;
640
641         tgt = &sh->dev[target];
642         set_bit(R5_UPTODATE, &tgt->flags);
643         BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
644         clear_bit(R5_Wantcompute, &tgt->flags);
645 }
646
647 static void ops_complete_compute(void *stripe_head_ref)
648 {
649         struct stripe_head *sh = stripe_head_ref;
650
651         pr_debug("%s: stripe %llu\n", __func__,
652                 (unsigned long long)sh->sector);
653
654         /* mark the computed target(s) as uptodate */
655         mark_target_uptodate(sh, sh->ops.target);
656         mark_target_uptodate(sh, sh->ops.target2);
657
658         clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
659         if (sh->check_state == check_state_compute_run)
660                 sh->check_state = check_state_compute_result;
661         set_bit(STRIPE_HANDLE, &sh->state);
662         release_stripe(sh);
663 }
664
665 /* return a pointer to the address conversion region of the scribble buffer */
666 static addr_conv_t *to_addr_conv(struct stripe_head *sh,
667                                  struct raid5_percpu *percpu)
668 {
669         return percpu->scribble + sizeof(struct page *) * (sh->disks + 2);
670 }
671
672 static struct dma_async_tx_descriptor *
673 ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
674 {
675         int disks = sh->disks;
676         struct page **xor_srcs = percpu->scribble;
677         int target = sh->ops.target;
678         struct r5dev *tgt = &sh->dev[target];
679         struct page *xor_dest = tgt->page;
680         int count = 0;
681         struct dma_async_tx_descriptor *tx;
682         struct async_submit_ctl submit;
683         int i;
684
685         pr_debug("%s: stripe %llu block: %d\n",
686                 __func__, (unsigned long long)sh->sector, target);
687         BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
688
689         for (i = disks; i--; )
690                 if (i != target)
691                         xor_srcs[count++] = sh->dev[i].page;
692
693         atomic_inc(&sh->count);
694
695         init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL,
696                           ops_complete_compute, sh, to_addr_conv(sh, percpu));
697         if (unlikely(count == 1))
698                 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
699         else
700                 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
701
702         return tx;
703 }
704
705 /* set_syndrome_sources - populate source buffers for gen_syndrome
706  * @srcs - (struct page *) array of size sh->disks
707  * @sh - stripe_head to parse
708  *
709  * Populates srcs in proper layout order for the stripe and returns the
710  * 'count' of sources to be used in a call to async_gen_syndrome.  The P
711  * destination buffer is recorded in srcs[count] and the Q destination
712  * is recorded in srcs[count+1].
713  */
714 static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
715 {
716         int disks = sh->disks;
717         int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
718         int d0_idx = raid6_d0(sh);
719         int count;
720         int i;
721
722         for (i = 0; i < disks; i++)
723                 srcs[i] = NULL;
724
725         count = 0;
726         i = d0_idx;
727         do {
728                 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
729
730                 srcs[slot] = sh->dev[i].page;
731                 i = raid6_next_disk(i, disks);
732         } while (i != d0_idx);
733
734         return syndrome_disks;
735 }
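/*
 * After the slot mapping, srcs[0 .. syndrome_disks-1] hold the data pages
 * while srcs[syndrome_disks] is P and srcs[syndrome_disks+1] is Q, so
 * callers below can hand the array straight to the syndrome engine:
 *
 *      count = set_syndrome_sources(blocks, sh);
 *      tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
 */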
736
737 static struct dma_async_tx_descriptor *
738 ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
739 {
740         int disks = sh->disks;
741         struct page **blocks = percpu->scribble;
742         int target;
743         int qd_idx = sh->qd_idx;
744         struct dma_async_tx_descriptor *tx;
745         struct async_submit_ctl submit;
746         struct r5dev *tgt;
747         struct page *dest;
748         int i;
749         int count;
750
751         if (sh->ops.target < 0)
752                 target = sh->ops.target2;
753         else if (sh->ops.target2 < 0)
754                 target = sh->ops.target;
755         else
756                 /* we should only have one valid target */
757                 BUG();
758         BUG_ON(target < 0);
759         pr_debug("%s: stripe %llu block: %d\n",
760                 __func__, (unsigned long long)sh->sector, target);
761
762         tgt = &sh->dev[target];
763         BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
764         dest = tgt->page;
765
766         atomic_inc(&sh->count);
767
768         if (target == qd_idx) {
769                 count = set_syndrome_sources(blocks, sh);
770                 blocks[count] = NULL; /* regenerating p is not necessary */
771                 BUG_ON(blocks[count+1] != dest); /* q should already be set */
772                 init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
773                                   ops_complete_compute, sh,
774                                   to_addr_conv(sh, percpu));
775                 tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
776         } else {
777                 /* Compute any data- or p-drive using XOR */
778                 count = 0;
779                 for (i = disks; i-- ; ) {
780                         if (i == target || i == qd_idx)
781                                 continue;
782                         blocks[count++] = sh->dev[i].page;
783                 }
784
785                 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
786                                   NULL, ops_complete_compute, sh,
787                                   to_addr_conv(sh, percpu));
788                 tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit);
789         }
790
791         return tx;
792 }
793
794 static struct dma_async_tx_descriptor *
795 ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
796 {
797         int i, count, disks = sh->disks;
798         int syndrome_disks = sh->ddf_layout ? disks : disks-2;
799         int d0_idx = raid6_d0(sh);
800         int faila = -1, failb = -1;
801         int target = sh->ops.target;
802         int target2 = sh->ops.target2;
803         struct r5dev *tgt = &sh->dev[target];
804         struct r5dev *tgt2 = &sh->dev[target2];
805         struct dma_async_tx_descriptor *tx;
806         struct page **blocks = percpu->scribble;
807         struct async_submit_ctl submit;
808
809         pr_debug("%s: stripe %llu block1: %d block2: %d\n",
810                  __func__, (unsigned long long)sh->sector, target, target2);
811         BUG_ON(target < 0 || target2 < 0);
812         BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
813         BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags));
814
815         /* we need to open-code set_syndrome_sources to handle the
816          * slot number conversion for 'faila' and 'failb'
817          */
818         for (i = 0; i < disks ; i++)
819                 blocks[i] = NULL;
820         count = 0;
821         i = d0_idx;
822         do {
823                 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
824
825                 blocks[slot] = sh->dev[i].page;
826
827                 if (i == target)
828                         faila = slot;
829                 if (i == target2)
830                         failb = slot;
831                 i = raid6_next_disk(i, disks);
832         } while (i != d0_idx);
833
834         BUG_ON(faila == failb);
835         if (failb < faila)
836                 swap(faila, failb);
837         pr_debug("%s: stripe: %llu faila: %d failb: %d\n",
838                  __func__, (unsigned long long)sh->sector, faila, failb);
839
840         atomic_inc(&sh->count);
841
842         if (failb == syndrome_disks+1) {
843                 /* Q disk is one of the missing disks */
844                 if (faila == syndrome_disks) {
845                         /* Missing P+Q, just recompute */
846                         init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
847                                           ops_complete_compute, sh,
848                                           to_addr_conv(sh, percpu));
849                         return async_gen_syndrome(blocks, 0, syndrome_disks+2,
850                                                   STRIPE_SIZE, &submit);
851                 } else {
852                         struct page *dest;
853                         int data_target;
854                         int qd_idx = sh->qd_idx;
855
856                         /* Missing D+Q: recompute D from P, then recompute Q */
857                         if (target == qd_idx)
858                                 data_target = target2;
859                         else
860                                 data_target = target;
861
862                         count = 0;
863                         for (i = disks; i-- ; ) {
864                                 if (i == data_target || i == qd_idx)
865                                         continue;
866                                 blocks[count++] = sh->dev[i].page;
867                         }
868                         dest = sh->dev[data_target].page;
869                         init_async_submit(&submit,
870                                           ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
871                                           NULL, NULL, NULL,
872                                           to_addr_conv(sh, percpu));
873                         tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE,
874                                        &submit);
875
876                         count = set_syndrome_sources(blocks, sh);
877                         init_async_submit(&submit, ASYNC_TX_FENCE, tx,
878                                           ops_complete_compute, sh,
879                                           to_addr_conv(sh, percpu));
880                         return async_gen_syndrome(blocks, 0, count+2,
881                                                   STRIPE_SIZE, &submit);
882                 }
883         } else {
884                 init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
885                                   ops_complete_compute, sh,
886                                   to_addr_conv(sh, percpu));
887                 if (failb == syndrome_disks) {
888                         /* We're missing D+P. */
889                         return async_raid6_datap_recov(syndrome_disks+2,
890                                                        STRIPE_SIZE, faila,
891                                                        blocks, &submit);
892                 } else {
893                         /* We're missing D+D. */
894                         return async_raid6_2data_recov(syndrome_disks+2,
895                                                        STRIPE_SIZE, faila, failb,
896                                                        blocks, &submit);
897                 }
898         }
899 }
900
901
902 static void ops_complete_prexor(void *stripe_head_ref)
903 {
904         struct stripe_head *sh = stripe_head_ref;
905
906         pr_debug("%s: stripe %llu\n", __func__,
907                 (unsigned long long)sh->sector);
908 }
909
910 static struct dma_async_tx_descriptor *
911 ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu,
912                struct dma_async_tx_descriptor *tx)
913 {
914         int disks = sh->disks;
915         struct page **xor_srcs = percpu->scribble;
916         int count = 0, pd_idx = sh->pd_idx, i;
917         struct async_submit_ctl submit;
918
919         /* existing parity data subtracted */
920         struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
921
922         pr_debug("%s: stripe %llu\n", __func__,
923                 (unsigned long long)sh->sector);
924
925         for (i = disks; i--; ) {
926                 struct r5dev *dev = &sh->dev[i];
927                 /* Only process blocks that are known to be uptodate */
928                 if (test_bit(R5_Wantdrain, &dev->flags))
929                         xor_srcs[count++] = dev->page;
930         }
931
932         init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
933                           ops_complete_prexor, sh, to_addr_conv(sh, percpu));
934         tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
935
936         return tx;
937 }
938
939 static struct dma_async_tx_descriptor *
940 ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
941 {
942         int disks = sh->disks;
943         int i;
944
945         pr_debug("%s: stripe %llu\n", __func__,
946                 (unsigned long long)sh->sector);
947
948         for (i = disks; i--; ) {
949                 struct r5dev *dev = &sh->dev[i];
950                 struct bio *chosen;
951
952                 if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {
953                         struct bio *wbi;
954
955                         spin_lock(&sh->lock);
956                         chosen = dev->towrite;
957                         dev->towrite = NULL;
958                         BUG_ON(dev->written);
959                         wbi = dev->written = chosen;
960                         spin_unlock(&sh->lock);
961
962                         while (wbi && wbi->bi_sector <
963                                 dev->sector + STRIPE_SECTORS) {
964                                 tx = async_copy_data(1, wbi, dev->page,
965                                         dev->sector, tx);
966                                 wbi = r5_next_bio(wbi, dev->sector);
967                         }
968                 }
969         }
970
971         return tx;
972 }
973
974 static void ops_complete_reconstruct(void *stripe_head_ref)
975 {
976         struct stripe_head *sh = stripe_head_ref;
977         int disks = sh->disks;
978         int pd_idx = sh->pd_idx;
979         int qd_idx = sh->qd_idx;
980         int i;
981
982         pr_debug("%s: stripe %llu\n", __func__,
983                 (unsigned long long)sh->sector);
984
985         for (i = disks; i--; ) {
986                 struct r5dev *dev = &sh->dev[i];
987
988                 if (dev->written || i == pd_idx || i == qd_idx)
989                         set_bit(R5_UPTODATE, &dev->flags);
990         }
991
992         if (sh->reconstruct_state == reconstruct_state_drain_run)
993                 sh->reconstruct_state = reconstruct_state_drain_result;
994         else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run)
995                 sh->reconstruct_state = reconstruct_state_prexor_drain_result;
996         else {
997                 BUG_ON(sh->reconstruct_state != reconstruct_state_run);
998                 sh->reconstruct_state = reconstruct_state_result;
999         }
1000
1001         set_bit(STRIPE_HANDLE, &sh->state);
1002         release_stripe(sh);
1003 }
1004
1005 static void
1006 ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
1007                      struct dma_async_tx_descriptor *tx)
1008 {
1009         int disks = sh->disks;
1010         struct page **xor_srcs = percpu->scribble;
1011         struct async_submit_ctl submit;
1012         int count = 0, pd_idx = sh->pd_idx, i;
1013         struct page *xor_dest;
1014         int prexor = 0;
1015         unsigned long flags;
1016
1017         pr_debug("%s: stripe %llu\n", __func__,
1018                 (unsigned long long)sh->sector);
1019
1020         /* check if prexor is active which means only process blocks
1021          * that are part of a read-modify-write (written)
1022          */
1023         if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
1024                 prexor = 1;
1025                 xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
1026                 for (i = disks; i--; ) {
1027                         struct r5dev *dev = &sh->dev[i];
1028                         if (dev->written)
1029                                 xor_srcs[count++] = dev->page;
1030                 }
1031         } else {
1032                 xor_dest = sh->dev[pd_idx].page;
1033                 for (i = disks; i--; ) {
1034                         struct r5dev *dev = &sh->dev[i];
1035                         if (i != pd_idx)
1036                                 xor_srcs[count++] = dev->page;
1037                 }
1038         }
1039
1040         /* 1/ if we prexor'd then the dest is reused as a source
1041          * 2/ if we did not prexor then we are redoing the parity
1042          * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
1043          * for the synchronous xor case
1044          */
1045         flags = ASYNC_TX_ACK |
1046                 (prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);
1047
1048         atomic_inc(&sh->count);
1049
1050         init_async_submit(&submit, flags, tx, ops_complete_reconstruct, sh,
1051                           to_addr_conv(sh, percpu));
1052         if (unlikely(count == 1))
1053                 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
1054         else
1055                 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
1056 }
1057
1058 static void
1059 ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu,
1060                      struct dma_async_tx_descriptor *tx)
1061 {
1062         struct async_submit_ctl submit;
1063         struct page **blocks = percpu->scribble;
1064         int count;
1065
1066         pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);
1067
1068         count = set_syndrome_sources(blocks, sh);
1069
1070         atomic_inc(&sh->count);
1071
1072         init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_reconstruct,
1073                           sh, to_addr_conv(sh, percpu));
1074         async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE,  &submit);
1075 }
1076
1077 static void ops_complete_check(void *stripe_head_ref)
1078 {
1079         struct stripe_head *sh = stripe_head_ref;
1080
1081         pr_debug("%s: stripe %llu\n", __func__,
1082                 (unsigned long long)sh->sector);
1083
1084         sh->check_state = check_state_check_result;
1085         set_bit(STRIPE_HANDLE, &sh->state);
1086         release_stripe(sh);
1087 }
1088
1089 static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
1090 {
1091         int disks = sh->disks;
1092         int pd_idx = sh->pd_idx;
1093         int qd_idx = sh->qd_idx;
1094         struct page *xor_dest;
1095         struct page **xor_srcs = percpu->scribble;
1096         struct dma_async_tx_descriptor *tx;
1097         struct async_submit_ctl submit;
1098         int count;
1099         int i;
1100
1101         pr_debug("%s: stripe %llu\n", __func__,
1102                 (unsigned long long)sh->sector);
1103
1104         count = 0;
1105         xor_dest = sh->dev[pd_idx].page;
1106         xor_srcs[count++] = xor_dest;
1107         for (i = disks; i--; ) {
1108                 if (i == pd_idx || i == qd_idx)
1109                         continue;
1110                 xor_srcs[count++] = sh->dev[i].page;
1111         }
1112
1113         init_async_submit(&submit, 0, NULL, NULL, NULL,
1114                           to_addr_conv(sh, percpu));
1115         tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
1116                            &sh->ops.zero_sum_result, &submit);
1117
1118         atomic_inc(&sh->count);
1119         init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL);
1120         tx = async_trigger_callback(&submit);
1121 }
1122
1123 static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp)
1124 {
1125         struct page **srcs = percpu->scribble;
1126         struct async_submit_ctl submit;
1127         int count;
1128
1129         pr_debug("%s: stripe %llu checkp: %d\n", __func__,
1130                 (unsigned long long)sh->sector, checkp);
1131
1132         count = set_syndrome_sources(srcs, sh);
1133         if (!checkp)
1134                 srcs[count] = NULL;
1135
1136         atomic_inc(&sh->count);
1137         init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check,
1138                           sh, to_addr_conv(sh, percpu));
1139         async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE,
1140                            &sh->ops.zero_sum_result, percpu->spare_page, &submit);
1141 }
1142
1143 static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
1144 {
1145         int overlap_clear = 0, i, disks = sh->disks;
1146         struct dma_async_tx_descriptor *tx = NULL;
1147         raid5_conf_t *conf = sh->raid_conf;
1148         int level = conf->level;
1149         struct raid5_percpu *percpu;
1150         unsigned long cpu;
1151
1152         cpu = get_cpu();
1153         percpu = per_cpu_ptr(conf->percpu, cpu);
1154         if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
1155                 ops_run_biofill(sh);
1156                 overlap_clear++;
1157         }
1158
1159         if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) {
1160                 if (level < 6)
1161                         tx = ops_run_compute5(sh, percpu);
1162                 else {
1163                         if (sh->ops.target2 < 0 || sh->ops.target < 0)
1164                                 tx = ops_run_compute6_1(sh, percpu);
1165                         else
1166                                 tx = ops_run_compute6_2(sh, percpu);
1167                 }
1168                 /* terminate the chain if reconstruct is not set to be run */
1169                 if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request))
1170                         async_tx_ack(tx);
1171         }
1172
1173         if (test_bit(STRIPE_OP_PREXOR, &ops_request))
1174                 tx = ops_run_prexor(sh, percpu, tx);
1175
1176         if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
1177                 tx = ops_run_biodrain(sh, tx);
1178                 overlap_clear++;
1179         }
1180
1181         if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) {
1182                 if (level < 6)
1183                         ops_run_reconstruct5(sh, percpu, tx);
1184                 else
1185                         ops_run_reconstruct6(sh, percpu, tx);
1186         }
1187
1188         if (test_bit(STRIPE_OP_CHECK, &ops_request)) {
1189                 if (sh->check_state == check_state_run)
1190                         ops_run_check_p(sh, percpu);
1191                 else if (sh->check_state == check_state_run_q)
1192                         ops_run_check_pq(sh, percpu, 0);
1193                 else if (sh->check_state == check_state_run_pq)
1194                         ops_run_check_pq(sh, percpu, 1);
1195                 else
1196                         BUG();
1197         }
1198
1199         if (overlap_clear)
1200                 for (i = disks; i--; ) {
1201                         struct r5dev *dev = &sh->dev[i];
1202                         if (test_and_clear_bit(R5_Overlap, &dev->flags))
1203                                 wake_up(&sh->raid_conf->wait_for_overlap);
1204                 }
1205         put_cpu();
1206 }
1207
1208 #ifdef CONFIG_MULTICORE_RAID456
1209 static void async_run_ops(void *param, async_cookie_t cookie)
1210 {
1211         struct stripe_head *sh = param;
1212         unsigned long ops_request = sh->ops.request;
1213
1214         clear_bit_unlock(STRIPE_OPS_REQ_PENDING, &sh->state);
1215         wake_up(&sh->ops.wait_for_ops);
1216
1217         __raid_run_ops(sh, ops_request);
1218         release_stripe(sh);
1219 }
1220
1221 static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
1222 {
1223         /* since handle_stripe can be called outside of raid5d context
1224          * we need to ensure sh->ops.request is de-staged before another
1225          * request arrives
1226          */
1227         wait_event(sh->ops.wait_for_ops,
1228                    !test_and_set_bit_lock(STRIPE_OPS_REQ_PENDING, &sh->state));
1229         sh->ops.request = ops_request;
1230
1231         atomic_inc(&sh->count);
1232         async_schedule(async_run_ops, sh);
1233 }
1234 #else
1235 #define raid_run_ops __raid_run_ops
1236 #endif
1237
1238 static int grow_one_stripe(raid5_conf_t *conf)
1239 {
1240         struct stripe_head *sh;
1241         int disks = max(conf->raid_disks, conf->previous_raid_disks);
1242         sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
1243         if (!sh)
1244                 return 0;
1245         memset(sh, 0, sizeof(*sh) + (disks-1)*sizeof(struct r5dev));
1246         sh->raid_conf = conf;
1247         spin_lock_init(&sh->lock);
1248         #ifdef CONFIG_MULTICORE_RAID456
1249         init_waitqueue_head(&sh->ops.wait_for_ops);
1250         #endif
1251
1252         if (grow_buffers(sh, disks)) {
1253                 shrink_buffers(sh, disks);
1254                 kmem_cache_free(conf->slab_cache, sh);
1255                 return 0;
1256         }
1257         /* we just created an active stripe so... */
1258         atomic_set(&sh->count, 1);
1259         atomic_inc(&conf->active_stripes);
1260         INIT_LIST_HEAD(&sh->lru);
1261         release_stripe(sh);
1262         return 1;
1263 }
1264
1265 static int grow_stripes(raid5_conf_t *conf, int num)
1266 {
1267         struct kmem_cache *sc;
1268         int devs = max(conf->raid_disks, conf->previous_raid_disks);
1269
1270         sprintf(conf->cache_name[0],
1271                 "raid%d-%s", conf->level, mdname(conf->mddev));
1272         sprintf(conf->cache_name[1],
1273                 "raid%d-%s-alt", conf->level, mdname(conf->mddev));
1274         conf->active_name = 0;
1275         sc = kmem_cache_create(conf->cache_name[conf->active_name],
1276                                sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
1277                                0, 0, NULL);
1278         if (!sc)
1279                 return 1;
1280         conf->slab_cache = sc;
1281         conf->pool_size = devs;
1282         while (num--)
1283                 if (!grow_one_stripe(conf))
1284                         return 1;
1285         return 0;
1286 }
1287
1288 /**
1289  * scribble_len - return the required size of the scribble region
1290  * @num - total number of disks in the array
1291  *
1292  * The size must be enough to contain:
1293  * 1/ a struct page pointer for each device in the array +2
1294  * 2/ room to convert each entry in (1) to its corresponding dma
1295  *    (dma_map_page()) or page (page_address()) address.
1296  *
1297  * Note: the +2 is for the destination buffers of the ddf/raid6 case where we
1298  * calculate over all devices (not just the data blocks), using zeros in place
1299  * of the P and Q blocks.
1300  */
1301 static size_t scribble_len(int num)
1302 {
1303         size_t len;
1304
1305         len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2);
1306
1307         return len;
1308 }
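/*
 * Layout of the resulting per-cpu scribble region (see to_addr_conv()
 * above, which computes the offset of the second half):
 *
 *      +--------------------------+---------------------------+
 *      | struct page *src[num+2]  | addr_conv_t conv[num+2]   |
 *      +--------------------------+---------------------------+
 */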
1309
1310 static int resize_stripes(raid5_conf_t *conf, int newsize)
1311 {
1312         /* Make all the stripes able to hold 'newsize' devices.
1313          * New slots in each stripe get 'page' set to a new page.
1314          *
1315          * This happens in stages:
1316          * 1/ create a new kmem_cache and allocate the required number of
1317          *    stripe_heads.
1318          * 2/ gather all the old stripe_heads and tranfer the pages across
1319          *    to the new stripe_heads.  This will have the side effect of
1320          *    freezing the array as once all stripe_heads have been collected,
1321          *    no IO will be possible.  Old stripe heads are freed once their
1322          *    pages have been transferred over, and the old kmem_cache is
1323          *    freed when all stripes are done.
1324          * 3/ reallocate conf->disks to be suitable bigger.  If this fails,
1325          *    we simple return a failre status - no need to clean anything up.
1326          * 4/ allocate new pages for the new slots in the new stripe_heads.
1327          *    If this fails, we don't bother trying the shrink the
1328          *    stripe_heads down again, we just leave them as they are.
1329          *    As each stripe_head is processed the new one is released into
1330          *    active service.
1331          *
1332          * Once step2 is started, we cannot afford to wait for a write,
1333          * so we use GFP_NOIO allocations.
1334          */
1335         struct stripe_head *osh, *nsh;
1336         LIST_HEAD(newstripes);
1337         struct disk_info *ndisks;
1338         unsigned long cpu;
1339         int err;
1340         struct kmem_cache *sc;
1341         int i;
1342
1343         if (newsize <= conf->pool_size)
1344                 return 0; /* never bother to shrink */
1345
1346         err = md_allow_write(conf->mddev);
1347         if (err)
1348                 return err;
1349
1350         /* Step 1 */
1351         sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
1352                                sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
1353                                0, 0, NULL);
1354         if (!sc)
1355                 return -ENOMEM;
1356
1357         for (i = conf->max_nr_stripes; i; i--) {
1358                 nsh = kmem_cache_alloc(sc, GFP_KERNEL);
1359                 if (!nsh)
1360                         break;
1361
1362                 memset(nsh, 0, sizeof(*nsh) + (newsize-1)*sizeof(struct r5dev));
1363
1364                 nsh->raid_conf = conf;
1365                 spin_lock_init(&nsh->lock);
1366                 #ifdef CONFIG_MULTICORE_RAID456
1367                 init_waitqueue_head(&nsh->ops.wait_for_ops);
1368                 #endif
1369
1370                 list_add(&nsh->lru, &newstripes);
1371         }
1372         if (i) {
1373                 /* didn't get enough, give up */
1374                 while (!list_empty(&newstripes)) {
1375                         nsh = list_entry(newstripes.next, struct stripe_head, lru);
1376                         list_del(&nsh->lru);
1377                         kmem_cache_free(sc, nsh);
1378                 }
1379                 kmem_cache_destroy(sc);
1380                 return -ENOMEM;
1381         }
1382         /* Step 2 - Must use GFP_NOIO now.
1383          * OK, we have enough stripes, start collecting inactive
1384          * stripes and copying them over
1385          */
1386         list_for_each_entry(nsh, &newstripes, lru) {
1387                 spin_lock_irq(&conf->device_lock);
1388                 wait_event_lock_irq(conf->wait_for_stripe,
1389                                     !list_empty(&conf->inactive_list),
1390                                     conf->device_lock,
1391                                     unplug_slaves(conf->mddev)
1392                         );
1393                 osh = get_free_stripe(conf);
1394                 spin_unlock_irq(&conf->device_lock);
1395                 atomic_set(&nsh->count, 1);
1396                 for(i=0; i<conf->pool_size; i++)
1397                         nsh->dev[i].page = osh->dev[i].page;
1398                 for( ; i<newsize; i++)
1399                         nsh->dev[i].page = NULL;
1400                 kmem_cache_free(conf->slab_cache, osh);
1401         }
1402         kmem_cache_destroy(conf->slab_cache);
1403
1404         /* Step 3.
1405          * At this point, we are holding all the stripes so the array
1406          * is completely stalled, so now is a good time to resize
1407          * conf->disks and the scribble region
1408          */
1409         ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
1410         if (ndisks) {
1411                 for (i=0; i<conf->raid_disks; i++)
1412                         ndisks[i] = conf->disks[i];
1413                 kfree(conf->disks);
1414                 conf->disks = ndisks;
1415         } else
1416                 err = -ENOMEM;
1417
1418         get_online_cpus();
1419         conf->scribble_len = scribble_len(newsize);
1420         for_each_present_cpu(cpu) {
1421                 struct raid5_percpu *percpu;
1422                 void *scribble;
1423
1424                 percpu = per_cpu_ptr(conf->percpu, cpu);
1425                 scribble = kmalloc(conf->scribble_len, GFP_NOIO);
1426
1427                 if (scribble) {
1428                         kfree(percpu->scribble);
1429                         percpu->scribble = scribble;
1430                 } else {
1431                         err = -ENOMEM;
1432                         break;
1433                 }
1434         }
1435         put_online_cpus();
1436
1437         /* Step 4, return new stripes to service */
1438         while(!list_empty(&newstripes)) {
1439                 nsh = list_entry(newstripes.next, struct stripe_head, lru);
1440                 list_del_init(&nsh->lru);
1441
1442                 for (i=conf->raid_disks; i < newsize; i++)
1443                         if (nsh->dev[i].page == NULL) {
1444                                 struct page *p = alloc_page(GFP_NOIO);
1445                                 nsh->dev[i].page = p;
1446                                 if (!p)
1447                                         err = -ENOMEM;
1448                         }
1449                 release_stripe(nsh);
1450         }
1451         /* critical section passed, GFP_NOIO no longer needed */
1452
1453         conf->slab_cache = sc;
1454         conf->active_name = 1-conf->active_name;
1455         conf->pool_size = newsize;
1456         return err;
1457 }
1458
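/*
 * Editor's aside: the grow-by-migration pattern above (allocate every new,
 * larger object first, then steal the payload pointers from the old objects
 * one by one) is worth seeing in isolation.  A minimal userspace sketch,
 * with hypothetical types that merely stand in for stripe_head/r5dev --
 * not driver code:
 *
 *	#include <stdlib.h>
 *
 *	struct head {
 *		int nslots;
 *		void *slot[];		// like r5dev.page: payload not copied
 *	};
 *
 *	// Grow 'old' to 'newsize' slots by transferring pointers.
 *	// On allocation failure 'old' is left untouched (cf. step 1/).
 *	static struct head *grow(struct head *old, int newsize)
 *	{
 *		struct head *nh;
 *		int i;
 *
 *		if (newsize <= old->nslots)
 *			return old;	// never bother to shrink
 *		nh = calloc(1, sizeof(*nh) + newsize * sizeof(void *));
 *		if (!nh)
 *			return NULL;
 *		nh->nslots = newsize;
 *		for (i = 0; i < old->nslots; i++)
 *			nh->slot[i] = old->slot[i];	// steal, don't copy
 *		free(old);		// frees the head, not the payload
 *		return nh;
 *	}
 *
 * New slots (old->nslots..newsize-1) stay NULL and are populated later,
 * just as step 4/ allocates pages for the new r5dev slots with GFP_NOIO.
 */
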
1459 static int drop_one_stripe(raid5_conf_t *conf)
1460 {
1461         struct stripe_head *sh;
1462
1463         spin_lock_irq(&conf->device_lock);
1464         sh = get_free_stripe(conf);
1465         spin_unlock_irq(&conf->device_lock);
1466         if (!sh)
1467                 return 0;
1468         BUG_ON(atomic_read(&sh->count));
1469         shrink_buffers(sh, conf->pool_size);
1470         kmem_cache_free(conf->slab_cache, sh);
1471         atomic_dec(&conf->active_stripes);
1472         return 1;
1473 }
1474
1475 static void shrink_stripes(raid5_conf_t *conf)
1476 {
1477         while (drop_one_stripe(conf))
1478                 ;
1479
1480         if (conf->slab_cache)
1481                 kmem_cache_destroy(conf->slab_cache);
1482         conf->slab_cache = NULL;
1483 }
1484
1485 static void raid5_end_read_request(struct bio * bi, int error)
1486 {
1487         struct stripe_head *sh = bi->bi_private;
1488         raid5_conf_t *conf = sh->raid_conf;
1489         int disks = sh->disks, i;
1490         int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
1491         char b[BDEVNAME_SIZE];
1492         mdk_rdev_t *rdev;
1493
1494
1495         for (i=0 ; i<disks; i++)
1496                 if (bi == &sh->dev[i].req)
1497                         break;
1498
1499         pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n",
1500                 (unsigned long long)sh->sector, i, atomic_read(&sh->count),
1501                 uptodate);
1502         if (i == disks) {
1503                 BUG();
1504                 return;
1505         }
1506
1507         if (uptodate) {
1508                 set_bit(R5_UPTODATE, &sh->dev[i].flags);
1509                 if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
1510                         rdev = conf->disks[i].rdev;
1511                         printk_rl(KERN_INFO "raid5:%s: read error corrected"
1512                                   " (%lu sectors at %llu on %s)\n",
1513                                   mdname(conf->mddev), STRIPE_SECTORS,
1514                                   (unsigned long long)(sh->sector
1515                                                        + rdev->data_offset),
1516                                   bdevname(rdev->bdev, b));
1517                         clear_bit(R5_ReadError, &sh->dev[i].flags);
1518                         clear_bit(R5_ReWrite, &sh->dev[i].flags);
1519                 }
1520                 if (atomic_read(&conf->disks[i].rdev->read_errors))
1521                         atomic_set(&conf->disks[i].rdev->read_errors, 0);
1522         } else {
1523                 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
1524                 int retry = 0;
1525                 rdev = conf->disks[i].rdev;
1526
1527                 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
1528                 atomic_inc(&rdev->read_errors);
1529                 if (conf->mddev->degraded)
1530                         printk_rl(KERN_WARNING
1531                                   "raid5:%s: read error not correctable "
1532                                   "(sector %llu on %s).\n",
1533                                   mdname(conf->mddev),
1534                                   (unsigned long long)(sh->sector
1535                                                        + rdev->data_offset),
1536                                   bdn);
1537                 else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
1538                         /* Oh, no!!! */
1539                         printk_rl(KERN_WARNING
1540                                   "raid5:%s: read error NOT corrected!! "
1541                                   "(sector %llu on %s).\n",
1542                                   mdname(conf->mddev),
1543                                   (unsigned long long)(sh->sector
1544                                                        + rdev->data_offset),
1545                                   bdn);
1546                 else if (atomic_read(&rdev->read_errors)
1547                          > conf->max_nr_stripes)
1548                         printk(KERN_WARNING
1549                                "raid5:%s: Too many read errors, failing device %s.\n",
1550                                mdname(conf->mddev), bdn);
1551                 else
1552                         retry = 1;
1553                 if (retry)
1554                         set_bit(R5_ReadError, &sh->dev[i].flags);
1555                 else {
1556                         clear_bit(R5_ReadError, &sh->dev[i].flags);
1557                         clear_bit(R5_ReWrite, &sh->dev[i].flags);
1558                         md_error(conf->mddev, rdev);
1559                 }
1560         }
1561         rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
1562         clear_bit(R5_LOCKED, &sh->dev[i].flags);
1563         set_bit(STRIPE_HANDLE, &sh->state);
1564         release_stripe(sh);
1565 }
1566
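/*
 * Editor's note, summarizing the error leg above: a failed read is routed
 * through recovery only when it can still be repaired.  The decision
 * ladder is:
 *
 *	array degraded			-> no redundancy left: md_error()
 *	R5_ReWrite already set		-> the rewrite did not stick: md_error()
 *	read_errors > max_nr_stripes	-> device is flaky: md_error()
 *	otherwise			-> set R5_ReadError; the stripe code
 *					   recomputes the block from parity
 *					   and rewrites it (R5_ReWrite)
 *
 * A later successful read of an R5_ReadError block takes the "corrected"
 * path above and resets the per-device read_errors counter.
 */
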
1567 static void raid5_end_write_request(struct bio *bi, int error)
1568 {
1569         struct stripe_head *sh = bi->bi_private;
1570         raid5_conf_t *conf = sh->raid_conf;
1571         int disks = sh->disks, i;
1572         int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
1573
1574         for (i=0 ; i<disks; i++)
1575                 if (bi == &sh->dev[i].req)
1576                         break;
1577
1578         pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
1579                 (unsigned long long)sh->sector, i, atomic_read(&sh->count),
1580                 uptodate);
1581         if (i == disks) {
1582                 BUG();
1583                 return;
1584         }
1585
1586         if (!uptodate)
1587                 md_error(conf->mddev, conf->disks[i].rdev);
1588
1589         rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
1590
1591         clear_bit(R5_LOCKED, &sh->dev[i].flags);
1592         set_bit(STRIPE_HANDLE, &sh->state);
1593         release_stripe(sh);
1594 }
1595
1596
1597 static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);
1598
1599 static void raid5_build_block(struct stripe_head *sh, int i, int previous)
1600 {
1601         struct r5dev *dev = &sh->dev[i];
1602
1603         bio_init(&dev->req);
1604         dev->req.bi_io_vec = &dev->vec;
1605         dev->req.bi_vcnt++;
1606         dev->req.bi_max_vecs++;
1607         dev->vec.bv_page = dev->page;
1608         dev->vec.bv_len = STRIPE_SIZE;
1609         dev->vec.bv_offset = 0;
1610
1611         dev->req.bi_sector = sh->sector;
1612         dev->req.bi_private = sh;
1613
1614         dev->flags = 0;
1615         dev->sector = compute_blocknr(sh, i, previous);
1616 }
1617
1618 static void error(mddev_t *mddev, mdk_rdev_t *rdev)
1619 {
1620         char b[BDEVNAME_SIZE];
1621         raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
1622         pr_debug("raid5: error called\n");
1623
1624         if (!test_bit(Faulty, &rdev->flags)) {
1625                 set_bit(MD_CHANGE_DEVS, &mddev->flags);
1626                 if (test_and_clear_bit(In_sync, &rdev->flags)) {
1627                         unsigned long flags;
1628                         spin_lock_irqsave(&conf->device_lock, flags);
1629                         mddev->degraded++;
1630                         spin_unlock_irqrestore(&conf->device_lock, flags);
1631                         /*
1632                          * if recovery was running, make sure it aborts.
1633                          */
1634                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1635                 }
1636                 set_bit(Faulty, &rdev->flags);
1637                 printk(KERN_ALERT
1638                        "raid5: Disk failure on %s, disabling device.\n"
1639                        "raid5: Operation continuing on %d devices.\n",
1640                        bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded);
1641         }
1642 }
1643
1644 /*
1645  * Input: a 'big' sector number,
1646  * Output: index of the data and parity disk, and the sector # in them.
1647  */
1648 static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1649                                      int previous, int *dd_idx,
1650                                      struct stripe_head *sh)
1651 {
1652         long stripe;
1653         unsigned long chunk_number;
1654         unsigned int chunk_offset;
1655         int pd_idx, qd_idx;
1656         int ddf_layout = 0;
1657         sector_t new_sector;
1658         int algorithm = previous ? conf->prev_algo
1659                                  : conf->algorithm;
1660         int sectors_per_chunk = previous ? conf->prev_chunk_sectors
1661                                          : conf->chunk_sectors;
1662         int raid_disks = previous ? conf->previous_raid_disks
1663                                   : conf->raid_disks;
1664         int data_disks = raid_disks - conf->max_degraded;
1665
1666         /* First compute the information on this sector */
1667
1668         /*
1669          * Compute the chunk number and the sector offset inside the chunk
1670          */
1671         chunk_offset = sector_div(r_sector, sectors_per_chunk);
1672         chunk_number = r_sector;
1673         BUG_ON(r_sector != chunk_number);
1674
1675         /*
1676          * Compute the stripe number
1677          */
1678         stripe = chunk_number / data_disks;
1679
1680         /*
1681          * Compute the data disk and parity disk indexes inside the stripe
1682          */
1683         *dd_idx = chunk_number % data_disks;
1684
1685         /*
1686          * Select the parity disk based on the user selected algorithm.
1687          */
1688         pd_idx = qd_idx = ~0;
1689         switch(conf->level) {
1690         case 4:
1691                 pd_idx = data_disks;
1692                 break;
1693         case 5:
1694                 switch (algorithm) {
1695                 case ALGORITHM_LEFT_ASYMMETRIC:
1696                         pd_idx = data_disks - stripe % raid_disks;
1697                         if (*dd_idx >= pd_idx)
1698                                 (*dd_idx)++;
1699                         break;
1700                 case ALGORITHM_RIGHT_ASYMMETRIC:
1701                         pd_idx = stripe % raid_disks;
1702                         if (*dd_idx >= pd_idx)
1703                                 (*dd_idx)++;
1704                         break;
1705                 case ALGORITHM_LEFT_SYMMETRIC:
1706                         pd_idx = data_disks - stripe % raid_disks;
1707                         *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
1708                         break;
1709                 case ALGORITHM_RIGHT_SYMMETRIC:
1710                         pd_idx = stripe % raid_disks;
1711                         *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
1712                         break;
1713                 case ALGORITHM_PARITY_0:
1714                         pd_idx = 0;
1715                         (*dd_idx)++;
1716                         break;
1717                 case ALGORITHM_PARITY_N:
1718                         pd_idx = data_disks;
1719                         break;
1720                 default:
1721                         printk(KERN_ERR "raid5: unsupported algorithm %d\n",
1722                                 algorithm);
1723                         BUG();
1724                 }
1725                 break;
1726         case 6:
1727
1728                 switch (algorithm) {
1729                 case ALGORITHM_LEFT_ASYMMETRIC:
1730                         pd_idx = raid_disks - 1 - (stripe % raid_disks);
1731                         qd_idx = pd_idx + 1;
1732                         if (pd_idx == raid_disks-1) {
1733                                 (*dd_idx)++;    /* Q D D D P */
1734                                 qd_idx = 0;
1735                         } else if (*dd_idx >= pd_idx)
1736                                 (*dd_idx) += 2; /* D D P Q D */
1737                         break;
1738                 case ALGORITHM_RIGHT_ASYMMETRIC:
1739                         pd_idx = stripe % raid_disks;
1740                         qd_idx = pd_idx + 1;
1741                         if (pd_idx == raid_disks-1) {
1742                                 (*dd_idx)++;    /* Q D D D P */
1743                                 qd_idx = 0;
1744                         } else if (*dd_idx >= pd_idx)
1745                                 (*dd_idx) += 2; /* D D P Q D */
1746                         break;
1747                 case ALGORITHM_LEFT_SYMMETRIC:
1748                         pd_idx = raid_disks - 1 - (stripe % raid_disks);
1749                         qd_idx = (pd_idx + 1) % raid_disks;
1750                         *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
1751                         break;
1752                 case ALGORITHM_RIGHT_SYMMETRIC:
1753                         pd_idx = stripe % raid_disks;
1754                         qd_idx = (pd_idx + 1) % raid_disks;
1755                         *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
1756                         break;
1757
1758                 case ALGORITHM_PARITY_0:
1759                         pd_idx = 0;
1760                         qd_idx = 1;
1761                         (*dd_idx) += 2;
1762                         break;
1763                 case ALGORITHM_PARITY_N:
1764                         pd_idx = data_disks;
1765                         qd_idx = data_disks + 1;
1766                         break;
1767
1768                 case ALGORITHM_ROTATING_ZERO_RESTART:
1769                         /* Exactly the same as RIGHT_ASYMMETRIC, but the
1770                          * order of blocks for computing Q is different.
1771                          */
1772                         pd_idx = stripe % raid_disks;
1773                         qd_idx = pd_idx + 1;
1774                         if (pd_idx == raid_disks-1) {
1775                                 (*dd_idx)++;    /* Q D D D P */
1776                                 qd_idx = 0;
1777                         } else if (*dd_idx >= pd_idx)
1778                                 (*dd_idx) += 2; /* D D P Q D */
1779                         ddf_layout = 1;
1780                         break;
1781
1782                 case ALGORITHM_ROTATING_N_RESTART:
1783                         /* Same as left_asymmetric, but the first stripe is
1784                          * D D D P Q  rather than
1785                          * Q D D D P
1786                          */
1787                         pd_idx = raid_disks - 1 - ((stripe + 1) % raid_disks);
1788                         qd_idx = pd_idx + 1;
1789                         if (pd_idx == raid_disks-1) {
1790                                 (*dd_idx)++;    /* Q D D D P */
1791                                 qd_idx = 0;
1792                         } else if (*dd_idx >= pd_idx)
1793                                 (*dd_idx) += 2; /* D D P Q D */
1794                         ddf_layout = 1;
1795                         break;
1796
1797                 case ALGORITHM_ROTATING_N_CONTINUE:
1798                         /* Same as left_symmetric but Q is before P */
1799                         pd_idx = raid_disks - 1 - (stripe % raid_disks);
1800                         qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
1801                         *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
1802                         ddf_layout = 1;
1803                         break;
1804
1805                 case ALGORITHM_LEFT_ASYMMETRIC_6:
1806                         /* RAID5 left_asymmetric, with Q on last device */
1807                         pd_idx = data_disks - stripe % (raid_disks-1);
1808                         if (*dd_idx >= pd_idx)
1809                                 (*dd_idx)++;
1810                         qd_idx = raid_disks - 1;
1811                         break;
1812
1813                 case ALGORITHM_RIGHT_ASYMMETRIC_6:
1814                         pd_idx = stripe % (raid_disks-1);
1815                         if (*dd_idx >= pd_idx)
1816                                 (*dd_idx)++;
1817                         qd_idx = raid_disks - 1;
1818                         break;
1819
1820                 case ALGORITHM_LEFT_SYMMETRIC_6:
1821                         pd_idx = data_disks - stripe % (raid_disks-1);
1822                         *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
1823                         qd_idx = raid_disks - 1;
1824                         break;
1825
1826                 case ALGORITHM_RIGHT_SYMMETRIC_6:
1827                         pd_idx = stripe % (raid_disks-1);
1828                         *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
1829                         qd_idx = raid_disks - 1;
1830                         break;
1831
1832                 case ALGORITHM_PARITY_0_6:
1833                         pd_idx = 0;
1834                         (*dd_idx)++;
1835                         qd_idx = raid_disks - 1;
1836                         break;
1837
1838
1839                 default:
1840                         printk(KERN_CRIT "raid6: unsupported algorithm %d\n",
1841                                algorithm);
1842                         BUG();
1843                 }
1844                 break;
1845         }
1846
1847         if (sh) {
1848                 sh->pd_idx = pd_idx;
1849                 sh->qd_idx = qd_idx;
1850                 sh->ddf_layout = ddf_layout;
1851         }
1852         /*
1853          * Finally, compute the new sector number
1854          */
1855         new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
1856         return new_sector;
1857 }
1858
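/*
 * Editor's sketch: every layout case above is plain modular arithmetic.
 * The default RAID5 layout (ALGORITHM_LEFT_SYMMETRIC) in standalone
 * userspace form -- a hypothetical helper, not driver code:
 *
 *	// Map (stripe, logical data index) to physical slots for the
 *	// left-symmetric layout: parity rotates down one disk per stripe
 *	// and data starts just after parity, wrapping around.
 *	static void left_symmetric(long stripe, int dd_in, int raid_disks,
 *				   int *pd_idx, int *dd_idx)
 *	{
 *		int data_disks = raid_disks - 1;
 *
 *		*pd_idx = data_disks - stripe % raid_disks;
 *		*dd_idx = (*pd_idx + 1 + dd_in) % raid_disks;
 *	}
 *
 * E.g. on a 4-disk array: stripe 0 puts parity on disk 3 and data block 0
 * on disk 0; stripe 1 puts parity on disk 2 and data block 0 on disk 3.
 */
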
1859
1860 static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
1861 {
1862         raid5_conf_t *conf = sh->raid_conf;
1863         int raid_disks = sh->disks;
1864         int data_disks = raid_disks - conf->max_degraded;
1865         sector_t new_sector = sh->sector, check;
1866         int sectors_per_chunk = previous ? conf->prev_chunk_sectors
1867                                          : conf->chunk_sectors;
1868         int algorithm = previous ? conf->prev_algo
1869                                  : conf->algorithm;
1870         sector_t stripe;
1871         int chunk_offset;
1872         int chunk_number, dummy1, dd_idx = i;
1873         sector_t r_sector;
1874         struct stripe_head sh2;
1875
1876
1877         chunk_offset = sector_div(new_sector, sectors_per_chunk);
1878         stripe = new_sector;
1879         BUG_ON(new_sector != stripe);
1880
1881         if (i == sh->pd_idx)
1882                 return 0;
1883         switch(conf->level) {
1884         case 4: break;
1885         case 5:
1886                 switch (algorithm) {
1887                 case ALGORITHM_LEFT_ASYMMETRIC:
1888                 case ALGORITHM_RIGHT_ASYMMETRIC:
1889                         if (i > sh->pd_idx)
1890                                 i--;
1891                         break;
1892                 case ALGORITHM_LEFT_SYMMETRIC:
1893                 case ALGORITHM_RIGHT_SYMMETRIC:
1894                         if (i < sh->pd_idx)
1895                                 i += raid_disks;
1896                         i -= (sh->pd_idx + 1);
1897                         break;
1898                 case ALGORITHM_PARITY_0:
1899                         i -= 1;
1900                         break;
1901                 case ALGORITHM_PARITY_N:
1902                         break;
1903                 default:
1904                         printk(KERN_ERR "raid5: unsupported algorithm %d\n",
1905                                algorithm);
1906                         BUG();
1907                 }
1908                 break;
1909         case 6:
1910                 if (i == sh->qd_idx)
1911                         return 0; /* It is the Q disk */
1912                 switch (algorithm) {
1913                 case ALGORITHM_LEFT_ASYMMETRIC:
1914                 case ALGORITHM_RIGHT_ASYMMETRIC:
1915                 case ALGORITHM_ROTATING_ZERO_RESTART:
1916                 case ALGORITHM_ROTATING_N_RESTART:
1917                         if (sh->pd_idx == raid_disks-1)
1918                                 i--;    /* Q D D D P */
1919                         else if (i > sh->pd_idx)
1920                                 i -= 2; /* D D P Q D */
1921                         break;
1922                 case ALGORITHM_LEFT_SYMMETRIC:
1923                 case ALGORITHM_RIGHT_SYMMETRIC:
1924                         if (sh->pd_idx == raid_disks-1)
1925                                 i--; /* Q D D D P */
1926                         else {
1927                                 /* D D P Q D */
1928                                 if (i < sh->pd_idx)
1929                                         i += raid_disks;
1930                                 i -= (sh->pd_idx + 2);
1931                         }
1932                         break;
1933                 case ALGORITHM_PARITY_0:
1934                         i -= 2;
1935                         break;
1936                 case ALGORITHM_PARITY_N:
1937                         break;
1938                 case ALGORITHM_ROTATING_N_CONTINUE:
1939                         /* Like left_symmetric, but P is before Q */
1940                         if (sh->pd_idx == 0)
1941                                 i--;    /* P D D D Q */
1942                         else {
1943                                 /* D D Q P D */
1944                                 if (i < sh->pd_idx)
1945                                         i += raid_disks;
1946                                 i -= (sh->pd_idx + 1);
1947                         }
1948                         break;
1949                 case ALGORITHM_LEFT_ASYMMETRIC_6:
1950                 case ALGORITHM_RIGHT_ASYMMETRIC_6:
1951                         if (i > sh->pd_idx)
1952                                 i--;
1953                         break;
1954                 case ALGORITHM_LEFT_SYMMETRIC_6:
1955                 case ALGORITHM_RIGHT_SYMMETRIC_6:
1956                         if (i < sh->pd_idx)
1957                                 i += data_disks + 1;
1958                         i -= (sh->pd_idx + 1);
1959                         break;
1960                 case ALGORITHM_PARITY_0_6:
1961                         i -= 1;
1962                         break;
1963                 default:
1964                         printk(KERN_CRIT "raid6: unsupported algorithm %d\n",
1965                                algorithm);
1966                         BUG();
1967                 }
1968                 break;
1969         }
1970
1971         chunk_number = stripe * data_disks + i;
1972         r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset;
1973
1974         check = raid5_compute_sector(conf, r_sector,
1975                                      previous, &dummy1, &sh2);
1976         if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
1977                 || sh2.qd_idx != sh->qd_idx) {
1978                 printk(KERN_ERR "compute_blocknr: map not correct\n");
1979                 return 0;
1980         }
1981         return r_sector;
1982 }
1983
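/*
 * Editor's note: compute_blocknr() is the inverse of raid5_compute_sector(),
 * and it finishes by checking its own answer: the candidate r_sector is
 * mapped forward again and must land on the same stripe, slot and parity
 * indexes.  The inverse of the left_symmetric() sketch above, for
 * comparison (hypothetical, not driver code):
 *
 *	// Given a physical slot i (not the parity slot), recover the
 *	// logical data index -- mirrors the LEFT_SYMMETRIC branch above.
 *	static int left_symmetric_inv(long stripe, int i, int raid_disks)
 *	{
 *		int pd = (raid_disks - 1) - stripe % raid_disks;
 *
 *		if (i < pd)
 *			i += raid_disks;
 *		return i - (pd + 1);
 *	}
 *
 * For every stripe and every slot i != pd,
 * left_symmetric(stripe, left_symmetric_inv(stripe, i), ...) yields i
 * back -- the same round-trip property the check above enforces.
 */
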
1984
1985 static void
1986 schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
1987                          int rcw, int expand)
1988 {
1989         int i, pd_idx = sh->pd_idx, disks = sh->disks;
1990         raid5_conf_t *conf = sh->raid_conf;
1991         int level = conf->level;
1992
1993         if (rcw) {
1994                 /* if we are not expanding this is a proper write request, and
1995                  * there will be bios with new data to be drained into the
1996                  * stripe cache
1997                  */
1998                 if (!expand) {
1999                         sh->reconstruct_state = reconstruct_state_drain_run;
2000                         set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
2001                 } else
2002                         sh->reconstruct_state = reconstruct_state_run;
2003
2004                 set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
2005
2006                 for (i = disks; i--; ) {
2007                         struct r5dev *dev = &sh->dev[i];
2008
2009                         if (dev->towrite) {
2010                                 set_bit(R5_LOCKED, &dev->flags);
2011                                 set_bit(R5_Wantdrain, &dev->flags);
2012                                 if (!expand)
2013                                         clear_bit(R5_UPTODATE, &dev->flags);
2014                                 s->locked++;
2015                         }
2016                 }
2017                 if (s->locked + conf->max_degraded == disks)
2018                         if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
2019                                 atomic_inc(&conf->pending_full_writes);
2020         } else {
2021                 BUG_ON(level == 6);
2022                 BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
2023                         test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
2024
2025                 sh->reconstruct_state = reconstruct_state_prexor_drain_run;
2026                 set_bit(STRIPE_OP_PREXOR, &s->ops_request);
2027                 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
2028                 set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
2029
2030                 for (i = disks; i--; ) {
2031                         struct r5dev *dev = &sh->dev[i];
2032                         if (i == pd_idx)
2033                                 continue;
2034
2035                         if (dev->towrite &&
2036                             (test_bit(R5_UPTODATE, &dev->flags) ||
2037                              test_bit(R5_Wantcompute, &dev->flags))) {
2038                                 set_bit(R5_Wantdrain, &dev->flags);
2039                                 set_bit(R5_LOCKED, &dev->flags);
2040                                 clear_bit(R5_UPTODATE, &dev->flags);
2041                                 s->locked++;
2042                         }
2043                 }
2044         }
2045
2046         /* keep the parity disk(s) locked while asynchronous operations
2047          * are in flight
2048          */
2049         set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
2050         clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
2051         s->locked++;
2052
2053         if (level == 6) {
2054                 int qd_idx = sh->qd_idx;
2055                 struct r5dev *dev = &sh->dev[qd_idx];
2056
2057                 set_bit(R5_LOCKED, &dev->flags);
2058                 clear_bit(R5_UPTODATE, &dev->flags);
2059                 s->locked++;
2060         }
2061
2062         pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
2063                 __func__, (unsigned long long)sh->sector,
2064                 s->locked, s->ops_request);
2065 }
2066
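/*
 * Editor's note on the two branches above.  With P = D0 ^ D1 ^ ... the
 * parity can be brought up to date in two ways:
 *
 *	rcw:	P' = xor of all data blocks	(drain new data, recompute)
 *	rmw:	P' = P ^ Dold ^ Dnew		(prexor path)
 *
 * The PREXOR op performs the "P ^ Dold" half, which is why the second
 * branch insists each to-be-drained block is still UPTODATE (or about to
 * be computed): the old contents must be XORed out of the parity before
 * the new data lands.  In sketch form (illustrative only):
 *
 *	static void rmw_parity(unsigned char *p, const unsigned char *dold,
 *			       const unsigned char *dnew, unsigned int len)
 *	{
 *		while (len--)
 *			*p++ ^= *dold++ ^ *dnew++;
 *	}
 */
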
2067 /*
2068  * Each stripe/dev can have one or more bios attached.
2069  * toread/towrite point to the first in a chain.
2070  * The bi_next chain must be in order.
2071  */
2072 static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
2073 {
2074         struct bio **bip;
2075         raid5_conf_t *conf = sh->raid_conf;
2076         int firstwrite=0;
2077
2078         pr_debug("adding bh b#%llu to stripe s#%llu\n",
2079                 (unsigned long long)bi->bi_sector,
2080                 (unsigned long long)sh->sector);
2081
2082
2083         spin_lock(&sh->lock);
2084         spin_lock_irq(&conf->device_lock);
2085         if (forwrite) {
2086                 bip = &sh->dev[dd_idx].towrite;
2087                 if (*bip == NULL && sh->dev[dd_idx].written == NULL)
2088                         firstwrite = 1;
2089         } else
2090                 bip = &sh->dev[dd_idx].toread;
2091         while (*bip && (*bip)->bi_sector < bi->bi_sector) {
2092                 if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
2093                         goto overlap;
2094                 bip = & (*bip)->bi_next;
2095         }
2096         if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
2097                 goto overlap;
2098
2099         BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
2100         if (*bip)
2101                 bi->bi_next = *bip;
2102         *bip = bi;
2103         bi->bi_phys_segments++;
2104         spin_unlock_irq(&conf->device_lock);
2105         spin_unlock(&sh->lock);
2106
2107         pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
2108                 (unsigned long long)bi->bi_sector,
2109                 (unsigned long long)sh->sector, dd_idx);
2110
2111         if (conf->mddev->bitmap && firstwrite) {
2112                 bitmap_startwrite(conf->mddev->bitmap, sh->sector,
2113                                   STRIPE_SECTORS, 0);
2114                 sh->bm_seq = conf->seq_flush+1;
2115                 set_bit(STRIPE_BIT_DELAY, &sh->state);
2116         }
2117
2118         if (forwrite) {
2119                 /* check if page is covered */
2120                 sector_t sector = sh->dev[dd_idx].sector;
2121                 for (bi=sh->dev[dd_idx].towrite;
2122                      sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
2123                              bi && bi->bi_sector <= sector;
2124                      bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
2125                         if (bi->bi_sector + (bi->bi_size>>9) >= sector)
2126                                 sector = bi->bi_sector + (bi->bi_size>>9);
2127                 }
2128                 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
2129                         set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
2130         }
2131         return 1;
2132
2133  overlap:
2134         set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
2135         spin_unlock_irq(&conf->device_lock);
2136         spin_unlock(&sh->lock);
2137         return 0;
2138 }
2139
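/*
 * Editor's sketch of the "page is covered" walk above: the towrite chain
 * is sorted by bi_sector and overlap-free, so a single pass can decide
 * whether the bios jointly cover [sector, sector + STRIPE_SECTORS).  The
 * same scan over plain intervals (hypothetical, not driver code):
 *
 *	// Intervals [start[k], end[k]) sorted by start; do they cover
 *	// [lo, hi) with no gap?
 *	static int covers(const long *start, const long *end, int n,
 *			  long lo, long hi)
 *	{
 *		long sector = lo;
 *		int k;
 *
 *		for (k = 0; k < n && sector < hi && start[k] <= sector; k++)
 *			if (end[k] > sector)
 *				sector = end[k];
 *		return sector >= hi;
 *	}
 *
 * The first bio starting beyond 'sector' ends the scan: anything after it
 * cannot plug the gap, exactly as in the loop above.
 */
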
2140 static void end_reshape(raid5_conf_t *conf);
2141
2142 static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
2143                             struct stripe_head *sh)
2144 {
2145         int sectors_per_chunk =
2146                 previous ? conf->prev_chunk_sectors : conf->chunk_sectors;
2147         int dd_idx;
2148         int chunk_offset = sector_div(stripe, sectors_per_chunk);
2149         int disks = previous ? conf->previous_raid_disks : conf->raid_disks;
2150
2151         raid5_compute_sector(conf,
2152                              stripe * (disks - conf->max_degraded)
2153                              *sectors_per_chunk + chunk_offset,
2154                              previous,
2155                              &dd_idx, sh);
2156 }
2157
2158 static void
2159 handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
2160                                 struct stripe_head_state *s, int disks,
2161                                 struct bio **return_bi)
2162 {
2163         int i;
2164         for (i = disks; i--; ) {
2165                 struct bio *bi;
2166                 int bitmap_end = 0;
2167
2168                 if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
2169                         mdk_rdev_t *rdev;
2170                         rcu_read_lock();
2171                         rdev = rcu_dereference(conf->disks[i].rdev);
2172                         if (rdev && test_bit(In_sync, &rdev->flags))
2173                                 /* multiple read failures in one stripe */
2174                                 md_error(conf->mddev, rdev);
2175                         rcu_read_unlock();
2176                 }
2177                 spin_lock_irq(&conf->device_lock);
2178                 /* fail all writes first */
2179                 bi = sh->dev[i].towrite;
2180                 sh->dev[i].towrite = NULL;
2181                 if (bi) {
2182                         s->to_write--;
2183                         bitmap_end = 1;
2184                 }
2185
2186                 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
2187                         wake_up(&conf->wait_for_overlap);
2188
2189                 while (bi && bi->bi_sector <
2190                         sh->dev[i].sector + STRIPE_SECTORS) {
2191                         struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
2192                         clear_bit(BIO_UPTODATE, &bi->bi_flags);
2193                         if (!raid5_dec_bi_phys_segments(bi)) {
2194                                 md_write_end(conf->mddev);
2195                                 bi->bi_next = *return_bi;
2196                                 *return_bi = bi;
2197                         }
2198                         bi = nextbi;
2199                 }
2200                 /* and fail all 'written' */
2201                 bi = sh->dev[i].written;
2202                 sh->dev[i].written = NULL;
2203                 if (bi) bitmap_end = 1;
2204                 while (bi && bi->bi_sector <
2205                        sh->dev[i].sector + STRIPE_SECTORS) {
2206                         struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
2207                         clear_bit(BIO_UPTODATE, &bi->bi_flags);
2208                         if (!raid5_dec_bi_phys_segments(bi)) {
2209                                 md_write_end(conf->mddev);
2210                                 bi->bi_next = *return_bi;
2211                                 *return_bi = bi;
2212                         }
2213                         bi = bi2;
2214                 }
2215
2216                 /* fail any reads if this device is non-operational and
2217                  * the data has not reached the cache yet.
2218                  */
2219                 if (!test_bit(R5_Wantfill, &sh->dev[i].flags) &&
2220                     (!test_bit(R5_Insync, &sh->dev[i].flags) ||
2221                       test_bit(R5_ReadError, &sh->dev[i].flags))) {
2222                         bi = sh->dev[i].toread;
2223                         sh->dev[i].toread = NULL;
2224                         if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
2225                                 wake_up(&conf->wait_for_overlap);
2226                         if (bi) s->to_read--;
2227                         while (bi && bi->bi_sector <
2228                                sh->dev[i].sector + STRIPE_SECTORS) {
2229                                 struct bio *nextbi =
2230                                         r5_next_bio(bi, sh->dev[i].sector);
2231                                 clear_bit(BIO_UPTODATE, &bi->bi_flags);
2232                                 if (!raid5_dec_bi_phys_segments(bi)) {
2233                                         bi->bi_next = *return_bi;
2234                                         *return_bi = bi;
2235                                 }
2236                                 bi = nextbi;
2237                         }
2238                 }
2239                 spin_unlock_irq(&conf->device_lock);
2240                 if (bitmap_end)
2241                         bitmap_endwrite(conf->mddev->bitmap, sh->sector,
2242                                         STRIPE_SECTORS, 0, 0);
2243         }
2244
2245         if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
2246                 if (atomic_dec_and_test(&conf->pending_full_writes))
2247                         md_wakeup_thread(conf->mddev->thread);
2248 }
2249
2250 /* fetch_block5 - checks the given member device to see if its data needs
2251  * to be read or computed to satisfy a request.
2252  *
2253  * Returns 1 when no more member devices need to be checked, otherwise returns
2254  * 0 to tell the loop in handle_stripe_fill5 to continue
2255  */
2256 static int fetch_block5(struct stripe_head *sh, struct stripe_head_state *s,
2257                         int disk_idx, int disks)
2258 {
2259         struct r5dev *dev = &sh->dev[disk_idx];
2260         struct r5dev *failed_dev = &sh->dev[s->failed_num];
2261
2262         /* is the data in this block needed, and can we get it? */
2263         if (!test_bit(R5_LOCKED, &dev->flags) &&
2264             !test_bit(R5_UPTODATE, &dev->flags) &&
2265             (dev->toread ||
2266              (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
2267              s->syncing || s->expanding ||
2268              (s->failed &&
2269               (failed_dev->toread ||
2270                (failed_dev->towrite &&
2271                 !test_bit(R5_OVERWRITE, &failed_dev->flags)))))) {
2272                 /* We would like to get this block, possibly by computing it,
2273                  * otherwise read it if the backing disk is insync
2274                  */
2275                 if ((s->uptodate == disks - 1) &&
2276                     (s->failed && disk_idx == s->failed_num)) {
2277                         set_bit(STRIPE_COMPUTE_RUN, &sh->state);
2278                         set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
2279                         set_bit(R5_Wantcompute, &dev->flags);
2280                         sh->ops.target = disk_idx;
2281                         sh->ops.target2 = -1;
2282                         s->req_compute = 1;
2283                         /* Careful: from this point on 'uptodate' is in the eye
2284                          * of raid_run_ops which services 'compute' operations
2285                          * before writes. R5_Wantcompute flags a block that will
2286                          * be R5_UPTODATE by the time it is needed for a
2287                          * subsequent operation.
2288                          */
2289                         s->uptodate++;
2290                         return 1; /* uptodate + compute == disks */
2291                 } else if (test_bit(R5_Insync, &dev->flags)) {
2292                         set_bit(R5_LOCKED, &dev->flags);
2293                         set_bit(R5_Wantread, &dev->flags);
2294                         s->locked++;
2295                         pr_debug("Reading block %d (sync=%d)\n", disk_idx,
2296                                 s->syncing);
2297                 }
2298         }
2299
2300         return 0;
2301 }
2302
2303 /**
2304  * handle_stripe_fill5 - read or compute data to satisfy pending requests.
2305  */
2306 static void handle_stripe_fill5(struct stripe_head *sh,
2307                         struct stripe_head_state *s, int disks)
2308 {
2309         int i;
2310
2311         /* look for blocks to read/compute, skip this if a compute
2312          * is already in flight, or if the stripe contents are in the
2313          * midst of changing due to a write
2314          */
2315         if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
2316             !sh->reconstruct_state)
2317                 for (i = disks; i--; )
2318                         if (fetch_block5(sh, s, i, disks))
2319                                 break;
2320         set_bit(STRIPE_HANDLE, &sh->state);
2321 }
2322
2323 /* fetch_block6 - checks the given member device to see if its data needs
2324  * to be read or computed to satisfy a request.
2325  *
2326  * Returns 1 when no more member devices need to be checked, otherwise returns
2327  * 0 to tell the loop in handle_stripe_fill6 to continue
2328  */
2329 static int fetch_block6(struct stripe_head *sh, struct stripe_head_state *s,
2330                          struct r6_state *r6s, int disk_idx, int disks)
2331 {
2332         struct r5dev *dev = &sh->dev[disk_idx];
2333         struct r5dev *fdev[2] = { &sh->dev[r6s->failed_num[0]],
2334                                   &sh->dev[r6s->failed_num[1]] };
2335
2336         if (!test_bit(R5_LOCKED, &dev->flags) &&
2337             !test_bit(R5_UPTODATE, &dev->flags) &&
2338             (dev->toread ||
2339              (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
2340              s->syncing || s->expanding ||
2341              (s->failed >= 1 &&
2342               (fdev[0]->toread || s->to_write)) ||
2343              (s->failed >= 2 &&
2344               (fdev[1]->toread || s->to_write)))) {
2345                 /* we would like to get this block, possibly by computing it,
2346                  * otherwise read it if the backing disk is insync
2347                  */
2348                 BUG_ON(test_bit(R5_Wantcompute, &dev->flags));
2349                 BUG_ON(test_bit(R5_Wantread, &dev->flags));
2350                 if ((s->uptodate == disks - 1) &&
2351                     (s->failed && (disk_idx == r6s->failed_num[0] ||
2352                                    disk_idx == r6s->failed_num[1]))) {
2353                         /* the disk has failed and we're requested to
2354                          * fetch its data, so compute it
2355                          */
2356                         pr_debug("Computing stripe %llu block %d\n",
2357                                (unsigned long long)sh->sector, disk_idx);
2358                         set_bit(STRIPE_COMPUTE_RUN, &sh->state);
2359                         set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
2360                         set_bit(R5_Wantcompute, &dev->flags);
2361                         sh->ops.target = disk_idx;
2362                         sh->ops.target2 = -1; /* no 2nd target */
2363                         s->req_compute = 1;
2364                         s->uptodate++;
2365                         return 1;
2366                 } else if (s->uptodate == disks-2 && s->failed >= 2) {
2367                         /* Computing 2-failure is *very* expensive; only
2368                          * do it if failed >= 2
2369                          */
2370                         int other;
2371                         for (other = disks; other--; ) {
2372                                 if (other == disk_idx)
2373                                         continue;
2374                                 if (!test_bit(R5_UPTODATE,
2375                                       &sh->dev[other].flags))
2376                                         break;
2377                         }
2378                         BUG_ON(other < 0);
2379                         pr_debug("Computing stripe %llu blocks %d,%d\n",
2380                                (unsigned long long)sh->sector,
2381                                disk_idx, other);
2382                         set_bit(STRIPE_COMPUTE_RUN, &sh->state);
2383                         set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
2384                         set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags);
2385                         set_bit(R5_Wantcompute, &sh->dev[other].flags);
2386                         sh->ops.target = disk_idx;
2387                         sh->ops.target2 = other;
2388                         s->uptodate += 2;
2389                         s->req_compute = 1;
2390                         return 1;
2391                 } else if (test_bit(R5_Insync, &dev->flags)) {
2392                         set_bit(R5_LOCKED, &dev->flags);
2393                         set_bit(R5_Wantread, &dev->flags);
2394                         s->locked++;
2395                         pr_debug("Reading block %d (sync=%d)\n",
2396                                 disk_idx, s->syncing);
2397                 }
2398         }
2399
2400         return 0;
2401 }
2402
2403 /**
2404  * handle_stripe_fill6 - read or compute data to satisfy pending requests.
2405  */
2406 static void handle_stripe_fill6(struct stripe_head *sh,
2407                         struct stripe_head_state *s, struct r6_state *r6s,
2408                         int disks)
2409 {
2410         int i;
2411
2412         /* look for blocks to read/compute, skip this if a compute
2413          * is already in flight, or if the stripe contents are in the
2414          * midst of changing due to a write
2415          */
2416         if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
2417             !sh->reconstruct_state)
2418                 for (i = disks; i--; )
2419                         if (fetch_block6(sh, s, r6s, i, disks))
2420                                 break;
2421         set_bit(STRIPE_HANDLE, &sh->state);
2422 }
2423
2424
2425 /* handle_stripe_clean_event
2426  * any written block on an uptodate or failed drive can be returned.
2427  * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
2428  * never LOCKED, so we don't need to test 'failed' directly.
2429  */
2430 static void handle_stripe_clean_event(raid5_conf_t *conf,
2431         struct stripe_head *sh, int disks, struct bio **return_bi)
2432 {
2433         int i;
2434         struct r5dev *dev;
2435
2436         for (i = disks; i--; )
2437                 if (sh->dev[i].written) {
2438                         dev = &sh->dev[i];
2439                         if (!test_bit(R5_LOCKED, &dev->flags) &&
2440                                 test_bit(R5_UPTODATE, &dev->flags)) {
2441                                 /* We can return any write requests */
2442                                 struct bio *wbi, *wbi2;
2443                                 int bitmap_end = 0;
2444                                 pr_debug("Return write for disc %d\n", i);
2445                                 spin_lock_irq(&conf->device_lock);
2446                                 wbi = dev->written;
2447                                 dev->written = NULL;
2448                                 while (wbi && wbi->bi_sector <
2449                                         dev->sector + STRIPE_SECTORS) {
2450                                         wbi2 = r5_next_bio(wbi, dev->sector);
2451                                         if (!raid5_dec_bi_phys_segments(wbi)) {
2452                                                 md_write_end(conf->mddev);
2453                                                 wbi->bi_next = *return_bi;
2454                                                 *return_bi = wbi;
2455                                         }
2456                                         wbi = wbi2;
2457                                 }
2458                                 if (dev->towrite == NULL)
2459                                         bitmap_end = 1;
2460                                 spin_unlock_irq(&conf->device_lock);
2461                                 if (bitmap_end)
2462                                         bitmap_endwrite(conf->mddev->bitmap,
2463                                                         sh->sector,
2464                                                         STRIPE_SECTORS,
2465                                          !test_bit(STRIPE_DEGRADED, &sh->state),
2466                                                         0);
2467                         }
2468                 }
2469
2470         if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
2471                 if (atomic_dec_and_test(&conf->pending_full_writes))
2472                         md_wakeup_thread(conf->mddev->thread);
2473 }
2474
2475 static void handle_stripe_dirtying5(raid5_conf_t *conf,
2476                 struct stripe_head *sh, struct stripe_head_state *s, int disks)
2477 {
2478         int rmw = 0, rcw = 0, i;
2479         for (i = disks; i--; ) {
2480                 /* would I have to read this buffer for read_modify_write */
2481                 struct r5dev *dev = &sh->dev[i];
2482                 if ((dev->towrite || i == sh->pd_idx) &&
2483                     !test_bit(R5_LOCKED, &dev->flags) &&
2484                     !(test_bit(R5_UPTODATE, &dev->flags) ||
2485                       test_bit(R5_Wantcompute, &dev->flags))) {
2486                         if (test_bit(R5_Insync, &dev->flags))
2487                                 rmw++;
2488                         else
2489                                 rmw += 2*disks;  /* cannot read it */
2490                 }
2491                 /* Would I have to read this buffer for reconstruct_write */
2492                 if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
2493                     !test_bit(R5_LOCKED, &dev->flags) &&
2494                     !(test_bit(R5_UPTODATE, &dev->flags) ||
2495                     test_bit(R5_Wantcompute, &dev->flags))) {
2496                         if (test_bit(R5_Insync, &dev->flags)) rcw++;
2497                         else
2498                                 rcw += 2*disks;
2499                 }
2500         }
2501         pr_debug("for sector %llu, rmw=%d rcw=%d\n",
2502                 (unsigned long long)sh->sector, rmw, rcw);
2503         set_bit(STRIPE_HANDLE, &sh->state);
2504         if (rmw < rcw && rmw > 0)
2505                 /* prefer read-modify-write, but need to get some data */
2506                 for (i = disks; i--; ) {
2507                         struct r5dev *dev = &sh->dev[i];
2508                         if ((dev->towrite || i == sh->pd_idx) &&
2509                             !test_bit(R5_LOCKED, &dev->flags) &&
2510                             !(test_bit(R5_UPTODATE, &dev->flags) ||
2511                             test_bit(R5_Wantcompute, &dev->flags)) &&
2512                             test_bit(R5_Insync, &dev->flags)) {
2513                                 if (
2514                                   test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
2515                                         pr_debug("Read_old block "
2516                                                 "%d for r-m-w\n", i);
2517                                         set_bit(R5_LOCKED, &dev->flags);
2518                                         set_bit(R5_Wantread, &dev->flags);
2519                                         s->locked++;
2520                                 } else {
2521                                         set_bit(STRIPE_DELAYED, &sh->state);
2522                                         set_bit(STRIPE_HANDLE, &sh->state);
2523                                 }
2524                         }
2525                 }
2526         if (rcw <= rmw && rcw > 0)
2527                 /* want reconstruct write, but need to get some data */
2528                 for (i = disks; i--; ) {
2529                         struct r5dev *dev = &sh->dev[i];
2530                         if (!test_bit(R5_OVERWRITE, &dev->flags) &&
2531                             i != sh->pd_idx &&
2532                             !test_bit(R5_LOCKED, &dev->flags) &&
2533                             !(test_bit(R5_UPTODATE, &dev->flags) ||
2534                             test_bit(R5_Wantcompute, &dev->flags)) &&
2535                             test_bit(R5_Insync, &dev->flags)) {
2536                                 if (
2537                                   test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
2538                                         pr_debug("Read_old block "
2539                                                 "%d for Reconstruct\n", i);
2540                                         set_bit(R5_LOCKED, &dev->flags);
2541                                         set_bit(R5_Wantread, &dev->flags);
2542                                         s->locked++;
2543                                 } else {
2544                                         set_bit(STRIPE_DELAYED, &sh->state);
2545                                         set_bit(STRIPE_HANDLE, &sh->state);
2546                                 }
2547                         }
2548                 }
2549         /* now if nothing is locked, and if we have enough data,
2550          * we can start a write request
2551          */
2552         /* since handle_stripe can be called at any time we need to handle the
2553          * case where a compute block operation has been submitted and then a
2554          * subsequent call wants to start a write request.  raid_run_ops only
2555          * handles the case where compute block and reconstruct are requested
2556          * simultaneously.  If this is not the case then new writes need to be
2557          * held off until the compute completes.
2558          */
2559         if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
2560             (s->locked == 0 && (rcw == 0 || rmw == 0) &&
2561             !test_bit(STRIPE_BIT_DELAY, &sh->state)))
2562                 schedule_reconstruction(sh, s, rcw == 0, 0);
2563 }
2564
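/*
 * Editor's note: rmw/rcw above count the reads each strategy would need
 * (with 2*disks standing in for "impossible" on blocks that cannot be
 * read).  Idealized full-chunk writes on a clean array make the trade-off
 * concrete -- a hedged sketch, assuming nothing is already cached:
 *
 *	// read-modify-write: old copy of each written chunk + old parity
 *	static int reads_rmw(int chunks_written)
 *	{
 *		return chunks_written + 1;
 *	}
 *
 *	// reconstruct-write: every data chunk that is not overwritten
 *	static int reads_rcw(int data_disks, int chunks_written)
 *	{
 *		return data_disks - chunks_written;
 *	}
 *
 * On a 5-disk array (4 data + parity), writing 1 chunk costs 2 reads via
 * rmw but 3 via rcw, so rmw wins; writing 3 chunks reverses it (4 vs 1),
 * and a full-stripe write needs no reads at all.
 */
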
2565 static void handle_stripe_dirtying6(raid5_conf_t *conf,
2566                 struct stripe_head *sh, struct stripe_head_state *s,
2567                 struct r6_state *r6s, int disks)
2568 {
2569         int rcw = 0, pd_idx = sh->pd_idx, i;
2570         int qd_idx = sh->qd_idx;
2571
2572         set_bit(STRIPE_HANDLE, &sh->state);
2573         for (i = disks; i--; ) {
2574                 struct r5dev *dev = &sh->dev[i];
2575                 /* check if we lack the data for this block */
2576                 if (!test_bit(R5_OVERWRITE, &dev->flags) &&
2577                     i != pd_idx && i != qd_idx &&
2578                     !test_bit(R5_LOCKED, &dev->flags) &&
2579                     !(test_bit(R5_UPTODATE, &dev->flags) ||
2580                       test_bit(R5_Wantcompute, &dev->flags))) {
2581                         rcw++;
2582                         if (!test_bit(R5_Insync, &dev->flags))
2583                                 continue; /* it's a failed drive */
2584
2585                         if (
2586                           test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
2587                                 pr_debug("Read_old stripe %llu "
2588                                         "block %d for Reconstruct\n",
2589                                      (unsigned long long)sh->sector, i);
2590                                 set_bit(R5_LOCKED, &dev->flags);
2591                                 set_bit(R5_Wantread, &dev->flags);
2592                                 s->locked++;
2593                         } else {
2594                                 pr_debug("Request delayed stripe %llu "
2595                                         "block %d for Reconstruct\n",
2596                                      (unsigned long long)sh->sector, i);
2597                                 set_bit(STRIPE_DELAYED, &sh->state);
2598                                 set_bit(STRIPE_HANDLE, &sh->state);
2599                         }
2600                 }
2601         }
2602         /* now if nothing is locked, and if we have enough data, we can start a
2603          * write request
2604          */
2605         if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
2606             s->locked == 0 && rcw == 0 &&
2607             !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
2608                 schedule_reconstruction(sh, s, 1, 0);
2609         }
2610 }
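
/* Note: unlike the raid5 path above, raid6 never does read-modify-write,
 * which is why there is no rmw counter here.  Every write is a
 * reconstruct-write: any data block not being overwritten must be read
 * (or computed) so that P and Q can be regenerated from the full stripe.
 */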
2611
2612 static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
2613                                 struct stripe_head_state *s, int disks)
2614 {
2615         struct r5dev *dev = NULL;
2616
2617         set_bit(STRIPE_HANDLE, &sh->state);
2618
2619         switch (sh->check_state) {
2620         case check_state_idle:
2621                 /* start a new check operation if there are no failures */
2622                 if (s->failed == 0) {
2623                         BUG_ON(s->uptodate != disks);
2624                         sh->check_state = check_state_run;
2625                         set_bit(STRIPE_OP_CHECK, &s->ops_request);
2626                         clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
2627                         s->uptodate--;
2628                         break;
2629                 }
2630                 dev = &sh->dev[s->failed_num];
2631                 /* fall through */
2632         case check_state_compute_result:
2633                 sh->check_state = check_state_idle;
2634                 if (!dev)
2635                         dev = &sh->dev[sh->pd_idx];
2636
2637                 /* check that a write has not made the stripe insync */
2638                 if (test_bit(STRIPE_INSYNC, &sh->state))
2639                         break;
2640
2641                 /* either failed parity check, or recovery is happening */
2642                 BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
2643                 BUG_ON(s->uptodate != disks);
2644
2645                 set_bit(R5_LOCKED, &dev->flags);
2646                 s->locked++;
2647                 set_bit(R5_Wantwrite, &dev->flags);
2648
2649                 clear_bit(STRIPE_DEGRADED, &sh->state);
2650                 set_bit(STRIPE_INSYNC, &sh->state);
2651                 break;
2652         case check_state_run:
2653                 break; /* we will be called again upon completion */
2654         case check_state_check_result:
2655                 sh->check_state = check_state_idle;
2656
2657                 /* if a failure occurred during the check operation, leave
2658                  * STRIPE_INSYNC not set and let the stripe be handled again
2659                  */
2660                 if (s->failed)
2661                         break;
2662
2663                 /* handle a successful check operation, if parity is correct
2664                  * we are done.  Otherwise update the mismatch count and repair
2665                  * parity if !MD_RECOVERY_CHECK
2666                  */
2667                 if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0)
2668                         /* parity is correct (on disc,
2669                          * not in buffer any more)
2670                          */
2671                         set_bit(STRIPE_INSYNC, &sh->state);
2672                 else {
2673                         conf->mddev->resync_mismatches += STRIPE_SECTORS;
2674                         if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
2675                                 /* don't try to repair!! */
2676                                 set_bit(STRIPE_INSYNC, &sh->state);
2677                         else {
2678                                 sh->check_state = check_state_compute_run;
2679                                 set_bit(STRIPE_COMPUTE_RUN, &sh->state);
2680                                 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
2681                                 set_bit(R5_Wantcompute,
2682                                         &sh->dev[sh->pd_idx].flags);
2683                                 sh->ops.target = sh->pd_idx;
2684                                 sh->ops.target2 = -1;
2685                                 s->uptodate++;
2686                         }
2687                 }
2688                 break;
2689         case check_state_compute_run:
2690                 break;
2691         default:
2692                 printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
2693                        __func__, sh->check_state,
2694                        (unsigned long long) sh->sector);
2695                 BUG();
2696         }
2697 }
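
/* A rough sketch of the raid5 check state machine driven above:
 *
 *   idle --(no failures)--> run --(xor sum computed)--> check_result
 *   check_result --(parity ok)--> idle, STRIPE_INSYNC set
 *   check_result --(mismatch, repair allowed)--> compute_run
 *   compute_run --(block recomputed)--> compute_result --> idle,
 *                 recomputed parity written back via R5_Wantwrite
 *
 * With a failed device the idle case skips the check entirely and
 * falls straight through to the compute_result handling, which writes
 * the already-reconstructed block back out.
 */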
2698
2699
2700 static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh,
2701                                   struct stripe_head_state *s,
2702                                   struct r6_state *r6s, int disks)
2703 {
2704         int pd_idx = sh->pd_idx;
2705         int qd_idx = sh->qd_idx;
2706         struct r5dev *dev;
2707
2708         set_bit(STRIPE_HANDLE, &sh->state);
2709
2710         BUG_ON(s->failed > 2);
2711
2712         /* Want to check and possibly repair P and Q.
2713          * However there could be one 'failed' device, in which
2714          * case we can only check one of them, possibly using the
2715          * other to generate missing data
2716          */
2717
2718         switch (sh->check_state) {
2719         case check_state_idle:
2720                 /* start a new check operation if there are < 2 failures */
2721                 if (s->failed == r6s->q_failed) {
2722                         /* The only possible failed device holds Q, so it
2723                          * makes sense to check P (If anything else were failed,
2724                          * we would have used P to recreate it).
2725                          */
2726                         sh->check_state = check_state_run;
2727                 }
2728                 if (!r6s->q_failed && s->failed < 2) {
2729                         /* Q is not failed, and we didn't use it to generate
2730                          * anything, so it makes sense to check it
2731                          */
2732                         if (sh->check_state == check_state_run)
2733                                 sh->check_state = check_state_run_pq;
2734                         else
2735                                 sh->check_state = check_state_run_q;
2736                 }
2737
2738                 /* discard potentially stale zero_sum_result */
2739                 sh->ops.zero_sum_result = 0;
2740
2741                 if (sh->check_state == check_state_run) {
2742                         /* async_xor_zero_sum destroys the contents of P */
2743                         clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
2744                         s->uptodate--;
2745                 }
2746                 if (sh->check_state >= check_state_run &&
2747                     sh->check_state <= check_state_run_pq) {
2748                         /* async_syndrome_zero_sum preserves P and Q, so
2749                          * no need to mark them !uptodate here
2750                          */
2751                         set_bit(STRIPE_OP_CHECK, &s->ops_request);
2752                         break;
2753                 }
2754
2755                 /* we have 2-disk failure */
2756                 BUG_ON(s->failed != 2);
2757                 /* fall through */
2758         case check_state_compute_result:
2759                 sh->check_state = check_state_idle;
2760
2761                 /* check that a write has not made the stripe insync */
2762                 if (test_bit(STRIPE_INSYNC, &sh->state))
2763                         break;
2764
2765                 /* now write out any block on a failed drive,
2766                  * or P or Q if they were recomputed
2767                  */
2768                 BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */
2769                 if (s->failed == 2) {
2770                         dev = &sh->dev[r6s->failed_num[1]];
2771                         s->locked++;
2772                         set_bit(R5_LOCKED, &dev->flags);
2773                         set_bit(R5_Wantwrite, &dev->flags);
2774                 }
2775                 if (s->failed >= 1) {
2776                         dev = &sh->dev[r6s->failed_num[0]];
2777                         s->locked++;
2778                         set_bit(R5_LOCKED, &dev->flags);
2779                         set_bit(R5_Wantwrite, &dev->flags);
2780                 }
2781                 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
2782                         dev = &sh->dev[pd_idx];
2783                         s->locked++;
2784                         set_bit(R5_LOCKED, &dev->flags);
2785                         set_bit(R5_Wantwrite, &dev->flags);
2786                 }
2787                 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
2788                         dev = &sh->dev[qd_idx];
2789                         s->locked++;
2790                         set_bit(R5_LOCKED, &dev->flags);
2791                         set_bit(R5_Wantwrite, &dev->flags);
2792                 }
2793                 clear_bit(STRIPE_DEGRADED, &sh->state);
2794
2795                 set_bit(STRIPE_INSYNC, &sh->state);
2796                 break;
2797         case check_state_run:
2798         case check_state_run_q:
2799         case check_state_run_pq:
2800                 break; /* we will be called again upon completion */
2801         case check_state_check_result:
2802                 sh->check_state = check_state_idle;
2803
2804                 /* handle a successful check operation, if parity is correct
2805                  * we are done.  Otherwise update the mismatch count and repair
2806                  * parity if !MD_RECOVERY_CHECK
2807                  */
2808                 if (sh->ops.zero_sum_result == 0) {
2809                         /* both parities are correct */
2810                         if (!s->failed)
2811                                 set_bit(STRIPE_INSYNC, &sh->state);
2812                         else {
2813                                 /* in contrast to the raid5 case we can validate
2814                                  * parity, but still have a failure to write
2815                                  * back
2816                                  */
2817                                 sh->check_state = check_state_compute_result;
2818                                 /* Returning at this point means that we may go
2819                                  * off and bring p and/or q uptodate again so
2820                                  * we make sure to check zero_sum_result again
2821                                  * to verify if p or q need writeback
2822                                  */
2823                         }
2824                 } else {
2825                         conf->mddev->resync_mismatches += STRIPE_SECTORS;
2826                         if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
2827                                 /* don't try to repair!! */
2828                                 set_bit(STRIPE_INSYNC, &sh->state);
2829                         else {
2830                                 int *target = &sh->ops.target;
2831
2832                                 sh->ops.target = -1;
2833                                 sh->ops.target2 = -1;
2834                                 sh->check_state = check_state_compute_run;
2835                                 set_bit(STRIPE_COMPUTE_RUN, &sh->state);
2836                                 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
2837                                 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
2838                                         set_bit(R5_Wantcompute,
2839                                                 &sh->dev[pd_idx].flags);
2840                                         *target = pd_idx;
2841                                         target = &sh->ops.target2;
2842                                         s->uptodate++;
2843                                 }
2844                                 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
2845                                         set_bit(R5_Wantcompute,
2846                                                 &sh->dev[qd_idx].flags);
2847                                         *target = qd_idx;
2848                                         s->uptodate++;
2849                                 }
2850                         }
2851                 }
2852                 break;
2853         case check_state_compute_run:
2854                 break;
2855         default:
2856                 printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
2857                        __func__, sh->check_state,
2858                        (unsigned long long) sh->sector);
2859                 BUG();
2860         }
2861 }
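
/* Note on the raid6 variant above: zero_sum_result carries two bits,
 * SUM_CHECK_P_RESULT and SUM_CHECK_Q_RESULT, so P and Q can fail the
 * check independently.  Which syndromes get verified depends on the
 * failure pattern: check_state_run checks P alone (only possible when
 * Q is the failed device), check_state_run_q checks Q alone, and
 * check_state_run_pq checks both.  On a mismatch, target/target2 allow
 * a single compute operation to regenerate P, Q, or both in one pass.
 */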
2862
2863 static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
2864                                 struct r6_state *r6s)
2865 {
2866         int i;
2867
2868         /* We have read all the blocks in this stripe and now we need to
2869          * copy some of them into a target stripe for expand.
2870          */
2871         struct dma_async_tx_descriptor *tx = NULL;
2872         clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
2873         for (i = 0; i < sh->disks; i++)
2874                 if (i != sh->pd_idx && i != sh->qd_idx) {
2875                         int dd_idx, j;
2876                         struct stripe_head *sh2;
2877                         struct async_submit_ctl submit;
2878
2879                         sector_t bn = compute_blocknr(sh, i, 1);
2880                         sector_t s = raid5_compute_sector(conf, bn, 0,
2881                                                           &dd_idx, NULL);
2882                         sh2 = get_active_stripe(conf, s, 0, 1, 1);
2883                         if (sh2 == NULL)
2884                                 /* so far only the early blocks of this stripe
2885                                  * have been requested.  When later blocks
2886                                  * get requested, we will try again
2887                                  */
2888                                 continue;
2889                         if (!test_bit(STRIPE_EXPANDING, &sh2->state) ||
2890                            test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
2891                                 /* must have already done this block */
2892                                 release_stripe(sh2);
2893                                 continue;
2894                         }
2895
2896                         /* place all the copies on one channel */
2897                         init_async_submit(&submit, 0, tx, NULL, NULL, NULL);
2898                         tx = async_memcpy(sh2->dev[dd_idx].page,
2899                                           sh->dev[i].page, 0, 0, STRIPE_SIZE,
2900                                           &submit);
2901
2902                         set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
2903                         set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
2904                         for (j = 0; j < conf->raid_disks; j++)
2905                                 if (j != sh2->pd_idx &&
2906                                     (!r6s || j != sh2->qd_idx) &&
2907                                     !test_bit(R5_Expanded, &sh2->dev[j].flags))
2908                                         break;
2909                         if (j == conf->raid_disks) {
2910                                 set_bit(STRIPE_EXPAND_READY, &sh2->state);
2911                                 set_bit(STRIPE_HANDLE, &sh2->state);
2912                         }
2913                         release_stripe(sh2);
2914
2915                 }
2916         /* done submitting copies, wait for them to complete */
2917         if (tx) {
2918                 async_tx_ack(tx);
2919                 dma_wait_for_async_tx(tx);
2920         }
2921 }
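
/* Note: each surviving data block of the old-geometry stripe is mapped
 * back to an array-logical block with compute_blocknr() and forward
 * again into the new geometry with raid5_compute_sector(), then copied
 * into the destination stripe with async_memcpy().  Chaining all the
 * copies on one channel means the single dma_wait_for_async_tx() at
 * the end waits for every copy at once.
 */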
2922
2923
2924 /*
2925  * handle_stripe - do things to a stripe.
2926  *
2927  * We lock the stripe and then examine the state of various bits
2928  * to see what needs to be done.
2929  * Possible results:
2930  *    return some read requests which now have data
2931  *    return some write requests which are safely on disc
2932  *    schedule a read on some buffers
2933  *    schedule a write of some buffers
2934  *    return confirmation of parity correctness
2935  *
2936  * buffers are taken off read_list or write_list, and bh_cache buffers
2937  * get BH_Lock set before the stripe lock is released.
2938  *
2939  */
2940
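/* A condensed map of the passes handle_stripe5() makes over the stripe,
 * in order: gather per-device state under rcu; bail out early if an
 * rdev is Blocked; fail requests when more than one device is gone;
 * complete writes whose parity is safe; schedule block fills; retire
 * finished reconstruct operations; consider new writes; run parity
 * checks; retry ReadError devices; drive expansion.  Only after the
 * stripe lock is dropped are raid_run_ops() and ops_run_io() invoked
 * to actually perform the queued work.
 */
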
2941 static void handle_stripe5(struct stripe_head *sh)
2942 {
2943         raid5_conf_t *conf = sh->raid_conf;
2944         int disks = sh->disks, i;
2945         struct bio *return_bi = NULL;
2946         struct stripe_head_state s;
2947         struct r5dev *dev;
2948         mdk_rdev_t *blocked_rdev = NULL;
2949         int prexor;
2950         int dec_preread_active = 0;
2951
2952         memset(&s, 0, sizeof(s));
2953         pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d check:%d "
2954                  "reconstruct:%d\n", (unsigned long long)sh->sector, sh->state,
2955                  atomic_read(&sh->count), sh->pd_idx, sh->check_state,
2956                  sh->reconstruct_state);
2957
2958         spin_lock(&sh->lock);
2959         clear_bit(STRIPE_HANDLE, &sh->state);
2960         clear_bit(STRIPE_DELAYED, &sh->state);
2961
2962         s.syncing = test_bit(STRIPE_SYNCING, &sh->state);
2963         s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
2964         s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
2965
2966         /* Now to look around and see what can be done */
2967         rcu_read_lock();
2968         for (i=disks; i--; ) {
2969                 mdk_rdev_t *rdev;
2970
2971                 dev = &sh->dev[i];
2972                 clear_bit(R5_Insync, &dev->flags);
2973
2974                 pr_debug("check %d: state 0x%lx toread %p read %p write %p "
2975                         "written %p\n", i, dev->flags, dev->toread, dev->read,
2976                         dev->towrite, dev->written);
2977
2978                 /* maybe we can request a biofill operation
2979                  *
2980                  * new wantfill requests are only permitted while
2981                  * ops_complete_biofill is guaranteed to be inactive
2982                  */
2983                 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
2984                     !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
2985                         set_bit(R5_Wantfill, &dev->flags);
2986
2987                 /* now count some things */
2988                 if (test_bit(R5_LOCKED, &dev->flags)) s.locked++;
2989                 if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++;
2990                 if (test_bit(R5_Wantcompute, &dev->flags)) s.compute++;
2991
2992                 if (test_bit(R5_Wantfill, &dev->flags))
2993                         s.to_fill++;
2994                 else if (dev->toread)
2995                         s.to_read++;
2996                 if (dev->towrite) {
2997                         s.to_write++;
2998                         if (!test_bit(R5_OVERWRITE, &dev->flags))
2999                                 s.non_overwrite++;
3000                 }
3001                 if (dev->written)
3002                         s.written++;
3003                 rdev = rcu_dereference(conf->disks[i].rdev);
3004                 if (blocked_rdev == NULL &&
3005                     rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
3006                         blocked_rdev = rdev;
3007                         atomic_inc(&rdev->nr_pending);
3008                 }
3009                 if (!rdev || !test_bit(In_sync, &rdev->flags)) {
3010                         /* The ReadError flag will just be confusing now */
3011                         clear_bit(R5_ReadError, &dev->flags);
3012                         clear_bit(R5_ReWrite, &dev->flags);
3013                 }
3014                 if (!rdev || !test_bit(In_sync, &rdev->flags)
3015                     || test_bit(R5_ReadError, &dev->flags)) {
3016                         s.failed++;
3017                         s.failed_num = i;
3018                 } else
3019                         set_bit(R5_Insync, &dev->flags);
3020         }
3021         rcu_read_unlock();
3022
3023         if (unlikely(blocked_rdev)) {
3024                 if (s.syncing || s.expanding || s.expanded ||
3025                     s.to_write || s.written) {
3026                         set_bit(STRIPE_HANDLE, &sh->state);
3027                         goto unlock;
3028                 }
3029                 /* There is nothing for the blocked_rdev to block */
3030                 rdev_dec_pending(blocked_rdev, conf->mddev);
3031                 blocked_rdev = NULL;
3032         }
3033
3034         if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
3035                 set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
3036                 set_bit(STRIPE_BIOFILL_RUN, &sh->state);
3037         }
3038
3039         pr_debug("locked=%d uptodate=%d to_read=%d"
3040                 " to_write=%d failed=%d failed_num=%d\n",
3041                 s.locked, s.uptodate, s.to_read, s.to_write,
3042                 s.failed, s.failed_num);
3043         /* check if the array has lost two devices and, if so, some requests might
3044          * need to be failed
3045          */
3046         if (s.failed > 1 && s.to_read+s.to_write+s.written)
3047                 handle_failed_stripe(conf, sh, &s, disks, &return_bi);
3048         if (s.failed > 1 && s.syncing) {
3049                 md_done_sync(conf->mddev, STRIPE_SECTORS,0);
3050                 clear_bit(STRIPE_SYNCING, &sh->state);
3051                 s.syncing = 0;
3052         }
3053
3054         /* might be able to return some write requests if the parity block
3055          * is safe, or on a failed drive
3056          */
3057         dev = &sh->dev[sh->pd_idx];
3058         if ( s.written &&
3059              ((test_bit(R5_Insync, &dev->flags) &&
3060                !test_bit(R5_LOCKED, &dev->flags) &&
3061                test_bit(R5_UPTODATE, &dev->flags)) ||
3062                (s.failed == 1 && s.failed_num == sh->pd_idx)))
3063                 handle_stripe_clean_event(conf, sh, disks, &return_bi);
3064
3065         /* Now we might consider reading some blocks, either to check/generate
3066          * parity, or to satisfy requests
3067          * or to load a block that is being partially written.
3068          */
3069         if (s.to_read || s.non_overwrite ||
3070             (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding)
3071                 handle_stripe_fill5(sh, &s, disks);
3072
3073         /* Now we check to see if any write operations have recently
3074          * completed
3075          */
3076         prexor = 0;
3077         if (sh->reconstruct_state == reconstruct_state_prexor_drain_result)
3078                 prexor = 1;
3079         if (sh->reconstruct_state == reconstruct_state_drain_result ||
3080             sh->reconstruct_state == reconstruct_state_prexor_drain_result) {
3081                 sh->reconstruct_state = reconstruct_state_idle;
3082
3083                 /* All the 'written' buffers and the parity block are ready to
3084                  * be written back to disk
3085                  */
3086                 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags));
3087                 for (i = disks; i--; ) {
3088                         dev = &sh->dev[i];
3089                         if (test_bit(R5_LOCKED, &dev->flags) &&
3090                                 (i == sh->pd_idx || dev->written)) {
3091                                 pr_debug("Writing block %d\n", i);
3092                                 set_bit(R5_Wantwrite, &dev->flags);
3093                                 if (prexor)
3094                                         continue;
3095                                 if (!test_bit(R5_Insync, &dev->flags) ||
3096                                     (i == sh->pd_idx && s.failed == 0))
3097                                         set_bit(STRIPE_INSYNC, &sh->state);
3098                         }
3099                 }
3100                 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
3101                         dec_preread_active = 1;
3102         }
3103
3104         /* Now to consider new write requests and what else, if anything
3105          * should be read.  We do not handle new writes when:
3106          * 1/ A 'write' operation (copy+xor) is already in flight.
3107          * 2/ A 'check' operation is in flight, as it may clobber the parity
3108          *    block.
3109          */
3110         if (s.to_write && !sh->reconstruct_state && !sh->check_state)
3111                 handle_stripe_dirtying5(conf, sh, &s, disks);
3112
3113         /* maybe we need to check and possibly fix the parity for this stripe
3114          * Any reads will already have been scheduled, so we just see if enough
3115          * data is available.  The parity check is held off while parity
3116          * dependent operations are in flight.
3117          */
3118         if (sh->check_state ||
3119             (s.syncing && s.locked == 0 &&
3120              !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
3121              !test_bit(STRIPE_INSYNC, &sh->state)))
3122                 handle_parity_checks5(conf, sh, &s, disks);
3123
3124         if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
3125                 md_done_sync(conf->mddev, STRIPE_SECTORS,1);
3126                 clear_bit(STRIPE_SYNCING, &sh->state);
3127         }
3128
3129         /* If the failed drive is just a ReadError, then we might need to progress
3130          * the repair/check process
3131          */
3132         if (s.failed == 1 && !conf->mddev->ro &&
3133             test_bit(R5_ReadError, &sh->dev[s.failed_num].flags)
3134             && !test_bit(R5_LOCKED, &sh->dev[s.failed_num].flags)
3135             && test_bit(R5_UPTODATE, &sh->dev[s.failed_num].flags)
3136                 ) {
3137                 dev = &sh->dev[s.failed_num];
3138                 if (!test_bit(R5_ReWrite, &dev->flags)) {
3139                         set_bit(R5_Wantwrite, &dev->flags);
3140                         set_bit(R5_ReWrite, &dev->flags);
3141                         set_bit(R5_LOCKED, &dev->flags);
3142                         s.locked++;
3143                 } else {
3144                         /* let's read it back */
3145                         set_bit(R5_Wantread, &dev->flags);
3146                         set_bit(R5_LOCKED, &dev->flags);
3147                         s.locked++;
3148                 }
3149         }
3150
3151         /* Finish reconstruct operations initiated by the expansion process */
3152         if (sh->reconstruct_state == reconstruct_state_result) {
3153                 struct stripe_head *sh2
3154                         = get_active_stripe(conf, sh->sector, 1, 1, 1);
3155                 if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) {
3156                         /* sh cannot be written until sh2 has been read.
3157                          * so arrange for sh to be delayed a little
3158                          */
3159                         set_bit(STRIPE_DELAYED, &sh->state);
3160                         set_bit(STRIPE_HANDLE, &sh->state);
3161                         if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
3162                                               &sh2->state))
3163                                 atomic_inc(&conf->preread_active_stripes);
3164                         release_stripe(sh2);
3165                         goto unlock;
3166                 }
3167                 if (sh2)
3168                         release_stripe(sh2);
3169
3170                 sh->reconstruct_state = reconstruct_state_idle;
3171                 clear_bit(STRIPE_EXPANDING, &sh->state);
3172                 for (i = conf->raid_disks; i--; ) {
3173                         set_bit(R5_Wantwrite, &sh->dev[i].flags);
3174                         set_bit(R5_LOCKED, &sh->dev[i].flags);
3175                         s.locked++;
3176                 }
3177         }
3178
3179         if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
3180             !sh->reconstruct_state) {
3181                 /* Need to write out all blocks after computing parity */
3182                 sh->disks = conf->raid_disks;
3183                 stripe_set_idx(sh->sector, conf, 0, sh);
3184                 schedule_reconstruction(sh, &s, 1, 1);
3185         } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
3186                 clear_bit(STRIPE_EXPAND_READY, &sh->state);
3187                 atomic_dec(&conf->reshape_stripes);
3188                 wake_up(&conf->wait_for_overlap);
3189                 md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
3190         }
3191
3192         if (s.expanding && s.locked == 0 &&
3193             !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
3194                 handle_stripe_expansion(conf, sh, NULL);
3195
3196  unlock:
3197         spin_unlock(&sh->lock);
3198
3199         /* wait for this device to become unblocked */
3200         if (unlikely(blocked_rdev))
3201                 md_wait_for_blocked_rdev(blocked_rdev, conf->mddev);
3202
3203         if (s.ops_request)
3204                 raid_run_ops(sh, s.ops_request);
3205
3206         ops_run_io(sh, &s);
3207
3208         if (dec_preread_active) {
3209                 /* We delay this until after ops_run_io so that if make_request
3210                  * is waiting on a barrier, it won't continue until the writes
3211                  * have actually been submitted.
3212                  */
3213                 atomic_dec(&conf->preread_active_stripes);
3214                 if (atomic_read(&conf->preread_active_stripes) <
3215                     IO_THRESHOLD)
3216                         md_wakeup_thread(conf->mddev->thread);
3217         }
3218         return_io(return_bi);
3219 }
3220
3221 static void handle_stripe6(struct stripe_head *sh)
3222 {
3223         raid5_conf_t *conf = sh->raid_conf;
3224         int disks = sh->disks;
3225         struct bio *return_bi = NULL;
3226         int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx;
3227         struct stripe_head_state s;
3228         struct r6_state r6s;
3229         struct r5dev *dev, *pdev, *qdev;
3230         mdk_rdev_t *blocked_rdev = NULL;
3231         int dec_preread_active = 0;
3232
3233         pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
3234                 "pd_idx=%d, qd_idx=%d, check:%d, reconstruct:%d\n",
3235                (unsigned long long)sh->sector, sh->state,
3236                atomic_read(&sh->count), pd_idx, qd_idx,
3237                sh->check_state, sh->reconstruct_state);
3238         memset(&s, 0, sizeof(s));
3239
3240         spin_lock(&sh->lock);
3241         clear_bit(STRIPE_HANDLE, &sh->state);
3242         clear_bit(STRIPE_DELAYED, &sh->state);
3243
3244         s.syncing = test_bit(STRIPE_SYNCING, &sh->state);
3245         s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
3246         s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
3247         /* Now to look around and see what can be done */
3248
3249         rcu_read_lock();
3250         for (i=disks; i--; ) {
3251                 mdk_rdev_t *rdev;
3252                 dev = &sh->dev[i];
3253                 clear_bit(R5_Insync, &dev->flags);
3254
3255                 pr_debug("check %d: state 0x%lx toread %p towrite %p written %p\n",
3256                         i, dev->flags, dev->toread, dev->towrite, dev->written);
3257                 /* maybe we can reply to a read
3258                  *
3259                  * new wantfill requests are only permitted while
3260                  * ops_complete_biofill is guaranteed to be inactive
3261                  */
3262                 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
3263                     !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
3264                         set_bit(R5_Wantfill, &dev->flags);
3265
3266                 /* now count some things */
3267                 if (test_bit(R5_LOCKED, &dev->flags)) s.locked++;
3268                 if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++;
3269                 if (test_bit(R5_Wantcompute, &dev->flags)) {
3270                         s.compute++;
3271                         BUG_ON(s.compute > 2);
3272                 }
3273
3274                 if (test_bit(R5_Wantfill, &dev->flags)) {
3275                         s.to_fill++;
3276                 } else if (dev->toread)
3277                         s.to_read++;
3278                 if (dev->towrite) {
3279                         s.to_write++;
3280                         if (!test_bit(R5_OVERWRITE, &dev->flags))
3281                                 s.non_overwrite++;
3282                 }
3283                 if (dev->written)
3284                         s.written++;
3285                 rdev = rcu_dereference(conf->disks[i].rdev);
3286                 if (blocked_rdev == NULL &&
3287                     rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
3288                         blocked_rdev = rdev;
3289                         atomic_inc(&rdev->nr_pending);
3290                 }
3291                 if (!rdev || !test_bit(In_sync, &rdev->flags)) {
3292                         /* The ReadError flag will just be confusing now */
3293                         clear_bit(R5_ReadError, &dev->flags);
3294                         clear_bit(R5_ReWrite, &dev->flags);
3295                 }
3296                 if (!rdev || !test_bit(In_sync, &rdev->flags)
3297                     || test_bit(R5_ReadError, &dev->flags)) {
3298                         if (s.failed < 2)
3299                                 r6s.failed_num[s.failed] = i;
3300                         s.failed++;
3301                 } else
3302                         set_bit(R5_Insync, &dev->flags);
3303         }
3304         rcu_read_unlock();
3305
3306         if (unlikely(blocked_rdev)) {
3307                 if (s.syncing || s.expanding || s.expanded ||
3308                     s.to_write || s.written) {
3309                         set_bit(STRIPE_HANDLE, &sh->state);
3310                         goto unlock;
3311                 }
3312                 /* There is nothing for the blocked_rdev to block */
3313                 rdev_dec_pending(blocked_rdev, conf->mddev);
3314                 blocked_rdev = NULL;
3315         }
3316
3317         if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
3318                 set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
3319                 set_bit(STRIPE_BIOFILL_RUN, &sh->state);
3320         }
3321
3322         pr_debug("locked=%d uptodate=%d to_read=%d"
3323                " to_write=%d failed=%d failed_num=%d,%d\n",
3324                s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
3325                r6s.failed_num[0], r6s.failed_num[1]);
3326         /* check if the array has lost >2 devices and, if so, some requests
3327          * might need to be failed
3328          */
3329         if (s.failed > 2 && s.to_read+s.to_write+s.written)
3330                 handle_failed_stripe(conf, sh, &s, disks, &return_bi);
3331         if (s.failed > 2 && s.syncing) {
3332                 md_done_sync(conf->mddev, STRIPE_SECTORS,0);
3333                 clear_bit(STRIPE_SYNCING, &sh->state);
3334                 s.syncing = 0;
3335         }
3336
3337         /*
3338          * might be able to return some write requests if the parity blocks
3339          * are safe, or on a failed drive
3340          */
3341         pdev = &sh->dev[pd_idx];
3342         r6s.p_failed = (s.failed >= 1 && r6s.failed_num[0] == pd_idx)
3343                 || (s.failed >= 2 && r6s.failed_num[1] == pd_idx);
3344         qdev = &sh->dev[qd_idx];
3345         r6s.q_failed = (s.failed >= 1 && r6s.failed_num[0] == qd_idx)
3346                 || (s.failed >= 2 && r6s.failed_num[1] == qd_idx);
3347
3348         if ( s.written &&
3349              ( r6s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
3350                              && !test_bit(R5_LOCKED, &pdev->flags)
3351                              && test_bit(R5_UPTODATE, &pdev->flags)))) &&
3352              ( r6s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
3353                              && !test_bit(R5_LOCKED, &qdev->flags)
3354                              && test_bit(R5_UPTODATE, &qdev->flags)))))
3355                 handle_stripe_clean_event(conf, sh, disks, &return_bi);
3356
3357         /* Now we might consider reading some blocks, either to check/generate
3358          * parity, or to satisfy requests
3359          * or to load a block that is being partially written.
3360          */
3361         if (s.to_read || s.non_overwrite || (s.to_write && s.failed) ||
3362             (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding)
3363                 handle_stripe_fill6(sh, &s, &r6s, disks);
3364
3365         /* Now we check to see if any write operations have recently
3366          * completed
3367          */
3368         if (sh->reconstruct_state == reconstruct_state_drain_result) {
3369
3370                 sh->reconstruct_state = reconstruct_state_idle;
3371                 /* All the 'written' buffers and the parity blocks are ready to
3372                  * be written back to disk
3373                  */
3374                 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags));
3375                 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[qd_idx].flags));
3376                 for (i = disks; i--; ) {
3377                         dev = &sh->dev[i];
3378                         if (test_bit(R5_LOCKED, &dev->flags) &&
3379                             (i == sh->pd_idx || i == qd_idx ||
3380                              dev->written)) {
3381                                 pr_debug("Writing block %d\n", i);
3382                                 BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
3383                                 set_bit(R5_Wantwrite, &dev->flags);
3384                                 if (!test_bit(R5_Insync, &dev->flags) ||
3385                                     ((i == sh->pd_idx || i == qd_idx) &&
3386                                       s.failed == 0))
3387                                         set_bit(STRIPE_INSYNC, &sh->state);
3388                         }
3389                 }
3390                 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
3391                         dec_preread_active = 1;
3392         }
3393
3394         /* Now to consider new write requests and what else, if anything
3395          * should be read.  We do not handle new writes when:
3396          * 1/ A 'write' operation (copy+gen_syndrome) is already in flight.
3397          * 2/ A 'check' operation is in flight, as it may clobber the parity
3398          *    block.
3399          */
3400         if (s.to_write && !sh->reconstruct_state && !sh->check_state)
3401                 handle_stripe_dirtying6(conf, sh, &s, &r6s, disks);
3402
3403         /* maybe we need to check and possibly fix the parity for this stripe
3404          * Any reads will already have been scheduled, so we just see if enough
3405          * data is available.  The parity check is held off while parity
3406          * dependent operations are in flight.
3407          */
3408         if (sh->check_state ||
3409             (s.syncing && s.locked == 0 &&
3410              !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
3411              !test_bit(STRIPE_INSYNC, &sh->state)))
3412                 handle_parity_checks6(conf, sh, &s, &r6s, disks);
3413
3414         if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
3415                 md_done_sync(conf->mddev, STRIPE_SECTORS,1);
3416                 clear_bit(STRIPE_SYNCING, &sh->state);
3417         }
3418
3419         /* If the failed drives are just a ReadError, then we might need
3420          * to progress the repair/check process
3421          */
3422         if (s.failed <= 2 && !conf->mddev->ro)
3423                 for (i = 0; i < s.failed; i++) {
3424                         dev = &sh->dev[r6s.failed_num[i]];
3425                         if (test_bit(R5_ReadError, &dev->flags)
3426                             && !test_bit(R5_LOCKED, &dev->flags)
3427                             && test_bit(R5_UPTODATE, &dev->flags)
3428                                 ) {
3429                                 if (!test_bit(R5_ReWrite, &dev->flags)) {
3430                                         set_bit(R5_Wantwrite, &dev->flags);
3431                                         set_bit(R5_ReWrite, &dev->flags);
3432                                         set_bit(R5_LOCKED, &dev->flags);
3433                                         s.locked++;
3434                                 } else {
3435                                         /* let's read it back */
3436                                         set_bit(R5_Wantread, &dev->flags);
3437                                         set_bit(R5_LOCKED, &dev->flags);
3438                                         s.locked++;
3439                                 }
3440                         }
3441                 }
3442
3443         /* Finish reconstruct operations initiated by the expansion process */
3444         if (sh->reconstruct_state == reconstruct_state_result) {
3445                 sh->reconstruct_state = reconstruct_state_idle;
3446                 clear_bit(STRIPE_EXPANDING, &sh->state);
3447                 for (i = conf->raid_disks; i--; ) {
3448                         set_bit(R5_Wantwrite, &sh->dev[i].flags);
3449                         set_bit(R5_LOCKED, &sh->dev[i].flags);
3450                         s.locked++;
3451                 }
3452         }
3453
3454         if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
3455             !sh->reconstruct_state) {
3456                 struct stripe_head *sh2
3457                         = get_active_stripe(conf, sh->sector, 1, 1, 1);
3458                 if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) {
3459                         /* sh cannot be written until sh2 has been read.
3460                          * so arrange for sh to be delayed a little
3461                          */
3462                         set_bit(STRIPE_DELAYED, &sh->state);
3463                         set_bit(STRIPE_HANDLE, &sh->state);
3464                         if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
3465                                               &sh2->state))
3466                                 atomic_inc(&conf->preread_active_stripes);
3467                         release_stripe(sh2);
3468                         goto unlock;
3469                 }
3470                 if (sh2)
3471                         release_stripe(sh2);
3472
3473                 /* Need to write out all blocks after computing P&Q */
3474                 sh->disks = conf->raid_disks;
3475                 stripe_set_idx(sh->sector, conf, 0, sh);
3476                 schedule_reconstruction(sh, &s, 1, 1);
3477         } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
3478                 clear_bit(STRIPE_EXPAND_READY, &sh->state);
3479                 atomic_dec(&conf->reshape_stripes);
3480                 wake_up(&conf->wait_for_overlap);
3481                 md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
3482         }
3483
3484         if (s.expanding && s.locked == 0 &&
3485             !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
3486                 handle_stripe_expansion(conf, sh, &r6s);
3487
3488  unlock:
3489         spin_unlock(&sh->lock);
3490
3491         /* wait for this device to become unblocked */
3492         if (unlikely(blocked_rdev))
3493                 md_wait_for_blocked_rdev(blocked_rdev, conf->mddev);
3494
3495         if (s.ops_request)
3496                 raid_run_ops(sh, s.ops_request);
3497
3498         ops_run_io(sh, &s);
3499
3500
3501         if (dec_preread_active) {
3502                 /* We delay this until after ops_run_io so that if make_request
3503                  * is waiting on a barrier, it won't continue until the writes
3504                  * have actually been submitted.
3505                  */
3506                 atomic_dec(&conf->preread_active_stripes);
3507                 if (atomic_read(&conf->preread_active_stripes) <
3508                     IO_THRESHOLD)
3509                         md_wakeup_thread(conf->mddev->thread);
3510         }
3511
3512         return_io(return_bi);
3513 }
3514
3515 static void handle_stripe(struct stripe_head *sh)
3516 {
3517         if (sh->raid_conf->level == 6)
3518                 handle_stripe6(sh);
3519         else
3520                 handle_stripe5(sh);
3521 }
3522
3523 static void raid5_activate_delayed(raid5_conf_t *conf)
3524 {
3525         if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
3526                 while (!list_empty(&conf->delayed_list)) {
3527                         struct list_head *l = conf->delayed_list.next;
3528                         struct stripe_head *sh;
3529                         sh = list_entry(l, struct stripe_head, lru);
3530                         list_del_init(l);
3531                         clear_bit(STRIPE_DELAYED, &sh->state);
3532                         if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
3533                                 atomic_inc(&conf->preread_active_stripes);
3534                         list_add_tail(&sh->lru, &conf->hold_list);
3535                 }
3536         } else
3537                 blk_plug_device(conf->mddev->queue);
3538 }
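
/* Note: delayed stripes (deferred in the hope that further writes will
 * arrive and make read-modify-write pre-reads unnecessary) are promoted
 * to the hold_list only while fewer than IO_THRESHOLD preread-active
 * stripes are in flight; otherwise the queue is simply re-plugged so
 * writes can keep accumulating.
 */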
3539
3540 static void activate_bit_delay(raid5_conf_t *conf)
3541 {
3542         /* device_lock is held */
3543         struct list_head head;
3544         list_add(&head, &conf->bitmap_list);
3545         list_del_init(&conf->bitmap_list);
3546         while (!list_empty(&head)) {
3547                 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
3548                 list_del_init(&sh->lru);
3549                 atomic_inc(&sh->count);
3550                 __release_stripe(conf, sh);
3551         }
3552 }
3553
3554 static void unplug_slaves(mddev_t *mddev)
3555 {
3556         raid5_conf_t *conf = mddev->private;
3557         int i;
3558         int devs = max(conf->raid_disks, conf->previous_raid_disks);
3559
3560         rcu_read_lock();
3561         for (i = 0; i < devs; i++) {
3562                 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
3563                 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
3564                         struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
3565
3566                         atomic_inc(&rdev->nr_pending);
3567                         rcu_read_unlock();
3568
3569                         blk_unplug(r_queue);
3570
3571                         rdev_dec_pending(rdev, mddev);
3572                         rcu_read_lock();
3573                 }
3574         }
3575         rcu_read_unlock();
3576 }
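
/* Note the reference pattern above: rdev->nr_pending is raised while
 * still inside the rcu read-side section, which keeps the rdev alive
 * across rcu_read_unlock() so that blk_unplug() can run outside the
 * rcu section; rdev_dec_pending() drops the reference once the member
 * queue has been kicked.
 */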
3577
3578 static void raid5_unplug_device(struct request_queue *q)
3579 {
3580         mddev_t *mddev = q->queuedata;
3581         raid5_conf_t *conf = mddev->private;
3582         unsigned long flags;
3583
3584         spin_lock_irqsave(&conf->device_lock, flags);
3585
3586         if (blk_remove_plug(q)) {
3587                 conf->seq_flush++;
3588                 raid5_activate_delayed(conf);
3589         }
3590         md_wakeup_thread(mddev->thread);
3591
3592         spin_unlock_irqrestore(&conf->device_lock, flags);
3593
3594         unplug_slaves(mddev);
3595 }
3596
3597 static int raid5_congested(void *data, int bits)
3598 {
3599         mddev_t *mddev = data;
3600         raid5_conf_t *conf = mddev->private;
3601
3602         /* No difference between reads and writes.  Just check
3603          * how busy the stripe_cache is
3604          */
3605
3606         if (mddev_congested(mddev, bits))
3607                 return 1;
3608         if (conf->inactive_blocked)
3609                 return 1;
3610         if (conf->quiesce)
3611                 return 1;
3612         if (list_empty_careful(&conf->inactive_list))
3613                 return 1;
3614
3615         return 0;
3616 }
3617
3618 /* We want read requests to align with chunks where possible,
3619  * but write requests don't need to.
3620  */
3621 static int raid5_mergeable_bvec(struct request_queue *q,
3622                                 struct bvec_merge_data *bvm,
3623                                 struct bio_vec *biovec)
3624 {
3625         mddev_t *mddev = q->queuedata;
3626         sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
3627         int max;
3628         unsigned int chunk_sectors = mddev->chunk_sectors;
3629         unsigned int bio_sectors = bvm->bi_size >> 9;
3630
3631         if ((bvm->bi_rw & 1) == WRITE)
3632                 return biovec->bv_len; /* always allow writes to be mergeable */
3633
3634         if (mddev->new_chunk_sectors < mddev->chunk_sectors)
3635                 chunk_sectors = mddev->new_chunk_sectors;
3636         max =  (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
3637         if (max < 0) max = 0;
3638         if (max <= biovec->bv_len && bio_sectors == 0)
3639                 return biovec->bv_len;
3640         else
3641                 return max;
3642 }
3643
3644
3645 static int in_chunk_boundary(mddev_t *mddev, struct bio *bio)
3646 {
3647         sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
3648         unsigned int chunk_sectors = mddev->chunk_sectors;
3649         unsigned int bio_sectors = bio->bi_size >> 9;
3650
3651         if (mddev->new_chunk_sectors < mddev->chunk_sectors)
3652                 chunk_sectors = mddev->new_chunk_sectors;
3653         return  chunk_sectors >=
3654                 ((sector & (chunk_sectors - 1)) + bio_sectors);
3655 }
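
/* Worked example for the two alignment helpers above, assuming a 64KiB
 * chunk (chunk_sectors == 128): a 4KiB read (8 sectors) starting at
 * sector 120 gives (120 & 127) + 8 == 128 <= 128, so it fits inside a
 * single chunk and may take the aligned-read fast path; starting at
 * sector 124 gives (124 & 127) + 8 == 132 > 128, so it straddles a
 * chunk boundary and must go through the stripe cache instead.
 */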
3656
3657 /*
3658  *  add bio to the retry LIFO in O(1) (we may be in interrupt context);
3659  *  it is later drained by raid5d.
3660  */
3661 static void add_bio_to_retry(struct bio *bi,raid5_conf_t *conf)
3662 {
3663         unsigned long flags;
3664
3665         spin_lock_irqsave(&conf->device_lock, flags);
3666
3667         bi->bi_next = conf->retry_read_aligned_list;
3668         conf->retry_read_aligned_list = bi;
3669
3670         spin_unlock_irqrestore(&conf->device_lock, flags);
3671         md_wakeup_thread(conf->mddev->thread);
3672 }
3673
3674
3675 static struct bio *remove_bio_from_retry(raid5_conf_t *conf)
3676 {
3677         struct bio *bi;
3678
3679         bi = conf->retry_read_aligned;
3680         if (bi) {
3681                 conf->retry_read_aligned = NULL;
3682                 return bi;
3683         }
3684         bi = conf->retry_read_aligned_list;
3685         if (bi) {
3686                 conf->retry_read_aligned_list = bi->bi_next;
3687                 bi->bi_next = NULL;
3688                 /*
3689                  * this sets the active stripe count to 1 and the processed
3690                  * stripe count to zero (upper 8 bits)
3691                  */
3692                 bi->bi_phys_segments = 1; /* biased count of active stripes */
3693         }
3694
3695         return bi;
3696 }
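
/* Note: bi_phys_segments is reused here as two packed counters, the
 * low half counting stripes still actively processing the bio and the
 * high half counting stripes already processed.  The value 1 stored
 * above is a bias: the bio cannot be completed until the retry logic
 * drops this initial reference.
 */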
3697
3698
3699 /*
3700  *  The "raid5_align_endio" should check if the read succeeded and if it
3701  *  did, call bio_endio on the original bio (having bio_put the new bio
3702  *  first).
3703  *  If the read failed, the bio is queued for a retry via add_bio_to_retry().
3704  */
3705 static void raid5_align_endio(struct bio *bi, int error)
3706 {
3707         struct bio* raid_bi  = bi->bi_private;
3708         mddev_t *mddev;
3709         raid5_conf_t *conf;
3710         int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
3711         mdk_rdev_t *rdev;
3712
3713         bio_put(bi);
3714
3715         mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata;
3716         conf = mddev->private;
3717         rdev = (void*)raid_bi->bi_next;
3718         raid_bi->bi_next = NULL;
3719
3720         rdev_dec_pending(rdev, conf->mddev);
3721
3722         if (!error && uptodate) {
3723                 bio_endio(raid_bi, 0);
3724                 if (atomic_dec_and_test(&conf->active_aligned_reads))
3725                         wake_up(&conf->wait_for_stripe);
3726                 return;
3727         }
3728
3729
3730         pr_debug("raid5_align_endio : io error...handing IO for a retry\n");
3731
3732         add_bio_to_retry(raid_bi, conf);
3733 }
3734
3735 static int bio_fits_rdev(struct bio *bi)
3736 {
3737         struct request_queue *q = bdev_get_queue(bi->bi_bdev);
3738
3739         if ((bi->bi_size>>9) > queue_max_sectors(q))
3740                 return 0;
3741         blk_recount_segments(q, bi);
3742         if (bi->bi_phys_segments > queue_max_phys_segments(q))
3743                 return 0;
3744
3745         if (q->merge_bvec_fn)
3746                 /* it's too hard to apply the merge_bvec_fn at this stage,
3747                  * so just give up
3748                  */
3749                 return 0;
3750
3751         return 1;
3752 }
3753
3754
3755 static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio)
3756 {
3757         mddev_t *mddev = q->queuedata;
3758         raid5_conf_t *conf = mddev->private;
3759         int dd_idx;
3760         struct bio* align_bi;
3761         mdk_rdev_t *rdev;
3762
3763         if (!in_chunk_boundary(mddev, raid_bio)) {
3764                 pr_debug("chunk_aligned_read : non aligned\n");
3765                 return 0;
3766         }
3767         /*
3768          * use bio_clone to make a copy of the bio
3769          */
3770         align_bi = bio_clone(raid_bio, GFP_NOIO);
3771         if (!align_bi)
3772                 return 0;
3773         /*
3774          *   set bi_end_io to a new function, and set bi_private to the
3775          *     original bio.
3776          */
3777         align_bi->bi_end_io  = raid5_align_endio;
3778         align_bi->bi_private = raid_bio;
3779         /*
3780          *      compute position
3781          */
3782         align_bi->bi_sector =  raid5_compute_sector(conf, raid_bio->bi_sector,
3783                                                     0,
3784                                                     &dd_idx, NULL);
3785
3786         rcu_read_lock();
3787         rdev = rcu_dereference(conf->disks[dd_idx].rdev);
3788         if (rdev && test_bit(In_sync, &rdev->flags)) {
3789                 atomic_inc(&rdev->nr_pending);
3790                 rcu_read_unlock();
3791                 raid_bio->bi_next = (void*)rdev;
3792                 align_bi->bi_bdev =  rdev->bdev;
3793                 align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
3794                 align_bi->bi_sector += rdev->data_offset;
3795
3796                 if (!bio_fits_rdev(align_bi)) {
3797                         /* too big in some way */
3798                         bio_put(align_bi);
3799                         rdev_dec_pending(rdev, mddev);
3800                         return 0;
3801                 }
3802
3803                 spin_lock_irq(&conf->device_lock);
3804                 wait_event_lock_irq(conf->wait_for_stripe,
3805                                     conf->quiesce == 0,
3806                                     conf->device_lock, /* nothing */);
3807                 atomic_inc(&conf->active_aligned_reads);
3808                 spin_unlock_irq(&conf->device_lock);
3809
3810                 generic_make_request(align_bi);
3811                 return 1;
3812         } else {
3813                 rcu_read_unlock();
3814                 bio_put(align_bi);
3815                 return 0;
3816         }
3817 }
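
/* Note: this fast path never touches the stripe cache.  The bio is
 * cloned, retargeted at the one member disk holding the whole
 * chunk-aligned range, and submitted directly.  active_aligned_reads,
 * taken only while quiesce == 0, is what the quiesce logic elsewhere
 * in this file waits on to drain these reads.  On failure the bio
 * falls back to the retry list and is re-driven through the normal
 * stripe-cache path by raid5d.
 */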
3818
3819 /* __get_priority_stripe - get the next stripe to process
3820  *
3821  * Full stripe writes are allowed to pass preread active stripes up until
3822  * the bypass_threshold is exceeded.  In general the bypass_count
3823  * increments when the handle_list is handled before the hold_list; however, it
3824  * will not be incremented when STRIPE_IO_STARTED is sampled as set, which
3825  * signifies a stripe with in-flight i/o.  The bypass_count is reset when the
3826  * head of the hold_list has changed, i.e. the head was promoted to the
3827  * handle_list.
3828  */
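/*
 * Worked example with invented numbers: with the default bypass_threshold
 * of 1, suppose a full stripe write waits at the head of hold_list while
 * stripes on handle_list are handled without STRIPE_IO_STARTED set.  Each
 * pass that finds the same stripe still at the head of hold_list
 * increments bypass_count, so it reaches 2; on a later pass with
 * handle_list empty, bypass_count (2) exceeds bypass_threshold (1), the
 * hold_list head is promoted, and bypass_count is pulled back down by the
 * threshold.
 */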
3829 static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf)
3830 {
3831         struct stripe_head *sh;
3832
3833         pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
3834                   __func__,
3835                   list_empty(&conf->handle_list) ? "empty" : "busy",
3836                   list_empty(&conf->hold_list) ? "empty" : "busy",
3837                   atomic_read(&conf->pending_full_writes), conf->bypass_count);
3838
3839         if (!list_empty(&conf->handle_list)) {
3840                 sh = list_entry(conf->handle_list.next, typeof(*sh), lru);
3841
3842                 if (list_empty(&conf->hold_list))
3843                         conf->bypass_count = 0;
3844                 else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) {
3845                         if (conf->hold_list.next == conf->last_hold)
3846                                 conf->bypass_count++;
3847                         else {
3848                                 conf->last_hold = conf->hold_list.next;
3849                                 conf->bypass_count -= conf->bypass_threshold;
3850                                 if (conf->bypass_count < 0)
3851                                         conf->bypass_count = 0;
3852                         }
3853                 }
3854         } else if (!list_empty(&conf->hold_list) &&
3855                    ((conf->bypass_threshold &&
3856                      conf->bypass_count > conf->bypass_threshold) ||
3857                     atomic_read(&conf->pending_full_writes) == 0)) {
3858                 sh = list_entry(conf->hold_list.next,
3859                                 typeof(*sh), lru);
3860                 conf->bypass_count -= conf->bypass_threshold;
3861                 if (conf->bypass_count < 0)
3862                         conf->bypass_count = 0;
3863         } else
3864                 return NULL;
3865
3866         list_del_init(&sh->lru);
3867         atomic_inc(&sh->count);
3868         BUG_ON(atomic_read(&sh->count) != 1);
3869         return sh;
3870 }
3871
3872 static int make_request(struct request_queue *q, struct bio * bi)
3873 {
3874         mddev_t *mddev = q->queuedata;
3875         raid5_conf_t *conf = mddev->private;
3876         int dd_idx;
3877         sector_t new_sector;
3878         sector_t logical_sector, last_sector;
3879         struct stripe_head *sh;
3880         const int rw = bio_data_dir(bi);
3881         int cpu, remaining;
3882
3883         if (unlikely(bio_rw_flagged(bi, BIO_RW_BARRIER))) {
3884                 /* Drain all pending writes.  We only really need
3885                  * to ensure they have been submitted, but this is
3886                  * easier.
3887                  */
3888                 mddev->pers->quiesce(mddev, 1);
3889                 mddev->pers->quiesce(mddev, 0);
3890                 md_barrier_request(mddev, bi);
3891                 return 0;
3892         }
3893
3894         md_write_start(mddev, bi);
3895
3896         cpu = part_stat_lock();
3897         part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
3898         part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
3899                       bio_sectors(bi));
3900         part_stat_unlock();
3901
3902         if (rw == READ &&
3903              mddev->reshape_position == MaxSector &&
3904              chunk_aligned_read(q,bi))
3905                 return 0;
3906
3907         logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
3908         last_sector = bi->bi_sector + (bi->bi_size>>9);
3909         bi->bi_next = NULL;
3910         bi->bi_phys_segments = 1;       /* over-loaded to count active stripes */
3911
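        /*
         * Illustration (hypothetical request, 4K pages so STRIPE_SECTORS
         * is 8): a 16-sector write at sector 100 has last_sector 116;
         * logical_sector rounds down to 96, so the loop below walks the
         * three stripes at 96, 104 and 112, and bi_phys_segments tracks
         * how many of them still reference this bio.
         */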
3912         for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
3913                 DEFINE_WAIT(w);
3914                 int disks, data_disks;
3915                 int previous;
3916
3917         retry:
3918                 previous = 0;
3919                 disks = conf->raid_disks;
3920                 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
3921                 if (unlikely(conf->reshape_progress != MaxSector)) {
3922                         /* spinlock is needed as reshape_progress may be
3923                          * 64bit on a 32bit platform, and so it might be
3924                          * possible to see a half-updated value.
3925                          * Of course reshape_progress could change after
3926                          * the lock is dropped, so once we get a reference
3927                          * to the stripe that we think it is, we will have
3928                          * to check again.
3929                          */
3930                         spin_lock_irq(&conf->device_lock);
3931                         if (mddev->delta_disks < 0
3932                             ? logical_sector < conf->reshape_progress
3933                             : logical_sector >= conf->reshape_progress) {
3934                                 disks = conf->previous_raid_disks;
3935                                 previous = 1;
3936                         } else {
3937                                 if (mddev->delta_disks < 0
3938                                     ? logical_sector < conf->reshape_safe
3939                                     : logical_sector >= conf->reshape_safe) {
3940                                         spin_unlock_irq(&conf->device_lock);
3941                                         schedule();
3942                                         goto retry;
3943                                 }
3944                         }
3945                         spin_unlock_irq(&conf->device_lock);
3946                 }
3947                 data_disks = disks - conf->max_degraded;
3948
3949                 new_sector = raid5_compute_sector(conf, logical_sector,
3950                                                   previous,
3951                                                   &dd_idx, NULL);
3952                 pr_debug("raid5: make_request, sector %llu logical %llu\n",
3953                         (unsigned long long)new_sector,
3954                         (unsigned long long)logical_sector);
3955
3956                 sh = get_active_stripe(conf, new_sector, previous,
3957                                        (bi->bi_rw&RWA_MASK), 0);
3958                 if (sh) {
3959                         if (unlikely(previous)) {
3960                                 /* expansion might have moved on while waiting for a
3961                                  * stripe, so we must do the range check again.
3962                                  * Expansion could still move past after this
3963                                  * test, but as we are holding a reference to
3964                                  * 'sh', we know that if that happens,
3965                                  *  STRIPE_EXPANDING will get set and the expansion
3966                                  * won't proceed until we finish with the stripe.
3967                                  */
3968                                 int must_retry = 0;
3969                                 spin_lock_irq(&conf->device_lock);
3970                                 if (mddev->delta_disks < 0
3971                                     ? logical_sector >= conf->reshape_progress
3972                                     : logical_sector < conf->reshape_progress)
3973                                         /* mismatch, need to try again */
3974                                         must_retry = 1;
3975                                 spin_unlock_irq(&conf->device_lock);
3976                                 if (must_retry) {
3977                                         release_stripe(sh);
3978                                         schedule();
3979                                         goto retry;
3980                                 }
3981                         }
3982
3983                         if (bio_data_dir(bi) == WRITE &&
3984                             logical_sector >= mddev->suspend_lo &&
3985                             logical_sector < mddev->suspend_hi) {
3986                                 release_stripe(sh);
3987                                 /* As the suspend_* range is controlled by
3988                                  * userspace, we want an interruptible
3989                                  * wait.
3990                                  */
3991                                 flush_signals(current);
3992                                 prepare_to_wait(&conf->wait_for_overlap,
3993                                                 &w, TASK_INTERRUPTIBLE);
3994                                 if (logical_sector >= mddev->suspend_lo &&
3995                                     logical_sector < mddev->suspend_hi)
3996                                         schedule();
3997                                 goto retry;
3998                         }
3999
4000                         if (test_bit(STRIPE_EXPANDING, &sh->state) ||
4001                             !add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) {
4002                                 /* Stripe is busy expanding or
4003                                  * add failed due to overlap.  Flush everything
4004                                  * and wait a while
4005                                  */
4006                                 raid5_unplug_device(mddev->queue);
4007                                 release_stripe(sh);
4008                                 schedule();
4009                                 goto retry;
4010                         }
4011                         finish_wait(&conf->wait_for_overlap, &w);
4012                         set_bit(STRIPE_HANDLE, &sh->state);
4013                         clear_bit(STRIPE_DELAYED, &sh->state);
4014                         if (mddev->barrier &&
4015                             !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
4016                                 atomic_inc(&conf->preread_active_stripes);
4017                         release_stripe(sh);
4018                 } else {
4019                         /* cannot get stripe for read-ahead, just give-up */
4020                         clear_bit(BIO_UPTODATE, &bi->bi_flags);
4021                         finish_wait(&conf->wait_for_overlap, &w);
4022                         break;
4023                 }
4024
4025         }
4026         spin_lock_irq(&conf->device_lock);
4027         remaining = raid5_dec_bi_phys_segments(bi);
4028         spin_unlock_irq(&conf->device_lock);
4029         if (remaining == 0) {
4030
4031                 if ( rw == WRITE )
4032                         md_write_end(mddev);
4033
4034                 bio_endio(bi, 0);
4035         }
4036
4037         if (mddev->barrier) {
4038                 /* We need to wait for the stripes to all be handled.
4039                  * So: wait for preread_active_stripes to drop to 0.
4040                  */
4041                 wait_event(mddev->thread->wqueue,
4042                            atomic_read(&conf->preread_active_stripes) == 0);
4043         }
4044         return 0;
4045 }
4046
4047 static sector_t raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks);
4048
4049 static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped)
4050 {
4051         /* reshaping is quite different from recovery/resync, so it is
4052          * handled quite separately ... here.
4053          *
4054          * On each call to sync_request, we gather one chunk worth of
4055          * destination stripes and flag them as expanding.
4056          * Then we find all the source stripes and request reads.
4057          * As the reads complete, handle_stripe will copy the data
4058          * into the destination stripe and release that stripe.
4059          */
4060         raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
4061         struct stripe_head *sh;
4062         sector_t first_sector, last_sector;
4063         int raid_disks = conf->previous_raid_disks;
4064         int data_disks = raid_disks - conf->max_degraded;
4065         int new_data_disks = conf->raid_disks - conf->max_degraded;
4066         int i;
4067         int dd_idx;
4068         sector_t writepos, readpos, safepos;
4069         sector_t stripe_addr;
4070         int reshape_sectors;
4071         struct list_head stripes;
4072
4073         if (sector_nr == 0) {
4074                 /* If restarting in the middle, skip the initial sectors */
4075                 if (mddev->delta_disks < 0 &&
4076                     conf->reshape_progress < raid5_size(mddev, 0, 0)) {
4077                         sector_nr = raid5_size(mddev, 0, 0)
4078                                 - conf->reshape_progress;
4079                 } else if (mddev->delta_disks >= 0 &&
4080                            conf->reshape_progress > 0)
4081                         sector_nr = conf->reshape_progress;
4082                 sector_div(sector_nr, new_data_disks);
4083                 if (sector_nr) {
4084                         mddev->curr_resync_completed = sector_nr;
4085                         sysfs_notify(&mddev->kobj, NULL, "sync_completed");
4086                         *skipped = 1;
4087                         return sector_nr;
4088                 }
4089         }
4090
4091         /* We need to process a full chunk at a time.
4092          * If old and new chunk sizes differ, we need to process the
4093          * larger of the two.
4094          */
4095         if (mddev->new_chunk_sectors > mddev->chunk_sectors)
4096                 reshape_sectors = mddev->new_chunk_sectors;
4097         else
4098                 reshape_sectors = mddev->chunk_sectors;
4099
4100         /* we update the metadata when there is more than 3MB
4101          * in the block range (that is rather arbitrary, and should
4102          * probably be time based) or when the data about to be
4103          * copied would over-write the source of the data at
4104          * the front of the range.
4105          * i.e. one new_stripe along from reshape_progress maps (in the
4106          * new layout) to after where reshape_safe maps in the old layout.
4107          */
4108         writepos = conf->reshape_progress;
4109         sector_div(writepos, new_data_disks);
4110         readpos = conf->reshape_progress;
4111         sector_div(readpos, data_disks);
4112         safepos = conf->reshape_safe;
4113         sector_div(safepos, data_disks);
4114         if (mddev->delta_disks < 0) {
4115                 writepos -= min_t(sector_t, reshape_sectors, writepos);
4116                 readpos += reshape_sectors;
4117                 safepos += reshape_sectors;
4118         } else {
4119                 writepos += reshape_sectors;
4120                 readpos -= min_t(sector_t, reshape_sectors, readpos);
4121                 safepos -= min_t(sector_t, reshape_sectors, safepos);
4122         }
4123
4124         /* 'writepos' is the most advanced device address we might write.
4125          * 'readpos' is the least advanced device address we might read.
4126          * 'safepos' is the least address recorded in the metadata as having
4127          *     been reshaped.
4128          * If 'readpos' is behind 'writepos', then there is no way that we can
4129          * ensure safety in the face of a crash - that must be done by userspace
4130          * making a backup of the data.  So in that case there is no particular
4131          * rush to update metadata.
4132          * Otherwise if 'safepos' is behind 'writepos', then we really need to
4133          * update the metadata to advance 'safepos' to match 'readpos' so that
4134          * we can be safe in the event of a crash.
4135          * So we insist on updating metadata if safepos is behind writepos and
4136          * readpos is beyond writepos.
4137          * In any case, update the metadata every 10 seconds.
4138          * Maybe that number should be configurable, but I'm not sure it is
4139          * worth it.... maybe it could be a multiple of safemode_delay???
4140          */
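        /*
         * Worked example with invented geometry: growing so that
         * new_data_disks = 5 and the old data_disks = 4, with
         * reshape_sectors = 128.  At conf->reshape_progress = 12800 and
         * conf->reshape_safe = 10240 this gives writepos = 12800/5 + 128
         * = 2688, readpos = 12800/4 - 128 = 3072 and safepos =
         * 10240/4 - 128 = 2432.  Then safepos < writepos while
         * readpos > writepos, so the test below forces a metadata update
         * before any further writing.
         */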
4141         if ((mddev->delta_disks < 0
4142              ? (safepos > writepos && readpos < writepos)
4143              : (safepos < writepos && readpos > writepos)) ||
4144             time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
4145                 /* Cannot proceed until we've updated the superblock... */
4146                 wait_event(conf->wait_for_overlap,
4147                            atomic_read(&conf->reshape_stripes)==0);
4148                 mddev->reshape_position = conf->reshape_progress;
4149                 mddev->curr_resync_completed = mddev->curr_resync;
4150                 conf->reshape_checkpoint = jiffies;
4151                 set_bit(MD_CHANGE_DEVS, &mddev->flags);
4152                 md_wakeup_thread(mddev->thread);
4153                 wait_event(mddev->sb_wait, mddev->flags == 0 ||
4154                            kthread_should_stop());
4155                 spin_lock_irq(&conf->device_lock);
4156                 conf->reshape_safe = mddev->reshape_position;
4157                 spin_unlock_irq(&conf->device_lock);
4158                 wake_up(&conf->wait_for_overlap);
4159                 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
4160         }
4161
4162         if (mddev->delta_disks < 0) {
4163                 BUG_ON(conf->reshape_progress == 0);
4164                 stripe_addr = writepos;
4165                 BUG_ON((mddev->dev_sectors &
4166                         ~((sector_t)reshape_sectors - 1))
4167                        - reshape_sectors - stripe_addr
4168                        != sector_nr);
4169         } else {
4170                 BUG_ON(writepos != sector_nr + reshape_sectors);
4171                 stripe_addr = sector_nr;
4172         }
4173         INIT_LIST_HEAD(&stripes);
4174         for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
4175                 int j;
4176                 int skipped_disk = 0;
4177                 sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
4178                 set_bit(STRIPE_EXPANDING, &sh->state);
4179                 atomic_inc(&conf->reshape_stripes);
4180                 /* If any of this stripe is beyond the end of the old
4181                  * array, then we need to zero those blocks
4182                  */
4183                 for (j=sh->disks; j--;) {
4184                         sector_t s;
4185                         if (j == sh->pd_idx)
4186                                 continue;
4187                         if (conf->level == 6 &&
4188                             j == sh->qd_idx)
4189                                 continue;
4190                         s = compute_blocknr(sh, j, 0);
4191                         if (s < raid5_size(mddev, 0, 0)) {
4192                                 skipped_disk = 1;
4193                                 continue;
4194                         }
4195                         memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
4196                         set_bit(R5_Expanded, &sh->dev[j].flags);
4197                         set_bit(R5_UPTODATE, &sh->dev[j].flags);
4198                 }
4199                 if (!skipped_disk) {
4200                         set_bit(STRIPE_EXPAND_READY, &sh->state);
4201                         set_bit(STRIPE_HANDLE, &sh->state);
4202                 }
4203                 list_add(&sh->lru, &stripes);
4204         }
4205         spin_lock_irq(&conf->device_lock);
4206         if (mddev->delta_disks < 0)
4207                 conf->reshape_progress -= reshape_sectors * new_data_disks;
4208         else
4209                 conf->reshape_progress += reshape_sectors * new_data_disks;
4210         spin_unlock_irq(&conf->device_lock);
4211         /* OK, those stripes are ready. We can start scheduling
4212          * reads on the source stripes.
4213          * The source stripes are determined by mapping the first and last
4214          * block on the destination stripes.
4215          */
4216         first_sector =
4217                 raid5_compute_sector(conf, stripe_addr*(new_data_disks),
4218                                      1, &dd_idx, NULL);
4219         last_sector =
4220                 raid5_compute_sector(conf, ((stripe_addr+reshape_sectors)
4221                                             * new_data_disks - 1),
4222                                      1, &dd_idx, NULL);
4223         if (last_sector >= mddev->dev_sectors)
4224                 last_sector = mddev->dev_sectors - 1;
4225         while (first_sector <= last_sector) {
4226                 sh = get_active_stripe(conf, first_sector, 1, 0, 1);
4227                 set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
4228                 set_bit(STRIPE_HANDLE, &sh->state);
4229                 release_stripe(sh);
4230                 first_sector += STRIPE_SECTORS;
4231         }
4232         /* Now that the sources are clearly marked, we can release
4233          * the destination stripes
4234          */
4235         while (!list_empty(&stripes)) {
4236                 sh = list_entry(stripes.next, struct stripe_head, lru);
4237                 list_del_init(&sh->lru);
4238                 release_stripe(sh);
4239         }
4240         /* If this takes us to the resync_max point where we have to pause,
4241          * then we need to write out the superblock.
4242          */
4243         sector_nr += reshape_sectors;
4244         if ((sector_nr - mddev->curr_resync_completed) * 2
4245             >= mddev->resync_max - mddev->curr_resync_completed) {
4246                 /* Cannot proceed until we've updated the superblock... */
4247                 wait_event(conf->wait_for_overlap,
4248                            atomic_read(&conf->reshape_stripes) == 0);
4249                 mddev->reshape_position = conf->reshape_progress;
4250                 mddev->curr_resync_completed = mddev->curr_resync + reshape_sectors;
4251                 conf->reshape_checkpoint = jiffies;
4252                 set_bit(MD_CHANGE_DEVS, &mddev->flags);
4253                 md_wakeup_thread(mddev->thread);
4254                 wait_event(mddev->sb_wait,
4255                            !test_bit(MD_CHANGE_DEVS, &mddev->flags)
4256                            || kthread_should_stop());
4257                 spin_lock_irq(&conf->device_lock);
4258                 conf->reshape_safe = mddev->reshape_position;
4259                 spin_unlock_irq(&conf->device_lock);
4260                 wake_up(&conf->wait_for_overlap);
4261                 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
4262         }
4263         return reshape_sectors;
4264 }
4265
4266 /* FIXME go_faster isn't used */
4267 static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
4268 {
4269         raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
4270         struct stripe_head *sh;
4271         sector_t max_sector = mddev->dev_sectors;
4272         int sync_blocks;
4273         int still_degraded = 0;
4274         int i;
4275
4276         if (sector_nr >= max_sector) {
4277                 /* just being told to finish up .. nothing much to do */
4278                 unplug_slaves(mddev);
4279
4280                 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
4281                         end_reshape(conf);
4282                         return 0;
4283                 }
4284
4285                 if (mddev->curr_resync < max_sector) /* aborted */
4286                         bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
4287                                         &sync_blocks, 1);
4288                 else /* completed sync */
4289                         conf->fullsync = 0;
4290                 bitmap_close_sync(mddev->bitmap);
4291
4292                 return 0;
4293         }
4294
4295         /* Allow raid5_quiesce to complete */
4296         wait_event(conf->wait_for_overlap, conf->quiesce != 2);
4297
4298         if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
4299                 return reshape_request(mddev, sector_nr, skipped);
4300
4301         /* No need to check resync_max as we never do more than one
4302          * stripe, and as resync_max will always be on a chunk boundary,
4303          * if the check in md_do_sync didn't fire, there is no chance
4304          * of overstepping resync_max here
4305          */
4306
4307         /* if there are too many failed drives and we are trying
4308          * to resync, then assert that we are finished, because there is
4309          * nothing we can do.
4310          */
4311         if (mddev->degraded >= conf->max_degraded &&
4312             test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
4313                 sector_t rv = mddev->dev_sectors - sector_nr;
4314                 *skipped = 1;
4315                 return rv;
4316         }
4317         if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
4318             !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
4319             !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
4320                 /* we can skip this block, and probably more */
4321                 sync_blocks /= STRIPE_SECTORS;
4322                 *skipped = 1;
4323                 return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
4324         }
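        /*
         * (Illustration with invented numbers: a sync_blocks of 100 above
         * would skip 96 sectors - twelve whole 8-sector stripes, assuming
         * 4K pages - keeping sector_nr aligned to stripe boundaries.)
         */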
4325
4326
4327         bitmap_cond_end_sync(mddev->bitmap, sector_nr);
4328
4329         sh = get_active_stripe(conf, sector_nr, 0, 1, 0);
4330         if (sh == NULL) {
4331                 sh = get_active_stripe(conf, sector_nr, 0, 0, 0);
4332                 /* make sure we don't swamp the stripe cache if someone else
4333                  * is trying to get access
4334                  */
4335                 schedule_timeout_uninterruptible(1);
4336         }
4337         /* Need to check if array will still be degraded after recovery/resync
4338          * We don't need to check the 'failed' flag as when that gets set,
4339          * recovery aborts.
4340          */
4341         for (i = 0; i < conf->raid_disks; i++)
4342                 if (conf->disks[i].rdev == NULL)
4343                         still_degraded = 1;
4344
4345         bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
4346
4347         spin_lock(&sh->lock);
4348         set_bit(STRIPE_SYNCING, &sh->state);
4349         clear_bit(STRIPE_INSYNC, &sh->state);
4350         spin_unlock(&sh->lock);
4351
4352         handle_stripe(sh);
4353         release_stripe(sh);
4354
4355         return STRIPE_SECTORS;
4356 }
4357
4358 static int  retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
4359 {
4360         /* We may not be able to submit a whole bio at once as there
4361          * may not be enough stripe_heads available.
4362          * We cannot pre-allocate enough stripe_heads as we may need
4363          * more than exist in the cache (if we ever allow very large chunks).
4364          * So we do one stripe head at a time and record in
4365          * ->bi_hw_segments how many have been done.
4366          *
4367          * We *know* that this entire raid_bio is in one chunk, so
4368          * there will be only one 'dd_idx', so we need only one call to raid5_compute_sector.
4369          */
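        /*
         * Resume behaviour, sketched with invented sizes: a 256-sector
         * aligned read spans 32 steps of STRIPE_SECTORS (8).  If only 10
         * stripe_heads are free we handle steps 0-9, record scnt = 10 in
         * ->bi_hw_segments, park the bio on ->retry_read_aligned, and the
         * next pass skips straight to step 10.
         */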
4370         struct stripe_head *sh;
4371         int dd_idx;
4372         sector_t sector, logical_sector, last_sector;
4373         int scnt = 0;
4374         int remaining;
4375         int handled = 0;
4376
4377         logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
4378         sector = raid5_compute_sector(conf, logical_sector,
4379                                       0, &dd_idx, NULL);
4380         last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9);
4381
4382         for (; logical_sector < last_sector;
4383              logical_sector += STRIPE_SECTORS,
4384                      sector += STRIPE_SECTORS,
4385                      scnt++) {
4386
4387                 if (scnt < raid5_bi_hw_segments(raid_bio))
4388                         /* already done this stripe */
4389                         continue;
4390
4391                 sh = get_active_stripe(conf, sector, 0, 1, 0);
4392
4393                 if (!sh) {
4394                         /* failed to get a stripe - must wait */
4395                         raid5_set_bi_hw_segments(raid_bio, scnt);
4396                         conf->retry_read_aligned = raid_bio;
4397                         return handled;
4398                 }
4399
4400                 set_bit(R5_ReadError, &sh->dev[dd_idx].flags);
4401                 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
4402                         release_stripe(sh);
4403                         raid5_set_bi_hw_segments(raid_bio, scnt);
4404                         conf->retry_read_aligned = raid_bio;
4405                         return handled;
4406                 }
4407
4408                 handle_stripe(sh);
4409                 release_stripe(sh);
4410                 handled++;
4411         }
4412         spin_lock_irq(&conf->device_lock);
4413         remaining = raid5_dec_bi_phys_segments(raid_bio);
4414         spin_unlock_irq(&conf->device_lock);
4415         if (remaining == 0)
4416                 bio_endio(raid_bio, 0);
4417         if (atomic_dec_and_test(&conf->active_aligned_reads))
4418                 wake_up(&conf->wait_for_stripe);
4419         return handled;
4420 }
4421
4422
4423 /*
4424  * This is our raid5 kernel thread.
4425  *
4426  * We scan the hash table for stripes which can be handled now.
4427  * During the scan, completed stripes are saved for us by the interrupt
4428  * handler, so that they will not have to wait for our next wakeup.
4429  */
4430 static void raid5d(mddev_t *mddev)
4431 {
4432         struct stripe_head *sh;
4433         raid5_conf_t *conf = mddev->private;
4434         int handled;
4435
4436         pr_debug("+++ raid5d active\n");
4437
4438         md_check_recovery(mddev);
4439
4440         handled = 0;
4441         spin_lock_irq(&conf->device_lock);
4442         while (1) {
4443                 struct bio *bio;
4444
4445                 if (conf->seq_flush != conf->seq_write) {
4446                         int seq = conf->seq_flush;
4447                         spin_unlock_irq(&conf->device_lock);
4448                         bitmap_unplug(mddev->bitmap);
4449                         spin_lock_irq(&conf->device_lock);
4450                         conf->seq_write = seq;
4451                         activate_bit_delay(conf);
4452                 }
4453
4454                 while ((bio = remove_bio_from_retry(conf))) {
4455                         int ok;
4456                         spin_unlock_irq(&conf->device_lock);
4457                         ok = retry_aligned_read(conf, bio);
4458                         spin_lock_irq(&conf->device_lock);
4459                         if (!ok)
4460                                 break;
4461                         handled++;
4462                 }
4463
4464                 sh = __get_priority_stripe(conf);
4465
4466                 if (!sh)
4467                         break;
4468                 spin_unlock_irq(&conf->device_lock);
4469
4470                 handled++;
4471                 handle_stripe(sh);
4472                 release_stripe(sh);
4473                 cond_resched();
4474
4475                 spin_lock_irq(&conf->device_lock);
4476         }
4477         pr_debug("%d stripes handled\n", handled);
4478
4479         spin_unlock_irq(&conf->device_lock);
4480
4481         async_tx_issue_pending_all();
4482         unplug_slaves(mddev);
4483
4484         pr_debug("--- raid5d inactive\n");
4485 }
4486
4487 static ssize_t
4488 raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
4489 {
4490         raid5_conf_t *conf = mddev->private;
4491         if (conf)
4492                 return sprintf(page, "%d\n", conf->max_nr_stripes);
4493         else
4494                 return 0;
4495 }
4496
4497 static ssize_t
4498 raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
4499 {
4500         raid5_conf_t *conf = mddev->private;
4501         unsigned long new;
4502         int err;
4503
4504         if (len >= PAGE_SIZE)
4505                 return -EINVAL;
4506         if (!conf)
4507                 return -ENODEV;
4508
4509         if (strict_strtoul(page, 10, &new))
4510                 return -EINVAL;
4511         if (new <= 16 || new > 32768)
4512                 return -EINVAL;
4513         while (new < conf->max_nr_stripes) {
4514                 if (drop_one_stripe(conf))
4515                         conf->max_nr_stripes--;
4516                 else
4517                         break;
4518         }
4519         err = md_allow_write(mddev);
4520         if (err)
4521                 return err;
4522         while (new > conf->max_nr_stripes) {
4523                 if (grow_one_stripe(conf))
4524                         conf->max_nr_stripes++;
4525                 else break;
4526         }
4527         return len;
4528 }
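
/*
 * Illustrative usage from userspace (the sysfs path depends on the
 * array name):
 *
 *	echo 1024 > /sys/block/md0/md/stripe_cache_size
 *
 * The store routine above resizes the cache one stripe_head at a time;
 * values of 16 or below, or above 32768, are rejected with -EINVAL.
 */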
4529
4530 static struct md_sysfs_entry
4531 raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
4532                                 raid5_show_stripe_cache_size,
4533                                 raid5_store_stripe_cache_size);
4534
4535 static ssize_t
4536 raid5_show_preread_threshold(mddev_t *mddev, char *page)
4537 {
4538         raid5_conf_t *conf = mddev->private;
4539         if (conf)
4540                 return sprintf(page, "%d\n", conf->bypass_threshold);
4541         else
4542                 return 0;
4543 }
4544
4545 static ssize_t
4546 raid5_store_preread_threshold(mddev_t *mddev, const char *page, size_t len)
4547 {
4548         raid5_conf_t *conf = mddev->private;
4549         unsigned long new;
4550         if (len >= PAGE_SIZE)
4551                 return -EINVAL;
4552         if (!conf)
4553                 return -ENODEV;
4554
4555         if (strict_strtoul(page, 10, &new))
4556                 return -EINVAL;
4557         if (new > conf->max_nr_stripes)
4558                 return -EINVAL;
4559         conf->bypass_threshold = new;
4560         return len;
4561 }
4562
4563 static struct md_sysfs_entry
4564 raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
4565                                         S_IRUGO | S_IWUSR,
4566                                         raid5_show_preread_threshold,
4567                                         raid5_store_preread_threshold);
4568
4569 static ssize_t
4570 stripe_cache_active_show(mddev_t *mddev, char *page)
4571 {
4572         raid5_conf_t *conf = mddev->private;
4573         if (conf)
4574                 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
4575         else
4576                 return 0;
4577 }
4578
4579 static struct md_sysfs_entry
4580 raid5_stripecache_active = __ATTR_RO(stripe_cache_active);
4581
4582 static struct attribute *raid5_attrs[] =  {
4583         &raid5_stripecache_size.attr,
4584         &raid5_stripecache_active.attr,
4585         &raid5_preread_bypass_threshold.attr,
4586         NULL,
4587 };
4588 static struct attribute_group raid5_attrs_group = {
4589         .name = NULL,
4590         .attrs = raid5_attrs,
4591 };
4592
4593 static sector_t
4594 raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks)
4595 {
4596         raid5_conf_t *conf = mddev->private;
4597
4598         if (!sectors)
4599                 sectors = mddev->dev_sectors;
4600         if (!raid_disks)
4601                 /* size is defined by the smallest of previous and new size */
4602                 raid_disks = min(conf->raid_disks, conf->previous_raid_disks);
4603
4604         sectors &= ~((sector_t)mddev->chunk_sectors - 1);
4605         sectors &= ~((sector_t)mddev->new_chunk_sectors - 1);
4606         return sectors * (raid_disks - conf->max_degraded);
4607 }
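
/*
 * Example with invented numbers: five 976773168-sector devices in a
 * RAID5 set (max_degraded = 1) with 1024-sector chunks round down to
 * 976773120 usable sectors per device, so the array size reported is
 * 976773120 * 4 sectors.
 */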
4608
4609 static void raid5_free_percpu(raid5_conf_t *conf)
4610 {
4611         struct raid5_percpu *percpu;
4612         unsigned long cpu;
4613
4614         if (!conf->percpu)
4615                 return;
4616
4617         get_online_cpus();
4618         for_each_possible_cpu(cpu) {
4619                 percpu = per_cpu_ptr(conf->percpu, cpu);
4620                 safe_put_page(percpu->spare_page);
4621                 kfree(percpu->scribble);
4622         }
4623 #ifdef CONFIG_HOTPLUG_CPU
4624         unregister_cpu_notifier(&conf->cpu_notify);
4625 #endif
4626         put_online_cpus();
4627
4628         free_percpu(conf->percpu);
4629 }
4630
4631 static void free_conf(raid5_conf_t *conf)
4632 {
4633         shrink_stripes(conf);
4634         raid5_free_percpu(conf);
4635         kfree(conf->disks);
4636         kfree(conf->stripe_hashtbl);
4637         kfree(conf);
4638 }
4639
4640 #ifdef CONFIG_HOTPLUG_CPU
4641 static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
4642                               void *hcpu)
4643 {
4644         raid5_conf_t *conf = container_of(nfb, raid5_conf_t, cpu_notify);
4645         long cpu = (long)hcpu;
4646         struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);
4647
4648         switch (action) {
4649         case CPU_UP_PREPARE:
4650         case CPU_UP_PREPARE_FROZEN:
4651                 if (conf->level == 6 && !percpu->spare_page)
4652                         percpu->spare_page = alloc_page(GFP_KERNEL);
4653                 if (!percpu->scribble)
4654                         percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
4655
4656                 if (!percpu->scribble ||
4657                     (conf->level == 6 && !percpu->spare_page)) {
4658                         safe_put_page(percpu->spare_page);
4659                         kfree(percpu->scribble);
4660                         pr_err("%s: failed memory allocation for cpu%ld\n",
4661                                __func__, cpu);
4662                         return NOTIFY_BAD;
4663                 }
4664                 break;
4665         case CPU_DEAD:
4666         case CPU_DEAD_FROZEN:
4667                 safe_put_page(percpu->spare_page);
4668                 kfree(percpu->scribble);
4669                 percpu->spare_page = NULL;
4670                 percpu->scribble = NULL;
4671                 break;
4672         default:
4673                 break;
4674         }
4675         return NOTIFY_OK;
4676 }
4677 #endif
4678
4679 static int raid5_alloc_percpu(raid5_conf_t *conf)
4680 {
4681         unsigned long cpu;
4682         struct page *spare_page;
4683         struct raid5_percpu __percpu *allcpus;
4684         void *scribble;
4685         int err;
4686
4687         allcpus = alloc_percpu(struct raid5_percpu);
4688         if (!allcpus)
4689                 return -ENOMEM;
4690         conf->percpu = allcpus;
4691
4692         get_online_cpus();
4693         err = 0;
4694         for_each_present_cpu(cpu) {
4695                 if (conf->level == 6) {
4696                         spare_page = alloc_page(GFP_KERNEL);
4697                         if (!spare_page) {
4698                                 err = -ENOMEM;
4699                                 break;
4700                         }
4701                         per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
4702                 }
4703                 scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
4704                 if (!scribble) {
4705                         err = -ENOMEM;
4706                         break;
4707                 }
4708                 per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
4709         }
4710 #ifdef CONFIG_HOTPLUG_CPU
4711         conf->cpu_notify.notifier_call = raid456_cpu_notify;
4712         conf->cpu_notify.priority = 0;
4713         if (err == 0)
4714                 err = register_cpu_notifier(&conf->cpu_notify);
4715 #endif
4716         put_online_cpus();
4717
4718         return err;
4719 }
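
/*
 * Sketch of how this per-cpu data is consumed (this mirrors the
 * get_cpu/put_cpu pattern used by the stripe-handling code earlier in
 * this file):
 *
 *	cpu = get_cpu();
 *	percpu = per_cpu_ptr(conf->percpu, cpu);
 *	... use percpu->scribble and, for raid6, percpu->spare_page ...
 *	put_cpu();
 *
 * which is why the hotplug notifier must keep a scribble buffer (and
 * spare page) allocated for every CPU that may come online.
 */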
4720
4721 static raid5_conf_t *setup_conf(mddev_t *mddev)
4722 {
4723         raid5_conf_t *conf;
4724         int raid_disk, memory, max_disks;
4725         mdk_rdev_t *rdev;
4726         struct disk_info *disk;
4727
4728         if (mddev->new_level != 5
4729             && mddev->new_level != 4
4730             && mddev->new_level != 6) {
4731                 printk(KERN_ERR "raid5: %s: raid level not set to 4/5/6 (%d)\n",
4732                        mdname(mddev), mddev->new_level);
4733                 return ERR_PTR(-EIO);
4734         }
4735         if ((mddev->new_level == 5
4736              && !algorithm_valid_raid5(mddev->new_layout)) ||
4737             (mddev->new_level == 6
4738              && !algorithm_valid_raid6(mddev->new_layout))) {
4739                 printk(KERN_ERR "raid5: %s: layout %d not supported\n",
4740                        mdname(mddev), mddev->new_layout);
4741                 return ERR_PTR(-EIO);
4742         }
4743         if (mddev->new_level == 6 && mddev->raid_disks < 4) {
4744                 printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n",
4745                        mdname(mddev), mddev->raid_disks);
4746                 return ERR_PTR(-EINVAL);
4747         }
4748
4749         if (!mddev->new_chunk_sectors ||
4750             (mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
4751             !is_power_of_2(mddev->new_chunk_sectors)) {
4752                 printk(KERN_ERR "raid5: invalid chunk size %d for %s\n",
4753                        mddev->new_chunk_sectors << 9, mdname(mddev));
4754                 return ERR_PTR(-EINVAL);
4755         }
4756
4757         conf = kzalloc(sizeof(raid5_conf_t), GFP_KERNEL);
4758         if (conf == NULL)
4759                 goto abort;
4760         spin_lock_init(&conf->device_lock);
4761         init_waitqueue_head(&conf->wait_for_stripe);
4762         init_waitqueue_head(&conf->wait_for_overlap);
4763         INIT_LIST_HEAD(&conf->handle_list);
4764         INIT_LIST_HEAD(&conf->hold_list);
4765         INIT_LIST_HEAD(&conf->delayed_list);
4766         INIT_LIST_HEAD(&conf->bitmap_list);
4767         INIT_LIST_HEAD(&conf->inactive_list);
4768         atomic_set(&conf->active_stripes, 0);
4769         atomic_set(&conf->preread_active_stripes, 0);
4770         atomic_set(&conf->active_aligned_reads, 0);
4771         conf->bypass_threshold = BYPASS_THRESHOLD;
4772
4773         conf->raid_disks = mddev->raid_disks;
4774         if (mddev->reshape_position == MaxSector)
4775                 conf->previous_raid_disks = mddev->raid_disks;
4776         else
4777                 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
4778         max_disks = max(conf->raid_disks, conf->previous_raid_disks);
4779         conf->scribble_len = scribble_len(max_disks);
4780
4781         conf->disks = kzalloc(max_disks * sizeof(struct disk_info),
4782                               GFP_KERNEL);
4783         if (!conf->disks)
4784                 goto abort;
4785
4786         conf->mddev = mddev;
4787
4788         if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
4789                 goto abort;
4790
4791         conf->level = mddev->new_level;
4792         if (raid5_alloc_percpu(conf) != 0)
4793                 goto abort;
4794
4795         pr_debug("raid5: run(%s) called.\n", mdname(mddev));
4796
4797         list_for_each_entry(rdev, &mddev->disks, same_set) {
4798                 raid_disk = rdev->raid_disk;
4799                 if (raid_disk >= max_disks
4800                     || raid_disk < 0)
4801                         continue;
4802                 disk = conf->disks + raid_disk;
4803
4804                 disk->rdev = rdev;
4805
4806                 if (test_bit(In_sync, &rdev->flags)) {
4807                         char b[BDEVNAME_SIZE];
4808                         printk(KERN_INFO "raid5: device %s operational as raid"
4809                                 " disk %d\n", bdevname(rdev->bdev,b),
4810                                 raid_disk);
4811                 } else
4812                         /* Cannot rely on bitmap to complete recovery */
4813                         conf->fullsync = 1;
4814         }
4815
4816         conf->chunk_sectors = mddev->new_chunk_sectors;
4817         conf->level = mddev->new_level;
4818         if (conf->level == 6)
4819                 conf->max_degraded = 2;
4820         else
4821                 conf->max_degraded = 1;
4822         conf->algorithm = mddev->new_layout;
4823         conf->max_nr_stripes = NR_STRIPES;
4824         conf->reshape_progress = mddev->reshape_position;
4825         if (conf->reshape_progress != MaxSector) {
4826                 conf->prev_chunk_sectors = mddev->chunk_sectors;
4827                 conf->prev_algo = mddev->layout;
4828         }
4829
4830         memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
4831                  max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
4832         if (grow_stripes(conf, conf->max_nr_stripes)) {
4833                 printk(KERN_ERR
4834                         "raid5: couldn't allocate %dkB for buffers\n", memory);
4835                 goto abort;
4836         } else
4837                 printk(KERN_INFO "raid5: allocated %dkB for %s\n",
4838                         memory, mdname(mddev));
4839
4840         conf->thread = md_register_thread(raid5d, mddev, NULL);
4841         if (!conf->thread) {
4842                 printk(KERN_ERR
4843                        "raid5: couldn't allocate thread for %s\n",
4844                        mdname(mddev));
4845                 goto abort;
4846         }
4847
4848         return conf;
4849
4850  abort:
4851         if (conf) {
4852                 free_conf(conf);
4853                 return ERR_PTR(-EIO);
4854         } else
4855                 return ERR_PTR(-ENOMEM);
4856 }
4857
4858
4859 static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded)
4860 {
4861         switch (algo) {
4862         case ALGORITHM_PARITY_0:
4863                 if (raid_disk < max_degraded)
4864                         return 1;
4865                 break;
4866         case ALGORITHM_PARITY_N:
4867                 if (raid_disk >= raid_disks - max_degraded)
4868                         return 1;
4869                 break;
4870         case ALGORITHM_PARITY_0_6:
4871                 if (raid_disk == 0 ||
4872                     raid_disk == raid_disks - 1)
4873                         return 1;
4874                 break;
4875         case ALGORITHM_LEFT_ASYMMETRIC_6:
4876         case ALGORITHM_RIGHT_ASYMMETRIC_6:
4877         case ALGORITHM_LEFT_SYMMETRIC_6:
4878         case ALGORITHM_RIGHT_SYMMETRIC_6:
4879                 if (raid_disk == raid_disks - 1)
4880                         return 1;
4881         }
4882         return 0;
4883 }
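
/*
 * For example, in a 6-device RAID6 using ALGORITHM_PARITY_0 the two
 * parity devices come first, so only_parity(0, ALGORITHM_PARITY_0, 6, 2)
 * and the equivalent call for raid_disk 1 both return 1, while the data
 * devices 2-5 return 0.
 */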
4884
4885 static int run(mddev_t *mddev)
4886 {
4887         raid5_conf_t *conf;
4888         int working_disks = 0, chunk_size;
4889         int dirty_parity_disks = 0;
4890         mdk_rdev_t *rdev;
4891         sector_t reshape_offset = 0;
4892
4893         if (mddev->recovery_cp != MaxSector)
4894                 printk(KERN_NOTICE "raid5: %s is not clean"
4895                        " -- starting background reconstruction\n",
4896                        mdname(mddev));
4897         if (mddev->reshape_position != MaxSector) {
4898                 /* Check that we can continue the reshape.
4899                  * Currently only the number of disks can change; it must
4900                  * increase, and we must be past the point where
4901                  * a stripe over-writes itself.
4902                  */
4903                 sector_t here_new, here_old;
4904                 int old_disks;
4905                 int max_degraded = (mddev->level == 6 ? 2 : 1);
4906
4907                 if (mddev->new_level != mddev->level) {
4908                         printk(KERN_ERR "raid5: %s: unsupported reshape "
4909                                "required - aborting.\n",
4910                                mdname(mddev));
4911                         return -EINVAL;
4912                 }
4913                 old_disks = mddev->raid_disks - mddev->delta_disks;
4914                 /* reshape_position must be on a new-stripe boundary, and one
4915                  * further up in new geometry must map after here in old
4916                  * geometry.
4917                  */
4918                 here_new = mddev->reshape_position;
4919                 if (sector_div(here_new, mddev->new_chunk_sectors *
4920                                (mddev->raid_disks - max_degraded))) {
4921                         printk(KERN_ERR "raid5: reshape_position not "
4922                                "on a stripe boundary\n");
4923                         return -EINVAL;
4924                 }
4925                 reshape_offset = here_new * mddev->new_chunk_sectors;
4926                 /* here_new is the stripe we will write to */
4927                 here_old = mddev->reshape_position;
4928                 sector_div(here_old, mddev->chunk_sectors *
4929                            (old_disks-max_degraded));
4930                 /* here_old is the first stripe that we might need to read
4931                  * from */
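                /*
                 * Numeric sketch with invented values: growing a RAID5
                 * from 4 to 5 devices with 128-sector chunks and
                 * reshape_position = 10240 gives here_new = 10240 /
                 * (128 * 4) = 20 with no remainder, so the boundary
                 * check passed; here_old = 10240 / (128 * 3) = 26
                 * (truncated).  As 20 * 128 < 26 * 128, the
                 * delta_disks > 0 test below does not report "too early"
                 * and the reshape may continue.
                 */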
4932                 if (mddev->delta_disks == 0) {
4933                         /* We cannot be sure it is safe to start an in-place
4934                          * reshape.  It is only safe if user-space is monitoring
4935                          * and taking constant backups.
4936                          * mdadm always starts a situation like this in
4937                          * readonly mode so it can take control before
4938                          * allowing any writes.  So just check for that.
4939                          */
4940                         if ((here_new * mddev->new_chunk_sectors != 
4941                              here_old * mddev->chunk_sectors) ||
4942                             mddev->ro == 0) {
4943                                 printk(KERN_ERR "raid5: in-place reshape must be started"
4944                                        " in read-only mode - aborting\n");
4945                                 return -EINVAL;
4946                         }
4947                 } else if (mddev->delta_disks < 0
4948                     ? (here_new * mddev->new_chunk_sectors <=
4949                        here_old * mddev->chunk_sectors)
4950                     : (here_new * mddev->new_chunk_sectors >=
4951                        here_old * mddev->chunk_sectors)) {
4952                         /* Reading from the same stripe as writing to - bad */
4953                         printk(KERN_ERR "raid5: reshape_position too early for "
4954                                "auto-recovery - aborting.\n");
4955                         return -EINVAL;
4956                 }
4957                 printk(KERN_INFO "raid5: reshape will continue\n");
4958                 /* OK, we should be able to continue; */
4959         } else {
4960                 BUG_ON(mddev->level != mddev->new_level);
4961                 BUG_ON(mddev->layout != mddev->new_layout);
4962                 BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors);
4963                 BUG_ON(mddev->delta_disks != 0);
4964         }
4965
4966         if (mddev->private == NULL)
4967                 conf = setup_conf(mddev);
4968         else
4969                 conf = mddev->private;
4970
4971         if (IS_ERR(conf))
4972                 return PTR_ERR(conf);
4973
4974         mddev->thread = conf->thread;
4975         conf->thread = NULL;
4976         mddev->private = conf;
4977
4978         /*
4979          * 0 for a fully functional array, 1 or 2 for a degraded array.
4980          */
4981         list_for_each_entry(rdev, &mddev->disks, same_set) {
4982                 if (rdev->raid_disk < 0)
4983                         continue;
4984                 if (test_bit(In_sync, &rdev->flags))
4985                         working_disks++;
4986                 /* This disk is not fully in-sync.  However if it
4987                  * just stored parity (beyond the recovery_offset),
4988                  * then we don't need to be concerned about the
4989                  * array being dirty.
4990                  * When reshape goes 'backwards', we never have
4991                  * partially completed devices, so we only need
4992                  * to worry about reshape going forwards.
4993                  */
4994                 /* Hack because v0.91 doesn't store recovery_offset properly. */
4995                 if (mddev->major_version == 0 &&
4996                     mddev->minor_version > 90)
4997                         rdev->recovery_offset = reshape_offset;
4998
4999                 printk(KERN_DEBUG "%d: w=%d pa=%d pr=%d m=%d a=%d r=%d op1=%d op2=%d\n",
5000                        rdev->raid_disk, working_disks, conf->prev_algo,
5001                        conf->previous_raid_disks, conf->max_degraded,
5002                        conf->algorithm, conf->raid_disks,
5003                        only_parity(rdev->raid_disk,
5004                                    conf->prev_algo,
5005                                    conf->previous_raid_disks,
5006                                    conf->max_degraded),
5007                        only_parity(rdev->raid_disk,
5008                                    conf->algorithm,
5009                                    conf->raid_disks,
5010                                    conf->max_degraded));
5011                 if (rdev->recovery_offset < reshape_offset) {
5012                         /* We need to check old and new layout */
5013                         if (!only_parity(rdev->raid_disk,
5014                                          conf->algorithm,
5015                                          conf->raid_disks,
5016                                          conf->max_degraded))
5017                                 continue;
5018                 }
5019                 if (!only_parity(rdev->raid_disk,
5020                                  conf->prev_algo,
5021                                  conf->previous_raid_disks,
5022                                  conf->max_degraded))
5023                         continue;
5024                 dirty_parity_disks++;
5025         }
5026
5027         mddev->degraded = (max(conf->raid_disks, conf->previous_raid_disks)
5028                            - working_disks);
5029
5030         if (mddev->degraded > conf->max_degraded) {
5031                 printk(KERN_ERR "raid5: not enough operational devices for %s"
5032                         " (%d/%d failed)\n",
5033                         mdname(mddev), mddev->degraded, conf->raid_disks);
5034                 goto abort;
5035         }
5036
5037         /* device size must be a multiple of chunk size */
5038         mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
5039         mddev->resync_max_sectors = mddev->dev_sectors;
5040
5041         if (mddev->degraded > dirty_parity_disks &&
5042             mddev->recovery_cp != MaxSector) {
5043                 if (mddev->ok_start_degraded)
5044                         printk(KERN_WARNING
5045                                "raid5: starting dirty degraded array: %s"
5046                                "- data corruption possible.\n",
5047                                mdname(mddev));
5048                 else {
5049                         printk(KERN_ERR
5050                                "raid5: cannot start dirty degraded array for %s\n",
5051                                mdname(mddev));
5052                         goto abort;
5053                 }
5054         }
5055
5056         if (mddev->degraded == 0)
5057                 printk(KERN_INFO "raid5: raid level %d set %s active with %d out of %d"
5058                        " devices, algorithm %d\n", conf->level, mdname(mddev),
5059                        mddev->raid_disks-mddev->degraded, mddev->raid_disks,
5060                        mddev->new_layout);
5061         else
5062                 printk(KERN_ALERT "raid5: raid level %d set %s active with %d"
5063                         " out of %d devices, algorithm %d\n", conf->level,
5064                         mdname(mddev), mddev->raid_disks - mddev->degraded,
5065                         mddev->raid_disks, mddev->new_layout);
5066
5067         print_raid5_conf(conf);
5068
5069         if (conf->reshape_progress != MaxSector) {
5070                 printk(KERN_INFO "raid5: starting reshape thread\n");
5071                 conf->reshape_safe = conf->reshape_progress;
5072                 atomic_set(&conf->reshape_stripes, 0);
5073                 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
5074                 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
5075                 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
5076                 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
5077                 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
5078                                                         "reshape");
5079         }

        /* read-ahead size must cover two whole stripes, which is
         * 2 * (datadisks) * chunksize, where datadisks is the number of
         * raid devices minus max_degraded.  For example (4k pages), with
         * 4 data disks and a 64k chunk a stripe is 64 pages, so ra_pages
         * is raised to at least 128 pages.
         */
        {
                int data_disks = conf->previous_raid_disks - conf->max_degraded;
                int stripe = data_disks *
                        ((mddev->chunk_sectors << 9) / PAGE_SIZE);
                if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
                        mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
        }

        /* Ok, everything is just fine now */
        if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
                printk(KERN_WARNING
                       "raid5: failed to create sysfs attributes for %s\n",
                       mdname(mddev));

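        /*
         * Wire the array into the block layer: the queue shares the
         * conf's device_lock, and the unplug and congestion callbacks
         * let writeback flush and throttle against the stripe cache.
         */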
        mddev->queue->queue_lock = &conf->device_lock;

        mddev->queue->unplug_fn = raid5_unplug_device;
        mddev->queue->backing_dev_info.congested_data = mddev;
        mddev->queue->backing_dev_info.congested_fn = raid5_congested;

        md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));

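        /*
         * Register the bvec merge callback so bios don't straddle chunk
         * boundaries, and export I/O hints: minimum efficient I/O is one
         * chunk, optimal I/O is a full stripe of data chunks.
         */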
        blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
        chunk_size = mddev->chunk_sectors << 9;
        blk_queue_io_min(mddev->queue, chunk_size);
        blk_queue_io_opt(mddev->queue, chunk_size *
                         (conf->raid_disks - conf->max_degraded));

        list_for_each_entry(rdev, &mddev->disks, same_set)
                disk_stack_limits(mddev->gendisk, rdev->bdev,
                                  rdev->data_offset << 9);

        return 0;
abort:
        md_unregister_thread(mddev->thread);
        mddev->thread = NULL;
        if (conf) {
                print_raid5_conf(conf);
                free_conf(conf);
        }
        mddev->private = NULL;
        printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev));
        return -EIO;
}

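/*
 * Shut the array down: stop the managing thread, detach the queue
 * callbacks (blk_sync_queue ensures no unplug call is still running),
 * remove the sysfs attributes, and free the conf.
 */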
static int stop(mddev_t *mddev)
{
        raid5_conf_t *conf = mddev->private;

        md_unregister_thread(mddev->thread);
        mddev->thread = NULL;
        mddev->queue->backing_dev_info.congested_fn = NULL;
        blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
        sysfs_remove_group(&mddev->kobj, &raid5_attrs_group);
        free_conf(conf);
        mddev->private = NULL;
        return 0;
}

#ifdef DEBUG
static void print_sh(struct seq_file *seq, struct stripe_head *sh)
{
        int i;

        seq_printf(seq, "sh %llu, pd_idx %d, state %ld.\n",
                   (unsigned long long)sh->sector, sh->pd_idx, sh->state);
        seq_printf(seq, "sh %llu, count %d.\n",
                   (unsigned long long)sh->sector, atomic_read(&sh->count));
        seq_printf(seq, "sh %llu, ", (unsigned long long)sh->sector);
        for (i = 0; i < sh->disks; i++) {
                seq_printf(seq, "(cache%d: %p %ld) ",
                           i, sh->dev[i].page, sh->dev[i].flags);
        }
        seq_printf(seq, "\n");
}

static void printall(struct seq_file *seq, raid5_conf_t *conf)
{
        struct stripe_head *sh;
        struct hlist_node *hn;
        int i;

        spin_lock_irq(&conf->device_lock);
        for (i = 0; i < NR_HASH; i++) {
                hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) {
                        if (sh->raid_conf != conf)
                                continue;
                        print_sh(seq, sh);
                }
        }
        spin_unlock_irq(&conf->device_lock);
}
#endif

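/*
 * Report this array in /proc/mdstat, e.g.
 *      level 5, 64k chunk, algorithm 2 [4/3] [UUU_]
 * where [total/working] is followed by one flag per slot:
 * 'U' for in sync, '_' for missing or out of sync.
 */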
static void status(struct seq_file *seq, mddev_t *mddev)
{
        raid5_conf_t *conf = mddev->private;
        int i;

        seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
                   mddev->chunk_sectors / 2, mddev->layout);
        seq_printf(seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
        for (i = 0; i < conf->raid_disks; i++)
                seq_printf(seq, "%s",
                           conf->disks[i].rdev &&
                           test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
        seq_printf(seq, "]");
#ifdef DEBUG
        seq_printf(seq, "\n");
        printall(seq, conf);
#endif
}

static void print_raid5_conf(raid5_conf_t *conf)
{
        int i;
        struct disk_info *tmp;

        printk("RAID5 conf printout:\n");
        if (!conf) {
                printk("(conf==NULL)\n");
                return;
        }
        printk(" --- rd:%d wd:%d\n", conf->raid_disks,
               conf->raid_disks - conf->mddev->degraded);

        for (i = 0; i < conf->raid_disks; i++) {
                char b[BDEVNAME_SIZE];
                tmp = conf->disks + i;
                if (tmp->rdev)
                        printk(" disk %d, o:%d, dev:%s\n",
                               i, !test_bit(Faulty, &tmp->rdev->flags),
                               bdevname(tmp->rdev->bdev, b));
        }
}

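/*
 * Mark any non-faulty device that is not yet In_sync as active,
 * decrementing the degraded count for each device so promoted.
 */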
static int raid5_spare_active(mddev_t *mddev)
{
        int i;
        raid5_conf_t *conf = mddev->private;
        struct disk_info *tmp;

        for (i = 0; i < conf->raid_disks; i++) {
                tmp = conf->disks + i;
                if (tmp->rdev
                    && !test_bit(Faulty, &tmp->rdev->flags)
                    && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
                        unsigned long flags;
                        spin_lock_irqsave(&conf->device_lock, flags);
                        mddev->degraded--;
                        spin_unlock_irqrestore(&conf->device_lock, flags);
                }
        }
        print_raid5_conf(conf);
        return 0;
}

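/*
 * Hot-remove a device.  Fails with -EBUSY while the device is still
 * in_sync or has I/O in flight, or when it is a working device that a
 * degraded array still needs for recovery.
 */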
static int raid5_remove_disk(mddev_t *mddev, int number)
{
        raid5_conf_t *conf = mddev->private;
        int err = 0;
        mdk_rdev_t *rdev;
        struct disk_info *p = conf->disks + number;

        print_raid5_conf(conf);
        rdev = p->rdev;
        if (rdev) {
                if (number >= conf->raid_disks &&
                    conf->reshape_progress == MaxSector)
                        clear_bit(In_sync, &rdev->flags);

                if (test_bit(In_sync, &rdev->flags) ||
                    atomic_read(&rdev->nr_pending)) {
                        err = -EBUSY;
                        goto abort;
                }
                /* Only remove non-faulty devices if recovery
                 * isn't possible.
                 */
                if (!test_bit(Faulty, &rdev->flags) &&
                    mddev->degraded <= conf->max_degraded &&
                    number < conf->raid_disks) {
                        err = -EBUSY;
                        goto abort;
                }
                p->rdev = NULL;
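                /* Wait for lockless readers that may have picked up the
                 * old p->rdev pointer before re-checking nr_pending. */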
                synchronize_rcu();
                if (atomic_read(&rdev->nr_pending)) {
                        /* lost the race, try later */
                        err = -EBUSY;
                        p->rdev = rdev;
                }
        }
abort:
        print_raid5_conf(conf);
        return err;
}

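/*
 * Hot-add a device.  Prefer the slot the device previously occupied
 * (saved_raid_disk); if it lands anywhere else, a full resync is
 * forced instead of a bitmap-based partial one.
 */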
static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
{
        raid5_conf_t *conf = mddev->private;
        int err = -EEXIST;
        int disk;
        struct disk_info *p;
        int first = 0;
        int last = conf->raid_disks - 1;

        if (mddev->degraded > conf->max_degraded)
                /* no point adding a device */
                return -EINVAL;

        if (rdev->raid_disk >= 0)
                first = last = rdev->raid_disk;

        /*
         * find the disk ... but prefer rdev->saved_raid_disk
         * if possible.
         */
        if (rdev->saved_raid_disk >= 0 &&
            rdev->saved_raid_disk >= first &&
            conf->disks[rdev->saved_raid_disk].rdev == NULL)
                disk = rdev->saved_raid_disk;
        else
                disk = first;
        for ( ; disk <= last ; disk++)
                if ((p = conf->disks + disk)->rdev == NULL) {
                        clear_bit(In_sync, &rdev->flags);
                        rdev->raid_disk = disk;
                        err = 0;
                        if (rdev->saved_raid_disk != disk)
                                conf->fullsync = 1;
                        rcu_assign_pointer(p->rdev, rdev);
                        break;
                }
        print_raid5_conf(conf);
        return err;
}

static int raid5_resize(mddev_t *mddev, sector_t sectors)
{
        /* no resync is happening, and there is enough space
         * on all devices, so we can resize.
         * We need to make sure resync covers any new space.
         * If the array is shrinking we should possibly wait until
         * any io in the removed space completes, but it hardly seems
         * worth it.
         */
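        /* Round the new size down to a whole number of chunks. */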
        sectors &= ~((sector_t)mddev->chunk_sectors - 1);
        md_set_array_sectors(mddev, raid5_size(mddev, sectors,
                                               mddev->raid_disks));
        if (mddev->array_sectors >
            raid5_size(mddev, sectors, mddev->raid_disks))