/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>
#include <linux/workqueue.h>

#include "dm-exception-store.h"

#define DM_MSG_PREFIX "snapshots"

static const char dm_snapshot_merge_target_name[] = "snapshot-merge";

#define dm_target_is_snapshot_merge(ti) \
        ((ti)->type->name == dm_snapshot_merge_target_name)

/*
 * The percentage increment at which we wake up users
 */
#define WAKE_UP_PERCENT 5

/*
 * kcopyd priority of snapshot operations
 */
#define SNAPSHOT_COPY_PRIORITY 2

/*
 * Reserve 1MB for each snapshot initially (with minimum of 1 page).
 */
#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)

/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

#define DM_TRACKED_CHUNK_HASH_SIZE      16
#define DM_TRACKED_CHUNK_HASH(x)        ((unsigned long)(x) & \
                                         (DM_TRACKED_CHUNK_HASH_SIZE - 1))

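/*
 * Hash table of exceptions, keyed by the chunk number on the origin
 * device (old_chunk); see exception_hash() and dm_lookup_exception()
 * below.
 */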
struct dm_exception_table {
        uint32_t hash_mask;
        unsigned hash_shift;
        struct list_head *table;
};

struct dm_snapshot {
        struct rw_semaphore lock;

        struct dm_dev *origin;
        struct dm_dev *cow;

        struct dm_target *ti;

        /* List of snapshots per Origin */
        struct list_head list;

        /*
         * You can't use a snapshot if this is 0 (e.g. if full).
         * A snapshot-merge target never clears this.
         */
        int valid;

        /* Origin writes don't trigger exceptions until this is set */
        int active;

        /* Whether or not owning mapped_device is suspended */
        int suspended;

        atomic_t pending_exceptions_count;

        mempool_t *pending_pool;

        struct dm_exception_table pending;
        struct dm_exception_table complete;

        /*
         * pe_lock protects all pending_exception operations and access
         * as well as the snapshot_bios list.
         */
        spinlock_t pe_lock;

        /* Chunks with outstanding reads */
        spinlock_t tracked_chunk_lock;
        mempool_t *tracked_chunk_pool;
        struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];

        /* The on disk metadata handler */
        struct dm_exception_store *store;

        struct dm_kcopyd_client *kcopyd_client;

        /* Queue of snapshot writes for ksnapd to flush */
        struct bio_list queued_bios;
        struct work_struct queued_bios_work;

        /* Wait for events based on state_bits */
        unsigned long state_bits;

        /* Range of chunks currently being merged. */
        chunk_t first_merging_chunk;
        int num_merging_chunks;

        /*
         * The merge operation failed if this flag is set.
         * Failure modes are handled as follows:
         * - I/O error reading the header
         *      => don't load the target; abort.
         * - Header does not have "valid" flag set
         *      => use the origin; forget about the snapshot.
         * - I/O error when reading exceptions
         *      => don't load the target; abort.
         *         (We can't use the intermediate origin state.)
         * - I/O error while merging
         *      => stop merging; set merge_failed; process I/O normally.
         */
        int merge_failed;

        /*
         * Incoming bios that overlap with chunks being merged must wait
         * for them to be committed.
         */
        struct bio_list bios_queued_during_merge;
};

/*
 * state_bits:
 *   RUNNING_MERGE  - Merge operation is in progress.
 *   SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
 *                    cleared afterwards.
 */
#define RUNNING_MERGE          0
#define SHUTDOWN_MERGE         1

struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
{
        return s->origin;
}
EXPORT_SYMBOL(dm_snap_origin);

struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
{
        return s->cow;
}
EXPORT_SYMBOL(dm_snap_cow);

static struct workqueue_struct *ksnapd;
static void flush_queued_bios(struct work_struct *work);

static sector_t chunk_to_sector(struct dm_exception_store *store,
                                chunk_t chunk)
{
        return chunk << store->chunk_shift;
}

static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
{
        /*
         * There is only ever one instance of a particular block
         * device so we can compare pointers safely.
         */
        return lhs == rhs;
}

struct dm_snap_pending_exception {
        struct dm_exception e;

        /*
         * Origin buffers waiting for this to complete are held
         * in a bio list
         */
        struct bio_list origin_bios;
        struct bio_list snapshot_bios;

        /* Pointer back to snapshot context */
        struct dm_snapshot *snap;

        /*
         * 1 indicates the exception has already been sent to
         * kcopyd.
         */
        int started;
};

/*
 * Slab caches for the completed and pending exception structures.
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;

struct dm_snap_tracked_chunk {
        struct hlist_node node;
        chunk_t chunk;
};

static struct kmem_cache *tracked_chunk_cache;

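/*
 * Note that an I/O is in flight to the given chunk.
 * __check_for_conflicting_io() waits for tracked chunks to drain
 * before an exception for that chunk is completed or merged.
 */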
static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
                                                 chunk_t chunk)
{
        struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
                                                        GFP_NOIO);
        unsigned long flags;

        c->chunk = chunk;

        spin_lock_irqsave(&s->tracked_chunk_lock, flags);
        hlist_add_head(&c->node,
                       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
        spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

        return c;
}

static void stop_tracking_chunk(struct dm_snapshot *s,
                                struct dm_snap_tracked_chunk *c)
{
        unsigned long flags;

        spin_lock_irqsave(&s->tracked_chunk_lock, flags);
        hlist_del(&c->node);
        spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

        mempool_free(c, s->tracked_chunk_pool);
}

static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
        struct dm_snap_tracked_chunk *c;
        struct hlist_node *hn;
        int found = 0;

        spin_lock_irq(&s->tracked_chunk_lock);

        hlist_for_each_entry(c, hn,
            &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
                if (c->chunk == chunk) {
                        found = 1;
                        break;
                }
        }

        spin_unlock_irq(&s->tracked_chunk_lock);

        return found;
}

/*
 * This conflicting I/O is extremely improbable in the caller,
 * so msleep(1) is sufficient and there is no need for a wait queue.
 */
static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
{
        while (__chunk_is_tracked(s, chunk))
                msleep(1);
}

/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
        /* The origin device */
        struct block_device *bdev;

        struct list_head hash_list;

        /* List of snapshots for this origin */
        struct list_head snapshots;
};

/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;

static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock);
static uint64_t _pending_exceptions_done_count;

static int init_origin_hash(void)
{
        int i;

        _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
                           GFP_KERNEL);
        if (!_origins) {
                DMERR("unable to allocate memory");
                return -ENOMEM;
        }

        for (i = 0; i < ORIGIN_HASH_SIZE; i++)
                INIT_LIST_HEAD(_origins + i);
        init_rwsem(&_origins_lock);

        return 0;
}

static void exit_origin_hash(void)
{
        kfree(_origins);
}

static unsigned origin_hash(struct block_device *bdev)
{
        return bdev->bd_dev & ORIGIN_MASK;
}

static struct origin *__lookup_origin(struct block_device *origin)
{
        struct list_head *ol;
        struct origin *o;

        ol = &_origins[origin_hash(origin)];
        list_for_each_entry (o, ol, hash_list)
                if (bdev_equal(o->bdev, origin))
                        return o;

        return NULL;
}

static void __insert_origin(struct origin *o)
{
        struct list_head *sl = &_origins[origin_hash(o->bdev)];
        list_add_tail(&o->hash_list, sl);
}

/*
 * _origins_lock must be held when calling this function.
 * Returns number of snapshots registered using the supplied cow device, plus:
 * snap_src - a snapshot suitable for use as a source of exception handover
 * snap_dest - a snapshot capable of receiving exception handover.
 * snap_merge - an existing snapshot-merge target linked to the same origin.
 *   There can be at most one snapshot-merge target. The parameter is optional.
 *
 * Possible return values and states of snap_src and snap_dest.
 *   0: NULL, NULL  - first new snapshot
 *   1: snap_src, NULL - normal snapshot
 *   2: snap_src, snap_dest  - waiting for handover
 *   2: snap_src, NULL - handed over, waiting for old to be deleted
 *   1: NULL, snap_dest - source got destroyed without handover
 */
static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
                                        struct dm_snapshot **snap_src,
                                        struct dm_snapshot **snap_dest,
                                        struct dm_snapshot **snap_merge)
{
        struct dm_snapshot *s;
        struct origin *o;
        int count = 0;
        int active;

        o = __lookup_origin(snap->origin->bdev);
        if (!o)
                goto out;

        list_for_each_entry(s, &o->snapshots, list) {
                if (dm_target_is_snapshot_merge(s->ti) && snap_merge)
                        *snap_merge = s;
                if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
                        continue;

                down_read(&s->lock);
                active = s->active;
                up_read(&s->lock);

                if (active) {
                        if (snap_src)
                                *snap_src = s;
                } else if (snap_dest)
                        *snap_dest = s;

                count++;
        }

out:
        return count;
}

/*
 * On success, returns 1 if this snapshot is a handover destination,
 * otherwise returns 0.
 */
static int __validate_exception_handover(struct dm_snapshot *snap)
{
        struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
        struct dm_snapshot *snap_merge = NULL;

        /* Does snapshot need exceptions handed over to it? */
        if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
                                          &snap_merge) == 2) ||
            snap_dest) {
                snap->ti->error = "Snapshot cow pairing for exception "
                                  "table handover failed";
                return -EINVAL;
        }

        /*
         * If no snap_src was found, snap cannot become a handover
         * destination.
         */
        if (!snap_src)
                return 0;

        /*
         * Non-snapshot-merge handover?
         */
        if (!dm_target_is_snapshot_merge(snap->ti))
                return 1;

        /*
         * Do not allow more than one merging snapshot.
         */
        if (snap_merge) {
                snap->ti->error = "A snapshot is already merging.";
                return -EINVAL;
        }

        if (!snap_src->store->type->prepare_merge ||
            !snap_src->store->type->commit_merge) {
                snap->ti->error = "Snapshot exception store does not "
                                  "support snapshot-merge.";
                return -EINVAL;
        }

        return 1;
}

static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
{
        struct dm_snapshot *l;

        /* Sort the list according to chunk size, largest-first smallest-last */
        list_for_each_entry(l, &o->snapshots, list)
                if (l->store->chunk_size < s->store->chunk_size)
                        break;
        list_add_tail(&s->list, &l->list);
}

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 *
 * Also validate snapshot exception store handovers.
 * On success, returns 1 if this registration is a handover destination,
 * otherwise returns 0.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
        struct origin *o, *new_o = NULL;
        struct block_device *bdev = snap->origin->bdev;
        int r = 0;

        new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
        if (!new_o)
                return -ENOMEM;

        down_write(&_origins_lock);

        r = __validate_exception_handover(snap);
        if (r < 0) {
                kfree(new_o);
                goto out;
        }

        o = __lookup_origin(bdev);
        if (o)
                kfree(new_o);
        else {
                /* New origin */
                o = new_o;

                /* Initialise the struct */
                INIT_LIST_HEAD(&o->snapshots);
                o->bdev = bdev;

                __insert_origin(o);
        }

        __insert_snapshot(o, snap);

out:
        up_write(&_origins_lock);

        return r;
}

/*
 * Move snapshot to correct place in list according to chunk size.
 */
static void reregister_snapshot(struct dm_snapshot *s)
{
        struct block_device *bdev = s->origin->bdev;

        down_write(&_origins_lock);

        list_del(&s->list);
        __insert_snapshot(__lookup_origin(bdev), s);

        up_write(&_origins_lock);
}

static void unregister_snapshot(struct dm_snapshot *s)
{
        struct origin *o;

        down_write(&_origins_lock);
        o = __lookup_origin(s->origin->bdev);

        list_del(&s->list);
        if (o && list_empty(&o->snapshots)) {
                list_del(&o->hash_list);
                kfree(o);
        }

        up_write(&_origins_lock);
}

/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static int dm_exception_table_init(struct dm_exception_table *et,
                                   uint32_t size, unsigned hash_shift)
{
        unsigned int i;

        et->hash_shift = hash_shift;
        et->hash_mask = size - 1;
        et->table = dm_vcalloc(size, sizeof(struct list_head));
        if (!et->table)
                return -ENOMEM;

        for (i = 0; i < size; i++)
                INIT_LIST_HEAD(et->table + i);

        return 0;
}

static void dm_exception_table_exit(struct dm_exception_table *et,
                                    struct kmem_cache *mem)
{
        struct list_head *slot;
        struct dm_exception *ex, *next;
        int i, size;

        size = et->hash_mask + 1;
        for (i = 0; i < size; i++) {
                slot = et->table + i;

                list_for_each_entry_safe (ex, next, slot, hash_list)
                        kmem_cache_free(mem, ex);
        }

        vfree(et->table);
}

static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
{
        return (chunk >> et->hash_shift) & et->hash_mask;
}

static void dm_remove_exception(struct dm_exception *e)
{
        list_del(&e->hash_list);
}

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
                                                chunk_t chunk)
{
        struct list_head *slot;
        struct dm_exception *e;

        slot = &et->table[exception_hash(et, chunk)];
        list_for_each_entry (e, slot, hash_list)
                if (chunk >= e->old_chunk &&
                    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
                        return e;

        return NULL;
}

static struct dm_exception *alloc_completed_exception(void)
{
        struct dm_exception *e;

        e = kmem_cache_alloc(exception_cache, GFP_NOIO);
        if (!e)
                e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

        return e;
}

static void free_completed_exception(struct dm_exception *e)
{
        kmem_cache_free(exception_cache, e);
}

static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
        struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
                                                             GFP_NOIO);

        atomic_inc(&s->pending_exceptions_count);
        pe->snap = s;

        return pe;
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
        struct dm_snapshot *s = pe->snap;

        mempool_free(pe, s->pending_pool);
        smp_mb__before_atomic_dec();
        atomic_dec(&s->pending_exceptions_count);
}

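/*
 * Insert a completed exception into a table, extending an adjacent
 * entry where possible: runs of consecutive chunks are stored as a
 * single exception whose consecutive-chunk count lives in the top
 * bits of new_chunk (see dm_consecutive_chunk_count()).
 */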
static void dm_insert_exception(struct dm_exception_table *eh,
                                struct dm_exception *new_e)
{
        struct list_head *l;
        struct dm_exception *e = NULL;

        l = &eh->table[exception_hash(eh, new_e->old_chunk)];

        /* Add immediately if this table doesn't support consecutive chunks */
        if (!eh->hash_shift)
                goto out;

        /* List is ordered by old_chunk */
        list_for_each_entry_reverse(e, l, hash_list) {
                /* Insert after an existing chunk? */
                if (new_e->old_chunk == (e->old_chunk +
                                         dm_consecutive_chunk_count(e) + 1) &&
                    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
                                         dm_consecutive_chunk_count(e) + 1)) {
                        dm_consecutive_chunk_count_inc(e);
                        free_completed_exception(new_e);
                        return;
                }

                /* Insert before an existing chunk? */
                if (new_e->old_chunk == (e->old_chunk - 1) &&
                    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
                        dm_consecutive_chunk_count_inc(e);
                        e->old_chunk--;
                        e->new_chunk--;
                        free_completed_exception(new_e);
                        return;
                }

                if (new_e->old_chunk > e->old_chunk)
                        break;
        }

out:
        list_add(&new_e->hash_list, e ? &e->hash_list : l);
}

/*
 * Callback used by the exception stores to load exceptions when
 * initialising.
 */
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
        struct dm_snapshot *s = context;
        struct dm_exception *e;

        e = alloc_completed_exception();
        if (!e)
                return -ENOMEM;

        e->old_chunk = old;

        /* Consecutive_count is implicitly initialised to zero */
        e->new_chunk = new;

        dm_insert_exception(&s->complete, e);

        return 0;
}

/*
 * Return a minimum chunk size of all snapshots that have the specified origin.
 * Return zero if the origin has no snapshots.
 */
static sector_t __minimum_chunk_size(struct origin *o)
{
        struct dm_snapshot *snap;
        unsigned chunk_size = 0;

        if (o)
                list_for_each_entry(snap, &o->snapshots, list)
                        chunk_size = min_not_zero(chunk_size,
                                                  snap->store->chunk_size);

        return chunk_size;
}

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
        /* use a fixed size of 2MB */
        unsigned long mem = 2 * 1024 * 1024;
        mem /= sizeof(struct list_head);

        return mem;
}

/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
        sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;

        /*
         * Calculate based on the size of the original volume or
         * the COW volume...
         */
        cow_dev_size = get_dev_size(s->cow->bdev);
        origin_dev_size = get_dev_size(s->origin->bdev);
        max_buckets = calc_max_buckets();

        hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift;
        hash_size = min(hash_size, max_buckets);

        if (hash_size < 64)
                hash_size = 64;
        hash_size = rounddown_pow_of_two(hash_size);
        if (dm_exception_table_init(&s->complete, hash_size,
                                    DM_CHUNK_CONSECUTIVE_BITS))
                return -ENOMEM;

        /*
         * Allocate hash table for in-flight exceptions
         * Make this smaller than the real hash table
         */
        hash_size >>= 3;
        if (hash_size < 64)
                hash_size = 64;

        if (dm_exception_table_init(&s->pending, hash_size, 0)) {
                dm_exception_table_exit(&s->complete, exception_cache);
                return -ENOMEM;
        }

        return 0;
}

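/*
 * Allow another merge to start and wake up any stop_merge() caller
 * waiting on RUNNING_MERGE.
 */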
static void merge_shutdown(struct dm_snapshot *s)
{
        clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
        smp_mb__after_clear_bit();
        wake_up_bit(&s->state_bits, RUNNING_MERGE);
}

static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
{
        s->first_merging_chunk = 0;
        s->num_merging_chunks = 0;

        return bio_list_get(&s->bios_queued_during_merge);
}

/*
 * Remove one chunk from the index of completed exceptions.
 */
static int __remove_single_exception_chunk(struct dm_snapshot *s,
                                           chunk_t old_chunk)
{
        struct dm_exception *e;

        e = dm_lookup_exception(&s->complete, old_chunk);
        if (!e) {
                DMERR("Corruption detected: exception for block %llu is "
                      "on disk but not in memory",
                      (unsigned long long)old_chunk);
                return -EINVAL;
        }

        /*
         * If this is the only chunk using this exception, remove exception.
         */
        if (!dm_consecutive_chunk_count(e)) {
                dm_remove_exception(e);
                free_completed_exception(e);
                return 0;
        }

        /*
         * The chunk may be either at the beginning or the end of a
         * group of consecutive chunks - never in the middle.  We are
         * removing chunks in the opposite order to that in which they
         * were added, so this should always be true.
         * Decrement the consecutive chunk counter and adjust the
         * starting point if necessary.
         */
        if (old_chunk == e->old_chunk) {
                e->old_chunk++;
                e->new_chunk++;
        } else if (old_chunk != e->old_chunk +
                   dm_consecutive_chunk_count(e)) {
                DMERR("Attempt to merge block %llu from the "
                      "middle of a chunk range [%llu - %llu]",
                      (unsigned long long)old_chunk,
                      (unsigned long long)e->old_chunk,
                      (unsigned long long)
                      e->old_chunk + dm_consecutive_chunk_count(e));
                return -EINVAL;
        }

        dm_consecutive_chunk_count_dec(e);

        return 0;
}

static void flush_bios(struct bio *bio);

static int remove_single_exception_chunk(struct dm_snapshot *s)
{
        struct bio *b = NULL;
        int r;
        chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;

        down_write(&s->lock);

        /*
         * Process chunks (and associated exceptions) in reverse order
         * so that dm_consecutive_chunk_count_dec() accounting works.
         */
        do {
                r = __remove_single_exception_chunk(s, old_chunk);
                if (r)
                        goto out;
        } while (old_chunk-- > s->first_merging_chunk);

        b = __release_queued_bios_after_merge(s);

out:
        up_write(&s->lock);
        if (b)
                flush_bios(b);

        return r;
}

static int origin_write_extent(struct dm_snapshot *merging_snap,
                               sector_t sector, unsigned chunk_size);

static void merge_callback(int read_err, unsigned long write_err,
                           void *context);

static uint64_t read_pending_exceptions_done_count(void)
{
        uint64_t pending_exceptions_done;

        spin_lock(&_pending_exceptions_done_spinlock);
        pending_exceptions_done = _pending_exceptions_done_count;
        spin_unlock(&_pending_exceptions_done_spinlock);

        return pending_exceptions_done;
}

static void increment_pending_exceptions_done_count(void)
{
        spin_lock(&_pending_exceptions_done_spinlock);
        _pending_exceptions_done_count++;
        spin_unlock(&_pending_exceptions_done_spinlock);

        wake_up_all(&_pending_exceptions_done);
}

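/*
 * Copy the next run of consecutive chunks back to the origin with
 * kcopyd; merge_callback() then commits the merge and removes the
 * corresponding exceptions.
 */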
static void snapshot_merge_next_chunks(struct dm_snapshot *s)
{
        int i, linear_chunks;
        chunk_t old_chunk, new_chunk;
        struct dm_io_region src, dest;
        sector_t io_size;
        uint64_t previous_count;

        BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
        if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
                goto shut;

        /*
         * valid flag never changes during merge, so no lock required.
         */
        if (!s->valid) {
                DMERR("Snapshot is invalid: can't merge");
                goto shut;
        }

        linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
                                                      &new_chunk);
        if (linear_chunks <= 0) {
                if (linear_chunks < 0) {
                        DMERR("Read error in exception store: "
                              "shutting down merge");
                        down_write(&s->lock);
                        s->merge_failed = 1;
                        up_write(&s->lock);
                }
                goto shut;
        }

        /* Adjust old_chunk and new_chunk to reflect start of linear region */
        old_chunk = old_chunk + 1 - linear_chunks;
        new_chunk = new_chunk + 1 - linear_chunks;

        /*
         * Use one (potentially large) I/O to copy all 'linear_chunks'
         * from the exception store to the origin
         */
        io_size = linear_chunks * s->store->chunk_size;

        dest.bdev = s->origin->bdev;
        dest.sector = chunk_to_sector(s->store, old_chunk);
        dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);

        src.bdev = s->cow->bdev;
        src.sector = chunk_to_sector(s->store, new_chunk);
        src.count = dest.count;

        /*
         * Reallocate any exceptions needed in other snapshots then
         * wait for the pending exceptions to complete.
         * Each time any pending exception (globally on the system)
         * completes we are woken and repeat the process to find out
         * if we can proceed.  While this may not seem a particularly
         * efficient algorithm, it is not expected to have any
         * significant impact on performance.
         */
        previous_count = read_pending_exceptions_done_count();
        while (origin_write_extent(s, dest.sector, io_size)) {
                wait_event(_pending_exceptions_done,
                           (read_pending_exceptions_done_count() !=
                            previous_count));
                /* Retry after the wait, until all exceptions are done. */
                previous_count = read_pending_exceptions_done_count();
        }

        down_write(&s->lock);
        s->first_merging_chunk = old_chunk;
        s->num_merging_chunks = linear_chunks;
        up_write(&s->lock);

        /* Wait until writes to all 'linear_chunks' drain */
        for (i = 0; i < linear_chunks; i++)
                __check_for_conflicting_io(s, old_chunk + i);

        dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
        return;

shut:
        merge_shutdown(s);
}

static void error_bios(struct bio *bio);

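/*
 * kcopyd has finished copying the chunks back to the origin: commit
 * the merge in the exception store, drop the merged exceptions from
 * the in-memory table and move on to the next run of chunks.
 */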
static void merge_callback(int read_err, unsigned long write_err, void *context)
{
        struct dm_snapshot *s = context;
        struct bio *b = NULL;

        if (read_err || write_err) {
                if (read_err)
                        DMERR("Read error: shutting down merge.");
                else
                        DMERR("Write error: shutting down merge.");
                goto shut;
        }

        if (s->store->type->commit_merge(s->store,
                                         s->num_merging_chunks) < 0) {
                DMERR("Write error in exception store: shutting down merge");
                goto shut;
        }

        if (remove_single_exception_chunk(s) < 0)
                goto shut;

        snapshot_merge_next_chunks(s);

        return;

shut:
        down_write(&s->lock);
        s->merge_failed = 1;
        b = __release_queued_bios_after_merge(s);
        up_write(&s->lock);
        error_bios(b);

        merge_shutdown(s);
}

static void start_merge(struct dm_snapshot *s)
{
        if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
                snapshot_merge_next_chunks(s);
}

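/*
 * Action for wait_on_bit() in stop_merge(): simply sleep until the
 * merge thread clears RUNNING_MERGE and wakes us.
 */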
static int wait_schedule(void *ptr)
{
        schedule();

        return 0;
}

/*
 * Stop the merging process and wait until it finishes.
 */
static void stop_merge(struct dm_snapshot *s)
{
        set_bit(SHUTDOWN_MERGE, &s->state_bits);
        wait_on_bit(&s->state_bits, RUNNING_MERGE, wait_schedule,
                    TASK_UNINTERRUPTIBLE);
        clear_bit(SHUTDOWN_MERGE, &s->state_bits);
}

/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
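/*
 * For example (device names are illustrative), a persistent snapshot
 * of a 4MB origin with an 8-sector (4KB) chunk size could be created
 * with:
 *
 *   echo "0 8192 snapshot /dev/vg/origin /dev/vg/cow P 8" | \
 *       dmsetup create snap
 */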
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        struct dm_snapshot *s;
        int i;
        int r = -EINVAL;
        char *origin_path, *cow_path;
        unsigned args_used, num_flush_requests = 1;
        fmode_t origin_mode = FMODE_READ;

        if (argc != 4) {
                ti->error = "requires exactly 4 arguments";
                r = -EINVAL;
                goto bad;
        }

        if (dm_target_is_snapshot_merge(ti)) {
                num_flush_requests = 2;
                origin_mode = FMODE_WRITE;
        }

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s) {
                ti->error = "Cannot allocate snapshot context private "
                    "structure";
                r = -ENOMEM;
                goto bad;
        }

        origin_path = argv[0];
        argv++;
        argc--;

        r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
        if (r) {
                ti->error = "Cannot get origin device";
                goto bad_origin;
        }

        cow_path = argv[0];
        argv++;
        argc--;

        r = dm_get_device(ti, cow_path, FMODE_READ | FMODE_WRITE, &s->cow);
        if (r) {
                ti->error = "Cannot get COW device";
                goto bad_cow;
        }

        r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
        if (r) {
                ti->error = "Couldn't create exception store";
                r = -EINVAL;
                goto bad_store;
        }

        argv += args_used;
        argc -= args_used;

        s->ti = ti;
        s->valid = 1;
        s->active = 0;
        s->suspended = 0;
        atomic_set(&s->pending_exceptions_count, 0);
        init_rwsem(&s->lock);
        INIT_LIST_HEAD(&s->list);
        spin_lock_init(&s->pe_lock);
        s->state_bits = 0;
        s->merge_failed = 0;
        s->first_merging_chunk = 0;
        s->num_merging_chunks = 0;
        bio_list_init(&s->bios_queued_during_merge);

        /* Allocate hash table for COW data */
        if (init_hash_tables(s)) {
                ti->error = "Unable to allocate hash table space";
                r = -ENOMEM;
                goto bad_hash_tables;
        }

        r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
        if (r) {
                ti->error = "Could not create kcopyd client";
                goto bad_kcopyd;
        }

        s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
        if (!s->pending_pool) {
                ti->error = "Could not allocate mempool for pending exceptions";
                goto bad_pending_pool;
        }

        s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
                                                         tracked_chunk_cache);
        if (!s->tracked_chunk_pool) {
                ti->error = "Could not allocate tracked_chunk mempool for "
                            "tracking reads";
                goto bad_tracked_chunk_pool;
        }

        for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
                INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);

        spin_lock_init(&s->tracked_chunk_lock);

        bio_list_init(&s->queued_bios);
        INIT_WORK(&s->queued_bios_work, flush_queued_bios);

        ti->private = s;
        ti->num_flush_requests = num_flush_requests;

        /* Add snapshot to the list of snapshots for this origin */
        /* Exceptions aren't triggered till snapshot_resume() is called */
        r = register_snapshot(s);
        if (r == -ENOMEM) {
                ti->error = "Snapshot origin struct allocation failed";
                goto bad_load_and_register;
        } else if (r < 0) {
                /* invalid handover, register_snapshot has set ti->error */
                goto bad_load_and_register;
        }

        /*
         * Metadata must only be loaded into one table at once, so skip this
         * if metadata will be handed over during resume.
         * Chunk size will be set during the handover - set it to zero to
         * ensure it's ignored.
         */
        if (r > 0) {
                s->store->chunk_size = 0;
                return 0;
        }

        r = s->store->type->read_metadata(s->store, dm_add_exception,
                                          (void *)s);
        if (r < 0) {
                ti->error = "Failed to read snapshot metadata";
                goto bad_read_metadata;
        } else if (r > 0) {
                s->valid = 0;
                DMWARN("Snapshot is marked invalid.");
        }

        if (!s->store->chunk_size) {
                ti->error = "Chunk size not set";
                goto bad_read_metadata;
        }
        ti->split_io = s->store->chunk_size;

        return 0;

bad_read_metadata:
        unregister_snapshot(s);

bad_load_and_register:
        mempool_destroy(s->tracked_chunk_pool);

bad_tracked_chunk_pool:
        mempool_destroy(s->pending_pool);

bad_pending_pool:
        dm_kcopyd_client_destroy(s->kcopyd_client);

bad_kcopyd:
        dm_exception_table_exit(&s->pending, pending_cache);
        dm_exception_table_exit(&s->complete, exception_cache);

bad_hash_tables:
        dm_exception_store_destroy(s->store);

bad_store:
        dm_put_device(ti, s->cow);

bad_cow:
        dm_put_device(ti, s->origin);

bad_origin:
        kfree(s);

bad:
        return r;
}

static void __free_exceptions(struct dm_snapshot *s)
{
        dm_kcopyd_client_destroy(s->kcopyd_client);
        s->kcopyd_client = NULL;

        dm_exception_table_exit(&s->pending, pending_cache);
        dm_exception_table_exit(&s->complete, exception_cache);
}

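/*
 * Hand the completed exception table and exception store over from
 * one snapshot target instance (snap_src) to the instance replacing
 * it (snap_dest), e.g. across a table reload.
 */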
static void __handover_exceptions(struct dm_snapshot *snap_src,
                                  struct dm_snapshot *snap_dest)
{
        union {
                struct dm_exception_table table_swap;
                struct dm_exception_store *store_swap;
        } u;

        /*
         * Swap all snapshot context information between the two instances.
         */
        u.table_swap = snap_dest->complete;
        snap_dest->complete = snap_src->complete;
        snap_src->complete = u.table_swap;

        u.store_swap = snap_dest->store;
        snap_dest->store = snap_src->store;
        snap_src->store = u.store_swap;

        snap_dest->store->snap = snap_dest;
        snap_src->store->snap = snap_src;

        snap_dest->ti->split_io = snap_dest->store->chunk_size;
        snap_dest->valid = snap_src->valid;

        /*
         * Set source invalid to ensure it receives no further I/O.
         */
        snap_src->valid = 0;
}

static void snapshot_dtr(struct dm_target *ti)
{
#ifdef CONFIG_DM_DEBUG
        int i;
#endif
        struct dm_snapshot *s = ti->private;
        struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

        flush_workqueue(ksnapd);

        down_read(&_origins_lock);
        /* Check whether exception handover must be cancelled */
        (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
        if (snap_src && snap_dest && (s == snap_src)) {
                down_write(&snap_dest->lock);
                snap_dest->valid = 0;
                up_write(&snap_dest->lock);
                DMERR("Cancelling snapshot handover.");
        }
        up_read(&_origins_lock);

        if (dm_target_is_snapshot_merge(ti))
                stop_merge(s);

        /* Prevent further origin writes from using this snapshot. */
        /* After this returns there can be no new kcopyd jobs. */
        unregister_snapshot(s);

        while (atomic_read(&s->pending_exceptions_count))
                msleep(1);
        /*
         * Ensure instructions in mempool_destroy aren't reordered
         * before atomic_read.
         */
        smp_mb();

#ifdef CONFIG_DM_DEBUG
        for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
                BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif

        mempool_destroy(s->tracked_chunk_pool);

        __free_exceptions(s);

        mempool_destroy(s->pending_pool);

        dm_exception_store_destroy(s->store);

        dm_put_device(ti, s->cow);

        dm_put_device(ti, s->origin);

        kfree(s);
}

/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
        struct bio *n;

        while (bio) {
                n = bio->bi_next;
                bio->bi_next = NULL;
                generic_make_request(bio);
                bio = n;
        }
}

static void flush_queued_bios(struct work_struct *work)
{
        struct dm_snapshot *s =
                container_of(work, struct dm_snapshot, queued_bios_work);
        struct bio *queued_bios;
        unsigned long flags;

        spin_lock_irqsave(&s->pe_lock, flags);
        queued_bios = bio_list_get(&s->queued_bios);
        spin_unlock_irqrestore(&s->pe_lock, flags);

        flush_bios(queued_bios);
}

static int do_origin(struct dm_dev *origin, struct bio *bio);

/*
 * Retry a list of origin bios: remap each through do_origin() and
 * resubmit it if it comes back remapped.
 */
static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
{
        struct bio *n;
        int r;

        while (bio) {
                n = bio->bi_next;
                bio->bi_next = NULL;
                r = do_origin(s->origin, bio);
                if (r == DM_MAPIO_REMAPPED)
                        generic_make_request(bio);
                bio = n;
        }
}

/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
        struct bio *n;

        while (bio) {
                n = bio->bi_next;
                bio->bi_next = NULL;
                bio_io_error(bio);
                bio = n;
        }
}

static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
        if (!s->valid)
                return;

        if (err == -EIO)
                DMERR("Invalidating snapshot: Error reading/writing.");
        else if (err == -ENOMEM)
                DMERR("Invalidating snapshot: Unable to allocate exception.");

        if (s->store->type->drop_snapshot)
                s->store->type->drop_snapshot(s->store);

        s->valid = 0;

        dm_table_event(s->ti->table);
}

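/*
 * The chunk copy (and, for persistent snapshots, the metadata commit)
 * has finished: move the exception from the pending table to the
 * complete table and release the bios that were waiting on it.
 */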
static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
        struct dm_exception *e;
        struct dm_snapshot *s = pe->snap;
        struct bio *origin_bios = NULL;
        struct bio *snapshot_bios = NULL;
        int error = 0;

        if (!success) {
                /* Read/write error - snapshot is unusable */
                down_write(&s->lock);
                __invalidate_snapshot(s, -EIO);
                error = 1;
                goto out;
        }

        e = alloc_completed_exception();
        if (!e) {
                down_write(&s->lock);
                __invalidate_snapshot(s, -ENOMEM);
                error = 1;
                goto out;
        }
        *e = pe->e;

        down_write(&s->lock);
        if (!s->valid) {
                free_completed_exception(e);
                error = 1;
                goto out;
        }

        /* Check for conflicting reads */
        __check_for_conflicting_io(s, pe->e.old_chunk);

        /*
         * Add a proper exception, and remove the
         * in-flight exception from the list.
         */
        dm_insert_exception(&s->complete, e);

 out:
        dm_remove_exception(&pe->e);
        snapshot_bios = bio_list_get(&pe->snapshot_bios);
        origin_bios = bio_list_get(&pe->origin_bios);
        free_pending_exception(pe);

        increment_pending_exceptions_done_count();

        up_write(&s->lock);

        /* Submit any pending write bios */
        if (error)
                error_bios(snapshot_bios);
        else
                flush_bios(snapshot_bios);

        retry_origin_bios(s, origin_bios);
}

static void commit_callback(void *context, int success)
{
        struct dm_snap_pending_exception *pe = context;

        pending_complete(pe, success);
}

/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
        struct dm_snap_pending_exception *pe = context;
        struct dm_snapshot *s = pe->snap;

        if (read_err || write_err)
                pending_complete(pe, 0);

        else
                /* Update the metadata if we are persistent */
                s->store->type->commit_exception(s->store, &pe->e,
                                                 commit_callback, pe);
}

/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
        struct dm_snapshot *s = pe->snap;
        struct dm_io_region src, dest;
        struct block_device *bdev = s->origin->bdev;
        sector_t dev_size;

        dev_size = get_dev_size(bdev);

        src.bdev = bdev;
        src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
        src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);

        dest.bdev = s->cow->bdev;
        dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
        dest.count = src.count;

        /* Hand over to kcopyd */
        dm_kcopyd_copy(s->kcopyd_client,
                    &src, 1, &dest, 0, copy_callback, pe);
}

static struct dm_snap_pending_exception *
__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
{
        struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);

        if (!e)
                return NULL;

        return container_of(e, struct dm_snap_pending_exception, e);
}

/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s,
                         struct dm_snap_pending_exception *pe, chunk_t chunk)
{
        struct dm_snap_pending_exception *pe2;

        pe2 = __lookup_pending_exception(s, chunk);
        if (pe2) {
                free_pending_exception(pe);
                return pe2;
        }

        pe->e.old_chunk = chunk;
        bio_list_init(&pe->origin_bios);
        bio_list_init(&pe->snapshot_bios);
        pe->started = 0;

        if (s->store->type->prepare_exception(s->store, &pe->e)) {
                free_pending_exception(pe);
                return NULL;
        }

        dm_insert_exception(&s->pending, &pe->e);

        return pe;
}

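/*
 * Redirect a bio to its remapped chunk on the COW device, preserving
 * the offset within the chunk.
 */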
static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
                            struct bio *bio, chunk_t chunk)
{
        bio->bi_bdev = s->cow->bdev;
        bio->bi_sector = chunk_to_sector(s->store,
                                         dm_chunk_number(e->new_chunk) +
                                         (chunk - e->old_chunk)) +
                                         (bio->bi_sector &
                                          s->store->chunk_mask);
}

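/*
 * Map a bio arriving at the snapshot device.  Reads of unremapped
 * chunks are sent to the origin; writes trigger (or join) a pending
 * exception so the chunk is copied to the COW device first.
 */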
1579 static int snapshot_map(struct dm_target *ti, struct bio *bio,
1580                         union map_info *map_context)
1581 {
1582         struct dm_exception *e;
1583         struct dm_snapshot *s = ti->private;
1584         int r = DM_MAPIO_REMAPPED;
1585         chunk_t chunk;
1586         struct dm_snap_pending_exception *pe = NULL;
1587
1588         if (bio->bi_rw & REQ_FLUSH) {
1589                 bio->bi_bdev = s->cow->bdev;
1590                 return DM_MAPIO_REMAPPED;
1591         }
1592
1593         chunk = sector_to_chunk(s->store, bio->bi_sector);
1594
1595         /* Full snapshots are not usable */
1596         /* To get here the table must be live so s->active is always set. */
1597         if (!s->valid)
1598                 return -EIO;
1599
1600         /* FIXME: should only take write lock if we need
1601          * to copy an exception */
1602         down_write(&s->lock);
1603
1604         if (!s->valid) {
1605                 r = -EIO;
1606                 goto out_unlock;
1607         }
1608
1609         /* If the block is already remapped - use that, else remap it */
1610         e = dm_lookup_exception(&s->complete, chunk);
1611         if (e) {
1612                 remap_exception(s, e, bio, chunk);
1613                 goto out_unlock;
1614         }
1615
1616         /*
1617          * Write to snapshot - higher level takes care of RW/RO
1618          * flags so we should only get this if we are
1619          * writeable.
1620          */
1621         if (bio_rw(bio) == WRITE) {
1622                 pe = __lookup_pending_exception(s, chunk);
1623                 if (!pe) {
1624                         up_write(&s->lock);
1625                         pe = alloc_pending_exception(s);
1626                         down_write(&s->lock);
1627
1628                         if (!s->valid) {
1629                                 free_pending_exception(pe);
1630                                 r = -EIO;
1631                                 goto out_unlock;
1632                         }
1633
1634                         e = dm_lookup_exception(&s->complete, chunk);
1635                         if (e) {
1636                                 free_pending_exception(pe);
1637                                 remap_exception(s, e, bio, chunk);
1638                                 goto out_unlock;
1639                         }
1640
1641                         pe = __find_pending_exception(s, pe, chunk);
1642                         if (!pe) {
1643                                 __invalidate_snapshot(s, -ENOMEM);
1644                                 r = -EIO;
1645                                 goto out_unlock;
1646                         }
1647                 }
1648
1649                 remap_exception(s, &pe->e, bio, chunk);
1650                 bio_list_add(&pe->snapshot_bios, bio);
1651
1652                 r = DM_MAPIO_SUBMITTED;
1653
1654                 if (!pe->started) {
1655                         /* this is protected by snap->lock */
1656                         pe->started = 1;
1657                         up_write(&s->lock);
1658                         start_copy(pe);
1659                         goto out;
1660                 }
1661         } else {
1662                 bio->bi_bdev = s->origin->bdev;
1663                 map_context->ptr = track_chunk(s, chunk);
1664         }
1665
1666  out_unlock:
1667         up_write(&s->lock);
1668  out:
1669         return r;
1670 }
1671
1672 /*
1673  * A snapshot-merge target behaves like a combination of a snapshot
1674  * target and a snapshot-origin target.  It only generates new
1675  * exceptions in other snapshots and not in the one that is being
1676  * merged.
1677  *
1678  * For each chunk, if there is an existing exception, it is used to
1679  * redirect I/O to the cow device.  Otherwise I/O is sent to the origin,
1680  * which in turn might generate exceptions in other snapshots.
1681  * If merging is currently taking place on the chunk in question, the
1682  * I/O is deferred by adding it to s->bios_queued_during_merge.
1683  */
static int snapshot_merge_map(struct dm_target *ti, struct bio *bio,
                              union map_info *map_context)
{
        struct dm_exception *e;
        struct dm_snapshot *s = ti->private;
        int r = DM_MAPIO_REMAPPED;
        chunk_t chunk;

        if (bio->bi_rw & REQ_FLUSH) {
                if (!map_context->target_request_nr)
                        bio->bi_bdev = s->origin->bdev;
                else
                        bio->bi_bdev = s->cow->bdev;
                map_context->ptr = NULL;
                return DM_MAPIO_REMAPPED;
        }

        chunk = sector_to_chunk(s->store, bio->bi_sector);

        down_write(&s->lock);

        /* Full merging snapshots are redirected to the origin */
        if (!s->valid)
                goto redirect_to_origin;

        /* If the block is already remapped - use that */
        e = dm_lookup_exception(&s->complete, chunk);
        if (e) {
                /* Queue writes overlapping with chunks being merged */
                if (bio_rw(bio) == WRITE &&
                    chunk >= s->first_merging_chunk &&
                    chunk < (s->first_merging_chunk +
                             s->num_merging_chunks)) {
                        bio->bi_bdev = s->origin->bdev;
                        bio_list_add(&s->bios_queued_during_merge, bio);
                        r = DM_MAPIO_SUBMITTED;
                        goto out_unlock;
                }

                remap_exception(s, e, bio, chunk);

                if (bio_rw(bio) == WRITE)
                        map_context->ptr = track_chunk(s, chunk);
                goto out_unlock;
        }

redirect_to_origin:
        bio->bi_bdev = s->origin->bdev;

        if (bio_rw(bio) == WRITE) {
                up_write(&s->lock);
                return do_origin(s->origin, bio);
        }

out_unlock:
        up_write(&s->lock);

        return r;
}

static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
                           int error, union map_info *map_context)
{
        struct dm_snapshot *s = ti->private;
        struct dm_snap_tracked_chunk *c = map_context->ptr;

        if (c)
                stop_tracking_chunk(s, c);

        return 0;
}

static void snapshot_merge_presuspend(struct dm_target *ti)
{
        struct dm_snapshot *s = ti->private;

        stop_merge(s);
}

static void snapshot_postsuspend(struct dm_target *ti)
{
        struct dm_snapshot *s = ti->private;

        down_write(&s->lock);
        s->suspended = 1;
        up_write(&s->lock);
}

static int snapshot_preresume(struct dm_target *ti)
{
        int r = 0;
        struct dm_snapshot *s = ti->private;
        struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

        down_read(&_origins_lock);
        (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
        if (snap_src && snap_dest) {
                down_read(&snap_src->lock);
                if (s == snap_src) {
                        DMERR("Unable to resume snapshot source until "
                              "handover completes.");
                        r = -EINVAL;
                } else if (!snap_src->suspended) {
                        DMERR("Unable to perform snapshot handover until "
                              "source is suspended.");
                        r = -EINVAL;
                }
                up_read(&snap_src->lock);
        }
        up_read(&_origins_lock);

        return r;
}
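
/*
 * Handover in brief: a replacement target that shares this snapshot's
 * COW device is loaded while the original target still holds the
 * exception table.  snapshot_preresume() above refuses to resume until
 * the source target is suspended; snapshot_resume() below then moves
 * the complete exception table from snap_src to snap_dest under both
 * snapshot locks, so the new target carries on where the old one
 * stopped.
 */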
static void snapshot_resume(struct dm_target *ti)
{
        struct dm_snapshot *s = ti->private;
        struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

        down_read(&_origins_lock);
        (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
        if (snap_src && snap_dest) {
                down_write(&snap_src->lock);
                down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
                __handover_exceptions(snap_src, snap_dest);
                up_write(&snap_dest->lock);
                up_write(&snap_src->lock);
        }
        up_read(&_origins_lock);

        /* Now that we have the correct chunk size, reregister */
        reregister_snapshot(s);

        down_write(&s->lock);
        s->active = 1;
        s->suspended = 0;
        up_write(&s->lock);
}

static sector_t get_origin_minimum_chunksize(struct block_device *bdev)
{
        sector_t min_chunksize;

        down_read(&_origins_lock);
        min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
        up_read(&_origins_lock);

        return min_chunksize;
}

static void snapshot_merge_resume(struct dm_target *ti)
{
        struct dm_snapshot *s = ti->private;

        /*
         * Handover exceptions from existing snapshot.
         */
        snapshot_resume(ti);

        /*
         * snapshot-merge acts as an origin, so set ti->split_io
         */
        ti->split_io = get_origin_minimum_chunksize(s->origin->bdev);

        start_merge(s);
}
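
/*
 * Status output, for reference (the shapes follow from the DMEMIT
 * calls below): STATUSTYPE_INFO emits
 * "<sectors_allocated>/<total_sectors> <metadata_sectors>", e.g.
 * "1024/204800 8" for a hypothetical 100 MiB COW area, or one of
 * "Invalid", "Merge failed" and "Unknown"; STATUSTYPE_TABLE emits
 * "<origin_name> <cow_name>" followed by the exception store's own
 * status arguments.
 */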
static int snapshot_status(struct dm_target *ti, status_type_t type,
                           char *result, unsigned int maxlen)
{
        unsigned sz = 0;
        struct dm_snapshot *snap = ti->private;

        switch (type) {
        case STATUSTYPE_INFO:

                down_write(&snap->lock);

                if (!snap->valid)
                        DMEMIT("Invalid");
                else if (snap->merge_failed)
                        DMEMIT("Merge failed");
                else {
                        if (snap->store->type->usage) {
                                sector_t total_sectors, sectors_allocated,
                                         metadata_sectors;
                                snap->store->type->usage(snap->store,
                                                         &total_sectors,
                                                         &sectors_allocated,
                                                         &metadata_sectors);
                                DMEMIT("%llu/%llu %llu",
                                       (unsigned long long)sectors_allocated,
                                       (unsigned long long)total_sectors,
                                       (unsigned long long)metadata_sectors);
                        } else
                                DMEMIT("Unknown");
                }

                up_write(&snap->lock);

                break;

        case STATUSTYPE_TABLE:
                DMEMIT("%s %s", snap->origin->name, snap->cow->name);
                snap->store->type->status(snap->store, type, result + sz,
                                          maxlen - sz);
                break;
        }

        return 0;
}

static int snapshot_iterate_devices(struct dm_target *ti,
                                    iterate_devices_callout_fn fn, void *data)
{
        struct dm_snapshot *snap = ti->private;
        int r;

        r = fn(ti, snap->origin, 0, ti->len, data);

        if (!r)
                r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data);

        return r;
}

/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/

/*
 * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
 * supplied bio is ignored.  The caller may submit it immediately.
 * (No remapping actually occurs as the origin is always a direct linear
 * map.)
 *
 * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
 * and any supplied bio is added to a list to be submitted once all
 * the necessary exceptions exist.
 */
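/*
 * Caller contract, sketched: origin_map() below forwards every WRITE
 * here through do_origin() and returns the result unchanged, so
 * DM_MAPIO_REMAPPED lets the device-mapper core submit the bio to the
 * origin immediately, whereas DM_MAPIO_SUBMITTED means the bio now
 * sits on a pending exception's origin_bios list and is only released
 * once the outstanding chunk copies have completed.
 */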
static int __origin_write(struct list_head *snapshots, sector_t sector,
                          struct bio *bio)
{
        int r = DM_MAPIO_REMAPPED;
        struct dm_snapshot *snap;
        struct dm_exception *e;
        struct dm_snap_pending_exception *pe;
        struct dm_snap_pending_exception *pe_to_start_now = NULL;
        struct dm_snap_pending_exception *pe_to_start_last = NULL;
        chunk_t chunk;

        /* Do all the snapshots on this origin */
        list_for_each_entry (snap, snapshots, list) {
                /*
                 * Don't make new exceptions in a merging snapshot
                 * because it has effectively been deleted
                 */
                if (dm_target_is_snapshot_merge(snap->ti))
                        continue;

                down_write(&snap->lock);

                /* Only deal with valid and active snapshots */
                if (!snap->valid || !snap->active)
                        goto next_snapshot;

                /* Nothing to do if writing beyond end of snapshot */
                if (sector >= dm_table_get_size(snap->ti->table))
                        goto next_snapshot;

                /*
                 * Remember, different snapshots can have
                 * different chunk sizes.
                 */
                chunk = sector_to_chunk(snap->store, sector);

                /*
                 * Check exception table to see if block
                 * is already remapped in this snapshot
                 * and trigger an exception if not.
                 */
                e = dm_lookup_exception(&snap->complete, chunk);
                if (e)
                        goto next_snapshot;

                pe = __lookup_pending_exception(snap, chunk);
                if (!pe) {
                        up_write(&snap->lock);
                        pe = alloc_pending_exception(snap);
                        down_write(&snap->lock);

                        if (!snap->valid) {
                                free_pending_exception(pe);
                                goto next_snapshot;
                        }

                        e = dm_lookup_exception(&snap->complete, chunk);
                        if (e) {
                                free_pending_exception(pe);
                                goto next_snapshot;
                        }

                        pe = __find_pending_exception(snap, pe, chunk);
                        if (!pe) {
                                __invalidate_snapshot(snap, -ENOMEM);
                                goto next_snapshot;
                        }
                }

                r = DM_MAPIO_SUBMITTED;

                /*
                 * If an origin bio was supplied, queue it to wait for the
                 * completion of this exception, and start this one last,
                 * at the end of the function.
                 */
                if (bio) {
                        bio_list_add(&pe->origin_bios, bio);
                        bio = NULL;

                        if (!pe->started) {
                                pe->started = 1;
                                pe_to_start_last = pe;
                        }
                }

                if (!pe->started) {
                        pe->started = 1;
                        pe_to_start_now = pe;
                }

 next_snapshot:
                up_write(&snap->lock);

                if (pe_to_start_now) {
                        start_copy(pe_to_start_now);
                        pe_to_start_now = NULL;
                }
        }

        /*
         * Submit the exception against which the bio is queued last,
         * to give the other exceptions a head start.
         */
        if (pe_to_start_last)
                start_copy(pe_to_start_last);

        return r;
}

/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
        struct origin *o;
        int r = DM_MAPIO_REMAPPED;

        down_read(&_origins_lock);
        o = __lookup_origin(origin->bdev);
        if (o)
                r = __origin_write(&o->snapshots, bio->bi_sector, bio);
        up_read(&_origins_lock);

        return r;
}

/*
 * Trigger exceptions in all non-merging snapshots.
 *
 * The chunk size of the merging snapshot may be larger than the chunk
 * size of some other snapshot so we may need to reallocate multiple
 * chunks in other snapshots.
 *
 * We scan all the overlapping exceptions in the other snapshots.
 * Returns 1 if anything was reallocated and must be waited for,
 * otherwise returns 0.
 *
 * size must be a multiple of merging_snap's chunk_size.
 */
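/*
 * Worked example (sizes are illustrative): if the merging snapshot
 * uses 16-sector chunks and another snapshot of the same origin uses
 * 8-sector chunks, split_io is 8, so merging one 16-sector chunk makes
 * two __origin_write() calls, at 'sector' and 'sector + 8', each of
 * which may reallocate a chunk in the smaller-chunked snapshot.
 */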
static int origin_write_extent(struct dm_snapshot *merging_snap,
                               sector_t sector, unsigned size)
{
        int must_wait = 0;
        sector_t n;
        struct origin *o;

        /*
         * The origin's __minimum_chunk_size() got stored in split_io
         * by snapshot_merge_resume().
         */
        down_read(&_origins_lock);
        o = __lookup_origin(merging_snap->origin->bdev);
        for (n = 0; n < size; n += merging_snap->ti->split_io)
                if (__origin_write(&o->snapshots, sector + n, NULL) ==
                    DM_MAPIO_SUBMITTED)
                        must_wait = 1;
        up_read(&_origins_lock);

        return must_wait;
}

/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
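/*
 * For example (device name is hypothetical), userspace can set up an
 * origin with a one-line table covering the whole device:
 *
 *     dmsetup create base-origin --table \
 *         "0 $(blockdev --getsz /dev/vg/base) snapshot-origin /dev/vg/base"
 */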
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        int r;
        struct dm_dev *dev;

        if (argc != 1) {
                ti->error = "origin: incorrect number of arguments";
                return -EINVAL;
        }

        r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev);
        if (r) {
                ti->error = "Cannot get target device";
                return r;
        }

        ti->private = dev;
        ti->num_flush_requests = 1;

        return 0;
}

static void origin_dtr(struct dm_target *ti)
{
        struct dm_dev *dev = ti->private;

        dm_put_device(ti, dev);
}

static int origin_map(struct dm_target *ti, struct bio *bio,
                      union map_info *map_context)
{
        struct dm_dev *dev = ti->private;

        bio->bi_bdev = dev->bdev;

        if (bio->bi_rw & REQ_FLUSH)
                return DM_MAPIO_REMAPPED;

        /* Only tell snapshots if this is a write */
        return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}

/*
 * Set the target "split_io" field to the minimum of all the snapshots'
 * chunk sizes.
 */
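/*
 * For instance (chunk sizes are illustrative): with two snapshots of
 * this origin using 16-sector and 64-sector chunks, split_io becomes
 * 16, so no bio handed to origin_map() ever straddles a chunk boundary
 * of either snapshot.
 */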
static void origin_resume(struct dm_target *ti)
{
        struct dm_dev *dev = ti->private;

        ti->split_io = get_origin_minimum_chunksize(dev->bdev);
}

static int origin_status(struct dm_target *ti, status_type_t type,
                         char *result, unsigned int maxlen)
{
        struct dm_dev *dev = ti->private;

        switch (type) {
        case STATUSTYPE_INFO:
                result[0] = '\0';
                break;

        case STATUSTYPE_TABLE:
                snprintf(result, maxlen, "%s", dev->name);
                break;
        }

        return 0;
}

static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
                        struct bio_vec *biovec, int max_size)
{
        struct dm_dev *dev = ti->private;
        struct request_queue *q = bdev_get_queue(dev->bdev);

        if (!q->merge_bvec_fn)
                return max_size;

        /*
         * The origin is an identity mapping starting at sector 0, so
         * bvm->bi_sector already names the right sector on the
         * underlying device; only bi_bdev needs updating.
         */
        bvm->bi_bdev = dev->bdev;

        return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static int origin_iterate_devices(struct dm_target *ti,
                                  iterate_devices_callout_fn fn, void *data)
{
        struct dm_dev *dev = ti->private;

        return fn(ti, dev, 0, ti->len, data);
}

static struct target_type origin_target = {
        .name    = "snapshot-origin",
        .version = {1, 7, 0},
        .module  = THIS_MODULE,
        .ctr     = origin_ctr,
        .dtr     = origin_dtr,
        .map     = origin_map,
        .resume  = origin_resume,
        .status  = origin_status,
        .merge   = origin_merge,
        .iterate_devices = origin_iterate_devices,
};

static struct target_type snapshot_target = {
        .name    = "snapshot",
        .version = {1, 9, 0},
        .module  = THIS_MODULE,
        .ctr     = snapshot_ctr,
        .dtr     = snapshot_dtr,
        .map     = snapshot_map,
        .end_io  = snapshot_end_io,
        .postsuspend = snapshot_postsuspend,
        .preresume  = snapshot_preresume,
        .resume  = snapshot_resume,
        .status  = snapshot_status,
        .iterate_devices = snapshot_iterate_devices,
};

static struct target_type merge_target = {
        .name    = dm_snapshot_merge_target_name,
        .version = {1, 0, 0},
        .module  = THIS_MODULE,
        .ctr     = snapshot_ctr,
        .dtr     = snapshot_dtr,
        .map     = snapshot_merge_map,
        .end_io  = snapshot_end_io,
        .presuspend = snapshot_merge_presuspend,
        .postsuspend = snapshot_postsuspend,
        .preresume  = snapshot_preresume,
        .resume  = snapshot_merge_resume,
        .status  = snapshot_status,
        .iterate_devices = snapshot_iterate_devices,
};

static int __init dm_snapshot_init(void)
{
        int r;

        r = dm_exception_store_init();
        if (r) {
                DMERR("Failed to initialize exception stores");
                return r;
        }

        r = dm_register_target(&snapshot_target);
        if (r < 0) {
                DMERR("snapshot target register failed %d", r);
                goto bad_register_snapshot_target;
        }

        r = dm_register_target(&origin_target);
        if (r < 0) {
                DMERR("Origin target register failed %d", r);
                goto bad_register_origin_target;
        }

        r = dm_register_target(&merge_target);
        if (r < 0) {
                DMERR("Merge target register failed %d", r);
                goto bad_register_merge_target;
        }

        r = init_origin_hash();
        if (r) {
                DMERR("init_origin_hash failed.");
                goto bad_origin_hash;
        }

        exception_cache = KMEM_CACHE(dm_exception, 0);
        if (!exception_cache) {
                DMERR("Couldn't create exception cache.");
                r = -ENOMEM;
                goto bad_exception_cache;
        }

        pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
        if (!pending_cache) {
                DMERR("Couldn't create pending cache.");
                r = -ENOMEM;
                goto bad_pending_cache;
        }

        tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
        if (!tracked_chunk_cache) {
                DMERR("Couldn't create cache to track chunks in use.");
                r = -ENOMEM;
                goto bad_tracked_chunk_cache;
        }

        ksnapd = create_singlethread_workqueue("ksnapd");
        if (!ksnapd) {
                DMERR("Failed to create ksnapd workqueue.");
                r = -ENOMEM;
                goto bad_pending_pool;
        }

        return 0;

bad_pending_pool:
        kmem_cache_destroy(tracked_chunk_cache);
bad_tracked_chunk_cache:
        kmem_cache_destroy(pending_cache);
bad_pending_cache:
        kmem_cache_destroy(exception_cache);
bad_exception_cache:
        exit_origin_hash();
bad_origin_hash:
        dm_unregister_target(&merge_target);
bad_register_merge_target:
        dm_unregister_target(&origin_target);
bad_register_origin_target:
        dm_unregister_target(&snapshot_target);
bad_register_snapshot_target:
        dm_exception_store_exit();

        return r;
}

static void __exit dm_snapshot_exit(void)
{
        destroy_workqueue(ksnapd);

        dm_unregister_target(&snapshot_target);
        dm_unregister_target(&origin_target);
        dm_unregister_target(&merge_target);

        exit_origin_hash();
        kmem_cache_destroy(pending_cache);
        kmem_cache_destroy(exception_cache);
        kmem_cache_destroy(tracked_chunk_cache);

        dm_exception_store_exit();
}

/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");