diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 288994e..6f75887 100644
 #include <linux/vmalloc.h>
 #include <linux/log2.h>
 #include <linux/dm-kcopyd.h>
-#include <linux/workqueue.h>
 
 #include "dm-exception-store.h"
 
 #define DM_MSG_PREFIX "snapshots"
 
-/*
- * The percentage increment we will wake up users at
- */
-#define WAKE_UP_PERCENT 5
+static const char dm_snapshot_merge_target_name[] = "snapshot-merge";
 
-/*
- * kcopyd priority of snapshot operations
- */
-#define SNAPSHOT_COPY_PRIORITY 2
-
-/*
- * Reserve 1MB for each snapshot initially (with minimum of 1 page).
- */
-#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)
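+/*
+ * Comparing name pointers (rather than using strcmp) is sufficient here:
+ * the merge target below is registered with this exact string, so the
+ * pointer uniquely identifies the snapshot-merge target type.
+ */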
+#define dm_target_is_snapshot_merge(ti) \
+       ((ti)->type->name == dm_snapshot_merge_target_name)
 
 /*
  * The size of the mempool used to track chunks in use.
@@ -66,19 +55,19 @@ struct dm_snapshot {
        /* List of snapshots per Origin */
        struct list_head list;
 
-       /* You can't use a snapshot if this is 0 (e.g. if full) */
+       /*
+        * You can't use a snapshot if this is 0 (e.g. if full).
+        * A snapshot-merge target never clears this.
+        */
        int valid;
 
        /* Origin writes don't trigger exceptions until this is set */
        int active;
 
-       /* Whether or not owning mapped_device is suspended */
-       int suspended;
+       atomic_t pending_exceptions_count;
 
        mempool_t *pending_pool;
 
-       atomic_t pending_exceptions_count;
-
        struct dm_exception_table pending;
        struct dm_exception_table complete;
 
@@ -88,30 +77,66 @@ struct dm_snapshot {
         */
        spinlock_t pe_lock;
 
+       /* Chunks with outstanding reads */
+       spinlock_t tracked_chunk_lock;
+       mempool_t *tracked_chunk_pool;
+       struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
+
        /* The on disk metadata handler */
        struct dm_exception_store *store;
 
        struct dm_kcopyd_client *kcopyd_client;
 
-       /* Queue of snapshot writes for ksnapd to flush */
-       struct bio_list queued_bios;
-       struct work_struct queued_bios_work;
+       /* Wait for events based on state_bits */
+       unsigned long state_bits;
 
-       /* Chunks with outstanding reads */
-       mempool_t *tracked_chunk_pool;
-       spinlock_t tracked_chunk_lock;
-       struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
+       /* Range of chunks currently being merged. */
+       chunk_t first_merging_chunk;
+       int num_merging_chunks;
+
+       /*
+        * The merge operation failed if this flag is set.
+        * Failure modes are handled as follows:
+        * - I/O error reading the header
+        *      => don't load the target; abort.
+        * - Header does not have "valid" flag set
+        *      => use the origin; forget about the snapshot.
+        * - I/O error when reading exceptions
+        *      => don't load the target; abort.
+        *         (We can't use the intermediate origin state.)
+        * - I/O error while merging
+        *      => stop merging; set merge_failed; process I/O normally.
+        */
+       int merge_failed;
+
+       /*
+        * Incoming bios that overlap with chunks being merged must wait
+        * for them to be committed.
+        */
+       struct bio_list bios_queued_during_merge;
 };
 
+/*
+ * state_bits:
+ *   RUNNING_MERGE  - Merge operation is in progress.
+ *   SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
+ *                    cleared afterwards.
+ */
+#define RUNNING_MERGE          0
+#define SHUTDOWN_MERGE         1
+
+struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
+{
+       return s->origin;
+}
+EXPORT_SYMBOL(dm_snap_origin);
+
 struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
 {
        return s->cow;
 }
 EXPORT_SYMBOL(dm_snap_cow);
 
-static struct workqueue_struct *ksnapd;
-static void flush_queued_bios(struct work_struct *work);
-
 static sector_t chunk_to_sector(struct dm_exception_store *store,
                                chunk_t chunk)
 {
@@ -137,28 +162,6 @@ struct dm_snap_pending_exception {
        struct bio_list origin_bios;
        struct bio_list snapshot_bios;
 
-       /*
-        * Short-term queue of pending exceptions prior to submission.
-        */
-       struct list_head list;
-
-       /*
-        * The primary pending_exception is the one that holds
-        * the ref_count and the list of origin_bios for a
-        * group of pending_exceptions.  It is always last to get freed.
-        * These fields get set up when writing to the origin.
-        */
-       struct dm_snap_pending_exception *primary_pe;
-
-       /*
-        * Number of pending_exceptions processing this chunk.
-        * When this drops to zero we must complete the origin bios.
-        * If incrementing or decrementing this, hold pe->snap->lock for
-        * the sibling concerned and not pe->primary_pe->snap->lock unless
-        * they are the same.
-        */
-       atomic_t ref_count;
-
        /* Pointer back to snapshot context */
        struct dm_snapshot *snap;
 
@@ -167,6 +170,13 @@ struct dm_snap_pending_exception {
         * kcopyd.
         */
        int started;
+
+       /*
+        * For writing a complete chunk, bypassing the copy.
+        */
+       struct bio *full_bio;
+       bio_end_io_t *full_bio_end_io;
+       void *full_bio_private;
 };
 
 /*
@@ -265,6 +275,10 @@ struct origin {
 static struct list_head *_origins;
 static struct rw_semaphore _origins_lock;
 
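+/*
+ * Global counter/waitqueue pair: pending_complete() increments the count
+ * and wakes waiters, letting a merge process sleep until some pending
+ * exception (anywhere in the system) completes, then re-check its state.
+ */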
+static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
+static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock);
+static uint64_t _pending_exceptions_done_count;
+
 static int init_origin_hash(void)
 {
        int i;
@@ -317,8 +331,10 @@ static void __insert_origin(struct origin *o)
  * Returns number of snapshots registered using the supplied cow device, plus:
  * snap_src - a snapshot suitable for use as a source of exception handover
  * snap_dest - a snapshot capable of receiving exception handover.
+ * snap_merge - an existing snapshot-merge target linked to the same origin.
+ *   There can be at most one snapshot-merge target. The parameter is optional.
  *
- * Possible return values and states:
+ * Possible return values and states of snap_src and snap_dest.
  *   0: NULL, NULL  - first new snapshot
  *   1: snap_src, NULL - normal snapshot
  *   2: snap_src, snap_dest  - waiting for handover
@@ -327,7 +343,8 @@ static void __insert_origin(struct origin *o)
  */
 static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
                                        struct dm_snapshot **snap_src,
-                                       struct dm_snapshot **snap_dest)
+                                       struct dm_snapshot **snap_dest,
+                                       struct dm_snapshot **snap_merge)
 {
        struct dm_snapshot *s;
        struct origin *o;
@@ -339,6 +356,8 @@ static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
                goto out;
 
        list_for_each_entry(s, &o->snapshots, list) {
+               if (dm_target_is_snapshot_merge(s->ti) && snap_merge)
+                       *snap_merge = s;
                if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
                        continue;
 
@@ -366,9 +385,11 @@ out:
 static int __validate_exception_handover(struct dm_snapshot *snap)
 {
        struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
+       struct dm_snapshot *snap_merge = NULL;
 
        /* Does snapshot need exceptions handed over to it? */
-       if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest) == 2) ||
+       if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
+                                         &snap_merge) == 2) ||
            snap_dest) {
                snap->ti->error = "Snapshot cow pairing for exception "
                                  "table handover failed";
@@ -382,6 +403,27 @@ static int __validate_exception_handover(struct dm_snapshot *snap)
        if (!snap_src)
                return 0;
 
+       /*
+        * Non-snapshot-merge handover?
+        */
+       if (!dm_target_is_snapshot_merge(snap->ti))
+               return 1;
+
+       /*
+        * Do not allow more than one merging snapshot.
+        */
+       if (snap_merge) {
+               snap->ti->error = "A snapshot is already merging.";
+               return -EINVAL;
+       }
+
+       if (!snap_src->store->type->prepare_merge ||
+           !snap_src->store->type->commit_merge) {
+               snap->ti->error = "Snapshot exception store does not "
+                                 "support snapshot-merge.";
+               return -EINVAL;
+       }
+
        return 1;
 }
 
@@ -645,8 +687,6 @@ static int dm_add_exception(void *context, chunk_t old, chunk_t new)
        return 0;
 }
 
-#define min_not_zero(l, r) (((l) == 0) ? (r) : (((r) == 0) ? (l) : min(l, r)))
-
 /*
  * Return a minimum chunk size of all snapshots that have the specified origin.
  * Return zero if the origin has no snapshots.
@@ -717,6 +757,276 @@ static int init_hash_tables(struct dm_snapshot *s)
        return 0;
 }
 
+static void merge_shutdown(struct dm_snapshot *s)
+{
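+       /*
+        * Pairs with wait_on_bit() in stop_merge(): drop RUNNING_MERGE
+        * with release semantics, then wake any waiter.
+        */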
+       clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
+       smp_mb__after_clear_bit();
+       wake_up_bit(&s->state_bits, RUNNING_MERGE);
+}
+
+static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
+{
+       s->first_merging_chunk = 0;
+       s->num_merging_chunks = 0;
+
+       return bio_list_get(&s->bios_queued_during_merge);
+}
+
+/*
+ * Remove one chunk from the index of completed exceptions.
+ */
+static int __remove_single_exception_chunk(struct dm_snapshot *s,
+                                          chunk_t old_chunk)
+{
+       struct dm_exception *e;
+
+       e = dm_lookup_exception(&s->complete, old_chunk);
+       if (!e) {
+               DMERR("Corruption detected: exception for block %llu is "
+                     "on disk but not in memory",
+                     (unsigned long long)old_chunk);
+               return -EINVAL;
+       }
+
+       /*
+        * If this is the only chunk using this exception, remove exception.
+        */
+       if (!dm_consecutive_chunk_count(e)) {
+               dm_remove_exception(e);
+               free_completed_exception(e);
+               return 0;
+       }
+
+       /*
+        * The chunk may be either at the beginning or the end of a
+        * group of consecutive chunks - never in the middle.  We are
+        * removing chunks in the opposite order to that in which they
+        * were added, so this should always be true.
+        * Decrement the consecutive chunk counter and adjust the
+        * starting point if necessary.
+        */
+       if (old_chunk == e->old_chunk) {
+               e->old_chunk++;
+               e->new_chunk++;
+       } else if (old_chunk != e->old_chunk +
+                  dm_consecutive_chunk_count(e)) {
+               DMERR("Attempt to merge block %llu from the "
+                     "middle of a chunk range [%llu - %llu]",
+                     (unsigned long long)old_chunk,
+                     (unsigned long long)e->old_chunk,
+                     (unsigned long long)
+                     e->old_chunk + dm_consecutive_chunk_count(e));
+               return -EINVAL;
+       }
+
+       dm_consecutive_chunk_count_dec(e);
+
+       return 0;
+}
+
+static void flush_bios(struct bio *bio);
+
+static int remove_single_exception_chunk(struct dm_snapshot *s)
+{
+       struct bio *b = NULL;
+       int r;
+       chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;
+
+       down_write(&s->lock);
+
+       /*
+        * Process chunks (and associated exceptions) in reverse order
+        * so that dm_consecutive_chunk_count_dec() accounting works.
+        */
+       do {
+               r = __remove_single_exception_chunk(s, old_chunk);
+               if (r)
+                       goto out;
+       } while (old_chunk-- > s->first_merging_chunk);
+
+       b = __release_queued_bios_after_merge(s);
+
+out:
+       up_write(&s->lock);
+       if (b)
+               flush_bios(b);
+
+       return r;
+}
+
+static int origin_write_extent(struct dm_snapshot *merging_snap,
+                              sector_t sector, unsigned size);
+
+static void merge_callback(int read_err, unsigned long write_err,
+                          void *context);
+
+static uint64_t read_pending_exceptions_done_count(void)
+{
+       uint64_t pending_exceptions_done;
+
+       spin_lock(&_pending_exceptions_done_spinlock);
+       pending_exceptions_done = _pending_exceptions_done_count;
+       spin_unlock(&_pending_exceptions_done_spinlock);
+
+       return pending_exceptions_done;
+}
+
+static void increment_pending_exceptions_done_count(void)
+{
+       spin_lock(&_pending_exceptions_done_spinlock);
+       _pending_exceptions_done_count++;
+       spin_unlock(&_pending_exceptions_done_spinlock);
+
+       wake_up_all(&_pending_exceptions_done);
+}
+
+static void snapshot_merge_next_chunks(struct dm_snapshot *s)
+{
+       int i, linear_chunks;
+       chunk_t old_chunk, new_chunk;
+       struct dm_io_region src, dest;
+       sector_t io_size;
+       uint64_t previous_count;
+
+       BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
+       if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
+               goto shut;
+
+       /*
+        * valid flag never changes during merge, so no lock required.
+        */
+       if (!s->valid) {
+               DMERR("Snapshot is invalid: can't merge");
+               goto shut;
+       }
+
+       linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
+                                                     &new_chunk);
+       if (linear_chunks <= 0) {
+               if (linear_chunks < 0) {
+                       DMERR("Read error in exception store: "
+                             "shutting down merge");
+                       down_write(&s->lock);
+                       s->merge_failed = 1;
+                       up_write(&s->lock);
+               }
+               goto shut;
+       }
+
+       /* Adjust old_chunk and new_chunk to reflect start of linear region */
+       old_chunk = old_chunk + 1 - linear_chunks;
+       new_chunk = new_chunk + 1 - linear_chunks;
+
+       /*
+        * Use one (potentially large) I/O to copy all 'linear_chunks'
+        * from the exception store to the origin
+        */
+       io_size = linear_chunks * s->store->chunk_size;
+
+       dest.bdev = s->origin->bdev;
+       dest.sector = chunk_to_sector(s->store, old_chunk);
+       dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);
+
+       src.bdev = s->cow->bdev;
+       src.sector = chunk_to_sector(s->store, new_chunk);
+       src.count = dest.count;
+
+       /*
+        * Reallocate any exceptions needed in other snapshots then
+        * wait for the pending exceptions to complete.
+        * Each time any pending exception (globally on the system)
+        * completes we are woken and repeat the process to find out
+        * if we can proceed.  While this may not seem a particularly
+        * efficient algorithm, it is not expected to have any
+        * significant impact on performance.
+        */
+       previous_count = read_pending_exceptions_done_count();
+       while (origin_write_extent(s, dest.sector, io_size)) {
+               wait_event(_pending_exceptions_done,
+                          (read_pending_exceptions_done_count() !=
+                           previous_count));
+               /* Retry after the wait, until all exceptions are done. */
+               previous_count = read_pending_exceptions_done_count();
+       }
+
+       down_write(&s->lock);
+       s->first_merging_chunk = old_chunk;
+       s->num_merging_chunks = linear_chunks;
+       up_write(&s->lock);
+
+       /* Wait until writes to all 'linear_chunks' drain */
+       for (i = 0; i < linear_chunks; i++)
+               __check_for_conflicting_io(s, old_chunk + i);
+
+       dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
+       return;
+
+shut:
+       merge_shutdown(s);
+}
+
+static void error_bios(struct bio *bio);
+
+static void merge_callback(int read_err, unsigned long write_err, void *context)
+{
+       struct dm_snapshot *s = context;
+       struct bio *b = NULL;
+
+       if (read_err || write_err) {
+               if (read_err)
+                       DMERR("Read error: shutting down merge.");
+               else
+                       DMERR("Write error: shutting down merge.");
+               goto shut;
+       }
+
+       if (s->store->type->commit_merge(s->store,
+                                        s->num_merging_chunks) < 0) {
+               DMERR("Write error in exception store: shutting down merge");
+               goto shut;
+       }
+
+       if (remove_single_exception_chunk(s) < 0)
+               goto shut;
+
+       snapshot_merge_next_chunks(s);
+
+       return;
+
+shut:
+       down_write(&s->lock);
+       s->merge_failed = 1;
+       b = __release_queued_bios_after_merge(s);
+       up_write(&s->lock);
+       error_bios(b);
+
+       merge_shutdown(s);
+}
+
+static void start_merge(struct dm_snapshot *s)
+{
+       if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
+               snapshot_merge_next_chunks(s);
+}
+
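+/*
+ * Action function for wait_on_bit(): in this kernel's API the caller
+ * supplies the routine that actually sleeps.
+ */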
+static int wait_schedule(void *ptr)
+{
+       schedule();
+
+       return 0;
+}
+
+/*
+ * Stop the merging process and wait until it finishes.
+ */
+static void stop_merge(struct dm_snapshot *s)
+{
+       set_bit(SHUTDOWN_MERGE, &s->state_bits);
+       wait_on_bit(&s->state_bits, RUNNING_MERGE, wait_schedule,
+                   TASK_UNINTERRUPTIBLE);
+       clear_bit(SHUTDOWN_MERGE, &s->state_bits);
+}
+
 /*
  * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
  */
@@ -726,7 +1036,8 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        int i;
        int r = -EINVAL;
        char *origin_path, *cow_path;
-       unsigned args_used;
+       unsigned args_used, num_flush_requests = 1;
+       fmode_t origin_mode = FMODE_READ;
 
        if (argc != 4) {
                ti->error = "requires exactly 4 arguments";
@@ -734,24 +1045,33 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                goto bad;
        }
 
-       origin_path = argv[0];
-       argv++;
-       argc--;
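+       /*
+        * A merging snapshot writes back to its origin, so the origin
+        * must be opened for write; it also flushes two devices (origin
+        * and COW), hence the second flush request.
+        */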
+       if (dm_target_is_snapshot_merge(ti)) {
+               num_flush_requests = 2;
+               origin_mode = FMODE_WRITE;
+       }
 
        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s) {
-               ti->error = "Cannot allocate snapshot context private "
-                   "structure";
+               ti->error = "Cannot allocate private snapshot structure";
                r = -ENOMEM;
                goto bad;
        }
 
+       origin_path = argv[0];
+       argv++;
+       argc--;
+
+       r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
+       if (r) {
+               ti->error = "Cannot get origin device";
+               goto bad_origin;
+       }
+
        cow_path = argv[0];
        argv++;
        argc--;
 
-       r = dm_get_device(ti, cow_path, 0, 0,
-                         FMODE_READ | FMODE_WRITE, &s->cow);
+       r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
        if (r) {
                ti->error = "Cannot get COW device";
                goto bad_cow;
@@ -767,20 +1087,18 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        argv += args_used;
        argc -= args_used;
 
-       r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin);
-       if (r) {
-               ti->error = "Cannot get origin device";
-               goto bad_origin;
-       }
-
        s->ti = ti;
        s->valid = 1;
        s->active = 0;
-       s->suspended = 0;
        atomic_set(&s->pending_exceptions_count, 0);
        init_rwsem(&s->lock);
        INIT_LIST_HEAD(&s->list);
        spin_lock_init(&s->pe_lock);
+       s->state_bits = 0;
+       s->merge_failed = 0;
+       s->first_merging_chunk = 0;
+       s->num_merging_chunks = 0;
+       bio_list_init(&s->bios_queued_during_merge);
 
        /* Allocate hash table for COW data */
        if (init_hash_tables(s)) {
@@ -789,8 +1107,9 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                goto bad_hash_tables;
        }
 
-       r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
-       if (r) {
+       s->kcopyd_client = dm_kcopyd_client_create();
+       if (IS_ERR(s->kcopyd_client)) {
+               r = PTR_ERR(s->kcopyd_client);
                ti->error = "Could not create kcopyd client";
                goto bad_kcopyd;
        }
@@ -814,11 +1133,8 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
        spin_lock_init(&s->tracked_chunk_lock);
 
-       bio_list_init(&s->queued_bios);
-       INIT_WORK(&s->queued_bios_work, flush_queued_bios);
-
        ti->private = s;
-       ti->num_flush_requests = 1;
+       ti->num_flush_requests = num_flush_requests;
 
        /* Add snapshot to the list of snapshots for this origin */
        /* Exceptions aren't triggered till snapshot_resume() is called */
@@ -877,15 +1193,15 @@ bad_kcopyd:
        dm_exception_table_exit(&s->complete, exception_cache);
 
 bad_hash_tables:
-       dm_put_device(ti, s->origin);
-
-bad_origin:
        dm_exception_store_destroy(s->store);
 
 bad_store:
        dm_put_device(ti, s->cow);
 
 bad_cow:
+       dm_put_device(ti, s->origin);
+
+bad_origin:
        kfree(s);
 
 bad:
@@ -940,11 +1256,9 @@ static void snapshot_dtr(struct dm_target *ti)
        struct dm_snapshot *s = ti->private;
        struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
 
-       flush_workqueue(ksnapd);
-
        down_read(&_origins_lock);
        /* Check whether exception handover must be cancelled */
-       (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest);
+       (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
        if (snap_src && snap_dest && (s == snap_src)) {
                down_write(&snap_dest->lock);
                snap_dest->valid = 0;
@@ -953,6 +1267,9 @@ static void snapshot_dtr(struct dm_target *ti)
        }
        up_read(&_origins_lock);
 
+       if (dm_target_is_snapshot_merge(ti))
+               stop_merge(s);
+
        /* Prevent further origin writes from using this snapshot. */
        /* After this returns there can be no new kcopyd jobs. */
        unregister_snapshot(s);
@@ -976,12 +1293,12 @@ static void snapshot_dtr(struct dm_target *ti)
 
        mempool_destroy(s->pending_pool);
 
-       dm_put_device(ti, s->origin);
-
        dm_exception_store_destroy(s->store);
 
        dm_put_device(ti, s->cow);
 
+       dm_put_device(ti, s->origin);
+
        kfree(s);
 }
 
@@ -1000,18 +1317,24 @@ static void flush_bios(struct bio *bio)
        }
 }
 
-static void flush_queued_bios(struct work_struct *work)
-{
-       struct dm_snapshot *s =
-               container_of(work, struct dm_snapshot, queued_bios_work);
-       struct bio *queued_bios;
-       unsigned long flags;
+static int do_origin(struct dm_dev *origin, struct bio *bio);
 
-       spin_lock_irqsave(&s->pe_lock, flags);
-       queued_bios = bio_list_get(&s->queued_bios);
-       spin_unlock_irqrestore(&s->pe_lock, flags);
+/*
+ * Retry a list of origin bios: remap each one through do_origin() and
+ * resubmit any that come back DM_MAPIO_REMAPPED.
+ */
+static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
+{
+       struct bio *n;
+       int r;
 
-       flush_bios(queued_bios);
+       while (bio) {
+               n = bio->bi_next;
+               bio->bi_next = NULL;
+               r = do_origin(s->origin, bio);
+               if (r == DM_MAPIO_REMAPPED)
+                       generic_make_request(bio);
+               bio = n;
+       }
 }
 
 /*
@@ -1047,45 +1370,13 @@ static void __invalidate_snapshot(struct dm_snapshot *s, int err)
        dm_table_event(s->ti->table);
 }
 
-static void get_pending_exception(struct dm_snap_pending_exception *pe)
-{
-       atomic_inc(&pe->ref_count);
-}
-
-static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
-{
-       struct dm_snap_pending_exception *primary_pe;
-       struct bio *origin_bios = NULL;
-
-       primary_pe = pe->primary_pe;
-
-       /*
-        * If this pe is involved in a write to the origin and
-        * it is the last sibling to complete then release
-        * the bios for the original write to the origin.
-        */
-       if (primary_pe &&
-           atomic_dec_and_test(&primary_pe->ref_count)) {
-               origin_bios = bio_list_get(&primary_pe->origin_bios);
-               free_pending_exception(primary_pe);
-       }
-
-       /*
-        * Free the pe if it's not linked to an origin write or if
-        * it's not itself a primary pe.
-        */
-       if (!primary_pe || primary_pe != pe)
-               free_pending_exception(pe);
-
-       return origin_bios;
-}
-
 static void pending_complete(struct dm_snap_pending_exception *pe, int success)
 {
        struct dm_exception *e;
        struct dm_snapshot *s = pe->snap;
        struct bio *origin_bios = NULL;
        struct bio *snapshot_bios = NULL;
+       struct bio *full_bio = NULL;
        int error = 0;
 
        if (!success) {
@@ -1121,20 +1412,33 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success)
         */
        dm_insert_exception(&s->complete, e);
 
- out:
+out:
        dm_remove_exception(&pe->e);
        snapshot_bios = bio_list_get(&pe->snapshot_bios);
-       origin_bios = put_pending_exception(pe);
+       origin_bios = bio_list_get(&pe->origin_bios);
+       full_bio = pe->full_bio;
+       if (full_bio) {
+               full_bio->bi_end_io = pe->full_bio_end_io;
+               full_bio->bi_private = pe->full_bio_private;
+       }
+       free_pending_exception(pe);
+
+       increment_pending_exceptions_done_count();
 
        up_write(&s->lock);
 
        /* Submit any pending write bios */
-       if (error)
+       if (error) {
+               if (full_bio)
+                       bio_io_error(full_bio);
                error_bios(snapshot_bios);
-       else
+       } else {
+               if (full_bio)
+                       bio_endio(full_bio, 0);
                flush_bios(snapshot_bios);
+       }
 
-       flush_bios(origin_bios);
+       retry_origin_bios(s, origin_bios);
 }
 
 static void commit_callback(void *context, int success)
@@ -1183,8 +1487,33 @@ static void start_copy(struct dm_snap_pending_exception *pe)
        dest.count = src.count;
 
        /* Hand over to kcopyd */
-       dm_kcopyd_copy(s->kcopyd_client,
-                   &src, 1, &dest, 0, copy_callback, pe);
+       dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
+}
+
+static void full_bio_end_io(struct bio *bio, int error)
+{
+       void *callback_data = bio->bi_private;
+
+       dm_kcopyd_do_callback(callback_data, 0, error ? 1 : 0);
+}
+
+static void start_full_bio(struct dm_snap_pending_exception *pe,
+                          struct bio *bio)
+{
+       struct dm_snapshot *s = pe->snap;
+       void *callback_data;
+
+       pe->full_bio = bio;
+       pe->full_bio_end_io = bio->bi_end_io;
+       pe->full_bio_private = bio->bi_private;
+
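+       /*
+        * Route completion through kcopyd: full_bio_end_io() fires
+        * dm_kcopyd_do_callback(), so copy_callback() runs in kcopyd
+        * context just as if a chunk copy had completed.
+        */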
+       callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
+                                                  copy_callback, pe);
+
+       bio->bi_end_io = full_bio_end_io;
+       bio->bi_private = callback_data;
+
+       generic_make_request(bio);
 }
 
 static struct dm_snap_pending_exception *
@@ -1221,16 +1550,14 @@ __find_pending_exception(struct dm_snapshot *s,
        pe->e.old_chunk = chunk;
        bio_list_init(&pe->origin_bios);
        bio_list_init(&pe->snapshot_bios);
-       pe->primary_pe = NULL;
-       atomic_set(&pe->ref_count, 0);
        pe->started = 0;
+       pe->full_bio = NULL;
 
        if (s->store->type->prepare_exception(s->store, &pe->e)) {
                free_pending_exception(pe);
                return NULL;
        }
 
-       get_pending_exception(pe);
        dm_insert_exception(&s->pending, &pe->e);
 
        return pe;
@@ -1256,7 +1583,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
        chunk_t chunk;
        struct dm_snap_pending_exception *pe = NULL;
 
-       if (unlikely(bio_empty_barrier(bio))) {
+       if (bio->bi_rw & REQ_FLUSH) {
                bio->bi_bdev = s->cow->bdev;
                return DM_MAPIO_REMAPPED;
        }
@@ -1318,10 +1645,19 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
                }
 
                remap_exception(s, &pe->e, bio, chunk);
-               bio_list_add(&pe->snapshot_bios, bio);
 
                r = DM_MAPIO_SUBMITTED;
 
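+               /*
+                * A write covering the whole chunk can skip the kcopyd
+                * copy: submit the bio straight to the COW device and
+                * complete the exception from its end_io (see
+                * start_full_bio()).
+                */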
+               if (!pe->started &&
+                   bio->bi_size == (s->store->chunk_size << SECTOR_SHIFT)) {
+                       pe->started = 1;
+                       up_write(&s->lock);
+                       start_full_bio(pe, bio);
+                       goto out;
+               }
+
+               bio_list_add(&pe->snapshot_bios, bio);
+
                if (!pe->started) {
                        /* this is protected by snap->lock */
                        pe->started = 1;
@@ -1334,9 +1670,81 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
                map_context->ptr = track_chunk(s, chunk);
        }
 
- out_unlock:
+out_unlock:
        up_write(&s->lock);
- out:
+out:
+       return r;
+}
+
+/*
+ * A snapshot-merge target behaves like a combination of a snapshot
+ * target and a snapshot-origin target.  It only generates new
+ * exceptions in other snapshots and not in the one that is being
+ * merged.
+ *
+ * For each chunk, if there is an existing exception, it is used to
+ * redirect I/O to the cow device.  Otherwise I/O is sent to the origin,
+ * which in turn might generate exceptions in other snapshots.
+ * If merging is currently taking place on the chunk in question, the
+ * I/O is deferred by adding it to s->bios_queued_during_merge.
+ */
+static int snapshot_merge_map(struct dm_target *ti, struct bio *bio,
+                             union map_info *map_context)
+{
+       struct dm_exception *e;
+       struct dm_snapshot *s = ti->private;
+       int r = DM_MAPIO_REMAPPED;
+       chunk_t chunk;
+
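+       /*
+        * num_flush_requests is 2 for this target, so a flush arrives
+        * once per underlying device: request 0 is sent to the origin,
+        * request 1 to the COW device.
+        */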
+       if (bio->bi_rw & REQ_FLUSH) {
+               if (!map_context->target_request_nr)
+                       bio->bi_bdev = s->origin->bdev;
+               else
+                       bio->bi_bdev = s->cow->bdev;
+               map_context->ptr = NULL;
+               return DM_MAPIO_REMAPPED;
+       }
+
+       chunk = sector_to_chunk(s->store, bio->bi_sector);
+
+       down_write(&s->lock);
+
+       /* Full merging snapshots are redirected to the origin */
+       if (!s->valid)
+               goto redirect_to_origin;
+
+       /* If the block is already remapped - use that */
+       e = dm_lookup_exception(&s->complete, chunk);
+       if (e) {
+               /* Queue writes overlapping with chunks being merged */
+               if (bio_rw(bio) == WRITE &&
+                   chunk >= s->first_merging_chunk &&
+                   chunk < (s->first_merging_chunk +
+                            s->num_merging_chunks)) {
+                       bio->bi_bdev = s->origin->bdev;
+                       bio_list_add(&s->bios_queued_during_merge, bio);
+                       r = DM_MAPIO_SUBMITTED;
+                       goto out_unlock;
+               }
+
+               remap_exception(s, e, bio, chunk);
+
+               if (bio_rw(bio) == WRITE)
+                       map_context->ptr = track_chunk(s, chunk);
+               goto out_unlock;
+       }
+
+redirect_to_origin:
+       bio->bi_bdev = s->origin->bdev;
+
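+       /*
+        * Writes must still pass through do_origin() so that exceptions
+        * are triggered in any other snapshots of this origin.
+        */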
+       if (bio_rw(bio) == WRITE) {
+               up_write(&s->lock);
+               return do_origin(s->origin, bio);
+       }
+
+out_unlock:
+       up_write(&s->lock);
+
        return r;
 }
 
@@ -1352,13 +1760,11 @@ static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
        return 0;
 }
 
-static void snapshot_postsuspend(struct dm_target *ti)
+static void snapshot_merge_presuspend(struct dm_target *ti)
 {
        struct dm_snapshot *s = ti->private;
 
-       down_write(&s->lock);
-       s->suspended = 1;
-       up_write(&s->lock);
+       stop_merge(s);
 }
 
 static int snapshot_preresume(struct dm_target *ti)
@@ -1368,14 +1774,14 @@ static int snapshot_preresume(struct dm_target *ti)
        struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
 
        down_read(&_origins_lock);
-       (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest);
+       (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
        if (snap_src && snap_dest) {
                down_read(&snap_src->lock);
                if (s == snap_src) {
                        DMERR("Unable to resume snapshot source until "
                              "handover completes.");
                        r = -EINVAL;
-               } else if (!snap_src->suspended) {
+               } else if (!dm_suspended(snap_src->ti)) {
                        DMERR("Unable to perform snapshot handover until "
                              "source is suspended.");
                        r = -EINVAL;
@@ -1393,7 +1799,7 @@ static void snapshot_resume(struct dm_target *ti)
        struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
 
        down_read(&_origins_lock);
-       (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest);
+       (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
        if (snap_src && snap_dest) {
                down_write(&snap_src->lock);
                down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
@@ -1408,10 +1814,37 @@ static void snapshot_resume(struct dm_target *ti)
 
        down_write(&s->lock);
        s->active = 1;
-       s->suspended = 0;
        up_write(&s->lock);
 }
 
+static sector_t get_origin_minimum_chunksize(struct block_device *bdev)
+{
+       sector_t min_chunksize;
+
+       down_read(&_origins_lock);
+       min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
+       up_read(&_origins_lock);
+
+       return min_chunksize;
+}
+
+static void snapshot_merge_resume(struct dm_target *ti)
+{
+       struct dm_snapshot *s = ti->private;
+
+       /*
+        * Handover exceptions from existing snapshot.
+        */
+       snapshot_resume(ti);
+
+       /*
+        * snapshot-merge acts as an origin, so set ti->split_io
+        */
+       ti->split_io = get_origin_minimum_chunksize(s->origin->bdev);
+
+       start_merge(s);
+}
+
 static int snapshot_status(struct dm_target *ti, status_type_t type,
                           char *result, unsigned int maxlen)
 {
@@ -1425,6 +1858,8 @@ static int snapshot_status(struct dm_target *ti, status_type_t type,
 
                if (!snap->valid)
                        DMEMIT("Invalid");
+               else if (snap->merge_failed)
+                       DMEMIT("Merge failed");
                else {
                        if (snap->store->type->usage) {
                                sector_t total_sectors, sectors_allocated,
@@ -1465,8 +1900,14 @@ static int snapshot_iterate_devices(struct dm_target *ti,
                                    iterate_devices_callout_fn fn, void *data)
 {
        struct dm_snapshot *snap = ti->private;
+       int r;
+
+       r = fn(ti, snap->origin, 0, ti->len, data);
 
-       return fn(ti, snap->origin, 0, ti->len, data);
+       if (!r)
+               r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data);
+
+       return r;
 }
 
 
@@ -1487,15 +1928,22 @@ static int snapshot_iterate_devices(struct dm_target *ti,
 static int __origin_write(struct list_head *snapshots, sector_t sector,
                          struct bio *bio)
 {
-       int r = DM_MAPIO_REMAPPED, first = 0;
+       int r = DM_MAPIO_REMAPPED;
        struct dm_snapshot *snap;
        struct dm_exception *e;
-       struct dm_snap_pending_exception *pe, *next_pe, *primary_pe = NULL;
+       struct dm_snap_pending_exception *pe;
+       struct dm_snap_pending_exception *pe_to_start_now = NULL;
+       struct dm_snap_pending_exception *pe_to_start_last = NULL;
        chunk_t chunk;
-       LIST_HEAD(pe_queue);
 
        /* Do all the snapshots on this origin */
        list_for_each_entry (snap, snapshots, list) {
+               /*
+                * Don't make new exceptions in a merging snapshot
+                * because it has effectively been deleted
+                */
+               if (dm_target_is_snapshot_merge(snap->ti))
+                       continue;
 
                down_write(&snap->lock);
 
@@ -1517,9 +1965,6 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
                 * Check exception table to see if block
                 * is already remapped in this snapshot
                 * and trigger an exception if not.
-                *
-                * ref_count is initialised to 1 so pending_complete()
-                * won't destroy the primary_pe while we're inside this loop.
                 */
                e = dm_lookup_exception(&snap->complete, chunk);
                if (e)
@@ -1549,60 +1994,43 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
                        }
                }
 
-               if (!primary_pe) {
-                       /*
-                        * Either every pe here has same
-                        * primary_pe or none has one yet.
-                        */
-                       if (pe->primary_pe)
-                               primary_pe = pe->primary_pe;
-                       else {
-                               primary_pe = pe;
-                               first = 1;
-                       }
-
-                       if (bio)
-                               bio_list_add(&primary_pe->origin_bios, bio);
+               r = DM_MAPIO_SUBMITTED;
 
-                       r = DM_MAPIO_SUBMITTED;
-               }
+               /*
+                * If an origin bio was supplied, queue it to wait for the
+                * completion of this exception, and start this one last,
+                * at the end of the function.
+                */
+               if (bio) {
+                       bio_list_add(&pe->origin_bios, bio);
+                       bio = NULL;
 
-               if (!pe->primary_pe) {
-                       pe->primary_pe = primary_pe;
-                       get_pending_exception(primary_pe);
+                       if (!pe->started) {
+                               pe->started = 1;
+                               pe_to_start_last = pe;
+                       }
                }
 
                if (!pe->started) {
                        pe->started = 1;
-                       list_add_tail(&pe->list, &pe_queue);
+                       pe_to_start_now = pe;
                }
 
- next_snapshot:
+next_snapshot:
                up_write(&snap->lock);
-       }
-
-       if (!primary_pe)
-               return r;
 
-       /*
-        * If this is the first time we're processing this chunk and
-        * ref_count is now 1 it means all the pending exceptions
-        * got completed while we were in the loop above, so it falls to
-        * us here to remove the primary_pe and submit any origin_bios.
-        */
-
-       if (first && atomic_dec_and_test(&primary_pe->ref_count)) {
-               flush_bios(bio_list_get(&primary_pe->origin_bios));
-               free_pending_exception(primary_pe);
-               /* If we got here, pe_queue is necessarily empty. */
-               return r;
+               if (pe_to_start_now) {
+                       start_copy(pe_to_start_now);
+                       pe_to_start_now = NULL;
+               }
        }
 
        /*
-        * Now that we have a complete pe list we can start the copying.
+        * Submit the exception against which the bio is queued last,
+        * to give the other exceptions a head start.
         */
-       list_for_each_entry_safe(pe, next_pe, &pe_queue, list)
-               start_copy(pe);
+       if (pe_to_start_last)
+               start_copy(pe_to_start_last);
 
        return r;
 }
@@ -1625,6 +2053,41 @@ static int do_origin(struct dm_dev *origin, struct bio *bio)
 }
 
 /*
+ * Trigger exceptions in all non-merging snapshots.
+ *
+ * The chunk size of the merging snapshot may be larger than the chunk
+ * size of some other snapshot so we may need to reallocate multiple
+ * chunks in other snapshots.
+ *
+ * We scan all the overlapping exceptions in the other snapshots.
+ * Returns 1 if anything was reallocated and must be waited for,
+ * otherwise returns 0.
+ *
+ * size must be a multiple of merging_snap's chunk_size.
+ */
+static int origin_write_extent(struct dm_snapshot *merging_snap,
+                              sector_t sector, unsigned size)
+{
+       int must_wait = 0;
+       sector_t n;
+       struct origin *o;
+
+       /*
+        * The origin's __minimum_chunk_size() got stored in split_io
+        * by snapshot_merge_resume().
+        */
+       down_read(&_origins_lock);
+       o = __lookup_origin(merging_snap->origin->bdev);
+       for (n = 0; n < size; n += merging_snap->ti->split_io)
+               if (__origin_write(&o->snapshots, sector + n, NULL) ==
+                   DM_MAPIO_SUBMITTED)
+                       must_wait = 1;
+       up_read(&_origins_lock);
+
+       return must_wait;
+}
+
+/*
  * Origin: maps a linear range of a device, with hooks for snapshotting.
  */
 
@@ -1643,8 +2106,7 @@ static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                return -EINVAL;
        }
 
-       r = dm_get_device(ti, argv[0], 0, ti->len,
-                         dm_table_get_mode(ti->table), &dev);
+       r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev);
        if (r) {
                ti->error = "Cannot get target device";
                return r;
@@ -1668,7 +2130,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio,
        struct dm_dev *dev = ti->private;
        bio->bi_bdev = dev->bdev;
 
-       if (unlikely(bio_empty_barrier(bio)))
+       if (bio->bi_rw & REQ_FLUSH)
                return DM_MAPIO_REMAPPED;
 
        /* Only tell snapshots if this is a write */
@@ -1683,11 +2145,7 @@ static void origin_resume(struct dm_target *ti)
 {
        struct dm_dev *dev = ti->private;
 
-       down_read(&_origins_lock);
-
-       ti->split_io = __minimum_chunk_size(__lookup_origin(dev->bdev));
-
-       up_read(&_origins_lock);
+       ti->split_io = get_origin_minimum_chunksize(dev->bdev);
 }
 
 static int origin_status(struct dm_target *ti, status_type_t type, char *result,
@@ -1708,6 +2166,21 @@ static int origin_status(struct dm_target *ti, status_type_t type, char *result,
        return 0;
 }
 
+static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
+                       struct bio_vec *biovec, int max_size)
+{
+       struct dm_dev *dev = ti->private;
+       struct request_queue *q = bdev_get_queue(dev->bdev);
+
+       if (!q->merge_bvec_fn)
+               return max_size;
+
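+       /*
+        * The origin target maps its device 1:1: bi_sector needs no
+        * translation, so the assignment below is intentionally a no-op.
+        */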
+       bvm->bi_bdev = dev->bdev;
+       bvm->bi_sector = bvm->bi_sector;
+
+       return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
+}
+
 static int origin_iterate_devices(struct dm_target *ti,
                                  iterate_devices_callout_fn fn, void *data)
 {
@@ -1718,31 +2191,46 @@ static int origin_iterate_devices(struct dm_target *ti,
 
 static struct target_type origin_target = {
        .name    = "snapshot-origin",
-       .version = {1, 7, 0},
+       .version = {1, 7, 1},
        .module  = THIS_MODULE,
        .ctr     = origin_ctr,
        .dtr     = origin_dtr,
        .map     = origin_map,
        .resume  = origin_resume,
        .status  = origin_status,
+       .merge   = origin_merge,
        .iterate_devices = origin_iterate_devices,
 };
 
 static struct target_type snapshot_target = {
        .name    = "snapshot",
-       .version = {1, 9, 0},
+       .version = {1, 10, 0},
        .module  = THIS_MODULE,
        .ctr     = snapshot_ctr,
        .dtr     = snapshot_dtr,
        .map     = snapshot_map,
        .end_io  = snapshot_end_io,
-       .postsuspend = snapshot_postsuspend,
        .preresume  = snapshot_preresume,
        .resume  = snapshot_resume,
        .status  = snapshot_status,
        .iterate_devices = snapshot_iterate_devices,
 };
 
+static struct target_type merge_target = {
+       .name    = dm_snapshot_merge_target_name,
+       .version = {1, 1, 0},
+       .module  = THIS_MODULE,
+       .ctr     = snapshot_ctr,
+       .dtr     = snapshot_dtr,
+       .map     = snapshot_merge_map,
+       .end_io  = snapshot_end_io,
+       .presuspend = snapshot_merge_presuspend,
+       .preresume  = snapshot_preresume,
+       .resume  = snapshot_merge_resume,
+       .status  = snapshot_status,
+       .iterate_devices = snapshot_iterate_devices,
+};
+
 static int __init dm_snapshot_init(void)
 {
        int r;
@@ -1754,7 +2242,7 @@ static int __init dm_snapshot_init(void)
        }
 
        r = dm_register_target(&snapshot_target);
-       if (r) {
+       if (r < 0) {
                DMERR("snapshot target register failed %d", r);
                goto bad_register_snapshot_target;
        }
@@ -1762,69 +2250,67 @@ static int __init dm_snapshot_init(void)
        r = dm_register_target(&origin_target);
        if (r < 0) {
                DMERR("Origin target register failed %d", r);
-               goto bad1;
+               goto bad_register_origin_target;
+       }
+
+       r = dm_register_target(&merge_target);
+       if (r < 0) {
+               DMERR("Merge target register failed %d", r);
+               goto bad_register_merge_target;
        }
 
        r = init_origin_hash();
        if (r) {
                DMERR("init_origin_hash failed.");
-               goto bad2;
+               goto bad_origin_hash;
        }
 
        exception_cache = KMEM_CACHE(dm_exception, 0);
        if (!exception_cache) {
                DMERR("Couldn't create exception cache.");
                r = -ENOMEM;
-               goto bad3;
+               goto bad_exception_cache;
        }
 
        pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
        if (!pending_cache) {
                DMERR("Couldn't create pending cache.");
                r = -ENOMEM;
-               goto bad4;
+               goto bad_pending_cache;
        }
 
        tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
        if (!tracked_chunk_cache) {
                DMERR("Couldn't create cache to track chunks in use.");
                r = -ENOMEM;
-               goto bad5;
-       }
-
-       ksnapd = create_singlethread_workqueue("ksnapd");
-       if (!ksnapd) {
-               DMERR("Failed to create ksnapd workqueue.");
-               r = -ENOMEM;
-               goto bad_pending_pool;
+               goto bad_tracked_chunk_cache;
        }
 
        return 0;
 
-bad_pending_pool:
-       kmem_cache_destroy(tracked_chunk_cache);
-bad5:
+bad_tracked_chunk_cache:
        kmem_cache_destroy(pending_cache);
-bad4:
+bad_pending_cache:
        kmem_cache_destroy(exception_cache);
-bad3:
+bad_exception_cache:
        exit_origin_hash();
-bad2:
+bad_origin_hash:
+       dm_unregister_target(&merge_target);
+bad_register_merge_target:
        dm_unregister_target(&origin_target);
-bad1:
+bad_register_origin_target:
        dm_unregister_target(&snapshot_target);
-
 bad_register_snapshot_target:
        dm_exception_store_exit();
+
        return r;
 }
 
 static void __exit dm_snapshot_exit(void)
 {
-       destroy_workqueue(ksnapd);
-
        dm_unregister_target(&snapshot_target);
        dm_unregister_target(&origin_target);
+       dm_unregister_target(&merge_target);
 
        exit_origin_hash();
        kmem_cache_destroy(pending_cache);