diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 1498704..6f75887 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -19,7 +19,6 @@
 #include <linux/vmalloc.h>
 #include <linux/log2.h>
 #include <linux/dm-kcopyd.h>
-#include <linux/workqueue.h>
 
 #include "dm-exception-store.h"
 
@@ -31,21 +30,6 @@ static const char dm_snapshot_merge_target_name[] = "snapshot-merge";
        ((ti)->type->name == dm_snapshot_merge_target_name)
 
 /*
- * The percentage increment we will wake up users at
- */
-#define WAKE_UP_PERCENT 5
-
-/*
- * kcopyd priority of snapshot operations
- */
-#define SNAPSHOT_COPY_PRIORITY 2
-
-/*
- * Reserve 1MB for each snapshot initially (with minimum of 1 page).
- */
-#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)
-
-/*
  * The size of the mempool used to track chunks in use.
  */
 #define MIN_IOS 256
@@ -71,19 +55,19 @@ struct dm_snapshot {
        /* List of snapshots per Origin */
        struct list_head list;
 
-       /* You can't use a snapshot if this is 0 (e.g. if full) */
+       /*
+        * You can't use a snapshot if this is 0 (e.g. if full).
+        * A snapshot-merge target never clears this.
+        */
        int valid;
 
        /* Origin writes don't trigger exceptions until this is set */
        int active;
 
-       /* Whether or not owning mapped_device is suspended */
-       int suspended;
+       atomic_t pending_exceptions_count;
 
        mempool_t *pending_pool;
 
-       atomic_t pending_exceptions_count;
-
        struct dm_exception_table pending;
        struct dm_exception_table complete;
 
@@ -93,20 +77,16 @@ struct dm_snapshot {
         */
        spinlock_t pe_lock;
 
+       /* Chunks with outstanding reads */
+       spinlock_t tracked_chunk_lock;
+       mempool_t *tracked_chunk_pool;
+       struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
+
        /* The on disk metadata handler */
        struct dm_exception_store *store;
 
        struct dm_kcopyd_client *kcopyd_client;
 
-       /* Queue of snapshot writes for ksnapd to flush */
-       struct bio_list queued_bios;
-       struct work_struct queued_bios_work;
-
-       /* Chunks with outstanding reads */
-       mempool_t *tracked_chunk_pool;
-       spinlock_t tracked_chunk_lock;
-       struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
-
        /* Wait for events based on state_bits */
        unsigned long state_bits;
 
@@ -115,6 +95,21 @@ struct dm_snapshot {
        int num_merging_chunks;
 
        /*
+        * The merge operation failed if this flag is set.
+        * Failure modes are handled as follows:
+        * - I/O error reading the header
+        *      => don't load the target; abort.
+        * - Header does not have "valid" flag set
+        *      => use the origin; forget about the snapshot.
+        * - I/O error when reading exceptions
+        *      => don't load the target; abort.
+        *         (We can't use the intermediate origin state.)
+        * - I/O error while merging
+        *      => stop merging; set merge_failed; process I/O normally.
+        */
+       int merge_failed;
+
+       /*
         * Incoming bios that overlap with chunks being merged must wait
         * for them to be committed.
         */
@@ -130,15 +125,18 @@ struct dm_snapshot {
 #define RUNNING_MERGE          0
 #define SHUTDOWN_MERGE         1
 
+struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
+{
+       return s->origin;
+}
+EXPORT_SYMBOL(dm_snap_origin);
+
 struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
 {
        return s->cow;
 }
 EXPORT_SYMBOL(dm_snap_cow);
 
-static struct workqueue_struct *ksnapd;
-static void flush_queued_bios(struct work_struct *work);
-
 static sector_t chunk_to_sector(struct dm_exception_store *store,
                                chunk_t chunk)
 {
@@ -172,6 +170,13 @@ struct dm_snap_pending_exception {
         * kcopyd.
         */
        int started;
+
+       /*
+        * For writing a complete chunk, bypassing the copy.
+        */
+       struct bio *full_bio;
+       bio_end_io_t *full_bio_end_io;
+       void *full_bio_private;
 };
 
 /*
@@ -682,8 +687,6 @@ static int dm_add_exception(void *context, chunk_t old, chunk_t new)
        return 0;
 }
 
-#define min_not_zero(l, r) (((l) == 0) ? (r) : (((r) == 0) ? (l) : min(l, r)))
-
 /*
  * Return a minimum chunk size of all snapshots that have the specified origin.
  * Return zero if the origin has no snapshots.
@@ -879,9 +882,10 @@ static void increment_pending_exceptions_done_count(void)
 
 static void snapshot_merge_next_chunks(struct dm_snapshot *s)
 {
-       int r;
+       int i, linear_chunks;
        chunk_t old_chunk, new_chunk;
        struct dm_io_region src, dest;
+       sector_t io_size;
        uint64_t previous_count;
 
        BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
@@ -896,20 +900,32 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
                goto shut;
        }
 
-       r = s->store->type->prepare_merge(s->store, &old_chunk, &new_chunk);
-       if (r <= 0) {
-               if (r < 0)
+       linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
+                                                     &new_chunk);
+       if (linear_chunks <= 0) {
+               if (linear_chunks < 0) {
                        DMERR("Read error in exception store: "
                              "shutting down merge");
+                       down_write(&s->lock);
+                       s->merge_failed = 1;
+                       up_write(&s->lock);
+               }
                goto shut;
        }
 
-       /* TODO: use larger I/O size once we verify that kcopyd handles it */
+       /* Adjust old_chunk and new_chunk to reflect start of linear region */
+       old_chunk = old_chunk + 1 - linear_chunks;
+       new_chunk = new_chunk + 1 - linear_chunks;
+
+       /*
+        * Use one (potentially large) I/O to copy all 'linear_chunks'
+        * from the exception store to the origin
+        */
+       io_size = linear_chunks * s->store->chunk_size;
 
        dest.bdev = s->origin->bdev;
        dest.sector = chunk_to_sector(s->store, old_chunk);
-       dest.count = min((sector_t)s->store->chunk_size,
-                        get_dev_size(dest.bdev) - dest.sector);
+       dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);
 
        src.bdev = s->cow->bdev;
        src.sector = chunk_to_sector(s->store, new_chunk);
@@ -925,7 +941,7 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
         * significant impact on performance.
         */
        previous_count = read_pending_exceptions_done_count();
-       while (origin_write_extent(s, dest.sector, s->store->chunk_size)) {
+       while (origin_write_extent(s, dest.sector, io_size)) {
                wait_event(_pending_exceptions_done,
                           (read_pending_exceptions_done_count() !=
                            previous_count));
@@ -935,10 +951,12 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
 
        down_write(&s->lock);
        s->first_merging_chunk = old_chunk;
-       s->num_merging_chunks = 1;
+       s->num_merging_chunks = linear_chunks;
        up_write(&s->lock);
 
-       __check_for_conflicting_io(s, old_chunk);
+       /* Wait until writes to all 'linear_chunks' drain */
+       for (i = 0; i < linear_chunks; i++)
+               __check_for_conflicting_io(s, old_chunk + i);
 
        dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
        return;
@@ -977,6 +995,7 @@ static void merge_callback(int read_err, unsigned long write_err, void *context)
 
 shut:
        down_write(&s->lock);
+       s->merge_failed = 1;
        b = __release_queued_bios_after_merge(s);
        up_write(&s->lock);
        error_bios(b);
@@ -1031,24 +1050,28 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                origin_mode = FMODE_WRITE;
        }
 
-       origin_path = argv[0];
-       argv++;
-       argc--;
-
        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s) {
-               ti->error = "Cannot allocate snapshot context private "
-                   "structure";
+               ti->error = "Cannot allocate private snapshot structure";
                r = -ENOMEM;
                goto bad;
        }
 
+       origin_path = argv[0];
+       argv++;
+       argc--;
+
+       r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
+       if (r) {
+               ti->error = "Cannot get origin device";
+               goto bad_origin;
+       }
+
        cow_path = argv[0];
        argv++;
        argc--;
 
-       r = dm_get_device(ti, cow_path, 0, 0,
-                         FMODE_READ | FMODE_WRITE, &s->cow);
+       r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
        if (r) {
                ti->error = "Cannot get COW device";
                goto bad_cow;
@@ -1064,21 +1087,15 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        argv += args_used;
        argc -= args_used;
 
-       r = dm_get_device(ti, origin_path, 0, ti->len, origin_mode, &s->origin);
-       if (r) {
-               ti->error = "Cannot get origin device";
-               goto bad_origin;
-       }
-
        s->ti = ti;
        s->valid = 1;
        s->active = 0;
-       s->suspended = 0;
        atomic_set(&s->pending_exceptions_count, 0);
        init_rwsem(&s->lock);
        INIT_LIST_HEAD(&s->list);
        spin_lock_init(&s->pe_lock);
        s->state_bits = 0;
+       s->merge_failed = 0;
        s->first_merging_chunk = 0;
        s->num_merging_chunks = 0;
        bio_list_init(&s->bios_queued_during_merge);
@@ -1090,8 +1107,9 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                goto bad_hash_tables;
        }
 
-       r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
-       if (r) {
+       s->kcopyd_client = dm_kcopyd_client_create();
+       if (IS_ERR(s->kcopyd_client)) {
+               r = PTR_ERR(s->kcopyd_client);
                ti->error = "Could not create kcopyd client";
                goto bad_kcopyd;
        }
@@ -1115,9 +1133,6 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
        spin_lock_init(&s->tracked_chunk_lock);
 
-       bio_list_init(&s->queued_bios);
-       INIT_WORK(&s->queued_bios_work, flush_queued_bios);
-
        ti->private = s;
        ti->num_flush_requests = num_flush_requests;
 
@@ -1178,15 +1193,15 @@ bad_kcopyd:
        dm_exception_table_exit(&s->complete, exception_cache);
 
 bad_hash_tables:
-       dm_put_device(ti, s->origin);
-
-bad_origin:
        dm_exception_store_destroy(s->store);
 
 bad_store:
        dm_put_device(ti, s->cow);
 
 bad_cow:
+       dm_put_device(ti, s->origin);
+
+bad_origin:
        kfree(s);
 
 bad:
@@ -1241,8 +1256,6 @@ static void snapshot_dtr(struct dm_target *ti)
        struct dm_snapshot *s = ti->private;
        struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
 
-       flush_workqueue(ksnapd);
-
        down_read(&_origins_lock);
        /* Check whether exception handover must be cancelled */
        (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
@@ -1280,12 +1293,12 @@ static void snapshot_dtr(struct dm_target *ti)
 
        mempool_destroy(s->pending_pool);
 
-       dm_put_device(ti, s->origin);
-
        dm_exception_store_destroy(s->store);
 
        dm_put_device(ti, s->cow);
 
+       dm_put_device(ti, s->origin);
+
        kfree(s);
 }
 
@@ -1304,20 +1317,6 @@ static void flush_bios(struct bio *bio)
        }
 }
 
-static void flush_queued_bios(struct work_struct *work)
-{
-       struct dm_snapshot *s =
-               container_of(work, struct dm_snapshot, queued_bios_work);
-       struct bio *queued_bios;
-       unsigned long flags;
-
-       spin_lock_irqsave(&s->pe_lock, flags);
-       queued_bios = bio_list_get(&s->queued_bios);
-       spin_unlock_irqrestore(&s->pe_lock, flags);
-
-       flush_bios(queued_bios);
-}
-
 static int do_origin(struct dm_dev *origin, struct bio *bio);
 
 /*
@@ -1377,6 +1376,7 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success)
        struct dm_snapshot *s = pe->snap;
        struct bio *origin_bios = NULL;
        struct bio *snapshot_bios = NULL;
+       struct bio *full_bio = NULL;
        int error = 0;
 
        if (!success) {
@@ -1412,10 +1412,15 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success)
         */
        dm_insert_exception(&s->complete, e);
 
- out:
+out:
        dm_remove_exception(&pe->e);
        snapshot_bios = bio_list_get(&pe->snapshot_bios);
        origin_bios = bio_list_get(&pe->origin_bios);
+       full_bio = pe->full_bio;
+       if (full_bio) {
+               full_bio->bi_end_io = pe->full_bio_end_io;
+               full_bio->bi_private = pe->full_bio_private;
+       }
        free_pending_exception(pe);
 
        increment_pending_exceptions_done_count();
@@ -1423,10 +1428,15 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success)
        up_write(&s->lock);
 
        /* Submit any pending write bios */
-       if (error)
+       if (error) {
+               if (full_bio)
+                       bio_io_error(full_bio);
                error_bios(snapshot_bios);
-       else
+       } else {
+               if (full_bio)
+                       bio_endio(full_bio, 0);
                flush_bios(snapshot_bios);
+       }
 
        retry_origin_bios(s, origin_bios);
 }
@@ -1477,8 +1487,33 @@ static void start_copy(struct dm_snap_pending_exception *pe)
        dest.count = src.count;
 
        /* Hand over to kcopyd */
-       dm_kcopyd_copy(s->kcopyd_client,
-                   &src, 1, &dest, 0, copy_callback, pe);
+       dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
+}
+
+static void full_bio_end_io(struct bio *bio, int error)
+{
+       void *callback_data = bio->bi_private;
+
+       dm_kcopyd_do_callback(callback_data, 0, error ? 1 : 0);
+}
+
+static void start_full_bio(struct dm_snap_pending_exception *pe,
+                          struct bio *bio)
+{
+       struct dm_snapshot *s = pe->snap;
+       void *callback_data;
+
+       pe->full_bio = bio;
+       pe->full_bio_end_io = bio->bi_end_io;
+       pe->full_bio_private = bio->bi_private;
+
+       callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
+                                                  copy_callback, pe);
+
+       bio->bi_end_io = full_bio_end_io;
+       bio->bi_private = callback_data;
+
+       generic_make_request(bio);
 }
 
 static struct dm_snap_pending_exception *
@@ -1516,6 +1551,7 @@ __find_pending_exception(struct dm_snapshot *s,
        bio_list_init(&pe->origin_bios);
        bio_list_init(&pe->snapshot_bios);
        pe->started = 0;
+       pe->full_bio = NULL;
 
        if (s->store->type->prepare_exception(s->store, &pe->e)) {
                free_pending_exception(pe);
@@ -1547,7 +1583,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
        chunk_t chunk;
        struct dm_snap_pending_exception *pe = NULL;
 
-       if (unlikely(bio_empty_barrier(bio))) {
+       if (bio->bi_rw & REQ_FLUSH) {
                bio->bi_bdev = s->cow->bdev;
                return DM_MAPIO_REMAPPED;
        }
@@ -1609,10 +1645,19 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
                }
 
                remap_exception(s, &pe->e, bio, chunk);
-               bio_list_add(&pe->snapshot_bios, bio);
 
                r = DM_MAPIO_SUBMITTED;
 
+               if (!pe->started &&
+                   bio->bi_size == (s->store->chunk_size << SECTOR_SHIFT)) {
+                       pe->started = 1;
+                       up_write(&s->lock);
+                       start_full_bio(pe, bio);
+                       goto out;
+               }
+
+               bio_list_add(&pe->snapshot_bios, bio);
+
                if (!pe->started) {
                        /* this is protected by snap->lock */
                        pe->started = 1;
@@ -1625,9 +1670,9 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
                map_context->ptr = track_chunk(s, chunk);
        }
 
- out_unlock:
+out_unlock:
        up_write(&s->lock);
- out:
+out:
        return r;
 }
 
@@ -1651,8 +1696,8 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio,
        int r = DM_MAPIO_REMAPPED;
        chunk_t chunk;
 
-       if (unlikely(bio_empty_barrier(bio))) {
-               if (!map_context->flush_request)
+       if (bio->bi_rw & REQ_FLUSH) {
+               if (!map_context->target_request_nr)
                        bio->bi_bdev = s->origin->bdev;
                else
                        bio->bi_bdev = s->cow->bdev;
@@ -1664,11 +1709,9 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio,
 
        down_write(&s->lock);
 
-       /* Full snapshots are not usable */
-       if (!s->valid) {
-               r = -EIO;
-               goto out_unlock;
-       }
+       /* Full merging snapshots are redirected to the origin */
+       if (!s->valid)
+               goto redirect_to_origin;
 
        /* If the block is already remapped - use that */
        e = dm_lookup_exception(&s->complete, chunk);
@@ -1691,6 +1734,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio,
                goto out_unlock;
        }
 
+redirect_to_origin:
        bio->bi_bdev = s->origin->bdev;
 
        if (bio_rw(bio) == WRITE) {
@@ -1723,15 +1767,6 @@ static void snapshot_merge_presuspend(struct dm_target *ti)
        stop_merge(s);
 }
 
-static void snapshot_postsuspend(struct dm_target *ti)
-{
-       struct dm_snapshot *s = ti->private;
-
-       down_write(&s->lock);
-       s->suspended = 1;
-       up_write(&s->lock);
-}
-
 static int snapshot_preresume(struct dm_target *ti)
 {
        int r = 0;
@@ -1746,7 +1781,7 @@ static int snapshot_preresume(struct dm_target *ti)
                        DMERR("Unable to resume snapshot source until "
                              "handover completes.");
                        r = -EINVAL;
-               } else if (!snap_src->suspended) {
+               } else if (!dm_suspended(snap_src->ti)) {
                        DMERR("Unable to perform snapshot handover until "
                              "source is suspended.");
                        r = -EINVAL;
@@ -1779,7 +1814,6 @@ static void snapshot_resume(struct dm_target *ti)
 
        down_write(&s->lock);
        s->active = 1;
-       s->suspended = 0;
        up_write(&s->lock);
 }
 
@@ -1824,6 +1858,8 @@ static int snapshot_status(struct dm_target *ti, status_type_t type,
 
                if (!snap->valid)
                        DMEMIT("Invalid");
+               else if (snap->merge_failed)
+                       DMEMIT("Merge failed");
                else {
                        if (snap->store->type->usage) {
                                sector_t total_sectors, sectors_allocated,
@@ -1864,8 +1900,14 @@ static int snapshot_iterate_devices(struct dm_target *ti,
                                    iterate_devices_callout_fn fn, void *data)
 {
        struct dm_snapshot *snap = ti->private;
+       int r;
 
-       return fn(ti, snap->origin, 0, ti->len, data);
+       r = fn(ti, snap->origin, 0, ti->len, data);
+
+       if (!r)
+               r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data);
+
+       return r;
 }
 
 
@@ -1974,7 +2016,7 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
                        pe_to_start_now = pe;
                }
 
- next_snapshot:
+next_snapshot:
                up_write(&snap->lock);
 
                if (pe_to_start_now) {
@@ -2064,8 +2106,7 @@ static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                return -EINVAL;
        }
 
-       r = dm_get_device(ti, argv[0], 0, ti->len,
-                         dm_table_get_mode(ti->table), &dev);
+       r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev);
        if (r) {
                ti->error = "Cannot get target device";
                return r;
@@ -2089,7 +2130,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio,
        struct dm_dev *dev = ti->private;
        bio->bi_bdev = dev->bdev;
 
-       if (unlikely(bio_empty_barrier(bio)))
+       if (bio->bi_rw & REQ_FLUSH)
                return DM_MAPIO_REMAPPED;
 
        /* Only tell snapshots if this is a write */
@@ -2125,6 +2166,21 @@ static int origin_status(struct dm_target *ti, status_type_t type, char *result,
        return 0;
 }
 
+static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
+                       struct bio_vec *biovec, int max_size)
+{
+       struct dm_dev *dev = ti->private;
+       struct request_queue *q = bdev_get_queue(dev->bdev);
+
+       if (!q->merge_bvec_fn)
+               return max_size;
+
+       bvm->bi_bdev = dev->bdev;
+       bvm->bi_sector = bvm->bi_sector;
+
+       return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
+}
+
 static int origin_iterate_devices(struct dm_target *ti,
                                  iterate_devices_callout_fn fn, void *data)
 {
@@ -2135,25 +2191,25 @@ static int origin_iterate_devices(struct dm_target *ti,
 
 static struct target_type origin_target = {
        .name    = "snapshot-origin",
-       .version = {1, 7, 0},
+       .version = {1, 7, 1},
        .module  = THIS_MODULE,
        .ctr     = origin_ctr,
        .dtr     = origin_dtr,
        .map     = origin_map,
        .resume  = origin_resume,
        .status  = origin_status,
+       .merge   = origin_merge,
        .iterate_devices = origin_iterate_devices,
 };
 
 static struct target_type snapshot_target = {
        .name    = "snapshot",
-       .version = {1, 9, 0},
+       .version = {1, 10, 0},
        .module  = THIS_MODULE,
        .ctr     = snapshot_ctr,
        .dtr     = snapshot_dtr,
        .map     = snapshot_map,
        .end_io  = snapshot_end_io,
-       .postsuspend = snapshot_postsuspend,
        .preresume  = snapshot_preresume,
        .resume  = snapshot_resume,
        .status  = snapshot_status,
@@ -2162,14 +2218,13 @@ static struct target_type snapshot_target = {
 
 static struct target_type merge_target = {
        .name    = dm_snapshot_merge_target_name,
-       .version = {1, 0, 0},
+       .version = {1, 1, 0},
        .module  = THIS_MODULE,
        .ctr     = snapshot_ctr,
        .dtr     = snapshot_dtr,
        .map     = snapshot_merge_map,
        .end_io  = snapshot_end_io,
        .presuspend = snapshot_merge_presuspend,
-       .postsuspend = snapshot_postsuspend,
        .preresume  = snapshot_preresume,
        .resume  = snapshot_merge_resume,
        .status  = snapshot_status,
@@ -2231,17 +2286,8 @@ static int __init dm_snapshot_init(void)
                goto bad_tracked_chunk_cache;
        }
 
-       ksnapd = create_singlethread_workqueue("ksnapd");
-       if (!ksnapd) {
-               DMERR("Failed to create ksnapd workqueue.");
-               r = -ENOMEM;
-               goto bad_pending_pool;
-       }
-
        return 0;
 
-bad_pending_pool:
-       kmem_cache_destroy(tracked_chunk_cache);
 bad_tracked_chunk_cache:
        kmem_cache_destroy(pending_cache);
 bad_pending_cache:
@@ -2262,8 +2308,6 @@ bad_register_snapshot_target:
 
 static void __exit dm_snapshot_exit(void)
 {
-       destroy_workqueue(ksnapd);
-
        dm_unregister_target(&snapshot_target);
        dm_unregister_target(&origin_target);
        dm_unregister_target(&merge_target);