video: tegra: nvmap: add handle share count to debug stats
[linux-3.10.git] / drivers / md / dm-cache-target.c
1 /*
2  * Copyright (C) 2012 Red Hat. All rights reserved.
3  *
4  * This file is released under the GPL.
5  */
6
7 #include "dm.h"
8 #include "dm-bio-prison.h"
9 #include "dm-bio-record.h"
10 #include "dm-cache-metadata.h"
11
12 #include <linux/dm-io.h>
13 #include <linux/dm-kcopyd.h>
14 #include <linux/init.h>
15 #include <linux/mempool.h>
16 #include <linux/module.h>
17 #include <linux/slab.h>
18 #include <linux/vmalloc.h>
19
20 #define DM_MSG_PREFIX "cache"
21
22 DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(cache_copy_throttle,
23         "A percentage of time allocated for copying to and/or from cache");
24
25 /*----------------------------------------------------------------*/
26
27 /*
28  * Glossary:
29  *
30  * oblock: index of an origin block
31  * cblock: index of a cache block
32  * promotion: movement of a block from origin to cache
33  * demotion: movement of a block from cache to origin
34  * migration: movement of a block between the origin and cache device,
35  *            either direction
36  */
37
38 /*----------------------------------------------------------------*/
39
40 static size_t bitset_size_in_bytes(unsigned nr_entries)
41 {
42         return sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG);
43 }
44
/*
 * Allocate a zero-filled bitset big enough for nr_entries bits.
 * Returns NULL on allocation failure.
 */
static unsigned long *alloc_bitset(unsigned nr_entries)
{
	return vzalloc(bitset_size_in_bytes(nr_entries));
}
50
/* Reset all nr_entries bits of the bitset back to zero. */
static void clear_bitset(void *bitset, unsigned nr_entries)
{
	memset(bitset, 0, bitset_size_in_bytes(nr_entries));
}
56
/* Release a bitset previously obtained from alloc_bitset(). */
static void free_bitset(unsigned long *bits)
{
	vfree(bits);
}
61
62 /*----------------------------------------------------------------*/
63
64 #define PRISON_CELLS 1024
65 #define MIGRATION_POOL_SIZE 128
66 #define COMMIT_PERIOD HZ
67 #define MIGRATION_COUNT_WINDOW 10
68
69 /*
70  * The block size of the device holding cache data must be >= 32KB
71  */
72 #define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT)
73
74 /*
75  * FIXME: the cache is read/write for the time being.
76  */
/* Whether the on-disk metadata may be updated. */
enum cache_mode {
	CM_WRITE,		/* metadata may be changed */
	CM_READ_ONLY,		/* metadata may not be changed */
};
81
/* Behavioural options selected when the target is constructed. */
struct cache_features {
	enum cache_mode mode;	/* metadata writability, see enum cache_mode */
	bool write_through:1;	/* send writes to origin as well as cache */
};
86
/*
 * Runtime counters.  The read/write hit/miss counts are persisted via
 * the metadata device (see load_stats()/save_stats()); the rest only
 * live in core.
 */
struct cache_stats {
	atomic_t read_hit;
	atomic_t read_miss;
	atomic_t write_hit;
	atomic_t write_miss;
	atomic_t demotion;
	atomic_t promotion;
	atomic_t copies_avoided;	/* migrations short-circuited by avoid_copy() */
	atomic_t cache_cell_clash;
	atomic_t commit_count;
	atomic_t discard_count;		/* bumped in set_discard() */
};
99
/*
 * Per-target state for one cache instance: the three underlying
 * devices, sizing/geometry, in-core dirty and discard bitsets,
 * migration machinery and statistics.
 */
struct cache {
	struct dm_target *ti;
	struct dm_target_callbacks callbacks;

	/*
	 * Metadata is written to this device.
	 */
	struct dm_dev *metadata_dev;

	/*
	 * The slower of the two data devices.  Typically a spindle.
	 */
	struct dm_dev *origin_dev;

	/*
	 * The faster of the two data devices.  Typically an SSD.
	 */
	struct dm_dev *cache_dev;

	/*
	 * Cache features such as write-through.
	 */
	struct cache_features features;

	/*
	 * Size of the origin device in _complete_ blocks and native sectors.
	 */
	dm_oblock_t origin_blocks;
	sector_t origin_sectors;

	/*
	 * Size of the cache device in blocks.
	 */
	dm_cblock_t cache_size;

	/*
	 * Fields for converting from sectors to blocks.
	 * sectors_per_block_shift is >= 0 only when the block size is a
	 * power of two (see block_size_is_power_of_two()).
	 */
	uint32_t sectors_per_block;
	int sectors_per_block_shift;

	struct dm_cache_metadata *cmd;

	/* Protects the deferred bio lists and migration lists below. */
	spinlock_t lock;
	struct bio_list deferred_bios;
	struct bio_list deferred_flush_bios;
	struct bio_list deferred_writethrough_bios;
	struct list_head quiesced_migrations;
	struct list_head completed_migrations;
	struct list_head need_commit_migrations;
	sector_t migration_threshold;
	atomic_t nr_migrations;		/* in-flight migrations */
	wait_queue_head_t migration_wait;

	wait_queue_head_t quiescing_wait;
	atomic_t quiescing_ack;

	/*
	 * cache_size entries, dirty if set
	 */
	dm_cblock_t nr_dirty;
	unsigned long *dirty_bitset;

	/*
	 * origin_blocks entries, discarded if set.
	 */
	uint32_t discard_block_size; /* a power of 2 times sectors per block */
	dm_dblock_t discard_nr_blocks;
	unsigned long *discard_bitset;

	struct dm_kcopyd_client *copier;	/* does the block copies for migrations */
	struct workqueue_struct *wq;
	struct work_struct worker;

	/* Periodic work that triggers commits (COMMIT_PERIOD). */
	struct delayed_work waker;
	unsigned long last_commit_jiffies;

	struct dm_bio_prison *prison;
	struct dm_deferred_set *all_io_ds;

	mempool_t *migration_pool;
	struct dm_cache_migration *next_migration;

	struct dm_cache_policy *policy;
	unsigned policy_nr_args;

	bool need_tick_bio:1;
	bool sized:1;
	bool quiescing:1;
	bool commit_requested:1;
	bool loaded_mappings:1;
	bool loaded_discards:1;

	struct cache_stats stats;

	/*
	 * Rather than reconstructing the table line for the status we just
	 * save it and regurgitate.
	 */
	unsigned nr_ctr_args;
	const char **ctr_args;
};
202
/*
 * Private data carried in each bio.  Its size depends on the cache
 * mode: see PB_DATA_SIZE_WB / PB_DATA_SIZE_WT and
 * get_per_bio_data_size().
 */
struct per_bio_data {
	bool tick:1;
	unsigned req_nr:2;
	struct dm_deferred_entry *all_io_entry;

	/*
	 * writethrough fields.  These MUST remain at the end of this
	 * structure and the 'cache' member must be the first as it
	 * is used to determine the offset of the writethrough fields.
	 */
	struct cache *cache;
	dm_cblock_t cblock;
	bio_end_io_t *saved_bi_end_io;	/* restored in writethrough_endio() */
	struct dm_bio_details bio_details;
};
218
/*
 * State for one block copy between the origin and cache devices,
 * covering promotion, demotion and writeback.
 */
struct dm_cache_migration {
	struct list_head list;		/* entry on one of the cache's migration lists */
	struct cache *cache;

	unsigned long start_jiffies;
	dm_oblock_t old_oblock;		/* origin block being demoted / written back */
	dm_oblock_t new_oblock;		/* origin block being promoted */
	dm_cblock_t cblock;		/* cache block involved */

	bool err:1;			/* copy failed; set by copy_complete() */
	bool writeback:1;
	bool demote:1;
	bool promote:1;			/* may be combined with demote */

	struct dm_bio_prison_cell *old_ocell;
	struct dm_bio_prison_cell *new_ocell;
};
236
237 /*
238  * Processing a bio in the worker thread may require these memory
239  * allocations.  We prealloc to avoid deadlocks (the same worker thread
240  * frees them back to the mempool).
241  */
struct prealloc {
	struct dm_cache_migration *mg;		/* one migration */
	struct dm_bio_prison_cell *cell1;	/* up to two prison cells */
	struct dm_bio_prison_cell *cell2;
};
247
/* Schedule the cache's worker on its workqueue. */
static void wake_worker(struct cache *cache)
{
	queue_work(cache->wq, &cache->worker);
}
252
253 /*----------------------------------------------------------------*/
254
/*
 * Non-blocking cell allocation; may return NULL.
 */
static struct dm_bio_prison_cell *alloc_prison_cell(struct cache *cache)
{
	/* FIXME: change to use a local slab. */
	return dm_bio_prison_alloc_cell(cache->prison, GFP_NOWAIT);
}
260
/* Return a cell obtained from alloc_prison_cell() to the prison. */
static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cell)
{
	dm_bio_prison_free_cell(cache->prison, cell);
}
265
266 static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
267 {
268         if (!p->mg) {
269                 p->mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
270                 if (!p->mg)
271                         return -ENOMEM;
272         }
273
274         if (!p->cell1) {
275                 p->cell1 = alloc_prison_cell(cache);
276                 if (!p->cell1)
277                         return -ENOMEM;
278         }
279
280         if (!p->cell2) {
281                 p->cell2 = alloc_prison_cell(cache);
282                 if (!p->cell2)
283                         return -ENOMEM;
284         }
285
286         return 0;
287 }
288
/*
 * Release whatever a prealloc struct still holds.  Safe to call on a
 * partially-filled struct.
 */
static void prealloc_free_structs(struct cache *cache, struct prealloc *p)
{
	if (p->cell2)
		free_prison_cell(cache, p->cell2);

	if (p->cell1)
		free_prison_cell(cache, p->cell1);

	if (p->mg)
		mempool_free(p->mg, cache->migration_pool);
}
300
301 static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p)
302 {
303         struct dm_cache_migration *mg = p->mg;
304
305         BUG_ON(!mg);
306         p->mg = NULL;
307
308         return mg;
309 }
310
311 /*
312  * You must have a cell within the prealloc struct to return.  If not this
313  * function will BUG() rather than returning NULL.
314  */
315 static struct dm_bio_prison_cell *prealloc_get_cell(struct prealloc *p)
316 {
317         struct dm_bio_prison_cell *r = NULL;
318
319         if (p->cell1) {
320                 r = p->cell1;
321                 p->cell1 = NULL;
322
323         } else if (p->cell2) {
324                 r = p->cell2;
325                 p->cell2 = NULL;
326         } else
327                 BUG();
328
329         return r;
330 }
331
332 /*
333  * You can't have more than two cells in a prealloc struct.  BUG() will be
334  * called if you try and overfill.
335  */
336 static void prealloc_put_cell(struct prealloc *p, struct dm_bio_prison_cell *cell)
337 {
338         if (!p->cell2)
339                 p->cell2 = cell;
340
341         else if (!p->cell1)
342                 p->cell1 = cell;
343
344         else
345                 BUG();
346 }
347
348 /*----------------------------------------------------------------*/
349
/*
 * Fill in a prison cell key identifying a single origin block.  The
 * virtual/dev fields are unused by this target and always zero.
 */
static void build_key(dm_oblock_t oblock, struct dm_cell_key *key)
{
	key->virtual = 0;
	key->dev = 0;
	key->block = from_oblock(oblock);
}
356
357 /*
358  * The caller hands in a preallocated cell, and a free function for it.
359  * The cell will be freed if there's an error, or if it wasn't used because
360  * a cell with that key already exists.
361  */
362 typedef void (*cell_free_fn)(void *context, struct dm_bio_prison_cell *cell);
363
/*
 * Detain a bio against an origin block.  On error, or if a cell with
 * this key already existed, the unused preallocated cell is returned
 * through free_fn.  Returns dm_bio_detain()'s result.
 */
static int bio_detain(struct cache *cache, dm_oblock_t oblock,
		      struct bio *bio, struct dm_bio_prison_cell *cell_prealloc,
		      cell_free_fn free_fn, void *free_context,
		      struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct dm_cell_key key;

	build_key(oblock, &key);
	r = dm_bio_detain(cache->prison, &key, bio, cell_prealloc, cell_result);
	if (r)
		/* the prealloc cell wasn't consumed; give it back */
		free_fn(free_context, cell_prealloc);

	return r;
}
379
/*
 * Like bio_detain() but takes its cell from (and on failure returns it
 * to) a prealloc struct, and locks the block without a holder bio.
 */
static int get_cell(struct cache *cache,
		    dm_oblock_t oblock,
		    struct prealloc *structs,
		    struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct dm_cell_key key;
	struct dm_bio_prison_cell *cell_prealloc;

	cell_prealloc = prealloc_get_cell(structs);

	build_key(oblock, &key);
	r = dm_get_cell(cache->prison, &key, cell_prealloc, cell_result);
	if (r)
		/* cell wasn't consumed; put it back in the prealloc */
		prealloc_put_cell(structs, cell_prealloc);

	return r;
}
398
399 /*----------------------------------------------------------------*/
400
/* Is cache block b marked dirty in the in-core bitset? */
static bool is_dirty(struct cache *cache, dm_cblock_t b)
{
	return test_bit(from_cblock(b), cache->dirty_bitset);
}
405
/*
 * Mark cblock dirty.  test_and_set_bit() is atomic, so the dirty count
 * and the policy are only updated on a genuine clean -> dirty
 * transition.
 */
static void set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
{
	if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) {
		cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) + 1);
		policy_set_dirty(cache->policy, oblock);
	}
}
413
/*
 * Mark cblock clean.  Only acts on a genuine dirty -> clean transition;
 * raises a table event when the last dirty block becomes clean.
 */
static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
{
	if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) {
		policy_clear_dirty(cache->policy, oblock);
		cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) - 1);
		if (!from_cblock(cache->nr_dirty))
			dm_table_event(cache->ti->table);
	}
}
423
424 /*----------------------------------------------------------------*/
425
/*
 * sectors_per_block_shift is negative when the block size isn't a
 * power of two; a non-negative shift means we can use shift/mask
 * arithmetic instead of division.
 */
static bool block_size_is_power_of_two(struct cache *cache)
{
	return cache->sectors_per_block_shift >= 0;
}
430
/*
 * Divide b by n and return the quotient.  do_div() is used because
 * dm_block_t may be 64 bit on a 32 bit host; note do_div() updates b
 * in place and evaluates to the remainder, which we discard.
 */
static dm_block_t block_div(dm_block_t b, uint32_t n)
{
	do_div(b, n);

	return b;
}
437
438 static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)
439 {
440         uint32_t discard_blocks = cache->discard_block_size;
441         dm_block_t b = from_oblock(oblock);
442
443         if (!block_size_is_power_of_two(cache))
444                 discard_blocks = discard_blocks / cache->sectors_per_block;
445         else
446                 discard_blocks >>= cache->sectors_per_block_shift;
447
448         b = block_div(b, discard_blocks);
449
450         return to_dblock(b);
451 }
452
/* Record that discard block b has been discarded. */
static void set_discard(struct cache *cache, dm_dblock_t b)
{
	unsigned long flags;

	atomic_inc(&cache->stats.discard_count);

	spin_lock_irqsave(&cache->lock, flags);
	set_bit(from_dblock(b), cache->discard_bitset);
	spin_unlock_irqrestore(&cache->lock, flags);
}
463
/* Clear discard block b's bit, e.g. once the block has been written. */
static void clear_discard(struct cache *cache, dm_dblock_t b)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	clear_bit(from_dblock(b), cache->discard_bitset);
	spin_unlock_irqrestore(&cache->lock, flags);
}
472
/* Is discard block b currently marked discarded? */
static bool is_discarded(struct cache *cache, dm_dblock_t b)
{
	int r;
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	r = test_bit(from_dblock(b), cache->discard_bitset);
	spin_unlock_irqrestore(&cache->lock, flags);

	return r;
}
484
/*
 * Is the discard block containing origin block b marked discarded?
 */
static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
{
	int r;
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	r = test_bit(from_dblock(oblock_to_dblock(cache, b)),
		     cache->discard_bitset);
	spin_unlock_irqrestore(&cache->lock, flags);

	return r;
}
497
498 /*----------------------------------------------------------------*/
499
/*
 * Seed the in-core hit/miss counters from the values persisted in the
 * metadata device.
 */
static void load_stats(struct cache *cache)
{
	struct dm_cache_statistics stats;

	dm_cache_metadata_get_stats(cache->cmd, &stats);
	atomic_set(&cache->stats.read_hit, stats.read_hits);
	atomic_set(&cache->stats.read_miss, stats.read_misses);
	atomic_set(&cache->stats.write_hit, stats.write_hits);
	atomic_set(&cache->stats.write_miss, stats.write_misses);
}
510
/*
 * Persist the in-core hit/miss counters to the metadata device;
 * counterpart of load_stats().
 */
static void save_stats(struct cache *cache)
{
	struct dm_cache_statistics stats;

	stats.read_hits = atomic_read(&cache->stats.read_hit);
	stats.read_misses = atomic_read(&cache->stats.read_miss);
	stats.write_hits = atomic_read(&cache->stats.write_hit);
	stats.write_misses = atomic_read(&cache->stats.write_miss);

	dm_cache_metadata_set_stats(cache->cmd, &stats);
}
522
523 /*----------------------------------------------------------------
524  * Per bio data
525  *--------------------------------------------------------------*/
526
527 /*
528  * If using writeback, leave out struct per_bio_data's writethrough fields.
529  */
530 #define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache))
531 #define PB_DATA_SIZE_WT (sizeof(struct per_bio_data))
532
533 static size_t get_per_bio_data_size(struct cache *cache)
534 {
535         return cache->features.write_through ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
536 }
537
/*
 * Fetch the per-bio data embedded in a bio.  data_size must match the
 * value used when the bio was mapped (see get_per_bio_data_size()).
 */
static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size)
{
	struct per_bio_data *pb = dm_per_bio_data(bio, data_size);
	BUG_ON(!pb);
	return pb;
}
544
/*
 * Initialise the common per-bio fields (the writethrough-only fields
 * are set up separately in remap_to_origin_then_cache()).
 */
static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size)
{
	struct per_bio_data *pb = get_per_bio_data(bio, data_size);

	pb->tick = false;
	pb->req_nr = dm_bio_get_target_bio_nr(bio);
	pb->all_io_entry = NULL;

	return pb;
}
555
556 /*----------------------------------------------------------------
557  * Remapping
558  *--------------------------------------------------------------*/
/* Point the bio at the origin device; its sector is left unchanged. */
static void remap_to_origin(struct cache *cache, struct bio *bio)
{
	bio->bi_bdev = cache->origin_dev->bdev;
}
563
/*
 * Point the bio at the cache device, translating its sector to the
 * equivalent offset within cache block cblock.
 */
static void remap_to_cache(struct cache *cache, struct bio *bio,
			   dm_cblock_t cblock)
{
	sector_t bi_sector = bio->bi_sector;

	bio->bi_bdev = cache->cache_dev->bdev;
	if (!block_size_is_power_of_two(cache))
		/* sector_div() yields the remainder: offset within the block */
		bio->bi_sector = (from_cblock(cblock) * cache->sectors_per_block) +
				sector_div(bi_sector, cache->sectors_per_block);
	else
		/* power-of-two fast path: shift and mask instead of dividing */
		bio->bi_sector = (from_cblock(cblock) << cache->sectors_per_block_shift) |
				(bi_sector & (cache->sectors_per_block - 1));
}
577
578 static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
579 {
580         unsigned long flags;
581         size_t pb_data_size = get_per_bio_data_size(cache);
582         struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
583
584         spin_lock_irqsave(&cache->lock, flags);
585         if (cache->need_tick_bio &&
586             !(bio->bi_rw & (REQ_FUA | REQ_FLUSH | REQ_DISCARD))) {
587                 pb->tick = true;
588                 cache->need_tick_bio = false;
589         }
590         spin_unlock_irqrestore(&cache->lock, flags);
591 }
592
/*
 * Remap a bio to the origin device; a write also clears the discard
 * flag for the block, since it will now hold real data.
 */
static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
				  dm_oblock_t oblock)
{
	check_if_tick_bio_needed(cache, bio);
	remap_to_origin(cache, bio);
	if (bio_data_dir(bio) == WRITE)
		clear_discard(cache, oblock_to_dblock(cache, oblock));
}
601
/*
 * Remap a bio to the cache device; a write dirties the cache block and
 * clears the block's discard flag.
 */
static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
				 dm_oblock_t oblock, dm_cblock_t cblock)
{
	remap_to_cache(cache, bio, cblock);
	if (bio_data_dir(bio) == WRITE) {
		set_dirty(cache, oblock, cblock);
		clear_discard(cache, oblock_to_dblock(cache, oblock));
	}
}
611
612 static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
613 {
614         sector_t block_nr = bio->bi_sector;
615
616         if (!block_size_is_power_of_two(cache))
617                 (void) sector_div(block_nr, cache->sectors_per_block);
618         else
619                 block_nr >>= cache->sectors_per_block_shift;
620
621         return to_oblock(block_nr);
622 }
623
/* Does completing this bio require a metadata commit first? */
static int bio_triggers_commit(struct cache *cache, struct bio *bio)
{
	return bio->bi_rw & (REQ_FLUSH | REQ_FUA);
}
628
629 static void issue(struct cache *cache, struct bio *bio)
630 {
631         unsigned long flags;
632
633         if (!bio_triggers_commit(cache, bio)) {
634                 generic_make_request(bio);
635                 return;
636         }
637
638         /*
639          * Batch together any bios that trigger commits and then issue a
640          * single commit for them in do_worker().
641          */
642         spin_lock_irqsave(&cache->lock, flags);
643         cache->commit_requested = true;
644         bio_list_add(&cache->deferred_flush_bios, bio);
645         spin_unlock_irqrestore(&cache->lock, flags);
646 }
647
/*
 * Queue a writethrough bio for the worker thread and wake it.  Used by
 * writethrough_endio(), which runs in interrupt context and so cannot
 * issue the bio itself.
 */
static void defer_writethrough_bio(struct cache *cache, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	bio_list_add(&cache->deferred_writethrough_bios, bio);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}
658
/*
 * Completion of the origin-side write of a writethrough bio.  Restores
 * the bio's original end_io, then on success re-remaps the bio at the
 * cache device and hands it to the worker for reissue; on error the
 * bio is completed immediately.
 */
static void writethrough_endio(struct bio *bio, int err)
{
	struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
	bio->bi_end_io = pb->saved_bi_end_io;

	if (err) {
		bio_endio(bio, err);
		return;
	}

	/* Rewind the bio fields consumed by the origin write. */
	dm_bio_restore(&pb->bio_details, bio);
	remap_to_cache(pb->cache, bio, pb->cblock);

	/*
	 * We can't issue this bio directly, since we're in interrupt
	 * context.  So it gets put on a bio list for processing by the
	 * worker thread.
	 */
	defer_writethrough_bio(pb->cache, bio);
}
679
680 /*
681  * When running in writethrough mode we need to send writes to clean blocks
682  * to both the cache and origin devices.  In future we'd like to clone the
683  * bio and send them in parallel, but for now we're doing them in
684  * series as this is easier.
685  */
/*
 * When running in writethrough mode we need to send writes to clean blocks
 * to both the cache and origin devices.  In future we'd like to clone the
 * bio and send them in parallel, but for now we're doing them in
 * series as this is easier.
 *
 * The origin write goes first; writethrough_endio() then redirects the
 * bio to the cache using the state stashed here.
 */
static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
				       dm_oblock_t oblock, dm_cblock_t cblock)
{
	struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);

	pb->cache = cache;
	pb->cblock = cblock;
	pb->saved_bi_end_io = bio->bi_end_io;
	dm_bio_record(&pb->bio_details, bio);
	bio->bi_end_io = writethrough_endio;

	remap_to_origin_clear_discard(pb->cache, bio, oblock);
}
699
700 /*----------------------------------------------------------------
701  * Migration processing
702  *
703  * Migration covers moving data from the origin device to the cache, or
704  * vice versa.
705  *--------------------------------------------------------------*/
/* Return a migration to its cache's mempool. */
static void free_migration(struct dm_cache_migration *mg)
{
	mempool_free(mg, mg->cache->migration_pool);
}
710
/* Account one more in-flight migration. */
static void inc_nr_migrations(struct cache *cache)
{
	atomic_inc(&cache->nr_migrations);
}
715
/*
 * Account one migration finished and wake anyone waiting on the
 * migration count.
 */
static void dec_nr_migrations(struct cache *cache)
{
	atomic_dec(&cache->nr_migrations);

	/*
	 * Wake the worker in case we're suspending the target.
	 */
	wake_up(&cache->migration_wait);
}
725
726 static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
727                          bool holder)
728 {
729         (holder ? dm_cell_release : dm_cell_release_no_holder)
730                 (cache->prison, cell, &cache->deferred_bios);
731         free_prison_cell(cache, cell);
732 }
733
/*
 * Locked wrapper around __cell_defer() that also wakes the worker to
 * process the newly deferred bios.
 */
static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
		       bool holder)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	__cell_defer(cache, cell, holder);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}
745
/*
 * Free a migration and drop the in-flight count.  The cache pointer
 * must be read before free_migration() invalidates mg.
 */
static void cleanup_migration(struct dm_cache_migration *mg)
{
	struct cache *cache = mg->cache;
	free_migration(mg);
	dec_nr_migrations(cache);
}
752
/*
 * Roll back policy and prison-cell state after a failed copy so the
 * blocks involved return to a consistent state, then release the
 * migration.
 */
static void migration_failure(struct dm_cache_migration *mg)
{
	struct cache *cache = mg->cache;

	if (mg->writeback) {
		DMWARN_LIMIT("writeback failed; couldn't copy block");
		/* The cache copy is still the only up to date one: re-dirty it. */
		set_dirty(cache, mg->old_oblock, mg->cblock);
		cell_defer(cache, mg->old_ocell, false);

	} else if (mg->demote) {
		DMWARN_LIMIT("demotion failed; couldn't copy block");
		/* Put the mapping back the way the policy had it. */
		policy_force_mapping(cache->policy, mg->new_oblock, mg->old_oblock);

		cell_defer(cache, mg->old_ocell, mg->promote ? 0 : 1);
		if (mg->promote)
			cell_defer(cache, mg->new_ocell, 1);
	} else {
		DMWARN_LIMIT("promotion failed; couldn't copy block");
		policy_remove_mapping(cache->policy, mg->new_oblock);
		cell_defer(cache, mg->new_ocell, 1);
	}

	cleanup_migration(mg);
}
777
/*
 * The copy finished OK: update the on-disk metadata to match, then
 * queue the migration on need_commit_migrations so it completes after
 * the next commit.  Writebacks need no metadata change and finish
 * immediately; metadata failures are rolled back like a copy failure.
 */
static void migration_success_pre_commit(struct dm_cache_migration *mg)
{
	unsigned long flags;
	struct cache *cache = mg->cache;

	if (mg->writeback) {
		cell_defer(cache, mg->old_ocell, false);
		clear_dirty(cache, mg->old_oblock, mg->cblock);
		cleanup_migration(mg);
		return;

	} else if (mg->demote) {
		if (dm_cache_remove_mapping(cache->cmd, mg->cblock)) {
			DMWARN_LIMIT("demotion failed; couldn't update on disk metadata");
			/* Restore the policy's view of the mapping. */
			policy_force_mapping(cache->policy, mg->new_oblock,
					     mg->old_oblock);
			if (mg->promote)
				cell_defer(cache, mg->new_ocell, true);
			cleanup_migration(mg);
			return;
		}
	} else {
		if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) {
			DMWARN_LIMIT("promotion failed; couldn't update on disk metadata");
			policy_remove_mapping(cache->policy, mg->new_oblock);
			cleanup_migration(mg);
			return;
		}
	}

	/* Metadata updated; wait for a commit before finishing up. */
	spin_lock_irqsave(&cache->lock, flags);
	list_add_tail(&mg->list, &cache->need_commit_migrations);
	cache->commit_requested = true;
	spin_unlock_irqrestore(&cache->lock, flags);
}
813
/*
 * Finish a migration whose metadata change has now been committed.  A
 * demote+promote pair re-queues itself (demote flag cleared) to run
 * the promotion half; a plain promotion releases its cell and marks
 * the block clean.
 */
static void migration_success_post_commit(struct dm_cache_migration *mg)
{
	unsigned long flags;
	struct cache *cache = mg->cache;

	if (mg->writeback) {
		/* Writebacks complete in pre_commit and shouldn't get here. */
		DMWARN("writeback unexpectedly triggered commit");
		return;

	} else if (mg->demote) {
		cell_defer(cache, mg->old_ocell, mg->promote ? 0 : 1);

		if (mg->promote) {
			mg->demote = false;

			/* Requeue for the promotion half of the migration. */
			spin_lock_irqsave(&cache->lock, flags);
			list_add_tail(&mg->list, &cache->quiesced_migrations);
			spin_unlock_irqrestore(&cache->lock, flags);

		} else
			cleanup_migration(mg);

	} else {
		cell_defer(cache, mg->new_ocell, true);
		clear_dirty(cache, mg->new_oblock, mg->cblock);
		cleanup_migration(mg);
	}
}
842
/*
 * kcopyd completion callback: record any error on the migration, move
 * it to completed_migrations and wake the worker to process it.
 */
static void copy_complete(int read_err, unsigned long write_err, void *context)
{
	unsigned long flags;
	struct dm_cache_migration *mg = (struct dm_cache_migration *) context;
	struct cache *cache = mg->cache;

	if (read_err || write_err)
		mg->err = true;

	spin_lock_irqsave(&cache->lock, flags);
	list_add_tail(&mg->list, &cache->completed_migrations);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}
858
/*
 * Kick off the kcopyd copy for a migration: cache -> origin for
 * writeback/demotion, origin -> cache for promotion.  copy_complete()
 * runs when the copy finishes.
 */
static void issue_copy_real(struct dm_cache_migration *mg)
{
	int r;
	struct dm_io_region o_region, c_region;
	struct cache *cache = mg->cache;
	sector_t cblock = from_cblock(mg->cblock);

	o_region.bdev = cache->origin_dev->bdev;
	o_region.count = cache->sectors_per_block;

	c_region.bdev = cache->cache_dev->bdev;
	c_region.sector = cblock * cache->sectors_per_block;
	c_region.count = cache->sectors_per_block;

	if (mg->writeback || mg->demote) {
		/* demote */
		o_region.sector = from_oblock(mg->old_oblock) * cache->sectors_per_block;
		r = dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, mg);
	} else {
		/* promote */
		o_region.sector = from_oblock(mg->new_oblock) * cache->sectors_per_block;
		r = dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, mg);
	}

	if (r < 0)
		/* copy_complete() will never run; fail the migration now */
		migration_failure(mg);
}
886
/*
 * Skip the data copy (the block's contents don't need moving) and go
 * straight to the metadata-update stage.
 */
static void avoid_copy(struct dm_cache_migration *mg)
{
	atomic_inc(&mg->cache->stats.copies_avoided);
	migration_success_pre_commit(mg);
}
892
893 static void issue_copy(struct dm_cache_migration *mg)
894 {
895         bool avoid;
896         struct cache *cache = mg->cache;
897
898         if (mg->writeback || mg->demote)
899                 avoid = !is_dirty(cache, mg->cblock) ||
900                         is_discarded_oblock(cache, mg->old_oblock);
901         else
902                 avoid = is_discarded_oblock(cache, mg->new_oblock);
903
904         avoid ? avoid_copy(mg) : issue_copy_real(mg);
905 }
906
907 static void complete_migration(struct dm_cache_migration *mg)
908 {
909         if (mg->err)
910                 migration_failure(mg);
911         else
912                 migration_success_pre_commit(mg);
913 }
914
/*
 * Atomically steal every migration on 'head' and run 'fn' on each
 * outside the lock.  fn may free the migration, hence the _safe
 * iterator.
 */
static void process_migrations(struct cache *cache, struct list_head *head,
			       void (*fn)(struct dm_cache_migration *))
{
	unsigned long flags;
	struct list_head list;
	struct dm_cache_migration *mg, *tmp;

	INIT_LIST_HEAD(&list);
	spin_lock_irqsave(&cache->lock, flags);
	list_splice_init(head, &list);
	spin_unlock_irqrestore(&cache->lock, flags);

	list_for_each_entry_safe(mg, tmp, &list, list)
		fn(mg);
}
930
/* Add mg to the quiesced list.  Caller holds cache->lock. */
static void __queue_quiesced_migration(struct dm_cache_migration *mg)
{
	list_add_tail(&mg->list, &mg->cache->quiesced_migrations);
}
935
/*
 * Locked wrapper around __queue_quiesced_migration() that wakes the
 * worker to pick the migration up.
 */
static void queue_quiesced_migration(struct dm_cache_migration *mg)
{
	unsigned long flags;
	struct cache *cache = mg->cache;

	spin_lock_irqsave(&cache->lock, flags);
	__queue_quiesced_migration(mg);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}
947
/*
 * Queue a whole list of newly quiesced migrations under one lock
 * acquisition, then wake the worker once.
 */
static void queue_quiesced_migrations(struct cache *cache, struct list_head *work)
{
	unsigned long flags;
	struct dm_cache_migration *mg, *tmp;

	spin_lock_irqsave(&cache->lock, flags);
	list_for_each_entry_safe(mg, tmp, work, list)
		__queue_quiesced_migration(mg);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}
960
961 static void check_for_quiesced_migrations(struct cache *cache,
962                                           struct per_bio_data *pb)
963 {
964         struct list_head work;
965
966         if (!pb->all_io_entry)
967                 return;
968
969         INIT_LIST_HEAD(&work);
970         if (pb->all_io_entry)
971                 dm_deferred_entry_dec(pb->all_io_entry, &work);
972
973         if (!list_empty(&work))
974                 queue_quiesced_migrations(cache, &work);
975 }
976
977 static void quiesce_migration(struct dm_cache_migration *mg)
978 {
979         if (!dm_deferred_set_add_work(mg->cache->all_io_ds, &mg->list))
980                 queue_quiesced_migration(mg);
981 }
982
983 static void promote(struct cache *cache, struct prealloc *structs,
984                     dm_oblock_t oblock, dm_cblock_t cblock,
985                     struct dm_bio_prison_cell *cell)
986 {
987         struct dm_cache_migration *mg = prealloc_get_migration(structs);
988
989         mg->err = false;
990         mg->writeback = false;
991         mg->demote = false;
992         mg->promote = true;
993         mg->cache = cache;
994         mg->new_oblock = oblock;
995         mg->cblock = cblock;
996         mg->old_ocell = NULL;
997         mg->new_ocell = cell;
998         mg->start_jiffies = jiffies;
999
1000         inc_nr_migrations(cache);
1001         quiesce_migration(mg);
1002 }
1003
1004 static void writeback(struct cache *cache, struct prealloc *structs,
1005                       dm_oblock_t oblock, dm_cblock_t cblock,
1006                       struct dm_bio_prison_cell *cell)
1007 {
1008         struct dm_cache_migration *mg = prealloc_get_migration(structs);
1009
1010         mg->err = false;
1011         mg->writeback = true;
1012         mg->demote = false;
1013         mg->promote = false;
1014         mg->cache = cache;
1015         mg->old_oblock = oblock;
1016         mg->cblock = cblock;
1017         mg->old_ocell = cell;
1018         mg->new_ocell = NULL;
1019         mg->start_jiffies = jiffies;
1020
1021         inc_nr_migrations(cache);
1022         quiesce_migration(mg);
1023 }
1024
1025 static void demote_then_promote(struct cache *cache, struct prealloc *structs,
1026                                 dm_oblock_t old_oblock, dm_oblock_t new_oblock,
1027                                 dm_cblock_t cblock,
1028                                 struct dm_bio_prison_cell *old_ocell,
1029                                 struct dm_bio_prison_cell *new_ocell)
1030 {
1031         struct dm_cache_migration *mg = prealloc_get_migration(structs);
1032
1033         mg->err = false;
1034         mg->writeback = false;
1035         mg->demote = true;
1036         mg->promote = true;
1037         mg->cache = cache;
1038         mg->old_oblock = old_oblock;
1039         mg->new_oblock = new_oblock;
1040         mg->cblock = cblock;
1041         mg->old_ocell = old_ocell;
1042         mg->new_ocell = new_ocell;
1043         mg->start_jiffies = jiffies;
1044
1045         inc_nr_migrations(cache);
1046         quiesce_migration(mg);
1047 }
1048
1049 /*----------------------------------------------------------------
1050  * bio processing
1051  *--------------------------------------------------------------*/
/*
 * Park a bio on the deferred list and wake the worker to process it.
 */
static void defer_bio(struct cache *cache, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	bio_list_add(&cache->deferred_bios, bio);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}
1062
1063 static void process_flush_bio(struct cache *cache, struct bio *bio)
1064 {
1065         size_t pb_data_size = get_per_bio_data_size(cache);
1066         struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
1067
1068         BUG_ON(bio->bi_size);
1069         if (!pb->req_nr)
1070                 remap_to_origin(cache, bio);
1071         else
1072                 remap_to_cache(cache, bio, 0);
1073
1074         issue(cache, bio);
1075 }
1076
1077 /*
1078  * People generally discard large parts of a device, eg, the whole device
1079  * when formatting.  Splitting these large discards up into cache block
1080  * sized ios and then quiescing (always neccessary for discard) takes too
1081  * long.
1082  *
1083  * We keep it simple, and allow any size of discard to come in, and just
1084  * mark off blocks on the discard bitset.  No passdown occurs!
1085  *
1086  * To implement passdown we need to change the bio_prison such that a cell
1087  * can have a key that spans many blocks.
1088  */
static void process_discard_bio(struct cache *cache, struct bio *bio)
{
	/* First discard block wholly covered by the bio: round start up. */
	dm_block_t start_block = dm_sector_div_up(bio->bi_sector,
						  cache->discard_block_size);
	/* One past the last sector; converted to a block count below. */
	dm_block_t end_block = bio->bi_sector + bio_sectors(bio);
	dm_block_t b;

	/* Round the end down, so only wholly-covered blocks are marked. */
	end_block = block_div(end_block, cache->discard_block_size);

	for (b = start_block; b < end_block; b++)
		set_discard(cache, to_dblock(b));

	/* No passdown to the origin (see comment above); just complete. */
	bio_endio(bio, 0);
}
1103
1104 static bool spare_migration_bandwidth(struct cache *cache)
1105 {
1106         sector_t current_volume = (atomic_read(&cache->nr_migrations) + 1) *
1107                 cache->sectors_per_block;
1108         return current_volume < cache->migration_threshold;
1109 }
1110
1111 static bool is_writethrough_io(struct cache *cache, struct bio *bio,
1112                                dm_cblock_t cblock)
1113 {
1114         return bio_data_dir(bio) == WRITE &&
1115                 cache->features.write_through && !is_dirty(cache, cblock);
1116 }
1117
1118 static void inc_hit_counter(struct cache *cache, struct bio *bio)
1119 {
1120         atomic_inc(bio_data_dir(bio) == READ ?
1121                    &cache->stats.read_hit : &cache->stats.write_hit);
1122 }
1123
1124 static void inc_miss_counter(struct cache *cache, struct bio *bio)
1125 {
1126         atomic_inc(bio_data_dir(bio) == READ ?
1127                    &cache->stats.read_miss : &cache->stats.write_miss);
1128 }
1129
/*
 * Handle one deferred read/write bio: consult the policy for the
 * mapping and either remap-and-issue the bio or kick off a migration
 * (promotion, or demotion followed by promotion).
 */
static void process_bio(struct cache *cache, struct prealloc *structs,
			struct bio *bio)
{
	int r;
	bool release_cell = true;
	dm_oblock_t block = get_bio_block(cache, bio);
	struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell;
	struct policy_result lookup_result;
	size_t pb_data_size = get_per_bio_data_size(cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
	bool discarded_block = is_discarded_oblock(cache, block);
	/* Discarded blocks need no data copy, so always allow migration. */
	bool can_migrate = discarded_block || spare_migration_bandwidth(cache);

	/*
	 * Check to see if that block is currently migrating.
	 */
	cell_prealloc = prealloc_get_cell(structs);
	r = bio_detain(cache, block, bio, cell_prealloc,
		       (cell_free_fn) prealloc_put_cell,
		       structs, &new_ocell);
	/* r > 0: block already locked; the bio waits in the existing cell. */
	if (r > 0)
		return;

	r = policy_map(cache->policy, block, true, can_migrate, discarded_block,
		       bio, &lookup_result);

	if (r == -EWOULDBLOCK)
		/* migration has been denied */
		lookup_result.op = POLICY_MISS;

	switch (lookup_result.op) {
	case POLICY_HIT:
		inc_hit_counter(cache, bio);
		pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);

		if (is_writethrough_io(cache, bio, lookup_result.cblock))
			remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
		else
			remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);

		issue(cache, bio);
		break;

	case POLICY_MISS:
		inc_miss_counter(cache, bio);
		pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
		remap_to_origin_clear_discard(cache, bio, block);
		issue(cache, bio);
		break;

	case POLICY_NEW:
		/* Promotion: the migration takes ownership of new_ocell. */
		atomic_inc(&cache->stats.promotion);
		promote(cache, structs, block, lookup_result.cblock, new_ocell);
		release_cell = false;
		break;

	case POLICY_REPLACE:
		/* Need a second cell to lock the block being evicted. */
		cell_prealloc = prealloc_get_cell(structs);
		r = bio_detain(cache, lookup_result.old_oblock, bio, cell_prealloc,
			       (cell_free_fn) prealloc_put_cell,
			       structs, &old_ocell);
		if (r > 0) {
			/*
			 * We have to be careful to avoid lock inversion of
			 * the cells.  So we back off, and wait for the
			 * old_ocell to become free.
			 */
			policy_force_mapping(cache->policy, block,
					     lookup_result.old_oblock);
			atomic_inc(&cache->stats.cache_cell_clash);
			break;
		}
		atomic_inc(&cache->stats.demotion);
		atomic_inc(&cache->stats.promotion);

		/* The migration takes ownership of both cells. */
		demote_then_promote(cache, structs, lookup_result.old_oblock,
				    block, lookup_result.cblock,
				    old_ocell, new_ocell);
		release_cell = false;
		break;

	default:
		DMERR_LIMIT("%s: erroring bio, unknown policy op: %u", __func__,
			    (unsigned) lookup_result.op);
		bio_io_error(bio);
	}

	if (release_cell)
		cell_defer(cache, new_ocell, false);
}
1220
/*
 * Has more than COMMIT_PERIOD elapsed since the last commit?  The
 * first comparison handles jiffies wrap-around by forcing a commit
 * rather than waiting out an entire wrap.
 */
static int need_commit_due_to_time(struct cache *cache)
{
	return jiffies < cache->last_commit_jiffies ||
	       jiffies > cache->last_commit_jiffies + COMMIT_PERIOD;
}
1226
1227 static int commit_if_needed(struct cache *cache)
1228 {
1229         if (dm_cache_changed_this_transaction(cache->cmd) &&
1230             (cache->commit_requested || need_commit_due_to_time(cache))) {
1231                 atomic_inc(&cache->stats.commit_count);
1232                 cache->last_commit_jiffies = jiffies;
1233                 cache->commit_requested = false;
1234                 return dm_cache_commit(cache->cmd, false);
1235         }
1236
1237         return 0;
1238 }
1239
/*
 * Drain the deferred bio list, dispatching each bio to the flush,
 * discard or normal path.  Stops early (requeueing the remainder) if
 * migration resources cannot be preallocated.
 */
static void process_deferred_bios(struct cache *cache)
{
	unsigned long flags;
	struct bio_list bios;
	struct bio *bio;
	struct prealloc structs;

	memset(&structs, 0, sizeof(structs));
	bio_list_init(&bios);

	/* Take the whole list, then work on it without the lock. */
	spin_lock_irqsave(&cache->lock, flags);
	bio_list_merge(&bios, &cache->deferred_bios);
	bio_list_init(&cache->deferred_bios);
	spin_unlock_irqrestore(&cache->lock, flags);

	while (!bio_list_empty(&bios)) {
		/*
		 * If we've got no free migration structs, and processing
		 * this bio might require one, we pause until there are some
		 * prepared mappings to process.
		 */
		if (prealloc_data_structs(cache, &structs)) {
			/* Push the unprocessed bios back for a later retry. */
			spin_lock_irqsave(&cache->lock, flags);
			bio_list_merge(&cache->deferred_bios, &bios);
			spin_unlock_irqrestore(&cache->lock, flags);
			break;
		}

		bio = bio_list_pop(&bios);

		if (bio->bi_rw & REQ_FLUSH)
			process_flush_bio(cache, bio);
		else if (bio->bi_rw & REQ_DISCARD)
			process_discard_bio(cache, bio);
		else
			process_bio(cache, &structs, bio);
	}

	prealloc_free_structs(cache, &structs);
}
1280
/*
 * Drain the deferred flush list.  @submit_bios selects between
 * submitting the flushes (after a successful commit) and erroring
 * them (after a failed commit).
 */
static void process_deferred_flush_bios(struct cache *cache, bool submit_bios)
{
	unsigned long flags;
	struct bio_list bios;
	struct bio *bio;

	bio_list_init(&bios);

	spin_lock_irqsave(&cache->lock, flags);
	bio_list_merge(&bios, &cache->deferred_flush_bios);
	bio_list_init(&cache->deferred_flush_bios);
	spin_unlock_irqrestore(&cache->lock, flags);

	while ((bio = bio_list_pop(&bios)))
		submit_bios ? generic_make_request(bio) : bio_io_error(bio);
}
1297
/*
 * Drain and submit the deferred writethrough bios.  NOTE(review):
 * these appear to be queued from a context that cannot issue io
 * itself (likely the endio path) — confirm against the enqueue site.
 */
static void process_deferred_writethrough_bios(struct cache *cache)
{
	unsigned long flags;
	struct bio_list bios;
	struct bio *bio;

	bio_list_init(&bios);

	spin_lock_irqsave(&cache->lock, flags);
	bio_list_merge(&bios, &cache->deferred_writethrough_bios);
	bio_list_init(&cache->deferred_writethrough_bios);
	spin_unlock_irqrestore(&cache->lock, flags);

	while ((bio = bio_list_pop(&bios)))
		generic_make_request(bio);
}
1314
/*
 * Ask the policy for dirty blocks and launch writeback migrations for
 * them, bounded by the migration-bandwidth throttle and by the
 * availability of preallocated migration resources.
 */
static void writeback_some_dirty_blocks(struct cache *cache)
{
	int r = 0;
	dm_oblock_t oblock;
	dm_cblock_t cblock;
	struct prealloc structs;
	struct dm_bio_prison_cell *old_ocell;

	memset(&structs, 0, sizeof(structs));

	while (spare_migration_bandwidth(cache)) {
		/* Out of migration resources: try again on a later pass. */
		if (prealloc_data_structs(cache, &structs))
			break;

		/* Non-zero: policy has no (more) dirty blocks to offer. */
		r = policy_writeback_work(cache->policy, &oblock, &cblock);
		if (r)
			break;

		r = get_cell(cache, oblock, &structs, &old_ocell);
		if (r) {
			/* Block is busy; hand it back to the policy as dirty. */
			policy_set_dirty(cache->policy, oblock);
			break;
		}

		writeback(cache, &structs, oblock, cblock, old_ocell);
	}

	prealloc_free_structs(cache, &structs);
}
1344
1345 /*----------------------------------------------------------------
1346  * Main worker loop
1347  *--------------------------------------------------------------*/
/* Read the quiescing flag under the cache lock. */
static bool is_quiescing(struct cache *cache)
{
	int r;
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	r = cache->quiescing;
	spin_unlock_irqrestore(&cache->lock, flags);

	return r;
}
1359
/*
 * Called by the worker at the end of a pass; if a quiesce has been
 * requested, acknowledge it and wake anyone in wait_for_quiescing_ack.
 */
static void ack_quiescing(struct cache *cache)
{
	if (is_quiescing(cache)) {
		atomic_inc(&cache->quiescing_ack);
		wake_up(&cache->quiescing_wait);
	}
}
1367
/* Block until the worker has acknowledged the quiesce request. */
static void wait_for_quiescing_ack(struct cache *cache)
{
	wait_event(cache->quiescing_wait, atomic_read(&cache->quiescing_ack));
}
1372
/* Raise the quiescing flag, then wait for the worker to acknowledge. */
static void start_quiescing(struct cache *cache)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	cache->quiescing = true;
	spin_unlock_irqrestore(&cache->lock, flags);

	wait_for_quiescing_ack(cache);
}
1383
/* Clear the quiescing flag and reset the ack for the next quiesce. */
static void stop_quiescing(struct cache *cache)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	cache->quiescing = false;
	spin_unlock_irqrestore(&cache->lock, flags);

	atomic_set(&cache->quiescing_ack, 0);
}
1394
/* Block until every in-flight migration has completed. */
static void wait_for_migrations(struct cache *cache)
{
	wait_event(cache->migration_wait, !atomic_read(&cache->nr_migrations));
}
1399
/* Cancel the periodic waker and let any queued work run to completion. */
static void stop_worker(struct cache *cache)
{
	cancel_delayed_work(&cache->waker);
	flush_workqueue(cache->wq);
}
1405
1406 static void requeue_deferred_io(struct cache *cache)
1407 {
1408         struct bio *bio;
1409         struct bio_list bios;
1410
1411         bio_list_init(&bios);
1412         bio_list_merge(&bios, &cache->deferred_bios);
1413         bio_list_init(&cache->deferred_bios);
1414
1415         while ((bio = bio_list_pop(&bios)))
1416                 bio_endio(bio, DM_ENDIO_REQUEUE);
1417 }
1418
1419 static int more_work(struct cache *cache)
1420 {
1421         if (is_quiescing(cache))
1422                 return !list_empty(&cache->quiesced_migrations) ||
1423                         !list_empty(&cache->completed_migrations) ||
1424                         !list_empty(&cache->need_commit_migrations);
1425         else
1426                 return !bio_list_empty(&cache->deferred_bios) ||
1427                         !bio_list_empty(&cache->deferred_flush_bios) ||
1428                         !bio_list_empty(&cache->deferred_writethrough_bios) ||
1429                         !list_empty(&cache->quiesced_migrations) ||
1430                         !list_empty(&cache->completed_migrations) ||
1431                         !list_empty(&cache->need_commit_migrations);
1432 }
1433
/*
 * Workqueue function: repeatedly drain deferred bios and advance
 * migrations through their quiesced -> copied -> committed stages
 * until no work remains.  While quiescing, no new bios are started.
 */
static void do_worker(struct work_struct *ws)
{
	struct cache *cache = container_of(ws, struct cache, worker);

	do {
		if (!is_quiescing(cache)) {
			writeback_some_dirty_blocks(cache);
			process_deferred_writethrough_bios(cache);
			process_deferred_bios(cache);
		}

		process_migrations(cache, &cache->quiesced_migrations, issue_copy);
		process_migrations(cache, &cache->completed_migrations, complete_migration);

		if (commit_if_needed(cache)) {
			/* Commit failed: flushes must not be acknowledged. */
			process_deferred_flush_bios(cache, false);

			/*
			 * FIXME: rollback metadata or just go into a
			 * failure mode and error everything
			 */
		} else {
			/* Commit (or no-op) succeeded: flushes may complete
			 * and committed migrations can be finalised. */
			process_deferred_flush_bios(cache, true);
			process_migrations(cache, &cache->need_commit_migrations,
					   migration_success_post_commit);
		}

		ack_quiescing(cache);

	} while (more_work(cache));
}
1465
1466 /*
1467  * We want to commit periodically so that not too much
1468  * unwritten metadata builds up.
1469  */
static void do_waker(struct work_struct *ws)
{
	struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
	/* Let the policy age its stats, kick the worker, then re-arm. */
	policy_tick(cache->policy);
	wake_worker(cache);
	queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
}
1477
1478 /*----------------------------------------------------------------*/
1479
1480 static int is_congested(struct dm_dev *dev, int bdi_bits)
1481 {
1482         struct request_queue *q = bdev_get_queue(dev->bdev);
1483         return bdi_congested(&q->backing_dev_info, bdi_bits);
1484 }
1485
1486 static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
1487 {
1488         struct cache *cache = container_of(cb, struct cache, callbacks);
1489
1490         return is_congested(cache->origin_dev, bdi_bits) ||
1491                 is_congested(cache->cache_dev, bdi_bits);
1492 }
1493
1494 /*----------------------------------------------------------------
1495  * Target methods
1496  *--------------------------------------------------------------*/
1497
1498 /*
1499  * This function gets called on the error paths of the constructor, so we
1500  * have to cope with a partially initialised struct.
1501  */
/*
 * Free everything hanging off @cache, NULL-tolerantly, so it can also
 * serve the constructor's error paths (see comment above).  Teardown
 * is roughly the reverse of construction: worker resources first,
 * then metadata, then the underlying devices.
 */
static void destroy(struct cache *cache)
{
	unsigned i;

	if (cache->next_migration)
		mempool_free(cache->next_migration, cache->migration_pool);

	if (cache->migration_pool)
		mempool_destroy(cache->migration_pool);

	if (cache->all_io_ds)
		dm_deferred_set_destroy(cache->all_io_ds);

	if (cache->prison)
		dm_bio_prison_destroy(cache->prison);

	if (cache->wq)
		destroy_workqueue(cache->wq);

	if (cache->dirty_bitset)
		free_bitset(cache->dirty_bitset);

	if (cache->discard_bitset)
		free_bitset(cache->discard_bitset);

	if (cache->copier)
		dm_kcopyd_client_destroy(cache->copier);

	if (cache->cmd)
		dm_cache_metadata_close(cache->cmd);

	if (cache->metadata_dev)
		dm_put_device(cache->ti, cache->metadata_dev);

	if (cache->origin_dev)
		dm_put_device(cache->ti, cache->origin_dev);

	if (cache->cache_dev)
		dm_put_device(cache->ti, cache->cache_dev);

	if (cache->policy)
		dm_cache_policy_destroy(cache->policy);

	/* Constructor argument strings saved for status/table output. */
	for (i = 0; i < cache->nr_ctr_args ; i++)
		kfree(cache->ctr_args[i]);
	kfree(cache->ctr_args);

	kfree(cache);
}
1551
/* Target destructor: tear down the cache instance built by the ctr. */
static void cache_dtr(struct dm_target *ti)
{
	struct cache *cache = ti->private;

	destroy(cache);
}
1558
/* Size of the underlying block device in 512-byte sectors. */
static sector_t get_dev_size(struct dm_dev *dev)
{
	return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
}
1563
1564 /*----------------------------------------------------------------*/
1565
1566 /*
1567  * Construct a cache device mapping.
1568  *
1569  * cache <metadata dev> <cache dev> <origin dev> <block size>
1570  *       <#feature args> [<feature arg>]*
1571  *       <policy> <#policy args> [<policy arg>]*
1572  *
1573  * metadata dev    : fast device holding the persistent metadata
1574  * cache dev       : fast device holding cached data blocks
1575  * origin dev      : slow device holding original data blocks
1576  * block size      : cache unit size in sectors
1577  *
1578  * #feature args   : number of feature arguments passed
1579  * feature args    : writethrough.  (The default is writeback.)
1580  *
1581  * policy          : the replacement policy to use
1582  * #policy args    : an even number of policy arguments corresponding
1583  *                   to key/value pairs passed to the policy
1584  * policy args     : key/value pairs passed to the policy
1585  *                   E.g. 'sequential_threshold 1024'
1586  *                   See cache-policies.txt for details.
1587  *
1588  * Optional feature arguments are:
1589  *   writethrough  : write through caching that prohibits cache block
1590  *                   content from being different from origin block content.
1591  *                   Without this argument, the default behaviour is to write
1592  *                   back cache block contents later for performance reasons,
1593  *                   so they may differ from the corresponding origin blocks.
1594  */
/*
 * Parsed constructor arguments (see the usage comment above).  Freed
 * via destroy_cache_args(), which drops any device references still
 * held here.
 */
struct cache_args {
	struct dm_target *ti;

	struct dm_dev *metadata_dev;

	struct dm_dev *cache_dev;
	sector_t cache_sectors;		/* size of cache_dev */

	struct dm_dev *origin_dev;
	sector_t origin_sectors;	/* size of origin_dev */

	uint32_t block_size;		/* cache block size in sectors */

	const char *policy_name;
	int policy_argc;
	const char **policy_argv;	/* borrowed from ctr argv; not owned */

	struct cache_features features;
};
1614
1615 static void destroy_cache_args(struct cache_args *ca)
1616 {
1617         if (ca->metadata_dev)
1618                 dm_put_device(ca->ti, ca->metadata_dev);
1619
1620         if (ca->cache_dev)
1621                 dm_put_device(ca->ti, ca->cache_dev);
1622
1623         if (ca->origin_dev)
1624                 dm_put_device(ca->ti, ca->origin_dev);
1625
1626         kfree(ca);
1627 }
1628
1629 static bool at_least_one_arg(struct dm_arg_set *as, char **error)
1630 {
1631         if (!as->argc) {
1632                 *error = "Insufficient args";
1633                 return false;
1634         }
1635
1636         return true;
1637 }
1638
1639 static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as,
1640                               char **error)
1641 {
1642         int r;
1643         sector_t metadata_dev_size;
1644         char b[BDEVNAME_SIZE];
1645
1646         if (!at_least_one_arg(as, error))
1647                 return -EINVAL;
1648
1649         r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
1650                           &ca->metadata_dev);
1651         if (r) {
1652                 *error = "Error opening metadata device";
1653                 return r;
1654         }
1655
1656         metadata_dev_size = get_dev_size(ca->metadata_dev);
1657         if (metadata_dev_size > DM_CACHE_METADATA_MAX_SECTORS_WARNING)
1658                 DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
1659                        bdevname(ca->metadata_dev->bdev, b), THIN_METADATA_MAX_SECTORS);
1660
1661         return 0;
1662 }
1663
/* Open the cache (fast) device argument and record its size. */
static int parse_cache_dev(struct cache_args *ca, struct dm_arg_set *as,
			   char **error)
{
	int r;

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
			  &ca->cache_dev);
	if (r) {
		*error = "Error opening cache device";
		return r;
	}
	ca->cache_sectors = get_dev_size(ca->cache_dev);

	return 0;
}
1682
/*
 * Open the origin (slow) device argument and check the target isn't
 * larger than the device backing it.
 */
static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
			    char **error)
{
	int r;

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
			  &ca->origin_dev);
	if (r) {
		*error = "Error opening origin device";
		return r;
	}

	ca->origin_sectors = get_dev_size(ca->origin_dev);
	if (ca->ti->len > ca->origin_sectors) {
		*error = "Device size larger than cached device";
		return -EINVAL;
	}

	return 0;
}
1706
1707 static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as,
1708                             char **error)
1709 {
1710         unsigned long tmp;
1711
1712         if (!at_least_one_arg(as, error))
1713                 return -EINVAL;
1714
1715         if (kstrtoul(dm_shift_arg(as), 10, &tmp) || !tmp ||
1716             tmp < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
1717             tmp & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
1718                 *error = "Invalid data block size";
1719                 return -EINVAL;
1720         }
1721
1722         if (tmp > ca->cache_sectors) {
1723                 *error = "Data block size is larger than the cache device";
1724                 return -EINVAL;
1725         }
1726
1727         ca->block_size = tmp;
1728
1729         return 0;
1730 }
1731
/* Feature defaults: CM_WRITE mode, writeback (not writethrough). */
static void init_features(struct cache_features *cf)
{
	cf->mode = CM_WRITE;
	cf->write_through = false;
}
1737
1738 static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
1739                           char **error)
1740 {
1741         static struct dm_arg _args[] = {
1742                 {0, 1, "Invalid number of cache feature arguments"},
1743         };
1744
1745         int r;
1746         unsigned argc;
1747         const char *arg;
1748         struct cache_features *cf = &ca->features;
1749
1750         init_features(cf);
1751
1752         r = dm_read_arg_group(_args, as, &argc, error);
1753         if (r)
1754                 return -EINVAL;
1755
1756         while (argc--) {
1757                 arg = dm_shift_arg(as);
1758
1759                 if (!strcasecmp(arg, "writeback"))
1760                         cf->write_through = false;
1761
1762                 else if (!strcasecmp(arg, "writethrough"))
1763                         cf->write_through = true;
1764
1765                 else {
1766                         *error = "Unrecognised cache feature requested";
1767                         return -EINVAL;
1768                 }
1769         }
1770
1771         return 0;
1772 }
1773
/*
 * Parse <policy> <#policy args> [<policy arg>]*.  The policy argv
 * slice is borrowed from the caller's argv, not copied.
 */
static int parse_policy(struct cache_args *ca, struct dm_arg_set *as,
			char **error)
{
	static struct dm_arg _args[] = {
		{0, 1024, "Invalid number of policy arguments"},
	};

	int r;

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	ca->policy_name = dm_shift_arg(as);

	r = dm_read_arg_group(_args, as, &ca->policy_argc, error);
	if (r)
		return -EINVAL;

	/* Remember where the policy args start, then skip past them. */
	ca->policy_argv = (const char **)as->argv;
	dm_consume_args(as, ca->policy_argc);

	return 0;
}
1797
1798 static int parse_cache_args(struct cache_args *ca, int argc, char **argv,
1799                             char **error)
1800 {
1801         int r;
1802         struct dm_arg_set as;
1803
1804         as.argc = argc;
1805         as.argv = argv;
1806
1807         r = parse_metadata_dev(ca, &as, error);
1808         if (r)
1809                 return r;
1810
1811         r = parse_cache_dev(ca, &as, error);
1812         if (r)
1813                 return r;
1814
1815         r = parse_origin_dev(ca, &as, error);
1816         if (r)
1817                 return r;
1818
1819         r = parse_block_size(ca, &as, error);
1820         if (r)
1821                 return r;
1822
1823         r = parse_features(ca, &as, error);
1824         if (r)
1825                 return r;
1826
1827         r = parse_policy(ca, &as, error);
1828         if (r)
1829                 return r;
1830
1831         return 0;
1832 }
1833
1834 /*----------------------------------------------------------------*/
1835
1836 static struct kmem_cache *migration_cache;
1837
1838 #define NOT_CORE_OPTION 1
1839
1840 static int process_config_option(struct cache *cache, const char *key, const char *value)
1841 {
1842         unsigned long tmp;
1843
1844         if (!strcasecmp(key, "migration_threshold")) {
1845                 if (kstrtoul(value, 10, &tmp))
1846                         return -EINVAL;
1847
1848                 cache->migration_threshold = tmp;
1849                 return 0;
1850         }
1851
1852         return NOT_CORE_OPTION;
1853 }
1854
1855 static int set_config_value(struct cache *cache, const char *key, const char *value)
1856 {
1857         int r = process_config_option(cache, key, value);
1858
1859         if (r == NOT_CORE_OPTION)
1860                 r = policy_set_config_value(cache->policy, key, value);
1861
1862         if (r)
1863                 DMWARN("bad config value for %s: %s", key, value);
1864
1865         return r;
1866 }
1867
1868 static int set_config_values(struct cache *cache, int argc, const char **argv)
1869 {
1870         int r = 0;
1871
1872         if (argc & 1) {
1873                 DMWARN("Odd number of policy arguments given but they should be <key> <value> pairs.");
1874                 return -EINVAL;
1875         }
1876
1877         while (argc) {
1878                 r = set_config_value(cache, argv[0], argv[1]);
1879                 if (r)
1880                         break;
1881
1882                 argc -= 2;
1883                 argv += 2;
1884         }
1885
1886         return r;
1887 }
1888
1889 static int create_cache_policy(struct cache *cache, struct cache_args *ca,
1890                                char **error)
1891 {
1892         cache->policy = dm_cache_policy_create(ca->policy_name,
1893                                                cache->cache_size,
1894                                                cache->origin_sectors,
1895                                                cache->sectors_per_block);
1896         if (!cache->policy) {
1897                 *error = "Error creating cache's policy";
1898                 return -ENOMEM;
1899         }
1900
1901         return 0;
1902 }
1903
1904 /*
1905  * We want the discard block size to be a power of two, at least the size
1906  * of the cache block size, and have no more than 2^14 discard blocks
1907  * across the origin.
1908  */
1909 #define MAX_DISCARD_BLOCKS (1 << 14)
1910
/*
 * Returns true if covering an origin of @origin_size sectors with
 * discard blocks of @discard_block_size sectors would need more than
 * MAX_DISCARD_BLOCKS blocks.
 */
static bool too_many_discard_blocks(sector_t discard_block_size,
				    sector_t origin_size)
{
	/*
	 * sector_div() divides origin_size in place (sector_t may be
	 * 64-bit on a 32-bit arch, so plain '/' is avoided).
	 */
	(void) sector_div(origin_size, discard_block_size);

	return origin_size > MAX_DISCARD_BLOCKS;
}
1918
1919 static sector_t calculate_discard_block_size(sector_t cache_block_size,
1920                                              sector_t origin_size)
1921 {
1922         sector_t discard_block_size;
1923
1924         discard_block_size = roundup_pow_of_two(cache_block_size);
1925
1926         if (origin_size)
1927                 while (too_many_discard_blocks(discard_block_size, origin_size))
1928                         discard_block_size *= 2;
1929
1930         return discard_block_size;
1931 }
1932
1933 #define DEFAULT_MIGRATION_THRESHOLD 2048
1934
/*
 * Build a struct cache from the parsed ctr arguments.  On success the
 * new cache is returned via *result and it takes ownership of ca's
 * dm_dev references; on failure everything allocated so far is torn
 * down via destroy() and a negative errno is returned.
 */
static int cache_create(struct cache_args *ca, struct cache **result)
{
	int r = 0;
	char **error = &ca->ti->error;
	struct cache *cache;
	struct dm_target *ti = ca->ti;
	dm_block_t origin_blocks;
	struct dm_cache_metadata *cmd;
	bool may_format = ca->features.mode == CM_WRITE;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (!cache)
		return -ENOMEM;

	cache->ti = ca->ti;
	ti->private = cache;
	ti->num_flush_bios = 2;
	ti->flush_supported = true;

	ti->num_discard_bios = 1;
	ti->discards_supported = true;
	ti->discard_zeroes_data_unsupported = true;

	cache->features = ca->features;
	/* per-bio data size depends on the writethrough feature. */
	ti->per_bio_data_size = get_per_bio_data_size(cache);

	cache->callbacks.congested_fn = cache_is_congested;
	dm_table_add_target_callbacks(ti->table, &cache->callbacks);

	cache->metadata_dev = ca->metadata_dev;
	cache->origin_dev = ca->origin_dev;
	cache->cache_dev = ca->cache_dev;

	/*
	 * Ownership of the three devices has moved to cache; clear them
	 * so destroy_cache_args() won't put them a second time.
	 */
	ca->metadata_dev = ca->origin_dev = ca->cache_dev = NULL;

	/* FIXME: factor out this whole section */
	origin_blocks = cache->origin_sectors = ca->origin_sectors;
	origin_blocks = block_div(origin_blocks, ca->block_size);
	cache->origin_blocks = to_oblock(origin_blocks);

	cache->sectors_per_block = ca->block_size;
	if (dm_set_target_max_io_len(ti, cache->sectors_per_block)) {
		r = -EINVAL;
		goto bad;
	}

	if (ca->block_size & (ca->block_size - 1)) {
		/* Not a power of two: sizes must be computed by division. */
		dm_block_t cache_size = ca->cache_sectors;

		cache->sectors_per_block_shift = -1;
		cache_size = block_div(cache_size, ca->block_size);
		cache->cache_size = to_cblock(cache_size);
	} else {
		/* Power of two: block arithmetic can use shifts. */
		cache->sectors_per_block_shift = __ffs(ca->block_size);
		cache->cache_size = to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift);
	}

	r = create_cache_policy(cache, ca, error);
	if (r)
		goto bad;

	cache->policy_nr_args = ca->policy_argc;
	cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;

	r = set_config_values(cache, ca->policy_argc, ca->policy_argv);
	if (r) {
		*error = "Error setting cache policy's config values";
		goto bad;
	}

	cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
				     ca->block_size, may_format,
				     dm_cache_policy_get_hint_size(cache->policy));
	if (IS_ERR(cmd)) {
		*error = "Error creating metadata object";
		r = PTR_ERR(cmd);
		goto bad;
	}
	cache->cmd = cmd;

	spin_lock_init(&cache->lock);
	bio_list_init(&cache->deferred_bios);
	bio_list_init(&cache->deferred_flush_bios);
	bio_list_init(&cache->deferred_writethrough_bios);
	INIT_LIST_HEAD(&cache->quiesced_migrations);
	INIT_LIST_HEAD(&cache->completed_migrations);
	INIT_LIST_HEAD(&cache->need_commit_migrations);
	atomic_set(&cache->nr_migrations, 0);
	init_waitqueue_head(&cache->migration_wait);

	init_waitqueue_head(&cache->quiescing_wait);
	atomic_set(&cache->quiescing_ack, 0);

	/* Default errno for the allocation failures below. */
	r = -ENOMEM;
	cache->nr_dirty = 0;
	cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
	if (!cache->dirty_bitset) {
		*error = "could not allocate dirty bitset";
		goto bad;
	}
	clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size));

	cache->discard_block_size =
		calculate_discard_block_size(cache->sectors_per_block,
					     cache->origin_sectors);
	cache->discard_nr_blocks = oblock_to_dblock(cache, cache->origin_blocks);
	cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks));
	if (!cache->discard_bitset) {
		*error = "could not allocate discard bitset";
		goto bad;
	}
	clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));

	cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
	if (IS_ERR(cache->copier)) {
		*error = "could not create kcopyd client";
		r = PTR_ERR(cache->copier);
		goto bad;
	}

	cache->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
	if (!cache->wq) {
		*error = "could not create workqueue for metadata object";
		goto bad;
	}
	INIT_WORK(&cache->worker, do_worker);
	INIT_DELAYED_WORK(&cache->waker, do_waker);
	cache->last_commit_jiffies = jiffies;

	cache->prison = dm_bio_prison_create(PRISON_CELLS);
	if (!cache->prison) {
		*error = "could not create bio prison";
		goto bad;
	}

	cache->all_io_ds = dm_deferred_set_create();
	if (!cache->all_io_ds) {
		*error = "could not create all_io deferred set";
		goto bad;
	}

	cache->migration_pool = mempool_create_slab_pool(MIGRATION_POOL_SIZE,
							 migration_cache);
	if (!cache->migration_pool) {
		*error = "Error creating cache's migration mempool";
		goto bad;
	}

	cache->next_migration = NULL;

	cache->need_tick_bio = true;
	cache->sized = false;
	cache->quiescing = false;
	cache->commit_requested = false;
	cache->loaded_mappings = false;
	cache->loaded_discards = false;

	/* presumably restores persisted stat counters — see load_stats() */
	load_stats(cache);

	atomic_set(&cache->stats.demotion, 0);
	atomic_set(&cache->stats.promotion, 0);
	atomic_set(&cache->stats.copies_avoided, 0);
	atomic_set(&cache->stats.cache_cell_clash, 0);
	atomic_set(&cache->stats.commit_count, 0);
	atomic_set(&cache->stats.discard_count, 0);

	*result = cache;
	return 0;

bad:
	/* destroy() must cope with a partially constructed cache. */
	destroy(cache);
	return r;
}
2108
2109 static int copy_ctr_args(struct cache *cache, int argc, const char **argv)
2110 {
2111         unsigned i;
2112         const char **copy;
2113
2114         copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL);
2115         if (!copy)
2116                 return -ENOMEM;
2117         for (i = 0; i < argc; i++) {
2118                 copy[i] = kstrdup(argv[i], GFP_KERNEL);
2119                 if (!copy[i]) {
2120                         while (i--)
2121                                 kfree(copy[i]);
2122                         kfree(copy);
2123                         return -ENOMEM;
2124                 }
2125         }
2126
2127         cache->nr_ctr_args = argc;
2128         cache->ctr_args = copy;
2129
2130         return 0;
2131 }
2132
2133 static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv)
2134 {
2135         int r = -EINVAL;
2136         struct cache_args *ca;
2137         struct cache *cache = NULL;
2138
2139         ca = kzalloc(sizeof(*ca), GFP_KERNEL);
2140         if (!ca) {
2141                 ti->error = "Error allocating memory for cache";
2142                 return -ENOMEM;
2143         }
2144         ca->ti = ti;
2145
2146         r = parse_cache_args(ca, argc, argv, &ti->error);
2147         if (r)
2148                 goto out;
2149
2150         r = cache_create(ca, &cache);
2151         if (r)
2152                 goto out;
2153
2154         r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3);
2155         if (r) {
2156                 destroy(cache);
2157                 goto out;
2158         }
2159
2160         ti->private = cache;
2161
2162 out:
2163         destroy_cache_args(ca);
2164         return r;
2165 }
2166
/*
 * Fast-path map function.  Either remaps the bio immediately
 * (DM_MAPIO_REMAPPED) or hands it to the worker thread
 * (DM_MAPIO_SUBMITTED) when it needs special handling or would block.
 */
static int cache_map(struct dm_target *ti, struct bio *bio)
{
	struct cache *cache = ti->private;

	int r;
	dm_oblock_t block = get_bio_block(cache, bio);
	size_t pb_data_size = get_per_bio_data_size(cache);
	bool can_migrate = false;	/* the fast path never migrates */
	bool discarded_block;
	struct dm_bio_prison_cell *cell;
	struct policy_result lookup_result;
	struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size);

	if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
		/*
		 * This can only occur if the io goes to a partial block at
		 * the end of the origin device.  We don't cache these.
		 * Just remap to the origin and carry on.
		 */
		remap_to_origin(cache, bio);
		return DM_MAPIO_REMAPPED;
	}

	/* Flushes, FUA writes and discards go to the worker thread. */
	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
		defer_bio(cache, bio);
		return DM_MAPIO_SUBMITTED;
	}

	/*
	 * Check to see if that block is currently migrating.
	 */
	cell = alloc_prison_cell(cache);
	if (!cell) {
		/* No memory for a cell now; the worker will retry. */
		defer_bio(cache, bio);
		return DM_MAPIO_SUBMITTED;
	}

	r = bio_detain(cache, block, bio, cell,
		       (cell_free_fn) free_prison_cell,
		       cache, &cell);
	if (r) {
		/*
		 * NOTE(review): r < 0 is treated as an error (defer and
		 * retry); r > 0 presumably means the bio joined an
		 * existing cell — confirm against bio_detain().
		 */
		if (r < 0)
			defer_bio(cache, bio);

		return DM_MAPIO_SUBMITTED;
	}

	discarded_block = is_discarded_oblock(cache, block);

	r = policy_map(cache->policy, block, false, can_migrate, discarded_block,
		       bio, &lookup_result);
	if (r == -EWOULDBLOCK) {
		/* Policy wants to block (e.g. migration needed): hand off. */
		cell_defer(cache, cell, true);
		return DM_MAPIO_SUBMITTED;

	} else if (r) {
		DMERR_LIMIT("Unexpected return from cache replacement policy: %d", r);
		bio_io_error(bio);
		return DM_MAPIO_SUBMITTED;
	}

	switch (lookup_result.op) {
	case POLICY_HIT:
		inc_hit_counter(cache, bio);
		pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);

		if (is_writethrough_io(cache, bio, lookup_result.cblock))
			remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
		else
			remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);

		cell_defer(cache, cell, false);
		break;

	case POLICY_MISS:
		inc_miss_counter(cache, bio);
		pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);

		if (pb->req_nr != 0) {
			/*
			 * This is a duplicate writethrough io that is no
			 * longer needed because the block has been demoted.
			 */
			bio_endio(bio, 0);
			cell_defer(cache, cell, false);
			return DM_MAPIO_SUBMITTED;
		} else {
			remap_to_origin_clear_discard(cache, bio, block);
			cell_defer(cache, cell, false);
		}
		break;

	default:
		DMERR_LIMIT("%s: erroring bio: unknown policy op: %u", __func__,
			    (unsigned) lookup_result.op);
		bio_io_error(bio);
		return DM_MAPIO_SUBMITTED;
	}

	return DM_MAPIO_REMAPPED;
}
2268
/*
 * Completion hook: if this bio carried the policy tick, deliver the
 * tick and arm the next tick bio; then release any migrations that
 * were waiting on this bio's deferred-set entry.
 */
static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
{
	struct cache *cache = ti->private;
	unsigned long flags;
	size_t pb_data_size = get_per_bio_data_size(cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

	if (pb->tick) {
		policy_tick(cache->policy);

		/* cache->lock protects need_tick_bio; may be taken in irq context. */
		spin_lock_irqsave(&cache->lock, flags);
		cache->need_tick_bio = true;
		spin_unlock_irqrestore(&cache->lock, flags);
	}

	check_for_quiesced_migrations(cache, pb);

	return 0;
}
2288
2289 static int write_dirty_bitset(struct cache *cache)
2290 {
2291         unsigned i, r;
2292
2293         for (i = 0; i < from_cblock(cache->cache_size); i++) {
2294                 r = dm_cache_set_dirty(cache->cmd, to_cblock(i),
2295                                        is_dirty(cache, to_cblock(i)));
2296                 if (r)
2297                         return r;
2298         }
2299
2300         return 0;
2301 }
2302
2303 static int write_discard_bitset(struct cache *cache)
2304 {
2305         unsigned i, r;
2306
2307         r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size,
2308                                            cache->discard_nr_blocks);
2309         if (r) {
2310                 DMERR("could not resize on-disk discard bitset");
2311                 return r;
2312         }
2313
2314         for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) {
2315                 r = dm_cache_set_discard(cache->cmd, to_dblock(i),
2316                                          is_discarded(cache, to_dblock(i)));
2317                 if (r)
2318                         return r;
2319         }
2320
2321         return 0;
2322 }
2323
/*
 * Callback for policy_walk_mappings(): persist one per-cblock policy
 * hint.  @oblock is unused; hints are keyed by cache block.
 */
static int save_hint(void *context, dm_cblock_t cblock, dm_oblock_t oblock,
		     uint32_t hint)
{
	struct cache *cache = context;
	return dm_cache_save_hint(cache->cmd, cblock, hint);
}
2330
2331 static int write_hints(struct cache *cache)
2332 {
2333         int r;
2334
2335         r = dm_cache_begin_hints(cache->cmd, cache->policy);
2336         if (r) {
2337                 DMERR("dm_cache_begin_hints failed");
2338                 return r;
2339         }
2340
2341         r = policy_walk_mappings(cache->policy, save_hint, cache);
2342         if (r)
2343                 DMERR("policy_walk_mappings failed");
2344
2345         return r;
2346 }
2347
2348 /*
2349  * returns true on success
2350  */
2351 static bool sync_metadata(struct cache *cache)
2352 {
2353         int r1, r2, r3, r4;
2354
2355         r1 = write_dirty_bitset(cache);
2356         if (r1)
2357                 DMERR("could not write dirty bitset");
2358
2359         r2 = write_discard_bitset(cache);
2360         if (r2)
2361                 DMERR("could not write discard bitset");
2362
2363         save_stats(cache);
2364
2365         r3 = write_hints(cache);
2366         if (r3)
2367                 DMERR("could not write hints");
2368
2369         /*
2370          * If writing the above metadata failed, we still commit, but don't
2371          * set the clean shutdown flag.  This will effectively force every
2372          * dirty bit to be set on reload.
2373          */
2374         r4 = dm_cache_commit(cache->cmd, !r1 && !r2 && !r3);
2375         if (r4)
2376                 DMERR("could not write cache metadata.  Data loss may occur.");
2377
2378         return !r1 && !r2 && !r3 && !r4;
2379 }
2380
/*
 * Suspend hook: quiesce in-flight migrations, stop the worker, push
 * deferred bios back for requeue, then write the metadata out.  Each
 * step depends on the previous one, so the order is significant.
 */
static void cache_postsuspend(struct dm_target *ti)
{
	struct cache *cache = ti->private;

	start_quiescing(cache);
	wait_for_migrations(cache);
	stop_worker(cache);
	requeue_deferred_io(cache);
	stop_quiescing(cache);

	/*
	 * Result deliberately ignored: failures are logged inside and a
	 * dirty (non-clean-shutdown) reload recovers — see sync_metadata().
	 */
	(void) sync_metadata(cache);
}
2393
2394 static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
2395                         bool dirty, uint32_t hint, bool hint_valid)
2396 {
2397         int r;
2398         struct cache *cache = context;
2399
2400         r = policy_load_mapping(cache->policy, oblock, cblock, hint, hint_valid);
2401         if (r)
2402                 return r;
2403
2404         if (dirty)
2405                 set_dirty(cache, oblock, cblock);
2406         else
2407                 clear_dirty(cache, oblock, cblock);
2408
2409         return 0;
2410 }
2411
/*
 * Callback for dm_cache_load_discards(): replay one discard bit into
 * the in-core discard bitset.
 */
static int load_discard(void *context, sector_t discard_block_size,
			dm_dblock_t dblock, bool discard)
{
	struct cache *cache = context;

	/* FIXME: handle mis-matched block size */

	if (discard)
		set_discard(cache, dblock);
	else
		clear_discard(cache, dblock);

	return 0;
}
2426
/*
 * Runs before the device is resumed: handles cache-device resizes and
 * the one-off loading of mappings and discards from the metadata
 * device.  Returns 0 on success, negative errno on failure.
 */
static int cache_preresume(struct dm_target *ti)
{
	int r = 0;
	struct cache *cache = ti->private;
	/* Cache size in blocks, derived from the current device size. */
	sector_t actual_cache_size = get_dev_size(cache->cache_dev);
	(void) sector_div(actual_cache_size, cache->sectors_per_block);

	/*
	 * Check to see if the cache has resized.
	 */
	if (from_cblock(cache->cache_size) != actual_cache_size || !cache->sized) {
		/*
		 * NOTE(review): a shrink is accepted here without checking
		 * for dirty blocks in the truncated tail — confirm this is
		 * intentional / safe.
		 */
		cache->cache_size = to_cblock(actual_cache_size);

		r = dm_cache_resize(cache->cmd, cache->cache_size);
		if (r) {
			DMERR("could not resize cache metadata");
			return r;
		}

		cache->sized = true;
	}

	if (!cache->loaded_mappings) {
		r = dm_cache_load_mappings(cache->cmd, cache->policy,
					   load_mapping, cache);
		if (r) {
			DMERR("could not load cache mappings");
			return r;
		}

		cache->loaded_mappings = true;
	}

	if (!cache->loaded_discards) {
		r = dm_cache_load_discards(cache->cmd, load_discard, cache);
		if (r) {
			DMERR("could not load origin discards");
			return r;
		}

		cache->loaded_discards = true;
	}

	return r;
}
2472
/*
 * Resume hook: re-arm the tick mechanism and kick the worker right
 * away (do_waker presumably also requeues itself periodically — see
 * its definition).
 */
static void cache_resume(struct dm_target *ti)
{
	struct cache *cache = ti->private;

	cache->need_tick_bio = true;
	do_waker(&cache->waker.work);
}
2480
/*
 * Status format:
 *
 * <#used metadata blocks>/<#total metadata blocks>
 * <#read hits> <#read misses> <#write hits> <#write misses>
 * <#demotions> <#promotions> <#blocks in cache> <#dirty>
 * <#features> <features>*
 * <#core args> <core args>
 * <#policy args> <policy args>*
 */
static void cache_status(struct dm_target *ti, status_type_t type,
			 unsigned status_flags, char *result, unsigned maxlen)
{
	int r = 0;
	unsigned i;
	ssize_t sz = 0;	/* running offset into result; advanced by DMEMIT */
	dm_block_t nr_free_blocks_metadata = 0;
	dm_block_t nr_blocks_metadata = 0;
	char buf[BDEVNAME_SIZE];
	struct cache *cache = ti->private;
	dm_cblock_t residency;

	switch (type) {
	case STATUSTYPE_INFO:
		/* Commit to ensure statistics aren't out-of-date */
		if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti)) {
			r = dm_cache_commit(cache->cmd, false);
			if (r)
				DMERR("could not commit metadata for accurate status");
		}

		r = dm_cache_get_free_metadata_block_count(cache->cmd,
							   &nr_free_blocks_metadata);
		if (r) {
			DMERR("could not get metadata free block count");
			goto err;
		}

		r = dm_cache_get_metadata_dev_size(cache->cmd, &nr_blocks_metadata);
		if (r) {
			DMERR("could not get metadata device size");
			goto err;
		}

		residency = policy_residency(cache->policy);

		DMEMIT("%llu/%llu %u %u %u %u %u %u %llu %u ",
		       (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
		       (unsigned long long)nr_blocks_metadata,
		       (unsigned) atomic_read(&cache->stats.read_hit),
		       (unsigned) atomic_read(&cache->stats.read_miss),
		       (unsigned) atomic_read(&cache->stats.write_hit),
		       (unsigned) atomic_read(&cache->stats.write_miss),
		       (unsigned) atomic_read(&cache->stats.demotion),
		       (unsigned) atomic_read(&cache->stats.promotion),
		       (unsigned long long) from_cblock(residency),
		       cache->nr_dirty);

		/* Feature args: only writethrough is reported. */
		if (cache->features.write_through)
			DMEMIT("1 writethrough ");
		else
			DMEMIT("0 ");

		/* Core args: always the single migration_threshold pair. */
		DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold);
		if (sz < maxlen) {
			r = policy_emit_config_values(cache->policy, result + sz, maxlen - sz);
			if (r)
				DMERR("policy_emit_config_values returned %d", r);
		}

		break;

	case STATUSTYPE_TABLE:
		/* Reproduce the table line: three devices then ctr args. */
		format_dev_t(buf, cache->metadata_dev->bdev->bd_dev);
		DMEMIT("%s ", buf);
		format_dev_t(buf, cache->cache_dev->bdev->bd_dev);
		DMEMIT("%s ", buf);
		format_dev_t(buf, cache->origin_dev->bdev->bd_dev);
		DMEMIT("%s", buf);

		/*
		 * NOTE(review): if nr_ctr_args could ever be 0 this
		 * unsigned "- 1" would wrap; a valid ctr currently
		 * guarantees at least 3 saved args — confirm.
		 */
		for (i = 0; i < cache->nr_ctr_args - 1; i++)
			DMEMIT(" %s", cache->ctr_args[i]);
		if (cache->nr_ctr_args)
			DMEMIT(" %s", cache->ctr_args[cache->nr_ctr_args - 1]);
	}

	return;

err:
	DMEMIT("Error");
}
2572
2573 /*
2574  * Supports <key> <value>.
2575  *
2576  * The key migration_threshold is supported by the cache target core.
2577  */
2578 static int cache_message(struct dm_target *ti, unsigned argc, char **argv)
2579 {
2580         struct cache *cache = ti->private;
2581
2582         if (argc != 2)
2583                 return -EINVAL;
2584
2585         return set_config_value(cache, argv[0], argv[1]);
2586 }
2587
2588 static int cache_iterate_devices(struct dm_target *ti,
2589                                  iterate_devices_callout_fn fn, void *data)
2590 {
2591         int r = 0;
2592         struct cache *cache = ti->private;
2593
2594         r = fn(ti, cache->cache_dev, 0, get_dev_size(cache->cache_dev), data);
2595         if (!r)
2596                 r = fn(ti, cache->origin_dev, 0, ti->len, data);
2597
2598         return r;
2599 }
2600
2601 /*
2602  * We assume I/O is going to the origin (which is the volume
2603  * more likely to have restrictions e.g. by being striped).
2604  * (Looking up the exact location of the data would be expensive
2605  * and could always be out of date by the time the bio is submitted.)
2606  */
2607 static int cache_bvec_merge(struct dm_target *ti,
2608                             struct bvec_merge_data *bvm,
2609                             struct bio_vec *biovec, int max_size)
2610 {
2611         struct cache *cache = ti->private;
2612         struct request_queue *q = bdev_get_queue(cache->origin_dev->bdev);
2613
2614         if (!q->merge_bvec_fn)
2615                 return max_size;
2616
2617         bvm->bi_bdev = cache->origin_dev->bdev;
2618         return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
2619 }
2620
/*
 * Advertise discard limits derived from the origin-side discard block
 * size chosen in calculate_discard_block_size().
 */
static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
{
	/*
	 * FIXME: these limits may be incompatible with the cache device
	 */
	limits->max_discard_sectors = cache->discard_block_size * 1024;
	limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
}
2629
/*
 * Queue-limit hook: no minimum I/O preference; optimal I/O size is one
 * cache block (in bytes), plus the discard limits above.
 */
static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct cache *cache = ti->private;

	blk_limits_io_min(limits, 0);
	blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
	set_discard_limits(cache, limits);
}
2638
2639 /*----------------------------------------------------------------*/
2640
/*
 * Device-mapper hook table for the "cache" target.  Each member wires
 * a dm core callback to the corresponding handler above.
 */
static struct target_type cache_target = {
	.name = "cache",
	.version = {1, 1, 1},
	.module = THIS_MODULE,
	.ctr = cache_ctr,
	.dtr = cache_dtr,
	.map = cache_map,
	.end_io = cache_end_io,
	.postsuspend = cache_postsuspend,
	.preresume = cache_preresume,
	.resume = cache_resume,
	.status = cache_status,
	.message = cache_message,
	.iterate_devices = cache_iterate_devices,
	.merge = cache_bvec_merge,
	.io_hints = cache_io_hints,
};
2658
2659 static int __init dm_cache_init(void)
2660 {
2661         int r;
2662
2663         r = dm_register_target(&cache_target);
2664         if (r) {
2665                 DMERR("cache target registration failed: %d", r);
2666                 return r;
2667         }
2668
2669         migration_cache = KMEM_CACHE(dm_cache_migration, 0);
2670         if (!migration_cache) {
2671                 dm_unregister_target(&cache_target);
2672                 return -ENOMEM;
2673         }
2674
2675         return 0;
2676 }
2677
/*
 * Module exit.  Unregister first so no new instances can be created,
 * then free the migration slab.
 */
static void __exit dm_cache_exit(void)
{
	dm_unregister_target(&cache_target);
	kmem_cache_destroy(migration_cache);
}
2683
2684 module_init(dm_cache_init);
2685 module_exit(dm_cache_exit);
2686
2687 MODULE_DESCRIPTION(DM_NAME " cache target");
2688 MODULE_AUTHOR("Joe Thornber <ejt@redhat.com>");
2689 MODULE_LICENSE("GPL");