/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <asm/atomic.h>

#define DM_MSG_PREFIX "table"

#define MAX_DEPTH 16
#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)

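/*
 * Illustrative geometry, assuming a 64-byte L1 cache line and an
 * 8-byte sector_t: KEYS_PER_NODE = 8 and CHILDREN_PER_NODE = 9, so
 * each btree node fills one cache line and fans out nine ways.  The
 * real values depend on the architecture's L1_CACHE_BYTES.
 */
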
struct dm_table {
	struct mapped_device *md;
	atomic_t holders;

	/* btree table */
	unsigned int depth;
	unsigned int counts[MAX_DEPTH];	/* in nodes */
	sector_t *index[MAX_DEPTH];

	unsigned int num_targets;
	unsigned int num_allocated;
	sector_t *highs;
	struct dm_target *targets;

	unsigned barriers_supported:1;

	/*
	 * Indicates the rw permissions for the new logical
	 * device.  This should be a combination of FMODE_READ
	 * and FMODE_WRITE.
	 */
	fmode_t mode;

	/* a list of devices used by this table */
	struct list_head devices;

	/*
	 * These are optimistic limits taken from all the
	 * targets, some targets will need smaller limits.
	 */
	struct io_restrictions limits;

	/* events get handed up using this callback */
	void (*event_fn)(void *);
	void *event_context;
};

/*
 * Similar to ceiling(log_size(n))
 */
static unsigned int int_log(unsigned int n, unsigned int base)
{
	int result = 0;

	while (n > 1) {
		n = dm_div_up(n, base);
		result++;
	}

	return result;
}

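/*
 * Worked example: int_log(1000, 5) returns 5, i.e. ceil(log5(1000)):
 * the loop divides 1000 -> 200 -> 40 -> 8 -> 2 -> 1, rounding up at
 * each step, which takes five iterations.
 */
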
/*
 * Returns the minimum that is _not_ zero, unless both are zero.
 */
#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))

/*
 * Combine two io_restrictions, always taking the lower value.
 */
static void combine_restrictions_low(struct io_restrictions *lhs,
				     struct io_restrictions *rhs)
{
	lhs->max_sectors =
		min_not_zero(lhs->max_sectors, rhs->max_sectors);

	lhs->max_phys_segments =
		min_not_zero(lhs->max_phys_segments, rhs->max_phys_segments);

	lhs->max_hw_segments =
		min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments);

	lhs->hardsect_size = max(lhs->hardsect_size, rhs->hardsect_size);

	lhs->max_segment_size =
		min_not_zero(lhs->max_segment_size, rhs->max_segment_size);

	lhs->max_hw_sectors =
		min_not_zero(lhs->max_hw_sectors, rhs->max_hw_sectors);

	lhs->seg_boundary_mask =
		min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask);

	lhs->bounce_pfn = min_not_zero(lhs->bounce_pfn, rhs->bounce_pfn);

	lhs->no_cluster |= rhs->no_cluster;
}

/*
 * Calculate the index of the child node of the n'th node k'th key.
 */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{
	return (n * CHILDREN_PER_NODE) + k;
}

/*
 * Return the n'th node of level l from table t.
 */
static inline sector_t *get_node(struct dm_table *t,
				 unsigned int l, unsigned int n)
{
	return t->index[l] + (n * KEYS_PER_NODE);
}

/*
 * Return the highest key that you could lookup from the n'th
 * node on level l of the btree.
 */
static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
{
	for (; l < t->depth - 1; l++)
		n = get_child(n, CHILDREN_PER_NODE - 1);

	if (n >= t->counts[l])
		return (sector_t) -1;

	return get_node(t, l, n)[KEYS_PER_NODE - 1];
}

/*
 * Fills in a level of the btree based on the highs of the level
 * below it.
 */
static int setup_btree_index(unsigned int l, struct dm_table *t)
{
	unsigned int n, k;
	sector_t *node;

	for (n = 0U; n < t->counts[l]; n++) {
		node = get_node(t, l, n);

		for (k = 0U; k < KEYS_PER_NODE; k++)
			node[k] = high(t, l + 1, get_child(n, k));
	}

	return 0;
}

void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
{
	unsigned long size;
	void *addr;

	/*
	 * Check that we're not going to overflow.
	 */
	if (nmemb > (ULONG_MAX / elem_size))
		return NULL;

	size = nmemb * elem_size;
	addr = vmalloc(size);
	if (addr)
		memset(addr, 0, size);

	return addr;
}

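/*
 * Example: on a 32-bit machine a bogus request such as
 * dm_vcalloc(0x20000000, 16) would wrap the multiplication, so the
 * ULONG_MAX / elem_size check above rejects it and returns NULL
 * instead of handing back a too-small buffer.
 */
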
/*
 * highs, and targets are managed as dynamic arrays during a
 * table load.
 */
static int alloc_targets(struct dm_table *t, unsigned int num)
{
	sector_t *n_highs;
	struct dm_target *n_targets;
	int n = t->num_targets;

	/*
	 * Allocate both the target array and offset array at once.
	 * Append an empty entry to catch sectors beyond the end of
	 * the device.
	 */
	n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
					  sizeof(sector_t));
	if (!n_highs)
		return -ENOMEM;

	n_targets = (struct dm_target *) (n_highs + num);

	if (n) {
		memcpy(n_highs, t->highs, sizeof(*n_highs) * n);
		memcpy(n_targets, t->targets, sizeof(*n_targets) * n);
	}

	memset(n_highs + n, -1, sizeof(*n_highs) * (num - n));
	vfree(t->highs);

	t->num_allocated = num;
	t->highs = n_highs;
	t->targets = n_targets;

	return 0;
}

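/*
 * Memory layout sketch: the single vmalloc'd block holds num
 * sector_t "highs" slots followed by num + 1 dm_target slots.
 * Allocating num + 1 chunks of (sizeof(struct dm_target) +
 * sizeof(sector_t)) leaves room for the extra zeroed target, the
 * empty sentinel entry that out-of-range lookups resolve to (see
 * dm_table_find_target below).
 */
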
int dm_table_create(struct dm_table **result, fmode_t mode,
		    unsigned num_targets, struct mapped_device *md)
{
	struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return -ENOMEM;

	INIT_LIST_HEAD(&t->devices);
	atomic_set(&t->holders, 1);
	t->barriers_supported = 1;

	if (!num_targets)
		num_targets = KEYS_PER_NODE;

	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);

	if (alloc_targets(t, num_targets)) {
		kfree(t);
		t = NULL;
		return -ENOMEM;
	}

	t->mode = mode;
	t->md = md;
	*result = t;
	return 0;
}

static void free_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct dm_dev_internal *dd =
		    list_entry(tmp, struct dm_dev_internal, list);
		kfree(dd);
	}
}

static void table_destroy(struct dm_table *t)
{
	unsigned int i;

	/* free the indexes (see dm_table_complete) */
	if (t->depth >= 2)
		vfree(t->index[t->depth - 2]);

	/* free the targets */
	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *tgt = t->targets + i;

		if (tgt->type->dtr)
			tgt->type->dtr(tgt);

		dm_put_target_type(tgt->type);
	}

	vfree(t->highs);

	/* free the device list */
	if (t->devices.next != &t->devices) {
		DMWARN("devices still present during destroy: "
		       "dm_table_remove_device calls missing");

		free_devices(&t->devices);
	}

	kfree(t);
}

void dm_table_get(struct dm_table *t)
{
	atomic_inc(&t->holders);
}

void dm_table_put(struct dm_table *t)
{
	if (!t)
		return;

	if (atomic_dec_and_test(&t->holders))
		table_destroy(t);
}

/*
 * Checks to see if we need to extend highs or targets.
 */
static inline int check_space(struct dm_table *t)
{
	if (t->num_targets >= t->num_allocated)
		return alloc_targets(t, t->num_allocated * 2);

	return 0;
}

/*
 * See if we've already got a device in the list.
 */
static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
{
	struct dm_dev_internal *dd;

	list_for_each_entry (dd, l, list)
		if (dd->dm_dev.bdev->bd_dev == dev)
			return dd;

	return NULL;
}

/*
 * Open a device so we can use it as a map destination.
 */
static int open_dev(struct dm_dev_internal *d, dev_t dev,
		    struct mapped_device *md)
{
	static char *_claim_ptr = "I belong to device-mapper";
	struct block_device *bdev;

	int r;

	BUG_ON(d->dm_dev.bdev);

	bdev = open_by_devnum(dev, d->dm_dev.mode);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);
	r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md));
	if (r)
		blkdev_put(bdev, d->dm_dev.mode);
	else
		d->dm_dev.bdev = bdev;
	return r;
}

/*
 * Close a device that we've been using.
 */
static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
{
	if (!d->dm_dev.bdev)
		return;

	bd_release_from_disk(d->dm_dev.bdev, dm_disk(md));
	blkdev_put(d->dm_dev.bdev, d->dm_dev.mode);
	d->dm_dev.bdev = NULL;
}

/*
 * If possible, this checks an area of a destination device is valid.
 */
static int check_device_area(struct dm_dev_internal *dd, sector_t start,
			     sector_t len)
{
	sector_t dev_size = dd->dm_dev.bdev->bd_inode->i_size >> SECTOR_SHIFT;

	if (!dev_size)
		return 1;

	return ((start < dev_size) && (len <= (dev_size - start)));
}

/*
 * This upgrades the mode on an already open dm_dev.  Being
 * careful to leave things as they were if we fail to reopen the
 * device.
 */
static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
			struct mapped_device *md)
{
	int r;
	struct dm_dev_internal dd_copy;
	dev_t dev = dd->dm_dev.bdev->bd_dev;

	dd_copy = *dd;

	dd->dm_dev.mode |= new_mode;
	dd->dm_dev.bdev = NULL;
	r = open_dev(dd, dev, md);
	if (!r)
		close_dev(&dd_copy, md);
	else
		*dd = dd_copy;

	return r;
}

/*
 * Add a device to the list, or just increment the usage count if
 * it's already present.
 */
static int __table_get_device(struct dm_table *t, struct dm_target *ti,
			      const char *path, sector_t start, sector_t len,
			      fmode_t mode, struct dm_dev **result)
{
	int r;
	dev_t uninitialized_var(dev);
	struct dm_dev_internal *dd;
	unsigned int major, minor;

	BUG_ON(!t);

	if (sscanf(path, "%u:%u", &major, &minor) == 2) {
		/* Extract the major/minor numbers */
		dev = MKDEV(major, minor);
		if (MAJOR(dev) != major || MINOR(dev) != minor)
			return -EOVERFLOW;
	} else {
		/* convert the path to a device */
		struct block_device *bdev = lookup_bdev(path);

		if (IS_ERR(bdev))
			return PTR_ERR(bdev);
		dev = bdev->bd_dev;
		bdput(bdev);
	}

	dd = find_device(&t->devices, dev);
	if (!dd) {
		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
		if (!dd)
			return -ENOMEM;

		dd->dm_dev.mode = mode;
		dd->dm_dev.bdev = NULL;

		if ((r = open_dev(dd, dev, t->md))) {
			kfree(dd);
			return r;
		}

		format_dev_t(dd->dm_dev.name, dev);

		atomic_set(&dd->count, 0);
		list_add(&dd->list, &t->devices);

	} else if (dd->dm_dev.mode != (mode | dd->dm_dev.mode)) {
		r = upgrade_mode(dd, mode, t->md);
		if (r)
			return r;
	}
	atomic_inc(&dd->count);

	if (!check_device_area(dd, start, len)) {
		DMWARN("device %s too small for target", path);
		dm_put_device(ti, &dd->dm_dev);
		return -EINVAL;
	}

	*result = &dd->dm_dev;

	return 0;
}

void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct io_restrictions *rs = &ti->limits;
	char b[BDEVNAME_SIZE];

	if (unlikely(!q)) {
		DMWARN("%s: Cannot set limits for nonexistent device %s",
		       dm_device_name(ti->table->md), bdevname(bdev, b));
		return;
	}

	/*
	 * Combine the device limits low.
	 *
	 * FIXME: if we move an io_restriction struct
	 * into q this would just be a call to
	 * combine_restrictions_low()
	 */
	rs->max_sectors =
		min_not_zero(rs->max_sectors, q->max_sectors);

	/*
	 * Check if merge fn is supported.
	 * If not we'll force DM to use PAGE_SIZE or
	 * smaller I/O, just to be safe.
	 */

	if (q->merge_bvec_fn && !ti->type->merge)
		rs->max_sectors =
			min_not_zero(rs->max_sectors,
				     (unsigned int) (PAGE_SIZE >> 9));

	rs->max_phys_segments =
		min_not_zero(rs->max_phys_segments,
			     q->max_phys_segments);

	rs->max_hw_segments =
		min_not_zero(rs->max_hw_segments, q->max_hw_segments);

	rs->hardsect_size = max(rs->hardsect_size, q->hardsect_size);

	rs->max_segment_size =
		min_not_zero(rs->max_segment_size, q->max_segment_size);

	rs->max_hw_sectors =
		min_not_zero(rs->max_hw_sectors, q->max_hw_sectors);

	rs->seg_boundary_mask =
		min_not_zero(rs->seg_boundary_mask,
			     q->seg_boundary_mask);

	rs->bounce_pfn = min_not_zero(rs->bounce_pfn, q->bounce_pfn);

	rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
}
EXPORT_SYMBOL_GPL(dm_set_device_limits);

int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
		  sector_t len, fmode_t mode, struct dm_dev **result)
{
	int r = __table_get_device(ti->table, ti, path,
				   start, len, mode, result);

	if (!r)
		dm_set_device_limits(ti, (*result)->bdev);

	return r;
}

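/*
 * Typical use from a target constructor (illustrative sketch, not
 * from this file -- compare drivers/md/dm-linear.c):
 *
 *	struct dm_dev *dev;
 *
 *	if (dm_get_device(ti, argv[0], 0, ti->len,
 *			  dm_table_get_mode(ti->table), &dev)) {
 *		ti->error = "device lookup failed";
 *		return -EINVAL;
 *	}
 *
 * The destructor then releases it with dm_put_device(ti, dev).
 */
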
/*
 * Decrement a device's use count and remove it if necessary.
 */
void dm_put_device(struct dm_target *ti, struct dm_dev *d)
{
	struct dm_dev_internal *dd = container_of(d, struct dm_dev_internal,
						  dm_dev);

	if (atomic_dec_and_test(&dd->count)) {
		close_dev(dd, ti->table->md);
		list_del(&dd->list);
		kfree(dd);
	}
}

/*
 * Checks to see if the target joins onto the end of the table.
 */
static int adjoin(struct dm_table *table, struct dm_target *ti)
{
	struct dm_target *prev;

	if (!table->num_targets)
		return !ti->begin;

	prev = &table->targets[table->num_targets - 1];
	return (ti->begin == (prev->begin + prev->len));
}

/*
 * Used to dynamically allocate the arg array.
 */
static char **realloc_argv(unsigned *array_size, char **old_argv)
{
	char **argv;
	unsigned new_size;

	new_size = *array_size ? *array_size * 2 : 64;
	argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL);
	if (argv) {
		memcpy(argv, old_argv, *array_size * sizeof(*argv));
		*array_size = new_size;
	}

	kfree(old_argv);
	return argv;
}

/*
 * Destructively splits up the argument list to pass to ctr.
 */
int dm_split_args(int *argc, char ***argvp, char *input)
{
	char *start, *end = input, *out, **argv = NULL;
	unsigned array_size = 0;

	*argc = 0;

	if (!input) {
		*argvp = NULL;
		return 0;
	}

	argv = realloc_argv(&array_size, argv);
	if (!argv)
		return -ENOMEM;

	while (1) {
		start = end;

		/* Skip whitespace */
		while (*start && isspace(*start))
			start++;

		if (!*start)
			break;	/* success, we hit the end */

		/* 'out' is used to remove any back-quotes */
		end = out = start;
		while (*end) {
			/* Everything apart from '\0' can be quoted */
			if (*end == '\\' && *(end + 1)) {
				*out++ = *(end + 1);
				end += 2;
				continue;
			}

			if (isspace(*end))
				break;	/* end of token */

			*out++ = *end++;
		}

		/* have we already filled the array ? */
		if ((*argc + 1) > array_size) {
			argv = realloc_argv(&array_size, argv);
			if (!argv)
				return -ENOMEM;
		}

		/* we know this is whitespace */
		if (*end)
			end++;

		/* terminate the string and put it in the array */
		*out = '\0';
		argv[*argc] = start;
		(*argc)++;
	}

	*argvp = argv;
	return 0;
}

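/*
 * Example: dm_split_args() turns the (writable) string
 * "0 2048 linear /dev/mapper/a\ b 0" into argc = 5 with
 * argv = { "0", "2048", "linear", "/dev/mapper/a b", "0" };
 * the backslash keeps the space inside the device name, and the
 * tokens point into the input buffer, which is modified in place.
 */
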
static void check_for_valid_limits(struct io_restrictions *rs)
{
	if (!rs->max_sectors)
		rs->max_sectors = SAFE_MAX_SECTORS;
	if (!rs->max_hw_sectors)
		rs->max_hw_sectors = SAFE_MAX_SECTORS;
	if (!rs->max_phys_segments)
		rs->max_phys_segments = MAX_PHYS_SEGMENTS;
	if (!rs->max_hw_segments)
		rs->max_hw_segments = MAX_HW_SEGMENTS;
	if (!rs->hardsect_size)
		rs->hardsect_size = 1 << SECTOR_SHIFT;
	if (!rs->max_segment_size)
		rs->max_segment_size = MAX_SEGMENT_SIZE;
	if (!rs->seg_boundary_mask)
		rs->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	if (!rs->bounce_pfn)
		rs->bounce_pfn = -1;
}

int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params)
{
	int r = -EINVAL, argc;
	char **argv;
	struct dm_target *tgt;

	if ((r = check_space(t)))
		return r;

	tgt = t->targets + t->num_targets;
	memset(tgt, 0, sizeof(*tgt));

	if (!len) {
		DMERR("%s: zero-length target", dm_device_name(t->md));
		return -EINVAL;
	}

	tgt->type = dm_get_target_type(type);
	if (!tgt->type) {
		DMERR("%s: %s: unknown target type", dm_device_name(t->md),
		      type);
		return -EINVAL;
	}

	tgt->table = t;
	tgt->begin = start;
	tgt->len = len;
	tgt->error = "Unknown error";

	/*
	 * Does this target adjoin the previous one ?
	 */
	if (!adjoin(t, tgt)) {
		tgt->error = "Gap in table";
		r = -EINVAL;
		goto bad;
	}

	r = dm_split_args(&argc, &argv, params);
	if (r) {
		tgt->error = "couldn't split parameters (insufficient memory)";
		goto bad;
	}

	r = tgt->type->ctr(tgt, argc, argv);
	kfree(argv);
	if (r)
		goto bad;

	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;

	/* FIXME: the plan is to combine high here and then have
	 * the merge fn apply the target level restrictions. */
	combine_restrictions_low(&t->limits, &tgt->limits);

	if (!(tgt->type->features & DM_TARGET_SUPPORTS_BARRIERS))
		t->barriers_supported = 0;

	return 0;

 bad:
	DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
	dm_put_target_type(tgt->type);
	return r;
}

static int setup_indexes(struct dm_table *t)
{
	int i;
	unsigned int total = 0;
	sector_t *indexes;

	/* allocate the space for *all* the indexes */
	for (i = t->depth - 2; i >= 0; i--) {
		t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
		total += t->counts[i];
	}

	indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
	if (!indexes)
		return -ENOMEM;

	/* set up internal nodes, bottom-up */
	for (i = t->depth - 2; i >= 0; i--) {
		t->index[i] = indexes;
		indexes += (KEYS_PER_NODE * t->counts[i]);
		setup_btree_index(i, t);
	}

	return 0;
}

/*
 * Builds the btree to index the map.
 */
int dm_table_complete(struct dm_table *t)
{
	int r = 0;
	unsigned int leaf_nodes;

	check_for_valid_limits(&t->limits);

	/*
	 * We only support barriers if there is exactly one underlying device.
	 */
	if (!list_is_singular(&t->devices))
		t->barriers_supported = 0;

	/* how many indexes will the btree have ? */
	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);

	/* leaf layer has already been set up */
	t->counts[t->depth - 1] = leaf_nodes;
	t->index[t->depth - 1] = t->highs;

	if (t->depth >= 2)
		r = setup_indexes(t);

	return r;
}

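/*
 * Sizing example, assuming KEYS_PER_NODE = 8 and CHILDREN_PER_NODE = 9:
 * a table with 100 targets needs leaf_nodes = dm_div_up(100, 8) = 13,
 * so depth = 1 + int_log(13, 9) = 3: one root node, an internal level
 * of two nodes, and the 13-node leaf level built from t->highs.
 */
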
static DEFINE_MUTEX(_event_lock);
void dm_table_event_callback(struct dm_table *t,
			     void (*fn)(void *), void *context)
{
	mutex_lock(&_event_lock);
	t->event_fn = fn;
	t->event_context = context;
	mutex_unlock(&_event_lock);
}

void dm_table_event(struct dm_table *t)
{
	/*
	 * You can no longer call dm_table_event() from interrupt
	 * context, use a bottom half instead.
	 */
	BUG_ON(in_interrupt());

	mutex_lock(&_event_lock);
	if (t->event_fn)
		t->event_fn(t->event_context);
	mutex_unlock(&_event_lock);
}

sector_t dm_table_get_size(struct dm_table *t)
{
	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}

struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
{
	if (index >= t->num_targets)
		return NULL;

	return t->targets + index;
}

/*
 * Search the btree for the correct target.
 *
 * Caller should check returned pointer with dm_target_is_valid()
 * to trap I/O beyond end of device.
 */
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
{
	unsigned int l, n = 0, k = 0;
	sector_t *node;

	for (l = 0; l < t->depth; l++) {
		n = get_child(n, k);
		node = get_node(t, l, n);

		for (k = 0; k < KEYS_PER_NODE; k++)
			if (node[k] >= sector)
				break;
	}

	return &t->targets[(KEYS_PER_NODE * n) + k];
}

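/*
 * Lookup sketch: starting at the root, each level scans one node's
 * KEYS_PER_NODE sorted keys for the first entry >= sector and
 * descends into the matching child, so a lookup costs
 * O(depth * KEYS_PER_NODE) comparisons.  The result may be the
 * zeroed sentinel entry past the last real target, hence the
 * dm_target_is_valid() check noted above.
 */
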
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
{
	/*
	 * Make sure we obey the optimistic sub devices
	 * restrictions.
	 */
	blk_queue_max_sectors(q, t->limits.max_sectors);
	q->max_phys_segments = t->limits.max_phys_segments;
	q->max_hw_segments = t->limits.max_hw_segments;
	q->hardsect_size = t->limits.hardsect_size;
	q->max_segment_size = t->limits.max_segment_size;
	q->max_hw_sectors = t->limits.max_hw_sectors;
	q->seg_boundary_mask = t->limits.seg_boundary_mask;
	q->bounce_pfn = t->limits.bounce_pfn;

	if (t->limits.no_cluster)
		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
	else
		queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);
}

unsigned int dm_table_get_num_targets(struct dm_table *t)
{
	return t->num_targets;
}

struct list_head *dm_table_get_devices(struct dm_table *t)
{
	return &t->devices;
}

fmode_t dm_table_get_mode(struct dm_table *t)
{
	return t->mode;
}

static void suspend_targets(struct dm_table *t, unsigned postsuspend)
{
	int i = t->num_targets;
	struct dm_target *ti = t->targets;

	while (i--) {
		if (postsuspend) {
			if (ti->type->postsuspend)
				ti->type->postsuspend(ti);
		} else if (ti->type->presuspend)
			ti->type->presuspend(ti);

		ti++;
	}
}

void dm_table_presuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, 0);
}

void dm_table_postsuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, 1);
}

int dm_table_resume_targets(struct dm_table *t)
{
	int i, r = 0;

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (!ti->type->preresume)
			continue;

		r = ti->type->preresume(ti);
		if (r)
			return r;
	}

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (ti->type->resume)
			ti->type->resume(ti);
	}

	return 0;
}

int dm_table_any_congested(struct dm_table *t, int bdi_bits)
{
	struct dm_dev_internal *dd;
	struct list_head *devices = dm_table_get_devices(t);
	int r = 0;

	list_for_each_entry(dd, devices, list) {
		struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
		char b[BDEVNAME_SIZE];

		if (likely(q))
			r |= bdi_congested(&q->backing_dev_info, bdi_bits);
		else
			DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
				     dm_device_name(t->md),
				     bdevname(dd->dm_dev.bdev, b));
	}

	return r;
}

void dm_table_unplug_all(struct dm_table *t)
{
	struct dm_dev_internal *dd;
	struct list_head *devices = dm_table_get_devices(t);

	list_for_each_entry(dd, devices, list) {
		struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
		char b[BDEVNAME_SIZE];

		if (likely(q))
			blk_unplug(q);
		else
			DMWARN_LIMIT("%s: Cannot unplug nonexistent device %s",
				     dm_device_name(t->md),
				     bdevname(dd->dm_dev.bdev, b));
	}
}

struct mapped_device *dm_table_get_md(struct dm_table *t)
{
	dm_get(t->md);

	return t->md;
}

int dm_table_barrier_ok(struct dm_table *t)
{
	return t->barriers_supported;
}
EXPORT_SYMBOL(dm_table_barrier_ok);

EXPORT_SYMBOL(dm_vcalloc);
EXPORT_SYMBOL(dm_get_device);
EXPORT_SYMBOL(dm_put_device);
EXPORT_SYMBOL(dm_table_event);
EXPORT_SYMBOL(dm_table_get_size);
EXPORT_SYMBOL(dm_table_get_mode);
EXPORT_SYMBOL(dm_table_get_md);
EXPORT_SYMBOL(dm_table_put);
EXPORT_SYMBOL(dm_table_get);
EXPORT_SYMBOL(dm_table_unplug_all);