/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <asm/atomic.h>

#define DM_MSG_PREFIX "table"

#define MAX_DEPTH 16
#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
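
/*
 * Worked example (a sketch, not guaranteed for every config): with
 * 64-byte cache lines and an 8-byte sector_t, NODE_SIZE is 64, so
 * KEYS_PER_NODE is 8 and each btree node has 9 children.  A MAX_DEPTH
 * of 16 then allows up to 9^15 leaf nodes, far more than any table
 * needs.
 */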

struct dm_table {
	struct mapped_device *md;
	atomic_t holders;

	/* btree table */
	unsigned int depth;
	unsigned int counts[MAX_DEPTH];	/* in nodes */
	sector_t *index[MAX_DEPTH];

	unsigned int num_targets;
	unsigned int num_allocated;
	sector_t *highs;
	struct dm_target *targets;

	/*
	 * Indicates the rw permissions for the new logical
	 * device.  This should be a combination of FMODE_READ
	 * and FMODE_WRITE.
	 */
	int mode;

	/* a list of devices used by this table */
	struct list_head devices;

	/*
	 * These are optimistic limits taken from all the
	 * targets; some targets will need smaller limits.
	 */
	struct io_restrictions limits;

	/* events get handed up using this callback */
	void (*event_fn)(void *);
	void *event_context;
};

/*
 * Similar to ceiling(log_base(n)), i.e. how many times n must be
 * divided by 'base' (rounding up) to reach 1.
 */
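/*
 * For example, int_log(13, 9) iterates 13 -> 2 -> 1 and returns 2,
 * matching ceil(log9(13)) (assuming dm_div_up() in dm.h is the usual
 * round-up division).
 */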
static unsigned int int_log(unsigned int n, unsigned int base)
{
	int result = 0;

	while (n > 1) {
		n = dm_div_up(n, base);
		result++;
	}

	return result;
}

/*
 * Returns the minimum that is _not_ zero, unless both are zero.
 */
#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min((l), (r))))
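
/*
 * For example, min_not_zero(0, 8) and min_not_zero(8, 0) both give 8,
 * while min_not_zero(4, 8) gives 4: a zero limit means "unrestricted"
 * here, so it must never win the comparison.
 */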

/*
 * Combine two io_restrictions, taking the more restrictive value in
 * each case (the lower one, except for hardsect_size, where larger
 * is more restrictive).
 */
static void combine_restrictions_low(struct io_restrictions *lhs,
				     struct io_restrictions *rhs)
{
	lhs->max_sectors =
		min_not_zero(lhs->max_sectors, rhs->max_sectors);

	lhs->max_phys_segments =
		min_not_zero(lhs->max_phys_segments, rhs->max_phys_segments);

	lhs->max_hw_segments =
		min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments);

	lhs->hardsect_size = max(lhs->hardsect_size, rhs->hardsect_size);

	lhs->max_segment_size =
		min_not_zero(lhs->max_segment_size, rhs->max_segment_size);

	lhs->seg_boundary_mask =
		min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask);

	lhs->bounce_pfn = min_not_zero(lhs->bounce_pfn, rhs->bounce_pfn);

	lhs->no_cluster |= rhs->no_cluster;
}

/*
 * Calculate the index of the child node of the n'th node's k'th key.
 */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{
	return (n * CHILDREN_PER_NODE) + k;
}
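
/*
 * With CHILDREN_PER_NODE == 9, for instance, the children of node 2
 * are nodes 18..26 on the level below, so key k of node n separates
 * children get_child(n, k) and get_child(n, k + 1).
 */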

/*
 * Return the n'th node of level l from table t.
 */
static inline sector_t *get_node(struct dm_table *t,
				 unsigned int l, unsigned int n)
{
	return t->index[l] + (n * KEYS_PER_NODE);
}

/*
 * Return the highest key that you could lookup from the n'th
 * node on level l of the btree.
 */
static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
{
	for (; l < t->depth - 1; l++)
		n = get_child(n, CHILDREN_PER_NODE - 1);

	if (n >= t->counts[l])
		return (sector_t) -1;

	return get_node(t, l, n)[KEYS_PER_NODE - 1];
}

/*
 * Fills in a level of the btree based on the highs of the level
 * below it.
 */
static int setup_btree_index(unsigned int l, struct dm_table *t)
{
	unsigned int n, k;
	sector_t *node;

	for (n = 0U; n < t->counts[l]; n++) {
		node = get_node(t, l, n);

		for (k = 0U; k < KEYS_PER_NODE; k++)
			node[k] = high(t, l + 1, get_child(n, k));
	}

	return 0;
}

void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
{
	unsigned long size;
	void *addr;

	/*
	 * Check that we're not going to overflow.
	 */
	if (nmemb > (ULONG_MAX / elem_size))
		return NULL;

	size = nmemb * elem_size;
	addr = vmalloc(size);
	if (addr)
		memset(addr, 0, size);

	return addr;
}

/*
 * highs and targets are managed as dynamic arrays during a
 * table load.
 */
static int alloc_targets(struct dm_table *t, unsigned int num)
{
	sector_t *n_highs;
	struct dm_target *n_targets;
	int n = t->num_targets;

	/*
	 * Allocate both the target array and offset array at once.
	 * Append an empty entry to catch sectors beyond the end of
	 * the device.
	 */
	n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
					  sizeof(sector_t));
	if (!n_highs)
		return -ENOMEM;

	n_targets = (struct dm_target *) (n_highs + num);

	if (n) {
		memcpy(n_highs, t->highs, sizeof(*n_highs) * n);
		memcpy(n_targets, t->targets, sizeof(*n_targets) * n);
	}

	memset(n_highs + n, -1, sizeof(*n_highs) * (num - n));
	vfree(t->highs);

	t->num_allocated = num;
	t->highs = n_highs;
	t->targets = n_targets;

	return 0;
}
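
/*
 * Layout of the single allocation above (a sketch): num sector_t
 * "highs" slots come first, with the target structs starting right
 * after them at n_highs + num.  The extra element in the vcalloc
 * leaves room for one zeroed trailing target slot - the "empty
 * entry" that lookups beyond the end of the device land on.
 */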

int dm_table_create(struct dm_table **result, int mode,
		    unsigned num_targets, struct mapped_device *md)
{
	struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return -ENOMEM;

	INIT_LIST_HEAD(&t->devices);
	atomic_set(&t->holders, 1);

	if (!num_targets)
		num_targets = KEYS_PER_NODE;

	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);

	if (alloc_targets(t, num_targets)) {
		kfree(t);
		t = NULL;
		return -ENOMEM;
	}

	t->mode = mode;
	t->md = md;
	*result = t;
	return 0;
}

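/*
 * Build a minimal table containing a single "error" target spanning
 * the current size of the device (or 1 sector if it has no table
 * yet), e.g. so a broken device can be replaced by one that cleanly
 * errors all I/O.
 */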
int dm_create_error_table(struct dm_table **result, struct mapped_device *md)
{
	struct dm_table *t;
	sector_t dev_size = 1;
	int r;

	/*
	 * Find current size of device.
	 * Default to 1 sector if inactive.
	 */
	t = dm_get_table(md);
	if (t) {
		dev_size = dm_table_get_size(t);
		dm_table_put(t);
	}

	r = dm_table_create(&t, FMODE_READ, 1, md);
	if (r)
		return r;

	r = dm_table_add_target(t, "error", 0, dev_size, NULL);
	if (r)
		goto out;

	r = dm_table_complete(t);
	if (r)
		goto out;

	*result = t;

out:
	if (r)
		dm_table_put(t);

	return r;
}
EXPORT_SYMBOL_GPL(dm_create_error_table);

static void free_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	for (tmp = devices->next; tmp != devices; tmp = next) {
		struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
		next = tmp->next;
		kfree(dd);
	}
}

static void table_destroy(struct dm_table *t)
{
	unsigned int i;

	/* free the indexes (see dm_table_complete) */
	if (t->depth >= 2)
		vfree(t->index[t->depth - 2]);

	/* free the targets */
	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *tgt = t->targets + i;

		if (tgt->type->dtr)
			tgt->type->dtr(tgt);

		dm_put_target_type(tgt->type);
	}

	vfree(t->highs);

	/* free the device list */
	if (t->devices.next != &t->devices) {
		DMWARN("devices still present during destroy: "
		       "dm_table_remove_device calls missing");

		free_devices(&t->devices);
	}

	kfree(t);
}

void dm_table_get(struct dm_table *t)
{
	atomic_inc(&t->holders);
}

void dm_table_put(struct dm_table *t)
{
	if (!t)
		return;

	if (atomic_dec_and_test(&t->holders))
		table_destroy(t);
}

/*
 * Checks to see if we need to extend highs or targets.
 */
static inline int check_space(struct dm_table *t)
{
	if (t->num_targets >= t->num_allocated)
		return alloc_targets(t, t->num_allocated * 2);

	return 0;
}

/*
 * Convert a device path to a dev_t.
 */
static int lookup_device(const char *path, dev_t *dev)
{
	int r;
	struct nameidata nd;
	struct inode *inode;

	if ((r = path_lookup(path, LOOKUP_FOLLOW, &nd)))
		return r;

	inode = nd.dentry->d_inode;
	if (!inode) {
		r = -ENOENT;
		goto out;
	}

	if (!S_ISBLK(inode->i_mode)) {
		r = -ENOTBLK;
		goto out;
	}

	*dev = inode->i_rdev;

 out:
	path_release(&nd);
	return r;
}

/*
 * See if we've already got a device in the list.
 */
static struct dm_dev *find_device(struct list_head *l, dev_t dev)
{
	struct dm_dev *dd;

	list_for_each_entry (dd, l, list)
		if (dd->bdev->bd_dev == dev)
			return dd;

	return NULL;
}

/*
 * Open a device so we can use it as a map destination.
 */
static int open_dev(struct dm_dev *d, dev_t dev, struct mapped_device *md)
{
	static char *_claim_ptr = "I belong to device-mapper";
	struct block_device *bdev;

	int r;

	BUG_ON(d->bdev);

	bdev = open_by_devnum(dev, d->mode);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);
	r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md));
	if (r)
		blkdev_put(bdev);
	else
		d->bdev = bdev;
	return r;
}

/*
 * Close a device that we've been using.
 */
static void close_dev(struct dm_dev *d, struct mapped_device *md)
{
	if (!d->bdev)
		return;

	bd_release_from_disk(d->bdev, dm_disk(md));
	blkdev_put(d->bdev);
	d->bdev = NULL;
}

/*
 * If possible, this checks that an area of the destination device
 * is valid (i.e. lies entirely within the device).
 */
static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len)
{
	sector_t dev_size = dd->bdev->bd_inode->i_size >> SECTOR_SHIFT;

	if (!dev_size)
		return 1;

	return ((start < dev_size) && (len <= (dev_size - start)));
}

/*
 * This upgrades the mode on an already open dm_dev, being
 * careful to leave things as they were if we fail to reopen the
 * device.
 */
static int upgrade_mode(struct dm_dev *dd, int new_mode, struct mapped_device *md)
{
	int r;
	struct dm_dev dd_copy;
	dev_t dev = dd->bdev->bd_dev;

	dd_copy = *dd;

	dd->mode |= new_mode;
	dd->bdev = NULL;
	r = open_dev(dd, dev, md);
	if (!r)
		close_dev(&dd_copy, md);
	else
		*dd = dd_copy;

	return r;
}

/*
 * Add a device to the list, or just increment the usage count if
 * it's already present.
 */
static int __table_get_device(struct dm_table *t, struct dm_target *ti,
			      const char *path, sector_t start, sector_t len,
			      int mode, struct dm_dev **result)
{
	int r;
	dev_t dev;
	struct dm_dev *dd;
	unsigned int major, minor;

	BUG_ON(!t);

	if (sscanf(path, "%u:%u", &major, &minor) == 2) {
		/* Extract the major/minor numbers */
		dev = MKDEV(major, minor);
		if (MAJOR(dev) != major || MINOR(dev) != minor)
			return -EOVERFLOW;
	} else {
		/* convert the path to a device */
		if ((r = lookup_device(path, &dev)))
			return r;
	}

	dd = find_device(&t->devices, dev);
	if (!dd) {
		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
		if (!dd)
			return -ENOMEM;

		dd->mode = mode;
		dd->bdev = NULL;

		if ((r = open_dev(dd, dev, t->md))) {
			kfree(dd);
			return r;
		}

		format_dev_t(dd->name, dev);

		atomic_set(&dd->count, 0);
		list_add(&dd->list, &t->devices);

	} else if (dd->mode != (mode | dd->mode)) {
		r = upgrade_mode(dd, mode, t->md);
		if (r)
			return r;
	}
	atomic_inc(&dd->count);

	if (!check_device_area(dd, start, len)) {
		DMWARN("device %s too small for target", path);
		dm_put_device(ti, dd);
		return -EINVAL;
	}

	*result = dd;

	return 0;
}

void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct io_restrictions *rs = &ti->limits;

	/*
	 * Combine the device limits low.
	 *
	 * FIXME: if we move an io_restriction struct
	 *        into q this would just be a call to
	 *        combine_restrictions_low()
	 */
	rs->max_sectors =
		min_not_zero(rs->max_sectors, q->max_sectors);

	/*
	 * FIXME: Device-Mapper on top of RAID-0 breaks because DM
	 *        currently doesn't honor MD's merge_bvec_fn routine.
	 *        In this case, we'll force DM to use PAGE_SIZE or
	 *        smaller I/O, just to be safe.  A better fix is in the
	 *        works, but add this for the time being so it will at
	 *        least operate correctly.
	 */
	if (q->merge_bvec_fn)
		rs->max_sectors =
			min_not_zero(rs->max_sectors,
				     (unsigned int) (PAGE_SIZE >> 9));

	rs->max_phys_segments =
		min_not_zero(rs->max_phys_segments,
			     q->max_phys_segments);

	rs->max_hw_segments =
		min_not_zero(rs->max_hw_segments, q->max_hw_segments);

	rs->hardsect_size = max(rs->hardsect_size, q->hardsect_size);

	rs->max_segment_size =
		min_not_zero(rs->max_segment_size, q->max_segment_size);

	rs->seg_boundary_mask =
		min_not_zero(rs->seg_boundary_mask,
			     q->seg_boundary_mask);

	rs->bounce_pfn = min_not_zero(rs->bounce_pfn, q->bounce_pfn);

	rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
}
EXPORT_SYMBOL_GPL(dm_set_device_limits);

int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
		  sector_t len, int mode, struct dm_dev **result)
{
	int r = __table_get_device(ti->table, ti, path,
				   start, len, mode, result);

	if (!r)
		dm_set_device_limits(ti, (*result)->bdev);

	return r;
}

/*
 * Decrement a device's use count and remove it if necessary.
 */
void dm_put_device(struct dm_target *ti, struct dm_dev *dd)
{
	if (atomic_dec_and_test(&dd->count)) {
		close_dev(dd, ti->table->md);
		list_del(&dd->list);
		kfree(dd);
	}
}

/*
 * Checks to see if the target joins onto the end of the table.
 */
static int adjoin(struct dm_table *table, struct dm_target *ti)
{
	struct dm_target *prev;

	if (!table->num_targets)
		return !ti->begin;

	prev = &table->targets[table->num_targets - 1];
	return (ti->begin == (prev->begin + prev->len));
}

/*
 * Used to dynamically allocate the arg array.
 */
static char **realloc_argv(unsigned *array_size, char **old_argv)
{
	char **argv;
	unsigned new_size;

	new_size = *array_size ? *array_size * 2 : 64;
	argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL);
	if (argv) {
		memcpy(argv, old_argv, *array_size * sizeof(*argv));
		*array_size = new_size;
	}

	kfree(old_argv);
	return argv;
}

/*
 * Destructively splits up the argument list to pass to ctr.
 */
int dm_split_args(int *argc, char ***argvp, char *input)
{
	char *start, *end = input, *out, **argv = NULL;
	unsigned array_size = 0;

	*argc = 0;

	if (!input) {
		*argvp = NULL;
		return 0;
	}

	argv = realloc_argv(&array_size, argv);
	if (!argv)
		return -ENOMEM;

	while (1) {
		start = end;

		/* Skip whitespace */
		while (*start && isspace(*start))
			start++;

		if (!*start)
			break;	/* success, we hit the end */

		/* 'out' is used to remove any backslash quoting */
		end = out = start;
		while (*end) {
			/* Everything apart from '\0' can be quoted */
			if (*end == '\\' && *(end + 1)) {
				*out++ = *(end + 1);
				end += 2;
				continue;
			}

			if (isspace(*end))
				break;	/* end of token */

			*out++ = *end++;
		}

		/* have we already filled the array ? */
		if ((*argc + 1) > array_size) {
			argv = realloc_argv(&array_size, argv);
			if (!argv)
				return -ENOMEM;
		}

		/* we know this is whitespace */
		if (*end)
			end++;

		/* terminate the string and put it in the array */
		*out = '\0';
		argv[*argc] = start;
		(*argc)++;
	}

	*argvp = argv;
	return 0;
}
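
/*
 * For example (a sketch): dm_split_args() turns the writable string
 * "0 1024 linear /dev/hda 0" into argc == 5 with argv[] of "0",
 * "1024", "linear", "/dev/hda", "0"; a backslash escapes the next
 * character, so "a\ b" becomes the single argument "a b".
 */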

static void check_for_valid_limits(struct io_restrictions *rs)
{
	if (!rs->max_sectors)
		rs->max_sectors = SAFE_MAX_SECTORS;
	if (!rs->max_phys_segments)
		rs->max_phys_segments = MAX_PHYS_SEGMENTS;
	if (!rs->max_hw_segments)
		rs->max_hw_segments = MAX_HW_SEGMENTS;
	if (!rs->hardsect_size)
		rs->hardsect_size = 1 << SECTOR_SHIFT;
	if (!rs->max_segment_size)
		rs->max_segment_size = MAX_SEGMENT_SIZE;
	if (!rs->seg_boundary_mask)
		rs->seg_boundary_mask = -1;
	if (!rs->bounce_pfn)
		rs->bounce_pfn = -1;
}

int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params)
{
	int r = -EINVAL, argc;
	char **argv;
	struct dm_target *tgt;

	if ((r = check_space(t)))
		return r;

	tgt = t->targets + t->num_targets;
	memset(tgt, 0, sizeof(*tgt));

	if (!len) {
		DMERR("%s: zero-length target", dm_device_name(t->md));
		return -EINVAL;
	}

	tgt->type = dm_get_target_type(type);
	if (!tgt->type) {
		DMERR("%s: %s: unknown target type", dm_device_name(t->md),
		      type);
		return -EINVAL;
	}

	tgt->table = t;
	tgt->begin = start;
	tgt->len = len;
	tgt->error = "Unknown error";

	/*
	 * Does this target adjoin the previous one ?
	 */
	if (!adjoin(t, tgt)) {
		tgt->error = "Gap in table";
		r = -EINVAL;
		goto bad;
	}

	r = dm_split_args(&argc, &argv, params);
	if (r) {
		tgt->error = "couldn't split parameters (insufficient memory)";
		goto bad;
	}

	r = tgt->type->ctr(tgt, argc, argv);
	kfree(argv);
	if (r)
		goto bad;

	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;

	/*
	 * FIXME: the plan is to combine high here and then have
	 * the merge fn apply the target level restrictions.
	 */
	combine_restrictions_low(&t->limits, &tgt->limits);
	return 0;

 bad:
	DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
	dm_put_target_type(tgt->type);
	return r;
}

static int setup_indexes(struct dm_table *t)
{
	int i;
	unsigned int total = 0;
	sector_t *indexes;

	/* allocate the space for *all* the indexes */
	for (i = t->depth - 2; i >= 0; i--) {
		t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
		total += t->counts[i];
	}

	indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
	if (!indexes)
		return -ENOMEM;

	/* set up internal nodes, bottom-up */
	for (i = t->depth - 2, total = 0; i >= 0; i--) {
		t->index[i] = indexes;
		indexes += (KEYS_PER_NODE * t->counts[i]);
		setup_btree_index(i, t);
	}

	return 0;
}

/*
 * Builds the btree to index the map.
 */
int dm_table_complete(struct dm_table *t)
{
	int r = 0;
	unsigned int leaf_nodes;

	check_for_valid_limits(&t->limits);

	/* how many indexes will the btree have ? */
	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);

	/* leaf layer has already been set up */
	t->counts[t->depth - 1] = leaf_nodes;
	t->index[t->depth - 1] = t->highs;

	if (t->depth >= 2)
		r = setup_indexes(t);

	return r;
}
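
/*
 * Worked example (a sketch, assuming KEYS_PER_NODE == 8 and hence
 * CHILDREN_PER_NODE == 9): a table with 100 targets needs
 * dm_div_up(100, 8) == 13 leaf nodes, so depth is
 * 1 + int_log(13, 9) == 3; setup_indexes() then builds the two
 * internal levels above the leaves.
 */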

static DEFINE_MUTEX(_event_lock);
void dm_table_event_callback(struct dm_table *t,
			     void (*fn)(void *), void *context)
{
	mutex_lock(&_event_lock);
	t->event_fn = fn;
	t->event_context = context;
	mutex_unlock(&_event_lock);
}

void dm_table_event(struct dm_table *t)
{
	/*
	 * You can no longer call dm_table_event() from interrupt
	 * context, use a bottom half instead.
	 */
	BUG_ON(in_interrupt());

	mutex_lock(&_event_lock);
	if (t->event_fn)
		t->event_fn(t->event_context);
	mutex_unlock(&_event_lock);
}

sector_t dm_table_get_size(struct dm_table *t)
{
	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}

struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
{
	if (index >= t->num_targets)
		return NULL;

	return t->targets + index;
}

/*
 * Search the btree for the correct target.
 *
 * Caller should check returned pointer with dm_target_is_valid()
 * to trap I/O beyond end of device.
 */
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
{
	unsigned int l, n = 0, k = 0;
	sector_t *node;

	for (l = 0; l < t->depth; l++) {
		n = get_child(n, k);
		node = get_node(t, l, n);

		for (k = 0; k < KEYS_PER_NODE; k++)
			if (node[k] >= sector)
				break;
	}

	return &t->targets[(KEYS_PER_NODE * n) + k];
}
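
/*
 * The walk above works because each key holds the highest sector
 * served by the subtree below it: at every level we descend into the
 * first child whose high key is >= the sector, so on the leaf level
 * (KEYS_PER_NODE * n) + k indexes the matching entry of t->highs and
 * therefore of t->targets.
 */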

void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
{
	/*
	 * Make sure we obey the optimistic sub-device
	 * restrictions.
	 */
	blk_queue_max_sectors(q, t->limits.max_sectors);
	q->max_phys_segments = t->limits.max_phys_segments;
	q->max_hw_segments = t->limits.max_hw_segments;
	q->hardsect_size = t->limits.hardsect_size;
	q->max_segment_size = t->limits.max_segment_size;
	q->seg_boundary_mask = t->limits.seg_boundary_mask;
	q->bounce_pfn = t->limits.bounce_pfn;
	if (t->limits.no_cluster)
		q->queue_flags &= ~(1 << QUEUE_FLAG_CLUSTER);
	else
		q->queue_flags |= (1 << QUEUE_FLAG_CLUSTER);
}

unsigned int dm_table_get_num_targets(struct dm_table *t)
{
	return t->num_targets;
}

struct list_head *dm_table_get_devices(struct dm_table *t)
{
	return &t->devices;
}

int dm_table_get_mode(struct dm_table *t)
{
	return t->mode;
}

static void suspend_targets(struct dm_table *t, unsigned postsuspend)
{
	int i = t->num_targets;
	struct dm_target *ti = t->targets;

	while (i--) {
		if (postsuspend) {
			if (ti->type->postsuspend)
				ti->type->postsuspend(ti);
		} else if (ti->type->presuspend)
			ti->type->presuspend(ti);

		ti++;
	}
}

void dm_table_presuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, 0);
}

void dm_table_postsuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, 1);
}

int dm_table_resume_targets(struct dm_table *t)
{
	int i, r = 0;

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (!ti->type->preresume)
			continue;

		r = ti->type->preresume(ti);
		if (r)
			return r;
	}

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (ti->type->resume)
			ti->type->resume(ti);
	}

	return 0;
}

int dm_table_any_congested(struct dm_table *t, int bdi_bits)
{
	struct list_head *d, *devices;
	int r = 0;

	devices = dm_table_get_devices(t);
	for (d = devices->next; d != devices; d = d->next) {
		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
		struct request_queue *q = bdev_get_queue(dd->bdev);
		r |= bdi_congested(&q->backing_dev_info, bdi_bits);
	}

	return r;
}

void dm_table_unplug_all(struct dm_table *t)
{
	struct list_head *d, *devices = dm_table_get_devices(t);

	for (d = devices->next; d != devices; d = d->next) {
		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
		struct request_queue *q = bdev_get_queue(dd->bdev);

		blk_unplug(q);
	}
}

struct mapped_device *dm_table_get_md(struct dm_table *t)
{
	dm_get(t->md);

	return t->md;
}

EXPORT_SYMBOL(dm_vcalloc);
EXPORT_SYMBOL(dm_get_device);
EXPORT_SYMBOL(dm_put_device);
EXPORT_SYMBOL(dm_table_event);
EXPORT_SYMBOL(dm_table_get_size);
EXPORT_SYMBOL(dm_table_get_mode);
EXPORT_SYMBOL(dm_table_get_md);
EXPORT_SYMBOL(dm_table_put);
EXPORT_SYMBOL(dm_table_get);
EXPORT_SYMBOL(dm_table_unplug_all);