/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <asm/atomic.h>

#define MAX_DEPTH 16
#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)

struct dm_table {
	struct mapped_device *md;
	atomic_t holders;

	/* btree table */
	unsigned int depth;
	unsigned int counts[MAX_DEPTH];	/* in nodes */
	sector_t *index[MAX_DEPTH];

	unsigned int num_targets;
	unsigned int num_allocated;
	sector_t *highs;
	struct dm_target *targets;

	/*
	 * Indicates the rw permissions for the new logical
	 * device. This should be a combination of FMODE_READ
	 * and FMODE_WRITE.
	 */
	int mode;

	/* a list of devices used by this table */
	struct list_head devices;

	/*
	 * These are optimistic limits taken from all the
	 * targets; some targets will need smaller limits.
	 */
	struct io_restrictions limits;

	/* events get handed up using this callback */
	void (*event_fn)(void *);
	void *event_context;
};

/*
 * Similar to ceiling(log_base(n)).
 */
static unsigned int int_log(unsigned int n, unsigned int base)
{
	int result = 0;

	while (n > 1) {
		n = dm_div_up(n, base);
		result++;
	}

	return result;
}
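
/*
 * Worked example (an illustration, not part of the original source):
 * with base == 9, int_log(25, 9) iterates 25 -> dm_div_up(25, 9) == 3
 * -> dm_div_up(3, 9) == 1 and returns 2, i.e. a 9-way tree needs two
 * levels above 25 leaves.
 */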

/*
 * Returns the minimum that is _not_ zero, unless both are zero.
 */
#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))

/*
 * Combine two io_restrictions, always taking the lower value.
 */
static void combine_restrictions_low(struct io_restrictions *lhs,
				     struct io_restrictions *rhs)
{
	lhs->max_sectors =
		min_not_zero(lhs->max_sectors, rhs->max_sectors);

	lhs->max_phys_segments =
		min_not_zero(lhs->max_phys_segments, rhs->max_phys_segments);

	lhs->max_hw_segments =
		min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments);

	lhs->hardsect_size = max(lhs->hardsect_size, rhs->hardsect_size);

	lhs->max_segment_size =
		min_not_zero(lhs->max_segment_size, rhs->max_segment_size);

	lhs->seg_boundary_mask =
		min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask);

	lhs->no_cluster |= rhs->no_cluster;
}

/*
 * Calculate the index of the child node of the n'th node's k'th key.
 */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{
	return (n * CHILDREN_PER_NODE) + k;
}

/*
 * Return the n'th node of level l from table t.
 */
static inline sector_t *get_node(struct dm_table *t,
				 unsigned int l, unsigned int n)
{
	return t->index[l] + (n * KEYS_PER_NODE);
}

/*
 * Return the highest key that you could look up from the n'th
 * node on level l of the btree.
 */
static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
{
	for (; l < t->depth - 1; l++)
		n = get_child(n, CHILDREN_PER_NODE - 1);

	if (n >= t->counts[l])
		return (sector_t) -1;

	return get_node(t, l, n)[KEYS_PER_NODE - 1];
}

/*
 * Fills in a level of the btree based on the highs of the level
 * below it.
 */
static int setup_btree_index(unsigned int l, struct dm_table *t)
{
	unsigned int n, k;
	sector_t *node;

	for (n = 0U; n < t->counts[l]; n++) {
		node = get_node(t, l, n);

		for (k = 0U; k < KEYS_PER_NODE; k++)
			node[k] = high(t, l + 1, get_child(n, k));
	}

	return 0;
}
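
/*
 * Shape of the index (an illustrative note, assuming a 64-byte
 * L1_CACHE_BYTES and an 8-byte sector_t): each node holds
 * KEYS_PER_NODE == 8 keys, so every node has 9 children, and node n's
 * children on the level below are nodes 9n .. 9n+8, e.g.
 * get_child(0, 8) == 8 and get_child(1, 0) == 9.  high(t, l, n)
 * follows the right-most child down to the leaf level to return the
 * largest sector covered by node n's subtree.
 */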

void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
{
	unsigned long size;
	void *addr;

	/*
	 * Check that we're not going to overflow.
	 */
	if (nmemb > (ULONG_MAX / elem_size))
		return NULL;

	size = nmemb * elem_size;
	addr = vmalloc(size);
	if (addr)
		memset(addr, 0, size);

	return addr;
}
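
/*
 * Overflow example (illustrative, not original text): on a 32-bit
 * machine, dm_vcalloc(1UL << 20, 1UL << 13) would need 2^33 bytes;
 * since nmemb exceeds ULONG_MAX / elem_size, the check above returns
 * NULL instead of letting nmemb * elem_size wrap to a too-small size.
 */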

/*
 * highs and targets are managed as dynamic arrays during a
 * table load.
 */
static int alloc_targets(struct dm_table *t, unsigned int num)
{
	sector_t *n_highs;
	struct dm_target *n_targets;
	int n = t->num_targets;

	/*
	 * Allocate both the target array and offset array at once.
	 */
	n_highs = (sector_t *) dm_vcalloc(num, sizeof(struct dm_target) +
					  sizeof(sector_t));
	if (!n_highs)
		return -ENOMEM;

	n_targets = (struct dm_target *) (n_highs + num);

	if (n) {
		memcpy(n_highs, t->highs, sizeof(*n_highs) * n);
		memcpy(n_targets, t->targets, sizeof(*n_targets) * n);
	}

	memset(n_highs + n, -1, sizeof(*n_highs) * (num - n));
	vfree(t->highs);

	t->num_allocated = num;
	t->highs = n_highs;
	t->targets = n_targets;

	return 0;
}
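
/*
 * Layout of the single allocation above (a descriptive sketch, not
 * original text):
 *
 *	n_highs[0 .. num-1]  |  n_targets[0 .. num-1]
 *
 * Keeping both arrays in one vmalloc'd block means only one
 * allocate/copy/free cycle when a growing table load outruns
 * num_allocated; the unused tail of n_highs is pre-filled with
 * (sector_t) -1 (all bytes 0xff) by the memset above.
 */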

int dm_table_create(struct dm_table **result, int mode,
		    unsigned num_targets, struct mapped_device *md)
{
	struct dm_table *t = kmalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return -ENOMEM;

	memset(t, 0, sizeof(*t));
	INIT_LIST_HEAD(&t->devices);
	atomic_set(&t->holders, 1);

	if (!num_targets)
		num_targets = KEYS_PER_NODE;

	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);

	if (alloc_targets(t, num_targets)) {
		kfree(t);
		t = NULL;
		return -ENOMEM;
	}

	t->mode = mode;
	t->md = md;
	*result = t;
	return 0;
}
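
/*
 * Sketch of a hypothetical caller (illustrative only; 'md' stands in
 * for a mapped_device the caller already holds):
 *
 *	struct dm_table *t;
 *	int r = dm_table_create(&t, FMODE_READ | FMODE_WRITE, 0, md);
 *	if (r)
 *		return r;
 *
 * ... then add targets with dm_table_add_target(), finish with
 * dm_table_complete(), and drop the initial holder reference with
 * dm_table_put() when the table is no longer needed.
 */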

static void free_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	for (tmp = devices->next; tmp != devices; tmp = next) {
		struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
		next = tmp->next;
		kfree(dd);
	}
}

static void table_destroy(struct dm_table *t)
{
	unsigned int i;

	/* free the indexes (see dm_table_complete) */
	if (t->depth >= 2)
		vfree(t->index[t->depth - 2]);

	/* free the targets */
	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *tgt = t->targets + i;

		if (tgt->type->dtr)
			tgt->type->dtr(tgt);

		dm_put_target_type(tgt->type);
	}

	vfree(t->highs);

	/* free the device list */
	if (t->devices.next != &t->devices) {
		DMWARN("devices still present during destroy: "
		       "dm_table_remove_device calls missing");

		free_devices(&t->devices);
	}

	kfree(t);
}

void dm_table_get(struct dm_table *t)
{
	atomic_inc(&t->holders);
}

void dm_table_put(struct dm_table *t)
{
	if (!t)
		return;

	if (atomic_dec_and_test(&t->holders))
		table_destroy(t);
}

/*
 * Checks to see if we need to extend highs or targets.
 */
static inline int check_space(struct dm_table *t)
{
	if (t->num_targets >= t->num_allocated)
		return alloc_targets(t, t->num_allocated * 2);

	return 0;
}

/*
 * Convert a device path to a dev_t.
 */
static int lookup_device(const char *path, dev_t *dev)
{
	int r;
	struct nameidata nd;
	struct inode *inode;

	if ((r = path_lookup(path, LOOKUP_FOLLOW, &nd)))
		return r;

	inode = nd.dentry->d_inode;
	if (!inode) {
		r = -ENOENT;
		goto out;
	}

	if (!S_ISBLK(inode->i_mode)) {
		r = -ENOTBLK;
		goto out;
	}

	*dev = inode->i_rdev;

 out:
	path_release(&nd);
	return r;
}

/*
 * See if we've already got a device in the list.
 */
static struct dm_dev *find_device(struct list_head *l, dev_t dev)
{
	struct dm_dev *dd;

	list_for_each_entry (dd, l, list)
		if (dd->bdev->bd_dev == dev)
			return dd;

	return NULL;
}

/*
 * Open a device so we can use it as a map destination.
 */
static int open_dev(struct dm_dev *d, dev_t dev, struct mapped_device *md)
{
	static char *_claim_ptr = "I belong to device-mapper";
	struct block_device *bdev;

	int r;

	BUG_ON(d->bdev);

	bdev = open_by_devnum(dev, d->mode);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);
	r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md));
	if (r)
		blkdev_put(bdev);
	else
		d->bdev = bdev;
	return r;
}

/*
 * Close a device that we've been using.
 */
static void close_dev(struct dm_dev *d, struct mapped_device *md)
{
	if (!d->bdev)
		return;

	bd_release_from_disk(d->bdev, dm_disk(md));
	blkdev_put(d->bdev);
	d->bdev = NULL;
}

/*
 * If possible (ie. blk_size[major] is set), this checks that an
 * area of the destination device is valid.
 */
static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len)
{
	sector_t dev_size;
	dev_size = dd->bdev->bd_inode->i_size >> SECTOR_SHIFT;
	return ((start < dev_size) && (len <= (dev_size - start)));
}

/*
 * This upgrades the mode on an already open dm_dev, taking care
 * to leave things as they were if we fail to reopen the device.
 */
static int upgrade_mode(struct dm_dev *dd, int new_mode, struct mapped_device *md)
{
	int r;
	struct dm_dev dd_copy;
	dev_t dev = dd->bdev->bd_dev;

	dd_copy = *dd;

	dd->mode |= new_mode;
	dd->bdev = NULL;
	r = open_dev(dd, dev, md);
	if (!r)
		close_dev(&dd_copy, md);
	else
		*dd = dd_copy;

	return r;
}

/*
 * Add a device to the list, or just increment the usage count if
 * it's already present.
 */
static int __table_get_device(struct dm_table *t, struct dm_target *ti,
			      const char *path, sector_t start, sector_t len,
			      int mode, struct dm_dev **result)
{
	int r;
	dev_t dev;
	struct dm_dev *dd;
	unsigned int major, minor;

	BUG_ON(!t);

	if (sscanf(path, "%u:%u", &major, &minor) == 2) {
		/* Extract the major/minor numbers */
		dev = MKDEV(major, minor);
		if (MAJOR(dev) != major || MINOR(dev) != minor)
			return -EOVERFLOW;
	} else {
		/* convert the path to a device */
		if ((r = lookup_device(path, &dev)))
			return r;
	}

	dd = find_device(&t->devices, dev);
	if (!dd) {
		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
		if (!dd)
			return -ENOMEM;

		dd->mode = mode;
		dd->bdev = NULL;

		if ((r = open_dev(dd, dev, t->md))) {
			kfree(dd);
			return r;
		}

		format_dev_t(dd->name, dev);

		atomic_set(&dd->count, 0);
		list_add(&dd->list, &t->devices);

	} else if (dd->mode != (mode | dd->mode)) {
		r = upgrade_mode(dd, mode, t->md);
		if (r)
			return r;
	}
	atomic_inc(&dd->count);

	if (!check_device_area(dd, start, len)) {
		DMWARN("device %s too small for target", path);
		dm_put_device(ti, dd);
		return -EINVAL;
	}

	*result = dd;

	return 0;
}


int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
		  sector_t len, int mode, struct dm_dev **result)
{
	int r = __table_get_device(ti->table, ti, path,
				   start, len, mode, result);
	if (!r) {
		request_queue_t *q = bdev_get_queue((*result)->bdev);
		struct io_restrictions *rs = &ti->limits;

		/*
		 * Combine the device limits low.
		 *
		 * FIXME: if we move an io_restriction struct
		 *	  into q this would just be a call to
		 *	  combine_restrictions_low()
		 */
		rs->max_sectors =
			min_not_zero(rs->max_sectors, q->max_sectors);

		/* FIXME: Device-Mapper on top of RAID-0 breaks because DM
		 *        currently doesn't honor MD's merge_bvec_fn routine.
		 *        In this case, we'll force DM to use PAGE_SIZE or
		 *        smaller I/O, just to be safe. A better fix is in the
		 *        works, but add this for the time being so it will at
		 *        least operate correctly.
		 */
		if (q->merge_bvec_fn)
			rs->max_sectors =
				min_not_zero(rs->max_sectors,
					     (unsigned int) (PAGE_SIZE >> 9));

		rs->max_phys_segments =
			min_not_zero(rs->max_phys_segments,
				     q->max_phys_segments);

		rs->max_hw_segments =
			min_not_zero(rs->max_hw_segments, q->max_hw_segments);

		rs->hardsect_size = max(rs->hardsect_size, q->hardsect_size);

		rs->max_segment_size =
			min_not_zero(rs->max_segment_size, q->max_segment_size);

		rs->seg_boundary_mask =
			min_not_zero(rs->seg_boundary_mask,
				     q->seg_boundary_mask);

		rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
	}

	return r;
}

/*
 * Decrement a device's use count and remove it if necessary.
 */
void dm_put_device(struct dm_target *ti, struct dm_dev *dd)
{
	if (atomic_dec_and_test(&dd->count)) {
		close_dev(dd, ti->table->md);
		list_del(&dd->list);
		kfree(dd);
	}
}

/*
 * Checks to see if the target joins onto the end of the table.
 */
static int adjoin(struct dm_table *table, struct dm_target *ti)
{
	struct dm_target *prev;

	if (!table->num_targets)
		return !ti->begin;

	prev = &table->targets[table->num_targets - 1];
	return (ti->begin == (prev->begin + prev->len));
}

/*
 * Used to dynamically allocate the arg array.
 */
static char **realloc_argv(unsigned *array_size, char **old_argv)
{
	char **argv;
	unsigned new_size;

	new_size = *array_size ? *array_size * 2 : 64;
	argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL);
	if (argv) {
		memcpy(argv, old_argv, *array_size * sizeof(*argv));
		*array_size = new_size;
	}

	kfree(old_argv);
	return argv;
}

/*
 * Destructively splits up the argument list to pass to ctr.
 */
int dm_split_args(int *argc, char ***argvp, char *input)
{
	char *start, *end = input, *out, **argv = NULL;
	unsigned array_size = 0;

	*argc = 0;

	if (!input) {
		*argvp = NULL;
		return 0;
	}

	argv = realloc_argv(&array_size, argv);
	if (!argv)
		return -ENOMEM;

	while (1) {
		start = end;

		/* Skip whitespace */
		while (*start && isspace(*start))
			start++;

		if (!*start)
			break;	/* success, we hit the end */

		/* 'out' is used to remove any backslash escapes */
		end = out = start;
		while (*end) {
			/* Everything apart from '\0' can be quoted */
			if (*end == '\\' && *(end + 1)) {
				*out++ = *(end + 1);
				end += 2;
				continue;
			}

			if (isspace(*end))
				break;	/* end of token */

			*out++ = *end++;
		}

		/* have we already filled the array ? */
		if ((*argc + 1) > array_size) {
			argv = realloc_argv(&array_size, argv);
			if (!argv)
				return -ENOMEM;
		}

		/* we know this is whitespace */
		if (*end)
			end++;

		/* terminate the string and put it in the array */
		*out = '\0';
		argv[*argc] = start;
		(*argc)++;
	}

	*argvp = argv;
	return 0;
}
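
/*
 * Worked example (illustrative, not from the original file): given a
 * writable string "0 409600 linear /dev/hda1 8192", dm_split_args()
 * sets *argc to 5 and argv to { "0", "409600", "linear", "/dev/hda1",
 * "8192" }, NUL-terminating each token in place.  A backslash quotes
 * the next character, so "a\ b" parses as the single argument "a b".
 */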

static void check_for_valid_limits(struct io_restrictions *rs)
{
	if (!rs->max_sectors)
		rs->max_sectors = SAFE_MAX_SECTORS;
	if (!rs->max_phys_segments)
		rs->max_phys_segments = MAX_PHYS_SEGMENTS;
	if (!rs->max_hw_segments)
		rs->max_hw_segments = MAX_HW_SEGMENTS;
	if (!rs->hardsect_size)
		rs->hardsect_size = 1 << SECTOR_SHIFT;
	if (!rs->max_segment_size)
		rs->max_segment_size = MAX_SEGMENT_SIZE;
	if (!rs->seg_boundary_mask)
		rs->seg_boundary_mask = -1;
}

int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params)
{
	int r = -EINVAL, argc;
	char **argv;
	struct dm_target *tgt;

	if ((r = check_space(t)))
		return r;

	tgt = t->targets + t->num_targets;
	memset(tgt, 0, sizeof(*tgt));

	if (!len) {
		tgt->error = "zero-length target";
		DMERR("%s", tgt->error);
		return -EINVAL;
	}

	tgt->type = dm_get_target_type(type);
	if (!tgt->type) {
		tgt->error = "unknown target type";
		DMERR("%s", tgt->error);
		return -EINVAL;
	}

	tgt->table = t;
	tgt->begin = start;
	tgt->len = len;
	tgt->error = "Unknown error";

	/*
	 * Does this target adjoin the previous one ?
	 */
	if (!adjoin(t, tgt)) {
		tgt->error = "Gap in table";
		r = -EINVAL;
		goto bad;
	}

	r = dm_split_args(&argc, &argv, params);
	if (r) {
		tgt->error = "couldn't split parameters (insufficient memory)";
		goto bad;
	}

	r = tgt->type->ctr(tgt, argc, argv);
	kfree(argv);
	if (r)
		goto bad;

	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;

	/* FIXME: the plan is to combine high here and then have
	 * the merge fn apply the target level restrictions. */
	combine_restrictions_low(&t->limits, &tgt->limits);
	return 0;

 bad:
	DMERR("%s", tgt->error);
	dm_put_target_type(tgt->type);
	return r;
}

static int setup_indexes(struct dm_table *t)
{
	int i;
	unsigned int total = 0;
	sector_t *indexes;

	/* allocate the space for *all* the indexes */
	for (i = t->depth - 2; i >= 0; i--) {
		t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
		total += t->counts[i];
	}

	indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
	if (!indexes)
		return -ENOMEM;

	/* set up internal nodes, bottom-up */
	for (i = t->depth - 2, total = 0; i >= 0; i--) {
		t->index[i] = indexes;
		indexes += (KEYS_PER_NODE * t->counts[i]);
		setup_btree_index(i, t);
	}

	return 0;
}

/*
 * Builds the btree to index the map.
 */
int dm_table_complete(struct dm_table *t)
{
	int r = 0;
	unsigned int leaf_nodes;

	check_for_valid_limits(&t->limits);

	/* how many indexes will the btree have ? */
	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);

	/* leaf layer has already been set up */
	t->counts[t->depth - 1] = leaf_nodes;
	t->index[t->depth - 1] = t->highs;

	if (t->depth >= 2)
		r = setup_indexes(t);

	return r;
}
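
/*
 * Sizing example (illustrative, assuming KEYS_PER_NODE == 8 and
 * CHILDREN_PER_NODE == 9): a table with 200 targets gets
 * leaf_nodes = dm_div_up(200, 8) = 25 and
 * depth = 1 + int_log(25, 9) = 3, i.e. the leaf level (t->highs
 * itself) plus two internal levels built by setup_indexes().
 */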

static DEFINE_MUTEX(_event_lock);
void dm_table_event_callback(struct dm_table *t,
			     void (*fn)(void *), void *context)
{
	mutex_lock(&_event_lock);
	t->event_fn = fn;
	t->event_context = context;
	mutex_unlock(&_event_lock);
}

void dm_table_event(struct dm_table *t)
{
	/*
	 * You can no longer call dm_table_event() from interrupt
	 * context; use a bottom half instead.
	 */
	BUG_ON(in_interrupt());

	mutex_lock(&_event_lock);
	if (t->event_fn)
		t->event_fn(t->event_context);
	mutex_unlock(&_event_lock);
}

sector_t dm_table_get_size(struct dm_table *t)
{
	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}

struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
{
	if (index >= t->num_targets)
		return NULL;

	return t->targets + index;
}

/*
 * Search the btree for the correct target.
 */
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
{
	unsigned int l, n = 0, k = 0;
	sector_t *node;

	for (l = 0; l < t->depth; l++) {
		n = get_child(n, k);
		node = get_node(t, l, n);

		for (k = 0; k < KEYS_PER_NODE; k++)
			if (node[k] >= sector)
				break;
	}

	return &t->targets[(KEYS_PER_NODE * n) + k];
}
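
/*
 * Walk-through (an illustrative note, not original text): level 0 is
 * the root; at each level the (n, k) pair from the level above picks
 * child node get_child(n, k), whose keys are then scanned for the
 * first entry >= sector.  Because each key is the highest sector
 * covered by the corresponding subtree (see high() above), the final
 * (KEYS_PER_NODE * n) + k indexes the target whose range contains
 * the sector.
 */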

void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
{
	/*
	 * Make sure we obey the optimistic sub-device
	 * restrictions.
	 */
	blk_queue_max_sectors(q, t->limits.max_sectors);
	q->max_phys_segments = t->limits.max_phys_segments;
	q->max_hw_segments = t->limits.max_hw_segments;
	q->hardsect_size = t->limits.hardsect_size;
	q->max_segment_size = t->limits.max_segment_size;
	q->seg_boundary_mask = t->limits.seg_boundary_mask;
	if (t->limits.no_cluster)
		q->queue_flags &= ~(1 << QUEUE_FLAG_CLUSTER);
	else
		q->queue_flags |= (1 << QUEUE_FLAG_CLUSTER);
}

unsigned int dm_table_get_num_targets(struct dm_table *t)
{
	return t->num_targets;
}

struct list_head *dm_table_get_devices(struct dm_table *t)
{
	return &t->devices;
}

int dm_table_get_mode(struct dm_table *t)
{
	return t->mode;
}

static void suspend_targets(struct dm_table *t, unsigned postsuspend)
{
	int i = t->num_targets;
	struct dm_target *ti = t->targets;

	while (i--) {
		if (postsuspend) {
			if (ti->type->postsuspend)
				ti->type->postsuspend(ti);
		} else if (ti->type->presuspend)
			ti->type->presuspend(ti);

		ti++;
	}
}

void dm_table_presuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	return suspend_targets(t, 0);
}

void dm_table_postsuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	return suspend_targets(t, 1);
}

void dm_table_resume_targets(struct dm_table *t)
{
	int i;

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (ti->type->resume)
			ti->type->resume(ti);
	}
}

int dm_table_any_congested(struct dm_table *t, int bdi_bits)
{
	struct list_head *d, *devices;
	int r = 0;

	devices = dm_table_get_devices(t);
	for (d = devices->next; d != devices; d = d->next) {
		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
		request_queue_t *q = bdev_get_queue(dd->bdev);
		r |= bdi_congested(&q->backing_dev_info, bdi_bits);
	}

	return r;
}

void dm_table_unplug_all(struct dm_table *t)
{
	struct list_head *d, *devices = dm_table_get_devices(t);

	for (d = devices->next; d != devices; d = d->next) {
		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
		request_queue_t *q = bdev_get_queue(dd->bdev);

		if (q->unplug_fn)
			q->unplug_fn(q);
	}
}

int dm_table_flush_all(struct dm_table *t)
{
	struct list_head *d, *devices = dm_table_get_devices(t);
	int ret = 0;

	for (d = devices->next; d != devices; d = d->next) {
		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
		request_queue_t *q = bdev_get_queue(dd->bdev);
		int err;

		if (!q->issue_flush_fn)
			err = -EOPNOTSUPP;
		else
			err = q->issue_flush_fn(q, dd->bdev->bd_disk, NULL);

		if (!ret)
			ret = err;
	}

	return ret;
}

struct mapped_device *dm_table_get_md(struct dm_table *t)
{
	dm_get(t->md);

	return t->md;
}

EXPORT_SYMBOL(dm_vcalloc);
EXPORT_SYMBOL(dm_get_device);
EXPORT_SYMBOL(dm_put_device);
EXPORT_SYMBOL(dm_table_event);
EXPORT_SYMBOL(dm_table_get_size);
EXPORT_SYMBOL(dm_table_get_mode);
EXPORT_SYMBOL(dm_table_get_md);
EXPORT_SYMBOL(dm_table_put);
EXPORT_SYMBOL(dm_table_get);
EXPORT_SYMBOL(dm_table_unplug_all);
EXPORT_SYMBOL(dm_table_flush_all);