2 * Copyright (C) 2007 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
18 #include <linux/sched.h>
19 #include <linux/bio.h>
20 #include <linux/buffer_head.h>
21 #include <linux/blkdev.h>
22 #include <linux/random.h>
23 #include <linux/version.h>
24 #include <asm/div64.h>
27 #include "extent_map.h"
29 #include "transaction.h"
30 #include "print-tree.h"
32 #include "async-thread.h"
struct map_lookup {
u64 type;
int io_align;
int io_width;
int stripe_len;
int sector_size;
int num_stripes;
int sub_stripes;
struct btrfs_bio_stripe stripes[];
};
45 static int init_first_rw_device(struct btrfs_trans_handle *trans,
46 struct btrfs_root *root,
47 struct btrfs_device *device);
48 static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
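/*
* map_lookup ends in a flexible array member, so its size is the fixed
* header plus one btrfs_bio_stripe per stripe; allocations below use
* kmalloc(map_lookup_size(num_stripes), GFP_NOFS)
*/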
51 #define map_lookup_size(n) (sizeof(struct map_lookup) + \
52 (sizeof(struct btrfs_bio_stripe) * (n)))
54 static DEFINE_MUTEX(uuid_mutex);
55 static LIST_HEAD(fs_uuids);
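/*
* fs_uuids is the global list of every btrfs_fs_devices ever scanned,
* keyed by fsid; uuid_mutex protects the list and the device lists
* hanging off each entry
*/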
57 void btrfs_lock_volumes(void)
59 mutex_lock(&uuid_mutex);
62 void btrfs_unlock_volumes(void)
64 mutex_unlock(&uuid_mutex);
67 static void lock_chunks(struct btrfs_root *root)
69 mutex_lock(&root->fs_info->chunk_mutex);
72 static void unlock_chunks(struct btrfs_root *root)
74 mutex_unlock(&root->fs_info->chunk_mutex);
77 int btrfs_cleanup_fs_uuids(void)
79 struct btrfs_fs_devices *fs_devices;
80 struct btrfs_device *dev;
82 while (!list_empty(&fs_uuids)) {
83 fs_devices = list_entry(fs_uuids.next,
84 struct btrfs_fs_devices, list);
85 list_del(&fs_devices->list);
while (!list_empty(&fs_devices->devices)) {
87 dev = list_entry(fs_devices->devices.next,
88 struct btrfs_device, dev_list);
90 close_bdev_exclusive(dev->bdev, dev->mode);
91 fs_devices->open_devices--;
93 fs_devices->num_devices--;
95 fs_devices->rw_devices--;
96 list_del(&dev->dev_list);
97 list_del(&dev->dev_alloc_list);
101 WARN_ON(fs_devices->num_devices);
102 WARN_ON(fs_devices->open_devices);
103 WARN_ON(fs_devices->rw_devices);
static noinline struct btrfs_device *__find_device(struct list_head *head,
u64 devid, u8 *uuid)
112 struct btrfs_device *dev;
113 struct list_head *cur;
115 list_for_each(cur, head) {
116 dev = list_entry(cur, struct btrfs_device, dev_list);
117 if (dev->devid == devid &&
118 (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
125 static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
127 struct list_head *cur;
128 struct btrfs_fs_devices *fs_devices;
130 list_for_each(cur, &fs_uuids) {
131 fs_devices = list_entry(cur, struct btrfs_fs_devices, list);
132 if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
/*
* we try to collect pending bios for a device so we don't get a large
* number of procs sending bios down to the same device. This greatly
* improves the scheduler's ability to collect and merge the bios.
*
* But, it also turns into a long list of bios to process and that is sure
* to eventually make the worker thread block. The solution here is to
* make some progress and then put this work struct back at the end of
* the list if the block device is congested. This way, multiple devices
* can make progress from a single worker thread.
*/
149 static int noinline run_scheduled_bios(struct btrfs_device *device)
152 struct backing_dev_info *bdi;
153 struct btrfs_fs_info *fs_info;
157 unsigned long num_run = 0;
160 bdi = device->bdev->bd_inode->i_mapping->backing_dev_info;
161 fs_info = device->dev_root->fs_info;
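/*
* writers waiting on async_submit_wait are woken below once the number
* of in-flight async bios drops under two thirds of the async submit
* limit
*/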
162 limit = btrfs_async_submit_limit(fs_info);
163 limit = limit * 2 / 3;
166 spin_lock(&device->io_lock);
/* take all the bios off the list at once and process them
* later on (without the lock held). But, remember the
* tail and other pointers so the bios can be properly reinserted
* into the list if we hit congestion
*/
173 pending = device->pending_bios;
174 tail = device->pending_bio_tail;
175 WARN_ON(pending && !tail);
176 device->pending_bios = NULL;
177 device->pending_bio_tail = NULL;
/*
* if pending was null this time around, no bios need processing
* at all and we can stop. Otherwise it'll loop back up again
* and do an additional check so no bios are missed.
*
* device->running_pending is used to synchronize with the
* schedule_bio code.
*/
189 device->running_pending = 1;
192 device->running_pending = 0;
194 spin_unlock(&device->io_lock);
198 pending = pending->bi_next;
200 atomic_dec(&fs_info->nr_async_bios);
202 if (atomic_read(&fs_info->nr_async_bios) < limit &&
203 waitqueue_active(&fs_info->async_submit_wait))
204 wake_up(&fs_info->async_submit_wait);
206 BUG_ON(atomic_read(&cur->bi_cnt) == 0);
208 submit_bio(cur->bi_rw, cur);
/*
* we made progress, there is more work to do and the bdi
* is now congested. Back off and let other work structs
* run instead
*/
217 if (pending && bdi_write_congested(bdi) &&
218 fs_info->fs_devices->open_devices > 1) {
219 struct bio *old_head;
221 spin_lock(&device->io_lock);
223 old_head = device->pending_bios;
224 device->pending_bios = pending;
225 if (device->pending_bio_tail)
226 tail->bi_next = old_head;
else
device->pending_bio_tail = tail;
230 spin_unlock(&device->io_lock);
231 btrfs_requeue_work(&device->work);
241 static void pending_bios_fn(struct btrfs_work *work)
243 struct btrfs_device *device;
245 device = container_of(work, struct btrfs_device, work);
246 run_scheduled_bios(device);
249 static noinline int device_list_add(const char *path,
250 struct btrfs_super_block *disk_super,
251 u64 devid, struct btrfs_fs_devices **fs_devices_ret)
253 struct btrfs_device *device;
254 struct btrfs_fs_devices *fs_devices;
255 u64 found_transid = btrfs_super_generation(disk_super);
257 fs_devices = find_fsid(disk_super->fsid);
259 fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
262 INIT_LIST_HEAD(&fs_devices->devices);
263 INIT_LIST_HEAD(&fs_devices->alloc_list);
264 list_add(&fs_devices->list, &fs_uuids);
265 memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
266 fs_devices->latest_devid = devid;
267 fs_devices->latest_trans = found_transid;
270 device = __find_device(&fs_devices->devices, devid,
271 disk_super->dev_item.uuid);
274 if (fs_devices->opened)
277 device = kzalloc(sizeof(*device), GFP_NOFS);
279 /* we can safely leave the fs_devices entry around */
282 device->devid = devid;
283 device->work.func = pending_bios_fn;
memcpy(device->uuid, disk_super->dev_item.uuid,
BTRFS_UUID_SIZE);
286 device->barriers = 1;
287 spin_lock_init(&device->io_lock);
288 device->name = kstrdup(path, GFP_NOFS);
293 INIT_LIST_HEAD(&device->dev_alloc_list);
294 list_add(&device->dev_list, &fs_devices->devices);
295 device->fs_devices = fs_devices;
296 fs_devices->num_devices++;
299 if (found_transid > fs_devices->latest_trans) {
300 fs_devices->latest_devid = devid;
301 fs_devices->latest_trans = found_transid;
303 *fs_devices_ret = fs_devices;
307 int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
309 struct list_head *tmp;
310 struct list_head *cur;
311 struct btrfs_device *device;
312 int seed_devices = 0;
314 mutex_lock(&uuid_mutex);
316 list_for_each_safe(cur, tmp, &fs_devices->devices) {
317 device = list_entry(cur, struct btrfs_device, dev_list);
318 if (device->in_fs_metadata)
322 close_bdev_exclusive(device->bdev, device->mode);
324 fs_devices->open_devices--;
326 if (device->writeable) {
327 list_del_init(&device->dev_alloc_list);
328 device->writeable = 0;
329 fs_devices->rw_devices--;
332 list_del_init(&device->dev_list);
333 fs_devices->num_devices--;
339 if (fs_devices->seed) {
340 fs_devices = fs_devices->seed;
345 mutex_unlock(&uuid_mutex);
349 static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
351 struct btrfs_fs_devices *seed_devices;
352 struct list_head *cur;
353 struct btrfs_device *device;
355 if (--fs_devices->opened > 0)
358 list_for_each(cur, &fs_devices->devices) {
359 device = list_entry(cur, struct btrfs_device, dev_list);
361 close_bdev_exclusive(device->bdev, device->mode);
362 fs_devices->open_devices--;
364 if (device->writeable) {
365 list_del_init(&device->dev_alloc_list);
366 fs_devices->rw_devices--;
370 device->writeable = 0;
371 device->in_fs_metadata = 0;
373 fs_devices->opened = 0;
374 fs_devices->seeding = 0;
375 fs_devices->sprouted = 0;
377 seed_devices = fs_devices->seed;
378 fs_devices->seed = NULL;
380 fs_devices = seed_devices;
386 int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
390 mutex_lock(&uuid_mutex);
391 ret = __btrfs_close_devices(fs_devices);
392 mutex_unlock(&uuid_mutex);
396 int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
397 fmode_t flags, void *holder)
399 struct block_device *bdev;
400 struct list_head *head = &fs_devices->devices;
401 struct list_head *cur;
402 struct btrfs_device *device;
403 struct block_device *latest_bdev = NULL;
404 struct buffer_head *bh;
405 struct btrfs_super_block *disk_super;
406 u64 latest_devid = 0;
407 u64 latest_transid = 0;
412 list_for_each(cur, head) {
413 device = list_entry(cur, struct btrfs_device, dev_list);
419 bdev = open_bdev_exclusive(device->name, flags, holder);
421 printk("open %s failed\n", device->name);
424 set_blocksize(bdev, 4096);
426 bh = btrfs_read_dev_super(bdev);
430 disk_super = (struct btrfs_super_block *)bh->b_data;
431 devid = le64_to_cpu(disk_super->dev_item.devid);
432 if (devid != device->devid)
if (memcmp(device->uuid, disk_super->dev_item.uuid,
BTRFS_UUID_SIZE))
goto error_brelse;
439 device->generation = btrfs_super_generation(disk_super);
440 if (!latest_transid || device->generation > latest_transid) {
441 latest_devid = devid;
442 latest_transid = device->generation;
446 if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
447 device->writeable = 0;
} else {
device->writeable = !bdev_read_only(bdev);
454 device->in_fs_metadata = 0;
455 device->mode = flags;
457 fs_devices->open_devices++;
458 if (device->writeable) {
459 fs_devices->rw_devices++;
460 list_add(&device->dev_alloc_list,
461 &fs_devices->alloc_list);
468 close_bdev_exclusive(bdev, FMODE_READ);
472 if (fs_devices->open_devices == 0) {
476 fs_devices->seeding = seeding;
477 fs_devices->opened = 1;
478 fs_devices->latest_bdev = latest_bdev;
479 fs_devices->latest_devid = latest_devid;
480 fs_devices->latest_trans = latest_transid;
481 fs_devices->total_rw_bytes = 0;
486 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
487 fmode_t flags, void *holder)
491 mutex_lock(&uuid_mutex);
492 if (fs_devices->opened) {
493 if (fs_devices->sprouted) {
496 fs_devices->opened++;
500 ret = __btrfs_open_devices(fs_devices, flags, holder);
502 mutex_unlock(&uuid_mutex);
506 int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
507 struct btrfs_fs_devices **fs_devices_ret)
509 struct btrfs_super_block *disk_super;
510 struct block_device *bdev;
511 struct buffer_head *bh;
516 mutex_lock(&uuid_mutex);
518 bdev = open_bdev_exclusive(path, flags, holder);
525 ret = set_blocksize(bdev, 4096);
528 bh = btrfs_read_dev_super(bdev);
533 disk_super = (struct btrfs_super_block *)bh->b_data;
534 devid = le64_to_cpu(disk_super->dev_item.devid);
535 transid = btrfs_super_generation(disk_super);
536 if (disk_super->label[0])
537 printk("device label %s ", disk_super->label);
else {
/* FIXME, make a real uuid parser */
540 printk("device fsid %llx-%llx ",
541 *(unsigned long long *)disk_super->fsid,
542 *(unsigned long long *)(disk_super->fsid + 8));
544 printk("devid %Lu transid %Lu %s\n", devid, transid, path);
545 ret = device_list_add(path, disk_super, devid, fs_devices_ret);
549 close_bdev_exclusive(bdev, flags);
551 mutex_unlock(&uuid_mutex);
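/*
* note that scanning only records the device in the global lists; this
* path is reached from mount and from the device scan ioctl in super.c,
* well before the filesystem is actually opened
*/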
/*
* this uses a pretty simple search, the expectation is that it is
* called very infrequently and that a given device has a small number
* of extents
*/
560 static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans,
561 struct btrfs_device *device,
562 u64 num_bytes, u64 *start)
564 struct btrfs_key key;
565 struct btrfs_root *root = device->dev_root;
566 struct btrfs_dev_extent *dev_extent = NULL;
567 struct btrfs_path *path;
570 u64 search_start = 0;
571 u64 search_end = device->total_bytes;
575 struct extent_buffer *l;
577 path = btrfs_alloc_path();
583 /* FIXME use last free of some kind */
/* we don't want to overwrite the superblock on the drive,
* so we make sure to start at an offset of at least 1MB
* (the primary btrfs superblock itself lives 64KB into the device)
*/
search_start = max((u64)1024 * 1024, search_start);
590 if (root->fs_info->alloc_start + num_bytes <= device->total_bytes)
591 search_start = max(root->fs_info->alloc_start, search_start);
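/*
* walk the dev extent items of this device in key order, tracking the
* end of the previous extent in last_byte; the first hole between two
* extents (or past the final one) at least num_bytes wide is returned
*/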
593 key.objectid = device->devid;
594 key.offset = search_start;
595 key.type = BTRFS_DEV_EXTENT_KEY;
596 ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
599 ret = btrfs_previous_item(root, path, 0, key.type);
603 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
606 slot = path->slots[0];
607 if (slot >= btrfs_header_nritems(l)) {
608 ret = btrfs_next_leaf(root, path);
615 if (search_start >= search_end) {
619 *start = search_start;
623 *start = last_byte > search_start ?
624 last_byte : search_start;
625 if (search_end <= *start) {
631 btrfs_item_key_to_cpu(l, &key, slot);
633 if (key.objectid < device->devid)
636 if (key.objectid > device->devid)
639 if (key.offset >= search_start && key.offset > last_byte &&
641 if (last_byte < search_start)
642 last_byte = search_start;
643 hole_size = key.offset - last_byte;
644 if (key.offset > last_byte &&
645 hole_size >= num_bytes) {
650 if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) {
655 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
656 last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent);
/* we have to make sure we didn't find an extent that has already
* been allocated by the map tree or the original allocation
*/
665 BUG_ON(*start < search_start);
667 if (*start + num_bytes > search_end) {
671 /* check for pending inserts here */
675 btrfs_free_path(path);
679 static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
struct btrfs_device *device,
u64 start)
684 struct btrfs_path *path;
685 struct btrfs_root *root = device->dev_root;
686 struct btrfs_key key;
687 struct btrfs_key found_key;
688 struct extent_buffer *leaf = NULL;
689 struct btrfs_dev_extent *extent = NULL;
691 path = btrfs_alloc_path();
695 key.objectid = device->devid;
key.offset = start;
key.type = BTRFS_DEV_EXTENT_KEY;
699 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
701 ret = btrfs_previous_item(root, path, key.objectid,
702 BTRFS_DEV_EXTENT_KEY);
704 leaf = path->nodes[0];
705 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
706 extent = btrfs_item_ptr(leaf, path->slots[0],
707 struct btrfs_dev_extent);
708 BUG_ON(found_key.offset > start || found_key.offset +
709 btrfs_dev_extent_length(leaf, extent) < start);
711 } else if (ret == 0) {
712 leaf = path->nodes[0];
713 extent = btrfs_item_ptr(leaf, path->slots[0],
714 struct btrfs_dev_extent);
718 if (device->bytes_used > 0)
719 device->bytes_used -= btrfs_dev_extent_length(leaf, extent);
720 ret = btrfs_del_item(trans, root, path);
723 btrfs_free_path(path);
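/*
* a dev extent item records that a physical byte range of one device
* backs a stripe of a chunk; it is keyed by (devid, DEV_EXTENT,
* physical offset) and points back at the owning chunk
*/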
727 int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
728 struct btrfs_device *device,
729 u64 chunk_tree, u64 chunk_objectid,
730 u64 chunk_offset, u64 start, u64 num_bytes)
733 struct btrfs_path *path;
734 struct btrfs_root *root = device->dev_root;
735 struct btrfs_dev_extent *extent;
736 struct extent_buffer *leaf;
737 struct btrfs_key key;
739 WARN_ON(!device->in_fs_metadata);
740 path = btrfs_alloc_path();
744 key.objectid = device->devid;
key.offset = start;
key.type = BTRFS_DEV_EXTENT_KEY;
ret = btrfs_insert_empty_item(trans, root, path, &key,
sizeof(*extent));
751 leaf = path->nodes[0];
752 extent = btrfs_item_ptr(leaf, path->slots[0],
753 struct btrfs_dev_extent);
754 btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
755 btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
756 btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
758 write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
(unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
BTRFS_UUID_SIZE);
762 btrfs_set_dev_extent_length(leaf, extent, num_bytes);
763 btrfs_mark_buffer_dirty(leaf);
764 btrfs_free_path(path);
768 static noinline int find_next_chunk(struct btrfs_root *root,
769 u64 objectid, u64 *offset)
771 struct btrfs_path *path;
773 struct btrfs_key key;
774 struct btrfs_chunk *chunk;
775 struct btrfs_key found_key;
777 path = btrfs_alloc_path();
780 key.objectid = objectid;
781 key.offset = (u64)-1;
782 key.type = BTRFS_CHUNK_ITEM_KEY;
784 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
790 ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
btrfs_item_key_to_cpu(path->nodes[0], &found_key,
path->slots[0]);
796 if (found_key.objectid != objectid)
chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_chunk);
801 *offset = found_key.offset +
802 btrfs_chunk_length(path->nodes[0], chunk);
807 btrfs_free_path(path);
811 static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
814 struct btrfs_key key;
815 struct btrfs_key found_key;
816 struct btrfs_path *path;
818 root = root->fs_info->chunk_root;
820 path = btrfs_alloc_path();
824 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
825 key.type = BTRFS_DEV_ITEM_KEY;
826 key.offset = (u64)-1;
828 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
BTRFS_DEV_ITEM_KEY);
btrfs_item_key_to_cpu(path->nodes[0], &found_key,
path->slots[0]);
841 *objectid = found_key.offset + 1;
845 btrfs_free_path(path);
/*
* the device information is stored in the chunk root
* the btrfs_device struct should be fully filled in
*/
853 int btrfs_add_device(struct btrfs_trans_handle *trans,
854 struct btrfs_root *root,
855 struct btrfs_device *device)
858 struct btrfs_path *path;
859 struct btrfs_dev_item *dev_item;
860 struct extent_buffer *leaf;
861 struct btrfs_key key;
864 root = root->fs_info->chunk_root;
866 path = btrfs_alloc_path();
870 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
871 key.type = BTRFS_DEV_ITEM_KEY;
872 key.offset = device->devid;
ret = btrfs_insert_empty_item(trans, root, path, &key,
sizeof(*dev_item));
879 leaf = path->nodes[0];
880 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
882 btrfs_set_device_id(leaf, dev_item, device->devid);
883 btrfs_set_device_generation(leaf, dev_item, 0);
884 btrfs_set_device_type(leaf, dev_item, device->type);
885 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
886 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
887 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
888 btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
889 btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
890 btrfs_set_device_group(leaf, dev_item, 0);
891 btrfs_set_device_seek_speed(leaf, dev_item, 0);
892 btrfs_set_device_bandwidth(leaf, dev_item, 0);
893 btrfs_set_device_start_offset(leaf, dev_item, 0);
895 ptr = (unsigned long)btrfs_device_uuid(dev_item);
896 write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
897 ptr = (unsigned long)btrfs_device_fsid(dev_item);
898 write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
899 btrfs_mark_buffer_dirty(leaf);
903 btrfs_free_path(path);
907 static int btrfs_rm_dev_item(struct btrfs_root *root,
908 struct btrfs_device *device)
911 struct btrfs_path *path;
912 struct btrfs_key key;
913 struct btrfs_trans_handle *trans;
915 root = root->fs_info->chunk_root;
917 path = btrfs_alloc_path();
921 trans = btrfs_start_transaction(root, 1);
922 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
923 key.type = BTRFS_DEV_ITEM_KEY;
924 key.offset = device->devid;
927 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
936 ret = btrfs_del_item(trans, root, path);
940 btrfs_free_path(path);
942 btrfs_commit_transaction(trans, root);
946 int btrfs_rm_device(struct btrfs_root *root, char *device_path)
948 struct btrfs_device *device;
949 struct btrfs_device *next_device;
950 struct block_device *bdev;
951 struct buffer_head *bh = NULL;
952 struct btrfs_super_block *disk_super;
959 mutex_lock(&uuid_mutex);
960 mutex_lock(&root->fs_info->volume_mutex);
962 all_avail = root->fs_info->avail_data_alloc_bits |
963 root->fs_info->avail_system_alloc_bits |
964 root->fs_info->avail_metadata_alloc_bits;
966 if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
967 root->fs_info->fs_devices->rw_devices <= 4) {
968 printk("btrfs: unable to go below four devices on raid10\n");
973 if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
974 root->fs_info->fs_devices->rw_devices <= 2) {
975 printk("btrfs: unable to go below two devices on raid1\n");
980 if (strcmp(device_path, "missing") == 0) {
981 struct list_head *cur;
982 struct list_head *devices;
983 struct btrfs_device *tmp;
986 devices = &root->fs_info->fs_devices->devices;
987 list_for_each(cur, devices) {
988 tmp = list_entry(cur, struct btrfs_device, dev_list);
989 if (tmp->in_fs_metadata && !tmp->bdev) {
998 printk("btrfs: no missing devices found to remove\n");
1002 bdev = open_bdev_exclusive(device_path, FMODE_READ,
1003 root->fs_info->bdev_holder);
1005 ret = PTR_ERR(bdev);
1009 set_blocksize(bdev, 4096);
1010 bh = btrfs_read_dev_super(bdev);
1015 disk_super = (struct btrfs_super_block *)bh->b_data;
1016 devid = le64_to_cpu(disk_super->dev_item.devid);
1017 dev_uuid = disk_super->dev_item.uuid;
device = btrfs_find_device(root, devid, dev_uuid,
disk_super->fsid);
1026 if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
1027 printk("btrfs: unable to remove the only writeable device\n");
1032 if (device->writeable) {
1033 list_del_init(&device->dev_alloc_list);
1034 root->fs_info->fs_devices->rw_devices--;
1037 ret = btrfs_shrink_device(device, 0);
1041 ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
1045 device->in_fs_metadata = 0;
1046 if (device->fs_devices == root->fs_info->fs_devices) {
1047 list_del_init(&device->dev_list);
1048 root->fs_info->fs_devices->num_devices--;
1050 device->fs_devices->open_devices--;
1053 next_device = list_entry(root->fs_info->fs_devices->devices.next,
1054 struct btrfs_device, dev_list);
1055 if (device->bdev == root->fs_info->sb->s_bdev)
1056 root->fs_info->sb->s_bdev = next_device->bdev;
1057 if (device->bdev == root->fs_info->fs_devices->latest_bdev)
1058 root->fs_info->fs_devices->latest_bdev = next_device->bdev;
1060 num_devices = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
1061 btrfs_set_super_num_devices(&root->fs_info->super_copy, num_devices);
1063 if (device->fs_devices != root->fs_info->fs_devices) {
1064 BUG_ON(device->writeable);
1067 close_bdev_exclusive(bdev, FMODE_READ);
1070 close_bdev_exclusive(device->bdev, device->mode);
1071 device->bdev = NULL;
1072 device->fs_devices->open_devices--;
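/*
* if that was the last open device in its fs_devices, unhook the
* fs_devices from the seed chain (the loop below finds whoever points
* at it) and close it for good
*/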
1074 if (device->fs_devices->open_devices == 0) {
1075 struct btrfs_fs_devices *fs_devices;
1076 fs_devices = root->fs_info->fs_devices;
1077 while (fs_devices) {
1078 if (fs_devices->seed == device->fs_devices)
1080 fs_devices = fs_devices->seed;
1082 fs_devices->seed = device->fs_devices->seed;
1083 device->fs_devices->seed = NULL;
1084 __btrfs_close_devices(device->fs_devices);
/*
* at this point, the device is zero sized. We want to
* remove it from the devices list and zero out the old super
*/
1094 if (device->writeable) {
/* make sure this device isn't detected as part of
* the filesystem anymore
*/
1098 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
1099 set_buffer_dirty(bh);
1100 sync_dirty_buffer(bh);
1105 /* one close for the device struct or super_block */
1106 close_bdev_exclusive(device->bdev, device->mode);
1109 /* one close for us */
1110 close_bdev_exclusive(bdev, FMODE_READ);
1112 kfree(device->name);
1121 close_bdev_exclusive(bdev, FMODE_READ);
1123 mutex_unlock(&root->fs_info->volume_mutex);
1124 mutex_unlock(&uuid_mutex);
/*
* does all the dirty work required for changing a filesystem's UUID:
* the open devices move to a cloned fs_devices that becomes the seed,
* and the mounted filesystem continues under a freshly generated fsid
*/
1131 static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans,
1132 struct btrfs_root *root)
1134 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
1135 struct btrfs_fs_devices *old_devices;
1136 struct btrfs_super_block *disk_super = &root->fs_info->super_copy;
1137 struct btrfs_device *device;
1140 BUG_ON(!mutex_is_locked(&uuid_mutex));
1141 if (!fs_devices->seeding || fs_devices->opened != 1)
1144 old_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
1148 memcpy(old_devices, fs_devices, sizeof(*old_devices));
1149 old_devices->opened = 1;
1150 old_devices->sprouted = 1;
1151 INIT_LIST_HEAD(&old_devices->devices);
1152 INIT_LIST_HEAD(&old_devices->alloc_list);
1153 list_splice_init(&fs_devices->devices, &old_devices->devices);
1154 list_splice_init(&fs_devices->alloc_list, &old_devices->alloc_list);
1155 list_for_each_entry(device, &old_devices->devices, dev_list) {
1156 device->fs_devices = old_devices;
1158 list_add(&old_devices->list, &fs_uuids);
1160 fs_devices->seeding = 0;
1161 fs_devices->num_devices = 0;
1162 fs_devices->open_devices = 0;
1163 fs_devices->seed = old_devices;
1165 generate_random_uuid(fs_devices->fsid);
1166 memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1167 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1168 super_flags = btrfs_super_flags(disk_super) &
1169 ~BTRFS_SUPER_FLAG_SEEDING;
1170 btrfs_set_super_flags(disk_super, super_flags);
/*
* store the expected generation for seed devices in device items.
*/
1178 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
1179 struct btrfs_root *root)
1181 struct btrfs_path *path;
1182 struct extent_buffer *leaf;
1183 struct btrfs_dev_item *dev_item;
1184 struct btrfs_device *device;
1185 struct btrfs_key key;
1186 u8 fs_uuid[BTRFS_UUID_SIZE];
1187 u8 dev_uuid[BTRFS_UUID_SIZE];
1191 path = btrfs_alloc_path();
1195 root = root->fs_info->chunk_root;
1196 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
key.offset = 0;
key.type = BTRFS_DEV_ITEM_KEY;
1201 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1205 leaf = path->nodes[0];
1207 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1208 ret = btrfs_next_leaf(root, path);
1213 leaf = path->nodes[0];
1214 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1215 btrfs_release_path(root, path);
1219 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1220 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
1221 key.type != BTRFS_DEV_ITEM_KEY)
1224 dev_item = btrfs_item_ptr(leaf, path->slots[0],
1225 struct btrfs_dev_item);
1226 devid = btrfs_device_id(leaf, dev_item);
1227 read_extent_buffer(leaf, dev_uuid,
(unsigned long)btrfs_device_uuid(dev_item),
BTRFS_UUID_SIZE);
1230 read_extent_buffer(leaf, fs_uuid,
(unsigned long)btrfs_device_fsid(dev_item),
BTRFS_FSID_SIZE);
1233 device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
1236 if (device->fs_devices->seeding) {
1237 btrfs_set_device_generation(leaf, dev_item,
1238 device->generation);
1239 btrfs_mark_buffer_dirty(leaf);
1247 btrfs_free_path(path);
1251 int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1253 struct btrfs_trans_handle *trans;
1254 struct btrfs_device *device;
1255 struct block_device *bdev;
1256 struct list_head *cur;
1257 struct list_head *devices;
1258 struct super_block *sb = root->fs_info->sb;
1260 int seeding_dev = 0;
1263 if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
1266 bdev = open_bdev_exclusive(device_path, 0, root->fs_info->bdev_holder);
1271 if (root->fs_info->fs_devices->seeding) {
seeding_dev = 1;
down_write(&sb->s_umount);
1274 mutex_lock(&uuid_mutex);
1277 filemap_write_and_wait(bdev->bd_inode->i_mapping);
1278 mutex_lock(&root->fs_info->volume_mutex);
1280 devices = &root->fs_info->fs_devices->devices;
1281 list_for_each(cur, devices) {
1282 device = list_entry(cur, struct btrfs_device, dev_list);
1283 if (device->bdev == bdev) {
1289 device = kzalloc(sizeof(*device), GFP_NOFS);
1291 /* we can safely leave the fs_devices entry around */
1296 device->name = kstrdup(device_path, GFP_NOFS);
1297 if (!device->name) {
1303 ret = find_next_devid(root, &device->devid);
1309 trans = btrfs_start_transaction(root, 1);
1312 device->barriers = 1;
1313 device->writeable = 1;
1314 device->work.func = pending_bios_fn;
1315 generate_random_uuid(device->uuid);
1316 spin_lock_init(&device->io_lock);
1317 device->generation = trans->transid;
1318 device->io_width = root->sectorsize;
1319 device->io_align = root->sectorsize;
1320 device->sector_size = root->sectorsize;
1321 device->total_bytes = i_size_read(bdev->bd_inode);
1322 device->dev_root = root->fs_info->dev_root;
1323 device->bdev = bdev;
1324 device->in_fs_metadata = 1;
1326 set_blocksize(device->bdev, 4096);
1329 sb->s_flags &= ~MS_RDONLY;
1330 ret = btrfs_prepare_sprout(trans, root);
1334 device->fs_devices = root->fs_info->fs_devices;
1335 list_add(&device->dev_list, &root->fs_info->fs_devices->devices);
1336 list_add(&device->dev_alloc_list,
1337 &root->fs_info->fs_devices->alloc_list);
1338 root->fs_info->fs_devices->num_devices++;
1339 root->fs_info->fs_devices->open_devices++;
1340 root->fs_info->fs_devices->rw_devices++;
1341 root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
1343 total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
1344 btrfs_set_super_total_bytes(&root->fs_info->super_copy,
1345 total_bytes + device->total_bytes);
1347 total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
btrfs_set_super_num_devices(&root->fs_info->super_copy,
total_bytes + 1);
1352 ret = init_first_rw_device(trans, root, device);
1354 ret = btrfs_finish_sprout(trans, root);
1357 ret = btrfs_add_device(trans, root, device);
1360 unlock_chunks(root);
1361 btrfs_commit_transaction(trans, root);
1364 mutex_unlock(&uuid_mutex);
1365 up_write(&sb->s_umount);
1367 ret = btrfs_relocate_sys_chunks(root);
1371 mutex_unlock(&root->fs_info->volume_mutex);
1374 close_bdev_exclusive(bdev, 0);
1376 mutex_unlock(&uuid_mutex);
1377 up_write(&sb->s_umount);
static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
1383 struct btrfs_device *device)
1386 struct btrfs_path *path;
1387 struct btrfs_root *root;
1388 struct btrfs_dev_item *dev_item;
1389 struct extent_buffer *leaf;
1390 struct btrfs_key key;
1392 root = device->dev_root->fs_info->chunk_root;
1394 path = btrfs_alloc_path();
1398 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1399 key.type = BTRFS_DEV_ITEM_KEY;
1400 key.offset = device->devid;
1402 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1411 leaf = path->nodes[0];
1412 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1414 btrfs_set_device_id(leaf, dev_item, device->devid);
1415 btrfs_set_device_type(leaf, dev_item, device->type);
1416 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1417 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1418 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1419 btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
1420 btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
1421 btrfs_mark_buffer_dirty(leaf);
1424 btrfs_free_path(path);
1428 static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
1429 struct btrfs_device *device, u64 new_size)
1431 struct btrfs_super_block *super_copy =
1432 &device->dev_root->fs_info->super_copy;
1433 u64 old_total = btrfs_super_total_bytes(super_copy);
1434 u64 diff = new_size - device->total_bytes;
1436 if (!device->writeable)
1438 if (new_size <= device->total_bytes)
1441 btrfs_set_super_total_bytes(super_copy, old_total + diff);
1442 device->fs_devices->total_rw_bytes += diff;
1444 device->total_bytes = new_size;
1445 return btrfs_update_device(trans, device);
1448 int btrfs_grow_device(struct btrfs_trans_handle *trans,
1449 struct btrfs_device *device, u64 new_size)
1452 lock_chunks(device->dev_root);
1453 ret = __btrfs_grow_device(trans, device, new_size);
1454 unlock_chunks(device->dev_root);
1458 static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
1459 struct btrfs_root *root,
u64 chunk_tree, u64 chunk_objectid,
u64 chunk_offset)
1464 struct btrfs_path *path;
1465 struct btrfs_key key;
1467 root = root->fs_info->chunk_root;
1468 path = btrfs_alloc_path();
1472 key.objectid = chunk_objectid;
1473 key.offset = chunk_offset;
1474 key.type = BTRFS_CHUNK_ITEM_KEY;
1476 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1479 ret = btrfs_del_item(trans, root, path);
1482 btrfs_free_path(path);
static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid,
u64 chunk_offset)
1489 struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
1490 struct btrfs_disk_key *disk_key;
1491 struct btrfs_chunk *chunk;
1498 struct btrfs_key key;
1500 array_size = btrfs_super_sys_array_size(super_copy);
1502 ptr = super_copy->sys_chunk_array;
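/*
* sys_chunk_array is a packed sequence of (struct btrfs_disk_key,
* struct btrfs_chunk) pairs, each pair sized by its stripe count;
* walk it entry by entry to find the chunk being deleted
*/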
1505 while (cur < array_size) {
1506 disk_key = (struct btrfs_disk_key *)ptr;
1507 btrfs_disk_key_to_cpu(&key, disk_key);
1509 len = sizeof(*disk_key);
1511 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
1512 chunk = (struct btrfs_chunk *)(ptr + len);
1513 num_stripes = btrfs_stack_chunk_num_stripes(chunk);
1514 len += btrfs_chunk_item_size(num_stripes);
1519 if (key.objectid == chunk_objectid &&
1520 key.offset == chunk_offset) {
1521 memmove(ptr, ptr + len, array_size - (cur + len));
array_size -= len;
btrfs_set_super_sys_array_size(super_copy, array_size);
1532 static int btrfs_relocate_chunk(struct btrfs_root *root,
u64 chunk_tree, u64 chunk_objectid,
u64 chunk_offset)
1536 struct extent_map_tree *em_tree;
1537 struct btrfs_root *extent_root;
1538 struct btrfs_trans_handle *trans;
1539 struct extent_map *em;
1540 struct map_lookup *map;
1544 printk("btrfs relocating chunk %llu\n",
1545 (unsigned long long)chunk_offset);
1546 root = root->fs_info->chunk_root;
1547 extent_root = root->fs_info->extent_root;
1548 em_tree = &root->fs_info->mapping_tree.map_tree;
1550 /* step one, relocate all the extents inside this chunk */
1551 ret = btrfs_relocate_block_group(extent_root, chunk_offset);
1554 trans = btrfs_start_transaction(root, 1);
/*
* step two, delete the device extents and the
* chunk tree entries
*/
1563 spin_lock(&em_tree->lock);
1564 em = lookup_extent_mapping(em_tree, chunk_offset, 1);
1565 spin_unlock(&em_tree->lock);
1567 BUG_ON(em->start > chunk_offset ||
1568 em->start + em->len < chunk_offset);
1569 map = (struct map_lookup *)em->bdev;
1571 for (i = 0; i < map->num_stripes; i++) {
1572 ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
1573 map->stripes[i].physical);
1576 if (map->stripes[i].dev) {
1577 ret = btrfs_update_device(trans, map->stripes[i].dev);
ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
chunk_offset);
1586 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
1587 ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
1591 ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
1594 spin_lock(&em_tree->lock);
1595 remove_extent_mapping(em_tree, em);
1596 spin_unlock(&em_tree->lock);
1601 /* once for the tree */
1602 free_extent_map(em);
/* once for us */
free_extent_map(em);
1606 unlock_chunks(root);
1607 btrfs_end_transaction(trans, root);
1611 static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
1613 struct btrfs_root *chunk_root = root->fs_info->chunk_root;
1614 struct btrfs_path *path;
1615 struct extent_buffer *leaf;
1616 struct btrfs_chunk *chunk;
1617 struct btrfs_key key;
1618 struct btrfs_key found_key;
1619 u64 chunk_tree = chunk_root->root_key.objectid;
1623 path = btrfs_alloc_path();
1627 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
1628 key.offset = (u64)-1;
1629 key.type = BTRFS_CHUNK_ITEM_KEY;
1632 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
ret = btrfs_previous_item(chunk_root, path, key.objectid,
key.type);
1644 leaf = path->nodes[0];
1645 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1647 chunk = btrfs_item_ptr(leaf, path->slots[0],
1648 struct btrfs_chunk);
1649 chunk_type = btrfs_chunk_type(leaf, chunk);
1650 btrfs_release_path(chunk_root, path);
1652 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
found_key.objectid, found_key.offset);
1659 if (found_key.offset == 0)
1661 key.offset = found_key.offset - 1;
1665 btrfs_free_path(path);
/* returns factor tenths of num (num * factor / 10) */
static u64 div_factor(u64 num, int factor)
{
if (factor == 10)
return num;
num *= factor;
do_div(num, 10);
return num;
}
1678 int btrfs_balance(struct btrfs_root *dev_root)
1681 struct list_head *cur;
1682 struct list_head *devices = &dev_root->fs_info->fs_devices->devices;
1683 struct btrfs_device *device;
1686 struct btrfs_path *path;
1687 struct btrfs_key key;
1688 struct btrfs_chunk *chunk;
1689 struct btrfs_root *chunk_root = dev_root->fs_info->chunk_root;
1690 struct btrfs_trans_handle *trans;
1691 struct btrfs_key found_key;
1693 if (dev_root->fs_info->sb->s_flags & MS_RDONLY)
1696 mutex_lock(&dev_root->fs_info->volume_mutex);
1697 dev_root = dev_root->fs_info->dev_root;
/* step one, make some room on all the devices by shrinking each one
* a little and growing it back, which relocates chunks off the tail
*/
1700 list_for_each(cur, devices) {
1701 device = list_entry(cur, struct btrfs_device, dev_list);
1702 old_size = device->total_bytes;
1703 size_to_free = div_factor(old_size, 1);
1704 size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
1705 if (!device->writeable ||
1706 device->total_bytes - device->bytes_used > size_to_free)
1709 ret = btrfs_shrink_device(device, old_size - size_to_free);
1712 trans = btrfs_start_transaction(dev_root, 1);
1715 ret = btrfs_grow_device(trans, device, old_size);
1718 btrfs_end_transaction(trans, dev_root);
1721 /* step two, relocate all the chunks */
1722 path = btrfs_alloc_path();
1725 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
1726 key.offset = (u64)-1;
1727 key.type = BTRFS_CHUNK_ITEM_KEY;
1730 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
/*
* this shouldn't happen, it means the last relocate
* failed
*/
1741 ret = btrfs_previous_item(chunk_root, path, 0,
1742 BTRFS_CHUNK_ITEM_KEY);
btrfs_item_key_to_cpu(path->nodes[0], &found_key,
path->slots[0]);
1748 if (found_key.objectid != key.objectid)
1751 chunk = btrfs_item_ptr(path->nodes[0],
path->slots[0],
struct btrfs_chunk);
1754 key.offset = found_key.offset;
1755 /* chunk zero is special */
1756 if (key.offset == 0)
1759 btrfs_release_path(chunk_root, path);
1760 ret = btrfs_relocate_chunk(chunk_root,
chunk_root->root_key.objectid,
found_key.objectid, found_key.offset);
1768 btrfs_free_path(path);
1769 mutex_unlock(&dev_root->fs_info->volume_mutex);
/*
* shrinking a device means finding all of the device extents past
* the new size, and then following the back refs to the chunks.
* The chunk relocation code actually frees the device extent
*/
1778 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
1780 struct btrfs_trans_handle *trans;
1781 struct btrfs_root *root = device->dev_root;
1782 struct btrfs_dev_extent *dev_extent = NULL;
1783 struct btrfs_path *path;
1790 struct extent_buffer *l;
1791 struct btrfs_key key;
1792 struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
1793 u64 old_total = btrfs_super_total_bytes(super_copy);
1794 u64 diff = device->total_bytes - new_size;
1796 if (new_size >= device->total_bytes)
1799 path = btrfs_alloc_path();
1803 trans = btrfs_start_transaction(root, 1);
1813 device->total_bytes = new_size;
1814 if (device->writeable)
1815 device->fs_devices->total_rw_bytes -= diff;
1816 ret = btrfs_update_device(trans, device);
1818 unlock_chunks(root);
1819 btrfs_end_transaction(trans, root);
1822 WARN_ON(diff > old_total);
1823 btrfs_set_super_total_bytes(super_copy, old_total - diff);
1824 unlock_chunks(root);
1825 btrfs_end_transaction(trans, root);
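/*
* now walk the dev extent items from the end of the device backwards
* and relocate every chunk that still owns bytes past new_size
*/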
1827 key.objectid = device->devid;
1828 key.offset = (u64)-1;
1829 key.type = BTRFS_DEV_EXTENT_KEY;
1832 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1836 ret = btrfs_previous_item(root, path, 0, key.type);
1845 slot = path->slots[0];
1846 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
1848 if (key.objectid != device->devid)
1851 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1852 length = btrfs_dev_extent_length(l, dev_extent);
1854 if (key.offset + length <= new_size)
1857 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
1858 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
1859 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
1860 btrfs_release_path(root, path);
ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
chunk_offset);
1869 btrfs_free_path(path);
1873 static int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
1874 struct btrfs_root *root,
1875 struct btrfs_key *key,
1876 struct btrfs_chunk *chunk, int item_size)
1878 struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
1879 struct btrfs_disk_key disk_key;
1883 array_size = btrfs_super_sys_array_size(super_copy);
1884 if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
1887 ptr = super_copy->sys_chunk_array + array_size;
1888 btrfs_cpu_key_to_disk(&disk_key, key);
1889 memcpy(ptr, &disk_key, sizeof(disk_key));
1890 ptr += sizeof(disk_key);
1891 memcpy(ptr, chunk, item_size);
1892 item_size += sizeof(disk_key);
1893 btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
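/*
* how many bytes of logical address space a chunk provides: mirrored
* profiles (RAID1 and DUP) keep two copies, so the chunk is only as
* large as a single stripe; RAID10 mirrors each pair of sub stripes;
* RAID0 and single concatenate every stripe
*/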
static noinline u64 chunk_bytes_by_type(u64 type, u64 calc_size,
1898 int num_stripes, int sub_stripes)
1900 if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
1902 else if (type & BTRFS_BLOCK_GROUP_RAID10)
1903 return calc_size * (num_stripes / sub_stripes);
1905 return calc_size * num_stripes;
1908 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
1909 struct btrfs_root *extent_root,
1910 struct map_lookup **map_ret,
1911 u64 *num_bytes, u64 *stripe_size,
1912 u64 start, u64 type)
1914 struct btrfs_fs_info *info = extent_root->fs_info;
1915 struct btrfs_device *device = NULL;
1916 struct btrfs_fs_devices *fs_devices = info->fs_devices;
1917 struct list_head *cur;
1918 struct map_lookup *map = NULL;
1919 struct extent_map_tree *em_tree;
1920 struct extent_map *em;
1921 struct list_head private_devs;
1922 int min_stripe_size = 1 * 1024 * 1024;
1923 u64 calc_size = 1024 * 1024 * 1024;
1924 u64 max_chunk_size = calc_size;
1929 int num_stripes = 1;
1930 int min_stripes = 1;
1931 int sub_stripes = 0;
1935 int stripe_len = 64 * 1024;
1937 if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
1938 (type & BTRFS_BLOCK_GROUP_DUP)) {
1940 type &= ~BTRFS_BLOCK_GROUP_DUP;
1942 if (list_empty(&fs_devices->alloc_list))
1945 if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
1946 num_stripes = fs_devices->rw_devices;
1949 if (type & (BTRFS_BLOCK_GROUP_DUP)) {
1953 if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
1954 num_stripes = min_t(u64, 2, fs_devices->rw_devices);
1955 if (num_stripes < 2)
1959 if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
1960 num_stripes = fs_devices->rw_devices;
1961 if (num_stripes < 4)
1963 num_stripes &= ~(u32)1;
1968 if (type & BTRFS_BLOCK_GROUP_DATA) {
1969 max_chunk_size = 10 * calc_size;
1970 min_stripe_size = 64 * 1024 * 1024;
1971 } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
1972 max_chunk_size = 4 * calc_size;
1973 min_stripe_size = 32 * 1024 * 1024;
1974 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
1975 calc_size = 8 * 1024 * 1024;
1976 max_chunk_size = calc_size * 2;
1977 min_stripe_size = 1 * 1024 * 1024;
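/*
* summary of the defaults above: data chunks start at 1GB with a 10GB
* cap and 64MB minimum stripe, metadata at 1GB with a 4GB cap and a
* 32MB minimum stripe, system chunks at 8MB with a 16MB cap and a 1MB
* minimum stripe
*/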
1980 /* we don't want a chunk larger than 10% of writeable space */
max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
max_chunk_size);
1985 if (!map || map->num_stripes != num_stripes) {
1987 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
1990 map->num_stripes = num_stripes;
1993 if (calc_size * num_stripes > max_chunk_size) {
1994 calc_size = max_chunk_size;
1995 do_div(calc_size, num_stripes);
1996 do_div(calc_size, stripe_len);
1997 calc_size *= stripe_len;
1999 /* we don't want tiny stripes */
2000 calc_size = max_t(u64, min_stripe_size, calc_size);
2002 do_div(calc_size, stripe_len);
2003 calc_size *= stripe_len;
2005 cur = fs_devices->alloc_list.next;
2008 if (type & BTRFS_BLOCK_GROUP_DUP)
2009 min_free = calc_size * 2;
else
min_free = calc_size;
/*
* we add 1MB because we never use the first 1MB of the device, unless
* we've looped, then we are likely allocating the maximum amount of
* space left already
*/
if (!looped)
min_free += 1024 * 1024;
2021 INIT_LIST_HEAD(&private_devs);
while (index < num_stripes) {
2023 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
2024 BUG_ON(!device->writeable);
2025 if (device->total_bytes > device->bytes_used)
2026 avail = device->total_bytes - device->bytes_used;
2031 if (device->in_fs_metadata && avail >= min_free) {
2032 ret = find_free_dev_extent(trans, device,
2033 min_free, &dev_offset);
list_move_tail(&device->dev_alloc_list,
&private_devs);
2037 map->stripes[index].dev = device;
2038 map->stripes[index].physical = dev_offset;
2040 if (type & BTRFS_BLOCK_GROUP_DUP) {
2041 map->stripes[index].dev = device;
2042 map->stripes[index].physical =
2043 dev_offset + calc_size;
} else if (device->in_fs_metadata && avail > max_avail)
max_avail = avail;
2049 if (cur == &fs_devices->alloc_list)
2052 list_splice(&private_devs, &fs_devices->alloc_list);
2053 if (index < num_stripes) {
2054 if (index >= min_stripes) {
2055 num_stripes = index;
2056 if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
2057 num_stripes /= sub_stripes;
2058 num_stripes *= sub_stripes;
2063 if (!looped && max_avail > 0) {
looped = 1;
calc_size = max_avail;
2071 map->sector_size = extent_root->sectorsize;
2072 map->stripe_len = stripe_len;
2073 map->io_align = stripe_len;
2074 map->io_width = stripe_len;
map->type = type;
map->num_stripes = num_stripes;
2077 map->sub_stripes = sub_stripes;
*map_ret = map;
*stripe_size = calc_size;
2081 *num_bytes = chunk_bytes_by_type(type, calc_size,
2082 num_stripes, sub_stripes);
2084 em = alloc_extent_map(GFP_NOFS);
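/*
* there is no dedicated field in the extent_map for the stripe map,
* so the map_lookup pointer is stashed in em->bdev; every reader
* casts it back with (struct map_lookup *)em->bdev
*/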
2089 em->bdev = (struct block_device *)map;
em->start = start;
em->len = *num_bytes;
2092 em->block_start = 0;
2093 em->block_len = em->len;
2095 em_tree = &extent_root->fs_info->mapping_tree.map_tree;
2096 spin_lock(&em_tree->lock);
2097 ret = add_extent_mapping(em_tree, em);
2098 spin_unlock(&em_tree->lock);
2100 free_extent_map(em);
2102 ret = btrfs_make_block_group(trans, extent_root, 0, type,
BTRFS_FIRST_CHUNK_TREE_OBJECTID,
start, *num_bytes);
2108 while (index < map->num_stripes) {
2109 device = map->stripes[index].dev;
2110 dev_offset = map->stripes[index].physical;
2112 ret = btrfs_alloc_dev_extent(trans, device,
2113 info->chunk_root->root_key.objectid,
2114 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
2115 start, dev_offset, calc_size);
2123 static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
2124 struct btrfs_root *extent_root,
2125 struct map_lookup *map, u64 chunk_offset,
2126 u64 chunk_size, u64 stripe_size)
2129 struct btrfs_key key;
2130 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
2131 struct btrfs_device *device;
2132 struct btrfs_chunk *chunk;
2133 struct btrfs_stripe *stripe;
2134 size_t item_size = btrfs_chunk_item_size(map->num_stripes);
2138 chunk = kzalloc(item_size, GFP_NOFS);
2143 while (index < map->num_stripes) {
2144 device = map->stripes[index].dev;
2145 device->bytes_used += stripe_size;
2146 ret = btrfs_update_device(trans, device);
2152 stripe = &chunk->stripe;
2153 while (index < map->num_stripes) {
2154 device = map->stripes[index].dev;
2155 dev_offset = map->stripes[index].physical;
2157 btrfs_set_stack_stripe_devid(stripe, device->devid);
2158 btrfs_set_stack_stripe_offset(stripe, dev_offset);
2159 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
2164 btrfs_set_stack_chunk_length(chunk, chunk_size);
2165 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
2166 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
2167 btrfs_set_stack_chunk_type(chunk, map->type);
2168 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
2169 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
2170 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
2171 btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
2172 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
2174 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2175 key.type = BTRFS_CHUNK_ITEM_KEY;
2176 key.offset = chunk_offset;
2178 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
2181 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
ret = btrfs_add_system_chunk(trans, chunk_root, &key, chunk,
item_size);
/*
* Chunk allocation falls into two parts. The first part does the work
* that makes the newly allocated chunk usable, but does not do any
* operation that modifies the chunk tree. The second part does the
* work that requires modifying the chunk tree. This division is
* important for the bootstrap process of adding storage to a seed
* btrfs.
*/
2197 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
2198 struct btrfs_root *extent_root, u64 type)
2203 struct map_lookup *map;
2204 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
&chunk_offset);
2212 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
2213 &stripe_size, chunk_offset, type);
2217 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
2218 chunk_size, stripe_size);
static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
2224 struct btrfs_root *root,
2225 struct btrfs_device *device)
2228 u64 sys_chunk_offset;
2232 u64 sys_stripe_size;
2234 struct map_lookup *map;
2235 struct map_lookup *sys_map;
2236 struct btrfs_fs_info *fs_info = root->fs_info;
2237 struct btrfs_root *extent_root = fs_info->extent_root;
2240 ret = find_next_chunk(fs_info->chunk_root,
2241 BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
2244 alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
2245 (fs_info->metadata_alloc_profile &
2246 fs_info->avail_metadata_alloc_bits);
2247 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
2249 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
2250 &stripe_size, chunk_offset, alloc_profile);
2253 sys_chunk_offset = chunk_offset + chunk_size;
2255 alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
2256 (fs_info->system_alloc_profile &
2257 fs_info->avail_system_alloc_bits);
2258 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
2260 ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
2261 &sys_chunk_size, &sys_stripe_size,
2262 sys_chunk_offset, alloc_profile);
2265 ret = btrfs_add_device(trans, fs_info->chunk_root, device);
/*
* Modifying the chunk tree requires allocating new blocks from both
* the system block group and the metadata block group, so we can only
* do operations that modify the chunk tree after both block groups
* have been created.
*/
2274 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
2275 chunk_size, stripe_size);
2278 ret = __finish_chunk_alloc(trans, extent_root, sys_map,
sys_chunk_offset, sys_chunk_size,
sys_stripe_size);
2285 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
2287 struct extent_map *em;
2288 struct map_lookup *map;
2289 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
2293 spin_lock(&map_tree->map_tree.lock);
2294 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
2295 spin_unlock(&map_tree->map_tree.lock);
2299 map = (struct map_lookup *)em->bdev;
2300 for (i = 0; i < map->num_stripes; i++) {
2301 if (!map->stripes[i].dev->writeable) {
2306 free_extent_map(em);
2310 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
2312 extent_map_tree_init(&tree->map_tree, GFP_NOFS);
2315 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
2317 struct extent_map *em;
2320 spin_lock(&tree->map_tree.lock);
2321 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
if (em)
remove_extent_mapping(&tree->map_tree, em);
2324 spin_unlock(&tree->map_tree.lock);
if (!em)
break;
kfree(em->bdev);
/* once for us */
free_extent_map(em);
2330 /* once for the tree */
2331 free_extent_map(em);
2335 int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
2337 struct extent_map *em;
2338 struct map_lookup *map;
2339 struct extent_map_tree *em_tree = &map_tree->map_tree;
2342 spin_lock(&em_tree->lock);
2343 em = lookup_extent_mapping(em_tree, logical, len);
2344 spin_unlock(&em_tree->lock);
2347 BUG_ON(em->start > logical || em->start + em->len < logical);
2348 map = (struct map_lookup *)em->bdev;
2349 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
2350 ret = map->num_stripes;
2351 else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
2352 ret = map->sub_stripes;
2355 free_extent_map(em);
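/*
* pick a stripe whose device is actually present: prefer the caller's
* optimal choice, otherwise fall back to the first stripe in the
* range that still has a block device attached
*/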
static int find_live_mirror(struct map_lookup *map, int first, int num,
int optimal)
2363 if (map->stripes[optimal].dev->bdev)
2365 for (i = first; i < first + num; i++) {
2366 if (map->stripes[i].dev->bdev)
/* we couldn't find one that doesn't fail. Just return something
* and the io error handling code will clean up eventually
*/
2375 static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
2376 u64 logical, u64 *length,
2377 struct btrfs_multi_bio **multi_ret,
2378 int mirror_num, struct page *unplug_page)
2380 struct extent_map *em;
2381 struct map_lookup *map;
2382 struct extent_map_tree *em_tree = &map_tree->map_tree;
2386 int stripes_allocated = 8;
2387 int stripes_required = 1;
2392 struct btrfs_multi_bio *multi = NULL;
2394 if (multi_ret && !(rw & (1 << BIO_RW))) {
2395 stripes_allocated = 1;
multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
GFP_NOFS);
2404 atomic_set(&multi->error, 0);
2407 spin_lock(&em_tree->lock);
2408 em = lookup_extent_mapping(em_tree, logical, *length);
2409 spin_unlock(&em_tree->lock);
2411 if (!em && unplug_page)
2415 printk("unable to find logical %Lu len %Lu\n", logical, *length);
2419 BUG_ON(em->start > logical || em->start + em->len < logical);
2420 map = (struct map_lookup *)em->bdev;
2421 offset = logical - em->start;
2423 if (mirror_num > map->num_stripes)
2426 /* if our multi bio struct is too small, back off and try again */
2427 if (rw & (1 << BIO_RW)) {
2428 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
2429 BTRFS_BLOCK_GROUP_DUP)) {
2430 stripes_required = map->num_stripes;
2432 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
2433 stripes_required = map->sub_stripes;
2437 if (multi_ret && rw == WRITE &&
2438 stripes_allocated < stripes_required) {
2439 stripes_allocated = map->num_stripes;
2440 free_extent_map(em);
/*
* stripe_nr counts the total number of stripes we have to stride
* to get to this block
*/
2449 do_div(stripe_nr, map->stripe_len);
2451 stripe_offset = stripe_nr * map->stripe_len;
2452 BUG_ON(offset < stripe_offset);
/* stripe_offset is the offset of this block in its stripe */
2455 stripe_offset = offset - stripe_offset;
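/*
* example: with the default 64KB stripe_len, an offset of 200KB into
* the chunk yields stripe_nr = 3 and stripe_offset = 8KB
*/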
2457 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
2458 BTRFS_BLOCK_GROUP_RAID10 |
2459 BTRFS_BLOCK_GROUP_DUP)) {
2460 /* we limit the length of each bio to what fits in a stripe */
2461 *length = min_t(u64, em->len - offset,
2462 map->stripe_len - stripe_offset);
2464 *length = em->len - offset;
2467 if (!multi_ret && !unplug_page)
2472 if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
2473 if (unplug_page || (rw & (1 << BIO_RW)))
2474 num_stripes = map->num_stripes;
2475 else if (mirror_num)
2476 stripe_index = mirror_num - 1;
2478 stripe_index = find_live_mirror(map, 0,
map->num_stripes,
current->pid % map->num_stripes);
2483 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
2484 if (rw & (1 << BIO_RW))
2485 num_stripes = map->num_stripes;
2486 else if (mirror_num)
2487 stripe_index = mirror_num - 1;
2489 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
2490 int factor = map->num_stripes / map->sub_stripes;
2492 stripe_index = do_div(stripe_nr, factor);
2493 stripe_index *= map->sub_stripes;
2495 if (unplug_page || (rw & (1 << BIO_RW)))
2496 num_stripes = map->sub_stripes;
2497 else if (mirror_num)
2498 stripe_index += mirror_num - 1;
2500 stripe_index = find_live_mirror(map, stripe_index,
2501 map->sub_stripes, stripe_index +
2502 current->pid % map->sub_stripes);
/*
* after this do_div call, stripe_nr is the number of stripes
* on this device we have to walk to find the data, and
* stripe_index is the number of our device in the stripe array
*/
2510 stripe_index = do_div(stripe_nr, map->num_stripes);
2512 BUG_ON(stripe_index >= map->num_stripes);
2514 for (i = 0; i < num_stripes; i++) {
2516 struct btrfs_device *device;
2517 struct backing_dev_info *bdi;
2519 device = map->stripes[stripe_index].dev;
2521 bdi = blk_get_backing_dev_info(device->bdev);
2522 if (bdi->unplug_io_fn) {
2523 bdi->unplug_io_fn(bdi, unplug_page);
2527 multi->stripes[i].physical =
2528 map->stripes[stripe_index].physical +
2529 stripe_offset + stripe_nr * map->stripe_len;
2530 multi->stripes[i].dev = map->stripes[stripe_index].dev;
2536 multi->num_stripes = num_stripes;
2537 multi->max_errors = max_errors;
2540 free_extent_map(em);
int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
		    u64 logical, u64 *length,
		    struct btrfs_multi_bio **multi_ret, int mirror_num)
{
	return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
				 mirror_num, NULL);
}
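/*
 * btrfs_rmap_block is the reverse of btrfs_map_block: given a physical
 * byte offset on one device, it computes the logical byte offsets inside
 * the chunk starting at chunk_start that map to it.  The caller gets an
 * allocated array of logical addresses via *logical and *naddrs, plus
 * the stripe length.
 */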
int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
		     u64 chunk_start, u64 physical, u64 devid,
		     u64 **logical, int *naddrs, int *stripe_len)
{
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	struct extent_map *em;
	struct map_lookup *map;
	u64 *buf;
	u64 bytenr;
	u64 length;
	u64 stripe_nr;
	int i, j, nr = 0;
	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_start, 1);
	spin_unlock(&em_tree->lock);

	BUG_ON(!em || em->start != chunk_start);
	map = (struct map_lookup *)em->bdev;

	length = em->len;
	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		do_div(length, map->num_stripes / map->sub_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
		do_div(length, map->num_stripes);
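	/*
	 * For striped profiles, each device holds only a fraction of the
	 * chunk: 1/num_stripes of it for RAID0, and sub_stripes/num_stripes
	 * of it for RAID10.  Trim length down to the bytes of this chunk
	 * that live on a single device so the physical range check in the
	 * loop below is correct.
	 */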
	buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
	BUG_ON(!buf);

	for (i = 0; i < map->num_stripes; i++) {
		if (devid && map->stripes[i].dev->devid != devid)
			continue;
		if (map->stripes[i].physical > physical ||
		    map->stripes[i].physical + length <= physical)
			continue;

		stripe_nr = physical - map->stripes[i].physical;
		do_div(stripe_nr, map->stripe_len);

		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripe_nr = stripe_nr * map->num_stripes + i;
			do_div(stripe_nr, map->sub_stripes);
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
			stripe_nr = stripe_nr * map->num_stripes + i;
		}
		bytenr = chunk_start + stripe_nr * map->stripe_len;
		WARN_ON(nr >= map->num_stripes);
		for (j = 0; j < nr; j++) {
			if (buf[j] == bytenr)
				break;
		}
		if (j == nr) {
			WARN_ON(nr >= map->num_stripes);
			buf[nr++] = bytenr;
		}
	}
	/* sanity check: every logical address found must map back here */
	for (i = 0; i < nr; i++) {
		struct btrfs_multi_bio *multi;
		struct btrfs_bio_stripe *stripe;
		int ret;

		length = 1;
		ret = btrfs_map_block(map_tree, WRITE, buf[i],
				      &length, &multi, 0);
		BUG_ON(ret);

		stripe = multi->stripes;
		for (j = 0; j < multi->num_stripes; j++, stripe++) {
			if (stripe->physical >= physical &&
			    physical < stripe->physical + length)
				break;
		}
		BUG_ON(j >= multi->num_stripes);
		kfree(multi);
	}
	*logical = buf;
	*naddrs = nr;
	*stripe_len = map->stripe_len;

	free_extent_map(em);
	return 0;
}
int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
		      u64 logical, struct page *page)
{
	u64 length = PAGE_CACHE_SIZE;
	return __btrfs_map_block(map_tree, READ, logical, &length,
				 NULL, 0, page);
}
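/*
 * Completion handler shared by every bio sent to the stripes of a
 * multi-device write.  Each completion drops stripes_pending; the last
 * one restores the original bio's private data and end_io callback and
 * completes it, reporting an error only if more stripes failed than the
 * profile tolerates (multi->max_errors).
 */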
static void end_bio_multi_stripe(struct bio *bio, int err)
{
	struct btrfs_multi_bio *multi = bio->bi_private;
	int is_orig_bio = 0;

	if (err)
		atomic_inc(&multi->error);

	if (bio == multi->orig_bio)
		is_orig_bio = 1;

	if (atomic_dec_and_test(&multi->stripes_pending)) {
		if (!is_orig_bio) {
			bio_put(bio);
			bio = multi->orig_bio;
		}
		bio->bi_private = multi->private;
		bio->bi_end_io = multi->end_io;
		/* only send an error to the higher layers if it is
		 * beyond the tolerance of the multi-bio
		 */
		if (atomic_read(&multi->error) > multi->max_errors) {
			err = -EIO;
		} else if (err) {
			/*
			 * this bio is actually up to date, we didn't
			 * go over the max number of errors
			 */
			set_bit(BIO_UPTODATE, &bio->bi_flags);
			err = 0;
		}
		kfree(multi);

		bio_endio(bio, err);
	} else if (!is_orig_bio) {
		bio_put(bio);
	}
}
struct async_sched {
	struct bio *bio;
	int rw;
	struct btrfs_fs_info *info;
	struct btrfs_work work;
};

/*
 * see run_scheduled_bios for a description of why bios are collected for
 * async submit.
 *
 * This will add one bio to the pending list for a device and make sure
 * the work struct is scheduled.
 */
static noinline int schedule_bio(struct btrfs_root *root,
				 struct btrfs_device *device,
				 int rw, struct bio *bio)
{
	int should_queue = 1;

	/* don't bother with additional async steps for reads, right now */
	if (!(rw & (1 << BIO_RW))) {
		bio_get(bio);
		submit_bio(rw, bio);
		bio_put(bio);
		return 0;
	}

	/*
	 * nr_async_bios allows us to reliably return congestion to the
	 * higher layers.  Otherwise, the async bio makes it appear we have
	 * made progress against dirty pages when we've really just put it
	 * on a queue for later
	 */
	atomic_inc(&root->fs_info->nr_async_bios);
	WARN_ON(bio->bi_next);
	bio->bi_next = NULL;
	bio->bi_rw |= rw;
	spin_lock(&device->io_lock);

	if (device->pending_bio_tail)
		device->pending_bio_tail->bi_next = bio;

	device->pending_bio_tail = bio;
	if (!device->pending_bios)
		device->pending_bios = bio;
	if (device->running_pending)
		should_queue = 0;

	spin_unlock(&device->io_lock);
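	/*
	 * If the worker thread is already draining this device's pending
	 * list (running_pending), it will pick up the bio we just linked
	 * in, so there is no need to schedule the work item again.
	 */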
	if (should_queue)
		btrfs_queue_worker(&root->fs_info->submit_workers,
				   &device->work);
	return 0;
}
int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
		  int mirror_num, int async_submit)
{
	struct btrfs_mapping_tree *map_tree;
	struct btrfs_device *dev;
	struct bio *first_bio = bio;
	u64 logical = (u64)bio->bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	struct btrfs_multi_bio *multi = NULL;
	int ret;
	int dev_nr = 0;
	int total_devs = 1;

	length = bio->bi_size;
	map_tree = &root->fs_info->mapping_tree;
	map_length = length;

	ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi,
			      mirror_num);
	BUG_ON(ret);

	total_devs = multi->num_stripes;
	if (map_length < length) {
		printk(KERN_ERR "mapping failed logical %Lu bio len %Lu "
		       "len %Lu\n", logical, length, map_length);
		BUG();
	}
	multi->end_io = first_bio->bi_end_io;
	multi->private = first_bio->bi_private;
	multi->orig_bio = first_bio;
	atomic_set(&multi->stripes_pending, multi->num_stripes);
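	/*
	 * stripes_pending acts as a reference count, one per stripe bio
	 * submitted below; end_bio_multi_stripe completes the original
	 * bio once it drops to zero.
	 */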
	while (dev_nr < total_devs) {
		if (total_devs > 1) {
			if (dev_nr < total_devs - 1) {
				bio = bio_clone(first_bio, GFP_NOFS);
				BUG_ON(!bio);
			} else {
				bio = first_bio;
			}
			bio->bi_private = multi;
			bio->bi_end_io = end_bio_multi_stripe;
		}
		bio->bi_sector = multi->stripes[dev_nr].physical >> 9;
		dev = multi->stripes[dev_nr].dev;
		if (dev && dev->bdev) {
			BUG_ON(rw == WRITE && !dev->writeable);
			bio->bi_bdev = dev->bdev;
			if (async_submit)
				schedule_bio(root, dev, rw, bio);
			else
				submit_bio(rw, bio);
		} else {
			bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
			bio->bi_sector = logical >> 9;
			bio_endio(bio, -EIO);
		}
		dev_nr++;
	}
	if (total_devs == 1)
		kfree(multi);
	return 0;
}
struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
				       u8 *uuid, u8 *fsid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;

	cur_devices = root->fs_info->fs_devices;
	while (cur_devices) {
		if (!fsid ||
		    !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			device = __find_device(&cur_devices->devices,
					       devid, uuid);
			if (device)
				return device;
		}
		cur_devices = cur_devices->seed;
	}
	return NULL;
}
static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
					    u64 devid, u8 *dev_uuid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device)
		return NULL;
	list_add(&device->dev_list,
		 &fs_devices->devices);
	device->barriers = 1;
	device->dev_root = root->fs_info->dev_root;
	device->devid = devid;
	device->work.func = pending_bios_fn;
	fs_devices->num_devices++;
	spin_lock_init(&device->io_lock);
	INIT_LIST_HEAD(&device->dev_alloc_list);
	memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
	return device;
}
static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);

	spin_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
	spin_unlock(&map_tree->map_tree.lock);

	/* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}

	em = alloc_extent_map(GFP_NOFS);
	if (!em)
		return -ENOMEM;
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	/* a single allocation sized for the whole stripe array */
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}
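	/*
	 * The extent_map serves purely as a logical->chunk lookup here,
	 * so its bdev field is overloaded to carry the map_lookup rather
	 * than a real block device; readers of this tree cast it back
	 * (see __btrfs_map_block above).
	 */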
	em->bdev = (struct block_device *)map;
	em->start = logical;
	em->len = length;
	em->block_start = 0;
	em->block_len = em->len;

	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
							NULL);
		if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
			kfree(map);
			free_extent_map(em);
			return -EIO;
		}
		if (!map->stripes[i].dev) {
			map->stripes[i].dev =
				add_missing_dev(root, devid, uuid);
			if (!map->stripes[i].dev) {
				kfree(map);
				free_extent_map(em);
				return -EIO;
			}
		}
		map->stripes[i].dev->in_fs_metadata = 1;
	}
	spin_lock(&map_tree->map_tree.lock);
	ret = add_extent_mapping(&map_tree->map_tree, em);
	spin_unlock(&map_tree->map_tree.lock);
	BUG_ON(ret);
	free_extent_map(em);

	return 0;
}
static int fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);

	return 0;
}
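/*
 * Seed filesystems: a read-only filesystem can act as the seed of a new
 * writable one, so the sprouted filesystem references device items whose
 * fsid belongs to the seed.  open_seed_devices finds that seed
 * fs_devices, opens it read-only and chains it behind the current
 * fs_devices via the seed pointer.
 */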
static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

	mutex_lock(&uuid_mutex);

	fs_devices = root->fs_info->fs_devices->seed;
	while (fs_devices) {
		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			ret = 0;
			goto out;
		}
		fs_devices = fs_devices->seed;
	}

	fs_devices = find_fsid(fsid);
	if (!fs_devices) {
		ret = -ENOENT;
		goto out;
	}

	if (fs_devices->opened) {
		ret = -EBUSY;
		goto out;
	}

	ret = __btrfs_open_devices(fs_devices, FMODE_READ,
				   root->fs_info->bdev_holder);
	if (ret)
		goto out;

	if (!fs_devices->seeding) {
		__btrfs_close_devices(fs_devices);
		ret = -EINVAL;
		goto out;
	}

	fs_devices->seed = root->fs_info->fs_devices->seed;
	root->fs_info->fs_devices->seed = fs_devices;
	fs_devices->sprouted = 1;
out:
	mutex_unlock(&uuid_mutex);
	return ret;
}
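/*
 * read_one_dev brings one device item from the chunk tree into memory.
 * Items whose fsid differs from ours belong to a seed filesystem and
 * trigger open_seed_devices; genuinely absent devices are tolerated only
 * with -o degraded, in which case a stub is created via add_missing_dev.
 */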
static int read_one_dev(struct btrfs_root *root,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_device *device;
	u64 devid;
	int ret;
	int seed_devices = 0;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid,
			   (unsigned long)btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid,
			   (unsigned long)btrfs_device_fsid(dev_item),
			   BTRFS_UUID_SIZE);

	if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
		ret = open_seed_devices(root, fs_uuid);
		if (ret && !btrfs_test_opt(root, DEGRADED))
			return ret;
		seed_devices = 1;
	}

	device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
	if (!device || !device->bdev) {
		if (!btrfs_test_opt(root, DEGRADED) || seed_devices)
			return -EIO;

		if (!device) {
			printk(KERN_WARNING "devid %Lu missing\n", devid);
			device = add_missing_dev(root, devid, dev_uuid);
			if (!device)
				return -ENOMEM;
		}
	}

	if (device->fs_devices != root->fs_info->fs_devices) {
		BUG_ON(device->writeable);
		if (device->generation !=
		    btrfs_device_generation(leaf, dev_item))
			return -EINVAL;
	}

	fill_device_from_item(leaf, dev_item, device);
	device->dev_root = root->fs_info->dev_root;
	device->in_fs_metadata = 1;
	if (device->writeable)
		device->fs_devices->total_rw_bytes += device->total_bytes;
	ret = 0;
#if 0
	ret = btrfs_open_device(device);
	if (ret)
		kfree(device);
#endif
	return ret;
}
int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
{
	struct btrfs_dev_item *dev_item;

	dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
						     dev_item);
	return read_one_dev(root, buf, dev_item);
}
int btrfs_read_sys_array(struct btrfs_root *root, u64 sb_bytenr)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	unsigned long sb_ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	sb = btrfs_find_create_tree_block(root, sb_bytenr,
					  BTRFS_SUPER_INFO_SIZE);
	if (!sb)
		return -ENOMEM;
	btrfs_set_buffer_uptodate(sb);
	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur = 0;
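	/*
	 * The sys_chunk_array is a packed sequence of (disk key, chunk
	 * item) pairs.  Chunk items are variable sized, so each iteration
	 * advances by the key size plus the item size computed from the
	 * chunk's stripe count.
	 */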
	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);
		ptr += len;
		sb_ptr += len;
		cur += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_ptr;
			ret = read_one_chunk(root, &key, sb, chunk);
			if (ret)
				break;
			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			len = btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		ptr += len;
		sb_ptr += len;
		cur += len;
	}
	free_extent_buffer(sb);
	return ret;
}
int btrfs_read_chunk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* first we search for all of the device items, and then we
	 * read in all of the chunk items.  This way we can create chunk
	 * mappings that reference all of the devices that are found
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
again:
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
			if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
				break;
			if (found_key.type == BTRFS_DEV_ITEM_KEY) {
				struct btrfs_dev_item *dev_item;
				dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
				ret = read_one_dev(root, leaf, dev_item);
				if (ret)
					goto error;
			}
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(root, &found_key, leaf, chunk);
			if (ret)
				goto error;
		}
		path->slots[0]++;
	}
	if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
		key.objectid = 0;
		btrfs_release_path(root, path);
		goto again;
	}

	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}