Merge branch 'for-chris' of git://git.kernel.org/pub/scm/linux/kernel/git/arne/btrfs...
[linux-2.6.git] / fs / btrfs / volumes.c
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 021110-1307, USA.
17  */
18 #include <linux/sched.h>
19 #include <linux/bio.h>
20 #include <linux/slab.h>
21 #include <linux/buffer_head.h>
22 #include <linux/blkdev.h>
23 #include <linux/random.h>
24 #include <linux/iocontext.h>
25 #include <linux/capability.h>
26 #include <asm/div64.h>
27 #include "compat.h"
28 #include "ctree.h"
29 #include "extent_map.h"
30 #include "disk-io.h"
31 #include "transaction.h"
32 #include "print-tree.h"
33 #include "volumes.h"
34 #include "async-thread.h"
35
36 static int init_first_rw_device(struct btrfs_trans_handle *trans,
37                                 struct btrfs_root *root,
38                                 struct btrfs_device *device);
39 static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
40
41 static DEFINE_MUTEX(uuid_mutex);
42 static LIST_HEAD(fs_uuids);
43
44 static void lock_chunks(struct btrfs_root *root)
45 {
46         mutex_lock(&root->fs_info->chunk_mutex);
47 }
48
49 static void unlock_chunks(struct btrfs_root *root)
50 {
51         mutex_unlock(&root->fs_info->chunk_mutex);
52 }
53
54 static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
55 {
56         struct btrfs_device *device;
57         WARN_ON(fs_devices->opened);
58         while (!list_empty(&fs_devices->devices)) {
59                 device = list_entry(fs_devices->devices.next,
60                                     struct btrfs_device, dev_list);
61                 list_del(&device->dev_list);
62                 kfree(device->name);
63                 kfree(device);
64         }
65         kfree(fs_devices);
66 }
67
68 int btrfs_cleanup_fs_uuids(void)
69 {
70         struct btrfs_fs_devices *fs_devices;
71
72         while (!list_empty(&fs_uuids)) {
73                 fs_devices = list_entry(fs_uuids.next,
74                                         struct btrfs_fs_devices, list);
75                 list_del(&fs_devices->list);
76                 free_fs_devices(fs_devices);
77         }
78         return 0;
79 }
80
81 static noinline struct btrfs_device *__find_device(struct list_head *head,
82                                                    u64 devid, u8 *uuid)
83 {
84         struct btrfs_device *dev;
85
86         list_for_each_entry(dev, head, dev_list) {
87                 if (dev->devid == devid &&
88                     (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
89                         return dev;
90                 }
91         }
92         return NULL;
93 }
94
95 static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
96 {
97         struct btrfs_fs_devices *fs_devices;
98
99         list_for_each_entry(fs_devices, &fs_uuids, list) {
100                 if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
101                         return fs_devices;
102         }
103         return NULL;
104 }
105
106 static void requeue_list(struct btrfs_pending_bios *pending_bios,
107                         struct bio *head, struct bio *tail)
108 {
109
110         struct bio *old_head;
111
112         old_head = pending_bios->head;
113         pending_bios->head = head;
114         if (pending_bios->tail)
115                 tail->bi_next = old_head;
116         else
117                 pending_bios->tail = tail;
118 }
119
120 /*
121  * we try to collect pending bios for a device so we don't get a large
122  * number of procs sending bios down to the same device.  This greatly
123  * improves the schedulers ability to collect and merge the bios.
124  *
125  * But, it also turns into a long list of bios to process and that is sure
126  * to eventually make the worker thread block.  The solution here is to
127  * make some progress and then put this work struct back at the end of
128  * the list if the block device is congested.  This way, multiple devices
129  * can make progress from a single worker thread.
130  */
131 static noinline int run_scheduled_bios(struct btrfs_device *device)
132 {
133         struct bio *pending;
134         struct backing_dev_info *bdi;
135         struct btrfs_fs_info *fs_info;
136         struct btrfs_pending_bios *pending_bios;
137         struct bio *tail;
138         struct bio *cur;
139         int again = 0;
140         unsigned long num_run;
141         unsigned long batch_run = 0;
142         unsigned long limit;
143         unsigned long last_waited = 0;
144         int force_reg = 0;
145         struct blk_plug plug;
146
147         /*
148          * this function runs all the bios we've collected for
149          * a particular device.  We don't want to wander off to
150          * another device without first sending all of these down.
151          * So, setup a plug here and finish it off before we return
152          */
153         blk_start_plug(&plug);
154
155         bdi = blk_get_backing_dev_info(device->bdev);
156         fs_info = device->dev_root->fs_info;
157         limit = btrfs_async_submit_limit(fs_info);
158         limit = limit * 2 / 3;
159
160 loop:
161         spin_lock(&device->io_lock);
162
163 loop_lock:
164         num_run = 0;
165
166         /* take all the bios off the list at once and process them
167          * later on (without the lock held).  But, remember the
168          * tail and other pointers so the bios can be properly reinserted
169          * into the list if we hit congestion
170          */
171         if (!force_reg && device->pending_sync_bios.head) {
172                 pending_bios = &device->pending_sync_bios;
173                 force_reg = 1;
174         } else {
175                 pending_bios = &device->pending_bios;
176                 force_reg = 0;
177         }
178
179         pending = pending_bios->head;
180         tail = pending_bios->tail;
181         WARN_ON(pending && !tail);
182
183         /*
184          * if pending was null this time around, no bios need processing
185          * at all and we can stop.  Otherwise it'll loop back up again
186          * and do an additional check so no bios are missed.
187          *
188          * device->running_pending is used to synchronize with the
189          * schedule_bio code.
190          */
191         if (device->pending_sync_bios.head == NULL &&
192             device->pending_bios.head == NULL) {
193                 again = 0;
194                 device->running_pending = 0;
195         } else {
196                 again = 1;
197                 device->running_pending = 1;
198         }
199
200         pending_bios->head = NULL;
201         pending_bios->tail = NULL;
202
203         spin_unlock(&device->io_lock);
204
205         while (pending) {
206
207                 rmb();
208                 /* we want to work on both lists, but do more bios on the
209                  * sync list than the regular list
210                  */
211                 if ((num_run > 32 &&
212                     pending_bios != &device->pending_sync_bios &&
213                     device->pending_sync_bios.head) ||
214                    (num_run > 64 && pending_bios == &device->pending_sync_bios &&
215                     device->pending_bios.head)) {
216                         spin_lock(&device->io_lock);
217                         requeue_list(pending_bios, pending, tail);
218                         goto loop_lock;
219                 }
220
221                 cur = pending;
222                 pending = pending->bi_next;
223                 cur->bi_next = NULL;
224                 atomic_dec(&fs_info->nr_async_bios);
225
226                 if (atomic_read(&fs_info->nr_async_bios) < limit &&
227                     waitqueue_active(&fs_info->async_submit_wait))
228                         wake_up(&fs_info->async_submit_wait);
229
230                 BUG_ON(atomic_read(&cur->bi_cnt) == 0);
231
232                 submit_bio(cur->bi_rw, cur);
233                 num_run++;
234                 batch_run++;
235                 if (need_resched())
236                         cond_resched();
237
238                 /*
239                  * we made progress, there is more work to do and the bdi
240                  * is now congested.  Back off and let other work structs
241                  * run instead
242                  */
243                 if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
244                     fs_info->fs_devices->open_devices > 1) {
245                         struct io_context *ioc;
246
247                         ioc = current->io_context;
248
249                         /*
250                          * the main goal here is that we don't want to
251                          * block if we're going to be able to submit
252                          * more requests without blocking.
253                          *
254                          * This code does two great things, it pokes into
255                          * the elevator code from a filesystem _and_
256                          * it makes assumptions about how batching works.
257                          */
258                         if (ioc && ioc->nr_batch_requests > 0 &&
259                             time_before(jiffies, ioc->last_waited + HZ/50UL) &&
260                             (last_waited == 0 ||
261                              ioc->last_waited == last_waited)) {
262                                 /*
263                                  * we want to go through our batch of
264                                  * requests and stop.  So, we copy out
265                                  * the ioc->last_waited time and test
266                                  * against it before looping
267                                  */
268                                 last_waited = ioc->last_waited;
269                                 if (need_resched())
270                                         cond_resched();
271                                 continue;
272                         }
273                         spin_lock(&device->io_lock);
274                         requeue_list(pending_bios, pending, tail);
275                         device->running_pending = 1;
276
277                         spin_unlock(&device->io_lock);
278                         btrfs_requeue_work(&device->work);
279                         goto done;
280                 }
281         }
282
283         cond_resched();
284         if (again)
285                 goto loop;
286
287         spin_lock(&device->io_lock);
288         if (device->pending_bios.head || device->pending_sync_bios.head)
289                 goto loop_lock;
290         spin_unlock(&device->io_lock);
291
292 done:
293         blk_finish_plug(&plug);
294         return 0;
295 }
296
297 static void pending_bios_fn(struct btrfs_work *work)
298 {
299         struct btrfs_device *device;
300
301         device = container_of(work, struct btrfs_device, work);
302         run_scheduled_bios(device);
303 }
304
305 static noinline int device_list_add(const char *path,
306                            struct btrfs_super_block *disk_super,
307                            u64 devid, struct btrfs_fs_devices **fs_devices_ret)
308 {
309         struct btrfs_device *device;
310         struct btrfs_fs_devices *fs_devices;
311         u64 found_transid = btrfs_super_generation(disk_super);
312         char *name;
313
314         fs_devices = find_fsid(disk_super->fsid);
315         if (!fs_devices) {
316                 fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
317                 if (!fs_devices)
318                         return -ENOMEM;
319                 INIT_LIST_HEAD(&fs_devices->devices);
320                 INIT_LIST_HEAD(&fs_devices->alloc_list);
321                 list_add(&fs_devices->list, &fs_uuids);
322                 memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
323                 fs_devices->latest_devid = devid;
324                 fs_devices->latest_trans = found_transid;
325                 mutex_init(&fs_devices->device_list_mutex);
326                 device = NULL;
327         } else {
328                 device = __find_device(&fs_devices->devices, devid,
329                                        disk_super->dev_item.uuid);
330         }
331         if (!device) {
332                 if (fs_devices->opened)
333                         return -EBUSY;
334
335                 device = kzalloc(sizeof(*device), GFP_NOFS);
336                 if (!device) {
337                         /* we can safely leave the fs_devices entry around */
338                         return -ENOMEM;
339                 }
340                 device->devid = devid;
341                 device->work.func = pending_bios_fn;
342                 memcpy(device->uuid, disk_super->dev_item.uuid,
343                        BTRFS_UUID_SIZE);
344                 spin_lock_init(&device->io_lock);
345                 device->name = kstrdup(path, GFP_NOFS);
346                 if (!device->name) {
347                         kfree(device);
348                         return -ENOMEM;
349                 }
350                 INIT_LIST_HEAD(&device->dev_alloc_list);
351
352                 mutex_lock(&fs_devices->device_list_mutex);
353                 list_add(&device->dev_list, &fs_devices->devices);
354                 mutex_unlock(&fs_devices->device_list_mutex);
355
356                 device->fs_devices = fs_devices;
357                 fs_devices->num_devices++;
358         } else if (!device->name || strcmp(device->name, path)) {
359                 name = kstrdup(path, GFP_NOFS);
360                 if (!name)
361                         return -ENOMEM;
362                 kfree(device->name);
363                 device->name = name;
364                 if (device->missing) {
365                         fs_devices->missing_devices--;
366                         device->missing = 0;
367                 }
368         }
369
370         if (found_transid > fs_devices->latest_trans) {
371                 fs_devices->latest_devid = devid;
372                 fs_devices->latest_trans = found_transid;
373         }
374         *fs_devices_ret = fs_devices;
375         return 0;
376 }
377
378 static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
379 {
380         struct btrfs_fs_devices *fs_devices;
381         struct btrfs_device *device;
382         struct btrfs_device *orig_dev;
383
384         fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
385         if (!fs_devices)
386                 return ERR_PTR(-ENOMEM);
387
388         INIT_LIST_HEAD(&fs_devices->devices);
389         INIT_LIST_HEAD(&fs_devices->alloc_list);
390         INIT_LIST_HEAD(&fs_devices->list);
391         mutex_init(&fs_devices->device_list_mutex);
392         fs_devices->latest_devid = orig->latest_devid;
393         fs_devices->latest_trans = orig->latest_trans;
394         memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));
395
396         mutex_lock(&orig->device_list_mutex);
397         list_for_each_entry(orig_dev, &orig->devices, dev_list) {
398                 device = kzalloc(sizeof(*device), GFP_NOFS);
399                 if (!device)
400                         goto error;
401
402                 device->name = kstrdup(orig_dev->name, GFP_NOFS);
403                 if (!device->name) {
404                         kfree(device);
405                         goto error;
406                 }
407
408                 device->devid = orig_dev->devid;
409                 device->work.func = pending_bios_fn;
410                 memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
411                 spin_lock_init(&device->io_lock);
412                 INIT_LIST_HEAD(&device->dev_list);
413                 INIT_LIST_HEAD(&device->dev_alloc_list);
414
415                 list_add(&device->dev_list, &fs_devices->devices);
416                 device->fs_devices = fs_devices;
417                 fs_devices->num_devices++;
418         }
419         mutex_unlock(&orig->device_list_mutex);
420         return fs_devices;
421 error:
422         mutex_unlock(&orig->device_list_mutex);
423         free_fs_devices(fs_devices);
424         return ERR_PTR(-ENOMEM);
425 }
426
427 int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
428 {
429         struct btrfs_device *device, *next;
430
431         mutex_lock(&uuid_mutex);
432 again:
433         mutex_lock(&fs_devices->device_list_mutex);
434         list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
435                 if (device->in_fs_metadata)
436                         continue;
437
438                 if (device->bdev) {
439                         blkdev_put(device->bdev, device->mode);
440                         device->bdev = NULL;
441                         fs_devices->open_devices--;
442                 }
443                 if (device->writeable) {
444                         list_del_init(&device->dev_alloc_list);
445                         device->writeable = 0;
446                         fs_devices->rw_devices--;
447                 }
448                 list_del_init(&device->dev_list);
449                 fs_devices->num_devices--;
450                 kfree(device->name);
451                 kfree(device);
452         }
453         mutex_unlock(&fs_devices->device_list_mutex);
454
455         if (fs_devices->seed) {
456                 fs_devices = fs_devices->seed;
457                 goto again;
458         }
459
460         mutex_unlock(&uuid_mutex);
461         return 0;
462 }
463
464 static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
465 {
466         struct btrfs_device *device;
467
468         if (--fs_devices->opened > 0)
469                 return 0;
470
471         list_for_each_entry(device, &fs_devices->devices, dev_list) {
472                 if (device->bdev) {
473                         blkdev_put(device->bdev, device->mode);
474                         fs_devices->open_devices--;
475                 }
476                 if (device->writeable) {
477                         list_del_init(&device->dev_alloc_list);
478                         fs_devices->rw_devices--;
479                 }
480
481                 device->bdev = NULL;
482                 device->writeable = 0;
483                 device->in_fs_metadata = 0;
484         }
485         WARN_ON(fs_devices->open_devices);
486         WARN_ON(fs_devices->rw_devices);
487         fs_devices->opened = 0;
488         fs_devices->seeding = 0;
489
490         return 0;
491 }
492
493 int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
494 {
495         struct btrfs_fs_devices *seed_devices = NULL;
496         int ret;
497
498         mutex_lock(&uuid_mutex);
499         ret = __btrfs_close_devices(fs_devices);
500         if (!fs_devices->opened) {
501                 seed_devices = fs_devices->seed;
502                 fs_devices->seed = NULL;
503         }
504         mutex_unlock(&uuid_mutex);
505
506         while (seed_devices) {
507                 fs_devices = seed_devices;
508                 seed_devices = fs_devices->seed;
509                 __btrfs_close_devices(fs_devices);
510                 free_fs_devices(fs_devices);
511         }
512         return ret;
513 }
514
515 static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
516                                 fmode_t flags, void *holder)
517 {
518         struct block_device *bdev;
519         struct list_head *head = &fs_devices->devices;
520         struct btrfs_device *device;
521         struct block_device *latest_bdev = NULL;
522         struct buffer_head *bh;
523         struct btrfs_super_block *disk_super;
524         u64 latest_devid = 0;
525         u64 latest_transid = 0;
526         u64 devid;
527         int seeding = 1;
528         int ret = 0;
529
530         flags |= FMODE_EXCL;
531
532         list_for_each_entry(device, head, dev_list) {
533                 if (device->bdev)
534                         continue;
535                 if (!device->name)
536                         continue;
537
538                 bdev = blkdev_get_by_path(device->name, flags, holder);
539                 if (IS_ERR(bdev)) {
540                         printk(KERN_INFO "open %s failed\n", device->name);
541                         goto error;
542                 }
543                 set_blocksize(bdev, 4096);
544
545                 bh = btrfs_read_dev_super(bdev);
546                 if (!bh) {
547                         ret = -EINVAL;
548                         goto error_close;
549                 }
550
551                 disk_super = (struct btrfs_super_block *)bh->b_data;
552                 devid = btrfs_stack_device_id(&disk_super->dev_item);
553                 if (devid != device->devid)
554                         goto error_brelse;
555
556                 if (memcmp(device->uuid, disk_super->dev_item.uuid,
557                            BTRFS_UUID_SIZE))
558                         goto error_brelse;
559
560                 device->generation = btrfs_super_generation(disk_super);
561                 if (!latest_transid || device->generation > latest_transid) {
562                         latest_devid = devid;
563                         latest_transid = device->generation;
564                         latest_bdev = bdev;
565                 }
566
567                 if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
568                         device->writeable = 0;
569                 } else {
570                         device->writeable = !bdev_read_only(bdev);
571                         seeding = 0;
572                 }
573
574                 device->bdev = bdev;
575                 device->in_fs_metadata = 0;
576                 device->mode = flags;
577
578                 if (!blk_queue_nonrot(bdev_get_queue(bdev)))
579                         fs_devices->rotating = 1;
580
581                 fs_devices->open_devices++;
582                 if (device->writeable) {
583                         fs_devices->rw_devices++;
584                         list_add(&device->dev_alloc_list,
585                                  &fs_devices->alloc_list);
586                 }
587                 continue;
588
589 error_brelse:
590                 brelse(bh);
591 error_close:
592                 blkdev_put(bdev, flags);
593 error:
594                 continue;
595         }
596         if (fs_devices->open_devices == 0) {
597                 ret = -EIO;
598                 goto out;
599         }
600         fs_devices->seeding = seeding;
601         fs_devices->opened = 1;
602         fs_devices->latest_bdev = latest_bdev;
603         fs_devices->latest_devid = latest_devid;
604         fs_devices->latest_trans = latest_transid;
605         fs_devices->total_rw_bytes = 0;
606 out:
607         return ret;
608 }
609
610 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
611                        fmode_t flags, void *holder)
612 {
613         int ret;
614
615         mutex_lock(&uuid_mutex);
616         if (fs_devices->opened) {
617                 fs_devices->opened++;
618                 ret = 0;
619         } else {
620                 ret = __btrfs_open_devices(fs_devices, flags, holder);
621         }
622         mutex_unlock(&uuid_mutex);
623         return ret;
624 }
625
626 int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
627                           struct btrfs_fs_devices **fs_devices_ret)
628 {
629         struct btrfs_super_block *disk_super;
630         struct block_device *bdev;
631         struct buffer_head *bh;
632         int ret;
633         u64 devid;
634         u64 transid;
635
636         mutex_lock(&uuid_mutex);
637
638         flags |= FMODE_EXCL;
639         bdev = blkdev_get_by_path(path, flags, holder);
640
641         if (IS_ERR(bdev)) {
642                 ret = PTR_ERR(bdev);
643                 goto error;
644         }
645
646         ret = set_blocksize(bdev, 4096);
647         if (ret)
648                 goto error_close;
649         bh = btrfs_read_dev_super(bdev);
650         if (!bh) {
651                 ret = -EINVAL;
652                 goto error_close;
653         }
654         disk_super = (struct btrfs_super_block *)bh->b_data;
655         devid = btrfs_stack_device_id(&disk_super->dev_item);
656         transid = btrfs_super_generation(disk_super);
657         if (disk_super->label[0])
658                 printk(KERN_INFO "device label %s ", disk_super->label);
659         else {
660                 /* FIXME, make a readl uuid parser */
661                 printk(KERN_INFO "device fsid %llx-%llx ",
662                        *(unsigned long long *)disk_super->fsid,
663                        *(unsigned long long *)(disk_super->fsid + 8));
664         }
665         printk(KERN_CONT "devid %llu transid %llu %s\n",
666                (unsigned long long)devid, (unsigned long long)transid, path);
667         ret = device_list_add(path, disk_super, devid, fs_devices_ret);
668
669         brelse(bh);
670 error_close:
671         blkdev_put(bdev, flags);
672 error:
673         mutex_unlock(&uuid_mutex);
674         return ret;
675 }
676
677 /* helper to account the used device space in the range */
678 int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
679                                    u64 end, u64 *length)
680 {
681         struct btrfs_key key;
682         struct btrfs_root *root = device->dev_root;
683         struct btrfs_dev_extent *dev_extent;
684         struct btrfs_path *path;
685         u64 extent_end;
686         int ret;
687         int slot;
688         struct extent_buffer *l;
689
690         *length = 0;
691
692         if (start >= device->total_bytes)
693                 return 0;
694
695         path = btrfs_alloc_path();
696         if (!path)
697                 return -ENOMEM;
698         path->reada = 2;
699
700         key.objectid = device->devid;
701         key.offset = start;
702         key.type = BTRFS_DEV_EXTENT_KEY;
703
704         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
705         if (ret < 0)
706                 goto out;
707         if (ret > 0) {
708                 ret = btrfs_previous_item(root, path, key.objectid, key.type);
709                 if (ret < 0)
710                         goto out;
711         }
712
713         while (1) {
714                 l = path->nodes[0];
715                 slot = path->slots[0];
716                 if (slot >= btrfs_header_nritems(l)) {
717                         ret = btrfs_next_leaf(root, path);
718                         if (ret == 0)
719                                 continue;
720                         if (ret < 0)
721                                 goto out;
722
723                         break;
724                 }
725                 btrfs_item_key_to_cpu(l, &key, slot);
726
727                 if (key.objectid < device->devid)
728                         goto next;
729
730                 if (key.objectid > device->devid)
731                         break;
732
733                 if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
734                         goto next;
735
736                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
737                 extent_end = key.offset + btrfs_dev_extent_length(l,
738                                                                   dev_extent);
739                 if (key.offset <= start && extent_end > end) {
740                         *length = end - start + 1;
741                         break;
742                 } else if (key.offset <= start && extent_end > start)
743                         *length += extent_end - start;
744                 else if (key.offset > start && extent_end <= end)
745                         *length += extent_end - key.offset;
746                 else if (key.offset > start && key.offset <= end) {
747                         *length += end - key.offset + 1;
748                         break;
749                 } else if (key.offset > end)
750                         break;
751
752 next:
753                 path->slots[0]++;
754         }
755         ret = 0;
756 out:
757         btrfs_free_path(path);
758         return ret;
759 }
760
761 /*
762  * find_free_dev_extent - find free space in the specified device
763  * @trans:      transaction handler
764  * @device:     the device which we search the free space in
765  * @num_bytes:  the size of the free space that we need
766  * @start:      store the start of the free space.
767  * @len:        the size of the free space. that we find, or the size of the max
768  *              free space if we don't find suitable free space
769  *
770  * this uses a pretty simple search, the expectation is that it is
771  * called very infrequently and that a given device has a small number
772  * of extents
773  *
774  * @start is used to store the start of the free space if we find. But if we
775  * don't find suitable free space, it will be used to store the start position
776  * of the max free space.
777  *
778  * @len is used to store the size of the free space that we find.
779  * But if we don't find suitable free space, it is used to store the size of
780  * the max free space.
781  */
782 int find_free_dev_extent(struct btrfs_trans_handle *trans,
783                          struct btrfs_device *device, u64 num_bytes,
784                          u64 *start, u64 *len)
785 {
786         struct btrfs_key key;
787         struct btrfs_root *root = device->dev_root;
788         struct btrfs_dev_extent *dev_extent;
789         struct btrfs_path *path;
790         u64 hole_size;
791         u64 max_hole_start;
792         u64 max_hole_size;
793         u64 extent_end;
794         u64 search_start;
795         u64 search_end = device->total_bytes;
796         int ret;
797         int slot;
798         struct extent_buffer *l;
799
800         /* FIXME use last free of some kind */
801
802         /* we don't want to overwrite the superblock on the drive,
803          * so we make sure to start at an offset of at least 1MB
804          */
805         search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
806
807         max_hole_start = search_start;
808         max_hole_size = 0;
809
810         if (search_start >= search_end) {
811                 ret = -ENOSPC;
812                 goto error;
813         }
814
815         path = btrfs_alloc_path();
816         if (!path) {
817                 ret = -ENOMEM;
818                 goto error;
819         }
820         path->reada = 2;
821
822         key.objectid = device->devid;
823         key.offset = search_start;
824         key.type = BTRFS_DEV_EXTENT_KEY;
825
826         ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
827         if (ret < 0)
828                 goto out;
829         if (ret > 0) {
830                 ret = btrfs_previous_item(root, path, key.objectid, key.type);
831                 if (ret < 0)
832                         goto out;
833         }
834
835         while (1) {
836                 l = path->nodes[0];
837                 slot = path->slots[0];
838                 if (slot >= btrfs_header_nritems(l)) {
839                         ret = btrfs_next_leaf(root, path);
840                         if (ret == 0)
841                                 continue;
842                         if (ret < 0)
843                                 goto out;
844
845                         break;
846                 }
847                 btrfs_item_key_to_cpu(l, &key, slot);
848
849                 if (key.objectid < device->devid)
850                         goto next;
851
852                 if (key.objectid > device->devid)
853                         break;
854
855                 if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
856                         goto next;
857
858                 if (key.offset > search_start) {
859                         hole_size = key.offset - search_start;
860
861                         if (hole_size > max_hole_size) {
862                                 max_hole_start = search_start;
863                                 max_hole_size = hole_size;
864                         }
865
866                         /*
867                          * If this free space is greater than which we need,
868                          * it must be the max free space that we have found
869                          * until now, so max_hole_start must point to the start
870                          * of this free space and the length of this free space
871                          * is stored in max_hole_size. Thus, we return
872                          * max_hole_start and max_hole_size and go back to the
873                          * caller.
874                          */
875                         if (hole_size >= num_bytes) {
876                                 ret = 0;
877                                 goto out;
878                         }
879                 }
880
881                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
882                 extent_end = key.offset + btrfs_dev_extent_length(l,
883                                                                   dev_extent);
884                 if (extent_end > search_start)
885                         search_start = extent_end;
886 next:
887                 path->slots[0]++;
888                 cond_resched();
889         }
890
891         hole_size = search_end- search_start;
892         if (hole_size > max_hole_size) {
893                 max_hole_start = search_start;
894                 max_hole_size = hole_size;
895         }
896
897         /* See above. */
898         if (hole_size < num_bytes)
899                 ret = -ENOSPC;
900         else
901                 ret = 0;
902
903 out:
904         btrfs_free_path(path);
905 error:
906         *start = max_hole_start;
907         if (len)
908                 *len = max_hole_size;
909         return ret;
910 }
911
912 static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
913                           struct btrfs_device *device,
914                           u64 start)
915 {
916         int ret;
917         struct btrfs_path *path;
918         struct btrfs_root *root = device->dev_root;
919         struct btrfs_key key;
920         struct btrfs_key found_key;
921         struct extent_buffer *leaf = NULL;
922         struct btrfs_dev_extent *extent = NULL;
923
924         path = btrfs_alloc_path();
925         if (!path)
926                 return -ENOMEM;
927
928         key.objectid = device->devid;
929         key.offset = start;
930         key.type = BTRFS_DEV_EXTENT_KEY;
931
932         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
933         if (ret > 0) {
934                 ret = btrfs_previous_item(root, path, key.objectid,
935                                           BTRFS_DEV_EXTENT_KEY);
936                 BUG_ON(ret);
937                 leaf = path->nodes[0];
938                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
939                 extent = btrfs_item_ptr(leaf, path->slots[0],
940                                         struct btrfs_dev_extent);
941                 BUG_ON(found_key.offset > start || found_key.offset +
942                        btrfs_dev_extent_length(leaf, extent) < start);
943                 ret = 0;
944         } else if (ret == 0) {
945                 leaf = path->nodes[0];
946                 extent = btrfs_item_ptr(leaf, path->slots[0],
947                                         struct btrfs_dev_extent);
948         }
949         BUG_ON(ret);
950
951         if (device->bytes_used > 0)
952                 device->bytes_used -= btrfs_dev_extent_length(leaf, extent);
953         ret = btrfs_del_item(trans, root, path);
954         BUG_ON(ret);
955
956         btrfs_free_path(path);
957         return ret;
958 }
959
960 int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
961                            struct btrfs_device *device,
962                            u64 chunk_tree, u64 chunk_objectid,
963                            u64 chunk_offset, u64 start, u64 num_bytes)
964 {
965         int ret;
966         struct btrfs_path *path;
967         struct btrfs_root *root = device->dev_root;
968         struct btrfs_dev_extent *extent;
969         struct extent_buffer *leaf;
970         struct btrfs_key key;
971
972         WARN_ON(!device->in_fs_metadata);
973         path = btrfs_alloc_path();
974         if (!path)
975                 return -ENOMEM;
976
977         key.objectid = device->devid;
978         key.offset = start;
979         key.type = BTRFS_DEV_EXTENT_KEY;
980         ret = btrfs_insert_empty_item(trans, root, path, &key,
981                                       sizeof(*extent));
982         BUG_ON(ret);
983
984         leaf = path->nodes[0];
985         extent = btrfs_item_ptr(leaf, path->slots[0],
986                                 struct btrfs_dev_extent);
987         btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
988         btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
989         btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
990
991         write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
992                     (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
993                     BTRFS_UUID_SIZE);
994
995         btrfs_set_dev_extent_length(leaf, extent, num_bytes);
996         btrfs_mark_buffer_dirty(leaf);
997         btrfs_free_path(path);
998         return ret;
999 }
1000
1001 static noinline int find_next_chunk(struct btrfs_root *root,
1002                                     u64 objectid, u64 *offset)
1003 {
1004         struct btrfs_path *path;
1005         int ret;
1006         struct btrfs_key key;
1007         struct btrfs_chunk *chunk;
1008         struct btrfs_key found_key;
1009
1010         path = btrfs_alloc_path();
1011         BUG_ON(!path);
1012
1013         key.objectid = objectid;
1014         key.offset = (u64)-1;
1015         key.type = BTRFS_CHUNK_ITEM_KEY;
1016
1017         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1018         if (ret < 0)
1019                 goto error;
1020
1021         BUG_ON(ret == 0);
1022
1023         ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
1024         if (ret) {
1025                 *offset = 0;
1026         } else {
1027                 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1028                                       path->slots[0]);
1029                 if (found_key.objectid != objectid)
1030                         *offset = 0;
1031                 else {
1032                         chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
1033                                                struct btrfs_chunk);
1034                         *offset = found_key.offset +
1035                                 btrfs_chunk_length(path->nodes[0], chunk);
1036                 }
1037         }
1038         ret = 0;
1039 error:
1040         btrfs_free_path(path);
1041         return ret;
1042 }
1043
1044 static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
1045 {
1046         int ret;
1047         struct btrfs_key key;
1048         struct btrfs_key found_key;
1049         struct btrfs_path *path;
1050
1051         root = root->fs_info->chunk_root;
1052
1053         path = btrfs_alloc_path();
1054         if (!path)
1055                 return -ENOMEM;
1056
1057         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1058         key.type = BTRFS_DEV_ITEM_KEY;
1059         key.offset = (u64)-1;
1060
1061         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1062         if (ret < 0)
1063                 goto error;
1064
1065         BUG_ON(ret == 0);
1066
1067         ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
1068                                   BTRFS_DEV_ITEM_KEY);
1069         if (ret) {
1070                 *objectid = 1;
1071         } else {
1072                 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1073                                       path->slots[0]);
1074                 *objectid = found_key.offset + 1;
1075         }
1076         ret = 0;
1077 error:
1078         btrfs_free_path(path);
1079         return ret;
1080 }
1081
1082 /*
1083  * the device information is stored in the chunk root
1084  * the btrfs_device struct should be fully filled in
1085  */
1086 int btrfs_add_device(struct btrfs_trans_handle *trans,
1087                      struct btrfs_root *root,
1088                      struct btrfs_device *device)
1089 {
1090         int ret;
1091         struct btrfs_path *path;
1092         struct btrfs_dev_item *dev_item;
1093         struct extent_buffer *leaf;
1094         struct btrfs_key key;
1095         unsigned long ptr;
1096
1097         root = root->fs_info->chunk_root;
1098
1099         path = btrfs_alloc_path();
1100         if (!path)
1101                 return -ENOMEM;
1102
1103         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1104         key.type = BTRFS_DEV_ITEM_KEY;
1105         key.offset = device->devid;
1106
1107         ret = btrfs_insert_empty_item(trans, root, path, &key,
1108                                       sizeof(*dev_item));
1109         if (ret)
1110                 goto out;
1111
1112         leaf = path->nodes[0];
1113         dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1114
1115         btrfs_set_device_id(leaf, dev_item, device->devid);
1116         btrfs_set_device_generation(leaf, dev_item, 0);
1117         btrfs_set_device_type(leaf, dev_item, device->type);
1118         btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1119         btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1120         btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1121         btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
1122         btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
1123         btrfs_set_device_group(leaf, dev_item, 0);
1124         btrfs_set_device_seek_speed(leaf, dev_item, 0);
1125         btrfs_set_device_bandwidth(leaf, dev_item, 0);
1126         btrfs_set_device_start_offset(leaf, dev_item, 0);
1127
1128         ptr = (unsigned long)btrfs_device_uuid(dev_item);
1129         write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1130         ptr = (unsigned long)btrfs_device_fsid(dev_item);
1131         write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
1132         btrfs_mark_buffer_dirty(leaf);
1133
1134         ret = 0;
1135 out:
1136         btrfs_free_path(path);
1137         return ret;
1138 }
1139
1140 static int btrfs_rm_dev_item(struct btrfs_root *root,
1141                              struct btrfs_device *device)
1142 {
1143         int ret;
1144         struct btrfs_path *path;
1145         struct btrfs_key key;
1146         struct btrfs_trans_handle *trans;
1147
1148         root = root->fs_info->chunk_root;
1149
1150         path = btrfs_alloc_path();
1151         if (!path)
1152                 return -ENOMEM;
1153
1154         trans = btrfs_start_transaction(root, 0);
1155         if (IS_ERR(trans)) {
1156                 btrfs_free_path(path);
1157                 return PTR_ERR(trans);
1158         }
1159         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1160         key.type = BTRFS_DEV_ITEM_KEY;
1161         key.offset = device->devid;
1162         lock_chunks(root);
1163
1164         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1165         if (ret < 0)
1166                 goto out;
1167
1168         if (ret > 0) {
1169                 ret = -ENOENT;
1170                 goto out;
1171         }
1172
1173         ret = btrfs_del_item(trans, root, path);
1174         if (ret)
1175                 goto out;
1176 out:
1177         btrfs_free_path(path);
1178         unlock_chunks(root);
1179         btrfs_commit_transaction(trans, root);
1180         return ret;
1181 }
1182
1183 int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1184 {
1185         struct btrfs_device *device;
1186         struct btrfs_device *next_device;
1187         struct block_device *bdev;
1188         struct buffer_head *bh = NULL;
1189         struct btrfs_super_block *disk_super;
1190         u64 all_avail;
1191         u64 devid;
1192         u64 num_devices;
1193         u8 *dev_uuid;
1194         int ret = 0;
1195
1196         mutex_lock(&uuid_mutex);
1197         mutex_lock(&root->fs_info->volume_mutex);
1198
1199         all_avail = root->fs_info->avail_data_alloc_bits |
1200                 root->fs_info->avail_system_alloc_bits |
1201                 root->fs_info->avail_metadata_alloc_bits;
1202
1203         if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
1204             root->fs_info->fs_devices->num_devices <= 4) {
1205                 printk(KERN_ERR "btrfs: unable to go below four devices "
1206                        "on raid10\n");
1207                 ret = -EINVAL;
1208                 goto out;
1209         }
1210
1211         if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
1212             root->fs_info->fs_devices->num_devices <= 2) {
1213                 printk(KERN_ERR "btrfs: unable to go below two "
1214                        "devices on raid1\n");
1215                 ret = -EINVAL;
1216                 goto out;
1217         }
1218
1219         if (strcmp(device_path, "missing") == 0) {
1220                 struct list_head *devices;
1221                 struct btrfs_device *tmp;
1222
1223                 device = NULL;
1224                 devices = &root->fs_info->fs_devices->devices;
1225                 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1226                 list_for_each_entry(tmp, devices, dev_list) {
1227                         if (tmp->in_fs_metadata && !tmp->bdev) {
1228                                 device = tmp;
1229                                 break;
1230                         }
1231                 }
1232                 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1233                 bdev = NULL;
1234                 bh = NULL;
1235                 disk_super = NULL;
1236                 if (!device) {
1237                         printk(KERN_ERR "btrfs: no missing devices found to "
1238                                "remove\n");
1239                         goto out;
1240                 }
1241         } else {
1242                 bdev = blkdev_get_by_path(device_path, FMODE_READ | FMODE_EXCL,
1243                                           root->fs_info->bdev_holder);
1244                 if (IS_ERR(bdev)) {
1245                         ret = PTR_ERR(bdev);
1246                         goto out;
1247                 }
1248
1249                 set_blocksize(bdev, 4096);
1250                 bh = btrfs_read_dev_super(bdev);
1251                 if (!bh) {
1252                         ret = -EINVAL;
1253                         goto error_close;
1254                 }
1255                 disk_super = (struct btrfs_super_block *)bh->b_data;
1256                 devid = btrfs_stack_device_id(&disk_super->dev_item);
1257                 dev_uuid = disk_super->dev_item.uuid;
1258                 device = btrfs_find_device(root, devid, dev_uuid,
1259                                            disk_super->fsid);
1260                 if (!device) {
1261                         ret = -ENOENT;
1262                         goto error_brelse;
1263                 }
1264         }
1265
1266         if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
1267                 printk(KERN_ERR "btrfs: unable to remove the only writeable "
1268                        "device\n");
1269                 ret = -EINVAL;
1270                 goto error_brelse;
1271         }
1272
1273         if (device->writeable) {
1274                 list_del_init(&device->dev_alloc_list);
1275                 root->fs_info->fs_devices->rw_devices--;
1276         }
1277
1278         ret = btrfs_shrink_device(device, 0);
1279         if (ret)
1280                 goto error_undo;
1281
1282         ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
1283         if (ret)
1284                 goto error_undo;
1285
1286         device->in_fs_metadata = 0;
1287         btrfs_scrub_cancel_dev(root, device);
1288
1289         /*
1290          * the device list mutex makes sure that we don't change
1291          * the device list while someone else is writing out all
1292          * the device supers.
1293          */
1294         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1295         list_del_init(&device->dev_list);
1296         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1297
1298         device->fs_devices->num_devices--;
1299
1300         if (device->missing)
1301                 root->fs_info->fs_devices->missing_devices--;
1302
1303         next_device = list_entry(root->fs_info->fs_devices->devices.next,
1304                                  struct btrfs_device, dev_list);
1305         if (device->bdev == root->fs_info->sb->s_bdev)
1306                 root->fs_info->sb->s_bdev = next_device->bdev;
1307         if (device->bdev == root->fs_info->fs_devices->latest_bdev)
1308                 root->fs_info->fs_devices->latest_bdev = next_device->bdev;
1309
1310         if (device->bdev) {
1311                 blkdev_put(device->bdev, device->mode);
1312                 device->bdev = NULL;
1313                 device->fs_devices->open_devices--;
1314         }
1315
1316         num_devices = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
1317         btrfs_set_super_num_devices(&root->fs_info->super_copy, num_devices);
1318
1319         if (device->fs_devices->open_devices == 0) {
1320                 struct btrfs_fs_devices *fs_devices;
1321                 fs_devices = root->fs_info->fs_devices;
1322                 while (fs_devices) {
1323                         if (fs_devices->seed == device->fs_devices)
1324                                 break;
1325                         fs_devices = fs_devices->seed;
1326                 }
1327                 fs_devices->seed = device->fs_devices->seed;
1328                 device->fs_devices->seed = NULL;
1329                 __btrfs_close_devices(device->fs_devices);
1330                 free_fs_devices(device->fs_devices);
1331         }
1332
1333         /*
1334          * at this point, the device is zero sized.  We want to
1335          * remove it from the devices list and zero out the old super
1336          */
1337         if (device->writeable) {
1338                 /* make sure this device isn't detected as part of
1339                  * the FS anymore
1340                  */
1341                 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
1342                 set_buffer_dirty(bh);
1343                 sync_dirty_buffer(bh);
1344         }
1345
1346         kfree(device->name);
1347         kfree(device);
1348         ret = 0;
1349
1350 error_brelse:
1351         brelse(bh);
1352 error_close:
1353         if (bdev)
1354                 blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
1355 out:
1356         mutex_unlock(&root->fs_info->volume_mutex);
1357         mutex_unlock(&uuid_mutex);
1358         return ret;
1359 error_undo:
1360         if (device->writeable) {
1361                 list_add(&device->dev_alloc_list,
1362                          &root->fs_info->fs_devices->alloc_list);
1363                 root->fs_info->fs_devices->rw_devices++;
1364         }
1365         goto error_brelse;
1366 }
1367
1368 /*
1369  * does all the dirty work required for changing file system's UUID.
1370  */
1371 static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans,
1372                                 struct btrfs_root *root)
1373 {
1374         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
1375         struct btrfs_fs_devices *old_devices;
1376         struct btrfs_fs_devices *seed_devices;
1377         struct btrfs_super_block *disk_super = &root->fs_info->super_copy;
1378         struct btrfs_device *device;
1379         u64 super_flags;
1380
1381         BUG_ON(!mutex_is_locked(&uuid_mutex));
1382         if (!fs_devices->seeding)
1383                 return -EINVAL;
1384
1385         seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
1386         if (!seed_devices)
1387                 return -ENOMEM;
1388
1389         old_devices = clone_fs_devices(fs_devices);
1390         if (IS_ERR(old_devices)) {
1391                 kfree(seed_devices);
1392                 return PTR_ERR(old_devices);
1393         }
1394
1395         list_add(&old_devices->list, &fs_uuids);
1396
1397         memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
1398         seed_devices->opened = 1;
1399         INIT_LIST_HEAD(&seed_devices->devices);
1400         INIT_LIST_HEAD(&seed_devices->alloc_list);
1401         mutex_init(&seed_devices->device_list_mutex);
1402         list_splice_init(&fs_devices->devices, &seed_devices->devices);
1403         list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
1404         list_for_each_entry(device, &seed_devices->devices, dev_list) {
1405                 device->fs_devices = seed_devices;
1406         }
1407
1408         fs_devices->seeding = 0;
1409         fs_devices->num_devices = 0;
1410         fs_devices->open_devices = 0;
1411         fs_devices->seed = seed_devices;
1412
1413         generate_random_uuid(fs_devices->fsid);
1414         memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1415         memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1416         super_flags = btrfs_super_flags(disk_super) &
1417                       ~BTRFS_SUPER_FLAG_SEEDING;
1418         btrfs_set_super_flags(disk_super, super_flags);
1419
1420         return 0;
1421 }
1422
1423 /*
1424  * strore the expected generation for seed devices in device items.
1425  */
1426 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
1427                                struct btrfs_root *root)
1428 {
1429         struct btrfs_path *path;
1430         struct extent_buffer *leaf;
1431         struct btrfs_dev_item *dev_item;
1432         struct btrfs_device *device;
1433         struct btrfs_key key;
1434         u8 fs_uuid[BTRFS_UUID_SIZE];
1435         u8 dev_uuid[BTRFS_UUID_SIZE];
1436         u64 devid;
1437         int ret;
1438
1439         path = btrfs_alloc_path();
1440         if (!path)
1441                 return -ENOMEM;
1442
1443         root = root->fs_info->chunk_root;
1444         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1445         key.offset = 0;
1446         key.type = BTRFS_DEV_ITEM_KEY;
1447
1448         while (1) {
1449                 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1450                 if (ret < 0)
1451                         goto error;
1452
1453                 leaf = path->nodes[0];
1454 next_slot:
1455                 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1456                         ret = btrfs_next_leaf(root, path);
1457                         if (ret > 0)
1458                                 break;
1459                         if (ret < 0)
1460                                 goto error;
1461                         leaf = path->nodes[0];
1462                         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1463                         btrfs_release_path(path);
1464                         continue;
1465                 }
1466
1467                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1468                 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
1469                     key.type != BTRFS_DEV_ITEM_KEY)
1470                         break;
1471
1472                 dev_item = btrfs_item_ptr(leaf, path->slots[0],
1473                                           struct btrfs_dev_item);
1474                 devid = btrfs_device_id(leaf, dev_item);
1475                 read_extent_buffer(leaf, dev_uuid,
1476                                    (unsigned long)btrfs_device_uuid(dev_item),
1477                                    BTRFS_UUID_SIZE);
1478                 read_extent_buffer(leaf, fs_uuid,
1479                                    (unsigned long)btrfs_device_fsid(dev_item),
1480                                    BTRFS_UUID_SIZE);
1481                 device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
1482                 BUG_ON(!device);
1483
1484                 if (device->fs_devices->seeding) {
1485                         btrfs_set_device_generation(leaf, dev_item,
1486                                                     device->generation);
1487                         btrfs_mark_buffer_dirty(leaf);
1488                 }
1489
1490                 path->slots[0]++;
1491                 goto next_slot;
1492         }
1493         ret = 0;
1494 error:
1495         btrfs_free_path(path);
1496         return ret;
1497 }
1498
1499 int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1500 {
1501         struct btrfs_trans_handle *trans;
1502         struct btrfs_device *device;
1503         struct block_device *bdev;
1504         struct list_head *devices;
1505         struct super_block *sb = root->fs_info->sb;
1506         u64 total_bytes;
1507         int seeding_dev = 0;
1508         int ret = 0;
1509
1510         if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
1511                 return -EINVAL;
1512
1513         bdev = blkdev_get_by_path(device_path, FMODE_EXCL,
1514                                   root->fs_info->bdev_holder);
1515         if (IS_ERR(bdev))
1516                 return PTR_ERR(bdev);
1517
1518         if (root->fs_info->fs_devices->seeding) {
1519                 seeding_dev = 1;
1520                 down_write(&sb->s_umount);
1521                 mutex_lock(&uuid_mutex);
1522         }
1523
1524         filemap_write_and_wait(bdev->bd_inode->i_mapping);
1525         mutex_lock(&root->fs_info->volume_mutex);
1526
1527         devices = &root->fs_info->fs_devices->devices;
1528         /*
1529          * we have the volume lock, so we don't need the extra
1530          * device list mutex while reading the list here.
1531          */
1532         list_for_each_entry(device, devices, dev_list) {
1533                 if (device->bdev == bdev) {
1534                         ret = -EEXIST;
1535                         goto error;
1536                 }
1537         }
1538
1539         device = kzalloc(sizeof(*device), GFP_NOFS);
1540         if (!device) {
1541                 /* we can safely leave the fs_devices entry around */
1542                 ret = -ENOMEM;
1543                 goto error;
1544         }
1545
1546         device->name = kstrdup(device_path, GFP_NOFS);
1547         if (!device->name) {
1548                 kfree(device);
1549                 ret = -ENOMEM;
1550                 goto error;
1551         }
1552
1553         ret = find_next_devid(root, &device->devid);
1554         if (ret) {
1555                 kfree(device->name);
1556                 kfree(device);
1557                 goto error;
1558         }
1559
1560         trans = btrfs_start_transaction(root, 0);
1561         if (IS_ERR(trans)) {
1562                 kfree(device->name);
1563                 kfree(device);
1564                 ret = PTR_ERR(trans);
1565                 goto error;
1566         }
1567
1568         lock_chunks(root);
1569
1570         device->writeable = 1;
1571         device->work.func = pending_bios_fn;
1572         generate_random_uuid(device->uuid);
1573         spin_lock_init(&device->io_lock);
1574         device->generation = trans->transid;
1575         device->io_width = root->sectorsize;
1576         device->io_align = root->sectorsize;
1577         device->sector_size = root->sectorsize;
1578         device->total_bytes = i_size_read(bdev->bd_inode);
1579         device->disk_total_bytes = device->total_bytes;
1580         device->dev_root = root->fs_info->dev_root;
1581         device->bdev = bdev;
1582         device->in_fs_metadata = 1;
1583         device->mode = FMODE_EXCL;
1584         set_blocksize(device->bdev, 4096);
1585
1586         if (seeding_dev) {
1587                 sb->s_flags &= ~MS_RDONLY;
1588                 ret = btrfs_prepare_sprout(trans, root);
1589                 BUG_ON(ret);
1590         }
1591
1592         device->fs_devices = root->fs_info->fs_devices;
1593
1594         /*
1595          * we don't want write_supers to jump in here with our device
1596          * half set up
1597          */
1598         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1599         list_add(&device->dev_list, &root->fs_info->fs_devices->devices);
1600         list_add(&device->dev_alloc_list,
1601                  &root->fs_info->fs_devices->alloc_list);
1602         root->fs_info->fs_devices->num_devices++;
1603         root->fs_info->fs_devices->open_devices++;
1604         root->fs_info->fs_devices->rw_devices++;
1605         root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
1606
1607         if (!blk_queue_nonrot(bdev_get_queue(bdev)))
1608                 root->fs_info->fs_devices->rotating = 1;
1609
1610         total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
1611         btrfs_set_super_total_bytes(&root->fs_info->super_copy,
1612                                     total_bytes + device->total_bytes);
1613
1614         total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
1615         btrfs_set_super_num_devices(&root->fs_info->super_copy,
1616                                     total_bytes + 1);
1617         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1618
1619         if (seeding_dev) {
1620                 ret = init_first_rw_device(trans, root, device);
1621                 BUG_ON(ret);
1622                 ret = btrfs_finish_sprout(trans, root);
1623                 BUG_ON(ret);
1624         } else {
1625                 ret = btrfs_add_device(trans, root, device);
1626         }
1627
1628         /*
1629          * we've got more storage, clear any full flags on the space
1630          * infos
1631          */
1632         btrfs_clear_space_info_full(root->fs_info);
1633
1634         unlock_chunks(root);
1635         btrfs_commit_transaction(trans, root);
1636
1637         if (seeding_dev) {
1638                 mutex_unlock(&uuid_mutex);
1639                 up_write(&sb->s_umount);
1640
1641                 ret = btrfs_relocate_sys_chunks(root);
1642                 BUG_ON(ret);
1643         }
1644 out:
1645         mutex_unlock(&root->fs_info->volume_mutex);
1646         return ret;
1647 error:
1648         blkdev_put(bdev, FMODE_EXCL);
1649         if (seeding_dev) {
1650                 mutex_unlock(&uuid_mutex);
1651                 up_write(&sb->s_umount);
1652         }
1653         goto out;
1654 }
1655
1656 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
1657                                         struct btrfs_device *device)
1658 {
1659         int ret;
1660         struct btrfs_path *path;
1661         struct btrfs_root *root;
1662         struct btrfs_dev_item *dev_item;
1663         struct extent_buffer *leaf;
1664         struct btrfs_key key;
1665
1666         root = device->dev_root->fs_info->chunk_root;
1667
1668         path = btrfs_alloc_path();
1669         if (!path)
1670                 return -ENOMEM;
1671
1672         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1673         key.type = BTRFS_DEV_ITEM_KEY;
1674         key.offset = device->devid;
1675
1676         ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1677         if (ret < 0)
1678                 goto out;
1679
1680         if (ret > 0) {
1681                 ret = -ENOENT;
1682                 goto out;
1683         }
1684
1685         leaf = path->nodes[0];
1686         dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1687
1688         btrfs_set_device_id(leaf, dev_item, device->devid);
1689         btrfs_set_device_type(leaf, dev_item, device->type);
1690         btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1691         btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1692         btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1693         btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
1694         btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
1695         btrfs_mark_buffer_dirty(leaf);
1696
1697 out:
1698         btrfs_free_path(path);
1699         return ret;
1700 }
1701
1702 static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
1703                       struct btrfs_device *device, u64 new_size)
1704 {
1705         struct btrfs_super_block *super_copy =
1706                 &device->dev_root->fs_info->super_copy;
1707         u64 old_total = btrfs_super_total_bytes(super_copy);
1708         u64 diff = new_size - device->total_bytes;
1709
1710         if (!device->writeable)
1711                 return -EACCES;
1712         if (new_size <= device->total_bytes)
1713                 return -EINVAL;
1714
1715         btrfs_set_super_total_bytes(super_copy, old_total + diff);
1716         device->fs_devices->total_rw_bytes += diff;
1717
1718         device->total_bytes = new_size;
1719         device->disk_total_bytes = new_size;
1720         btrfs_clear_space_info_full(device->dev_root->fs_info);
1721
1722         return btrfs_update_device(trans, device);
1723 }
1724
1725 int btrfs_grow_device(struct btrfs_trans_handle *trans,
1726                       struct btrfs_device *device, u64 new_size)
1727 {
1728         int ret;
1729         lock_chunks(device->dev_root);
1730         ret = __btrfs_grow_device(trans, device, new_size);
1731         unlock_chunks(device->dev_root);
1732         return ret;
1733 }
1734
1735 static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
1736                             struct btrfs_root *root,
1737                             u64 chunk_tree, u64 chunk_objectid,
1738                             u64 chunk_offset)
1739 {
1740         int ret;
1741         struct btrfs_path *path;
1742         struct btrfs_key key;
1743
1744         root = root->fs_info->chunk_root;
1745         path = btrfs_alloc_path();
1746         if (!path)
1747                 return -ENOMEM;
1748
1749         key.objectid = chunk_objectid;
1750         key.offset = chunk_offset;
1751         key.type = BTRFS_CHUNK_ITEM_KEY;
1752
1753         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1754         BUG_ON(ret);
1755
1756         ret = btrfs_del_item(trans, root, path);
1757         BUG_ON(ret);
1758
1759         btrfs_free_path(path);
1760         return 0;
1761 }
1762
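     /*
      * Remove the entry matching chunk_objectid/chunk_offset from the
      * sys_chunk_array held in the in-memory super block, shifting the
      * remaining entries down and updating the recorded array size.
      */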
1763 static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
1764                         chunk_offset)
1765 {
1766         struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
1767         struct btrfs_disk_key *disk_key;
1768         struct btrfs_chunk *chunk;
1769         u8 *ptr;
1770         int ret = 0;
1771         u32 num_stripes;
1772         u32 array_size;
1773         u32 len = 0;
1774         u32 cur;
1775         struct btrfs_key key;
1776
1777         array_size = btrfs_super_sys_array_size(super_copy);
1778
1779         ptr = super_copy->sys_chunk_array;
1780         cur = 0;
1781
1782         while (cur < array_size) {
1783                 disk_key = (struct btrfs_disk_key *)ptr;
1784                 btrfs_disk_key_to_cpu(&key, disk_key);
1785
1786                 len = sizeof(*disk_key);
1787
1788                 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
1789                         chunk = (struct btrfs_chunk *)(ptr + len);
1790                         num_stripes = btrfs_stack_chunk_num_stripes(chunk);
1791                         len += btrfs_chunk_item_size(num_stripes);
1792                 } else {
1793                         ret = -EIO;
1794                         break;
1795                 }
1796                 if (key.objectid == chunk_objectid &&
1797                     key.offset == chunk_offset) {
1798                         memmove(ptr, ptr + len, array_size - (cur + len));
1799                         array_size -= len;
1800                         btrfs_set_super_sys_array_size(super_copy, array_size);
1801                 } else {
1802                         ptr += len;
1803                         cur += len;
1804                 }
1805         }
1806         return ret;
1807 }
1808
1809 static int btrfs_relocate_chunk(struct btrfs_root *root,
1810                          u64 chunk_tree, u64 chunk_objectid,
1811                          u64 chunk_offset)
1812 {
1813         struct extent_map_tree *em_tree;
1814         struct btrfs_root *extent_root;
1815         struct btrfs_trans_handle *trans;
1816         struct extent_map *em;
1817         struct map_lookup *map;
1818         int ret;
1819         int i;
1820
1821         root = root->fs_info->chunk_root;
1822         extent_root = root->fs_info->extent_root;
1823         em_tree = &root->fs_info->mapping_tree.map_tree;
1824
1825         ret = btrfs_can_relocate(extent_root, chunk_offset);
1826         if (ret)
1827                 return -ENOSPC;
1828
1829         /* step one, relocate all the extents inside this chunk */
1830         ret = btrfs_relocate_block_group(extent_root, chunk_offset);
1831         if (ret)
1832                 return ret;
1833
1834         trans = btrfs_start_transaction(root, 0);
1835         BUG_ON(IS_ERR(trans));
1836
1837         lock_chunks(root);
1838
1839         /*
1840          * step two, delete the device extents and the
1841          * chunk tree entries
1842          */
1843         read_lock(&em_tree->lock);
1844         em = lookup_extent_mapping(em_tree, chunk_offset, 1);
1845         read_unlock(&em_tree->lock);
1846
1847         BUG_ON(em->start > chunk_offset ||
1848                em->start + em->len < chunk_offset);
1849         map = (struct map_lookup *)em->bdev;
1850
1851         for (i = 0; i < map->num_stripes; i++) {
1852                 ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
1853                                             map->stripes[i].physical);
1854                 BUG_ON(ret);
1855
1856                 if (map->stripes[i].dev) {
1857                         ret = btrfs_update_device(trans, map->stripes[i].dev);
1858                         BUG_ON(ret);
1859                 }
1860         }
1861         ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
1862                                chunk_offset);
1863
1864         BUG_ON(ret);
1865
1866         trace_btrfs_chunk_free(root, map, chunk_offset, em->len);
1867
1868         if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
1869                 ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
1870                 BUG_ON(ret);
1871         }
1872
1873         ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
1874         BUG_ON(ret);
1875
1876         write_lock(&em_tree->lock);
1877         remove_extent_mapping(em_tree, em);
1878         write_unlock(&em_tree->lock);
1879
1880         kfree(map);
1881         em->bdev = NULL;
1882
1883         /* once for the tree */
1884         free_extent_map(em);
1885         /* once for us */
1886         free_extent_map(em);
1887
1888         unlock_chunks(root);
1889         btrfs_end_transaction(trans, root);
1890         return 0;
1891 }
1892
1893 static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
1894 {
1895         struct btrfs_root *chunk_root = root->fs_info->chunk_root;
1896         struct btrfs_path *path;
1897         struct extent_buffer *leaf;
1898         struct btrfs_chunk *chunk;
1899         struct btrfs_key key;
1900         struct btrfs_key found_key;
1901         u64 chunk_tree = chunk_root->root_key.objectid;
1902         u64 chunk_type;
1903         bool retried = false;
1904         int failed = 0;
1905         int ret;
1906
1907         path = btrfs_alloc_path();
1908         if (!path)
1909                 return -ENOMEM;
1910
1911 again:
1912         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
1913         key.offset = (u64)-1;
1914         key.type = BTRFS_CHUNK_ITEM_KEY;
1915
1916         while (1) {
1917                 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
1918                 if (ret < 0)
1919                         goto error;
1920                 BUG_ON(ret == 0);
1921
1922                 ret = btrfs_previous_item(chunk_root, path, key.objectid,
1923                                           key.type);
1924                 if (ret < 0)
1925                         goto error;
1926                 if (ret > 0)
1927                         break;
1928
1929                 leaf = path->nodes[0];
1930                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1931
1932                 chunk = btrfs_item_ptr(leaf, path->slots[0],
1933                                        struct btrfs_chunk);
1934                 chunk_type = btrfs_chunk_type(leaf, chunk);
1935                 btrfs_release_path(path);
1936
1937                 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
1938                         ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
1939                                                    found_key.objectid,
1940                                                    found_key.offset);
1941                         if (ret == -ENOSPC)
1942                                 failed++;
1943                         else if (ret)
1944                                 BUG();
1945                 }
1946
1947                 if (found_key.offset == 0)
1948                         break;
1949                 key.offset = found_key.offset - 1;
1950         }
1951         ret = 0;
1952         if (failed && !retried) {
1953                 failed = 0;
1954                 retried = true;
1955                 goto again;
1956         } else if (failed && retried) {
1957                 WARN_ON(1);
1958                 ret = -ENOSPC;
1959         }
1960 error:
1961         btrfs_free_path(path);
1962         return ret;
1963 }
1964
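     /*
      * Return factor tenths of num: div_factor(num, 1) is roughly 10% of
      * num, and a factor of 10 returns num unchanged.
      */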
1965 static u64 div_factor(u64 num, int factor)
1966 {
1967         if (factor == 10)
1968                 return num;
1969         num *= factor;
1970         do_div(num, 10);
1971         return num;
1972 }
1973
1974 int btrfs_balance(struct btrfs_root *dev_root)
1975 {
1976         int ret;
1977         struct list_head *devices = &dev_root->fs_info->fs_devices->devices;
1978         struct btrfs_device *device;
1979         u64 old_size;
1980         u64 size_to_free;
1981         struct btrfs_path *path;
1982         struct btrfs_key key;
1983         struct btrfs_root *chunk_root = dev_root->fs_info->chunk_root;
1984         struct btrfs_trans_handle *trans;
1985         struct btrfs_key found_key;
1986
1987         if (dev_root->fs_info->sb->s_flags & MS_RDONLY)
1988                 return -EROFS;
1989
1990         if (!capable(CAP_SYS_ADMIN))
1991                 return -EPERM;
1992
1993         mutex_lock(&dev_root->fs_info->volume_mutex);
1994         dev_root = dev_root->fs_info->dev_root;
1995
1996         /* step one, make some room on all the devices */
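             /*
              * Shrinking each device a little and then growing it back does
              * not change its size, but it relocates any chunks sitting past
              * the temporary smaller size, leaving free space at the tail of
              * each device for the chunks that step two moves around.
              */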
1997         list_for_each_entry(device, devices, dev_list) {
1998                 old_size = device->total_bytes;
1999                 size_to_free = div_factor(old_size, 1);
2000                 size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
2001                 if (!device->writeable ||
2002                     device->total_bytes - device->bytes_used > size_to_free)
2003                         continue;
2004
2005                 ret = btrfs_shrink_device(device, old_size - size_to_free);
2006                 if (ret == -ENOSPC)
2007                         break;
2008                 BUG_ON(ret);
2009
2010                 trans = btrfs_start_transaction(dev_root, 0);
2011                 BUG_ON(IS_ERR(trans));
2012
2013                 ret = btrfs_grow_device(trans, device, old_size);
2014                 BUG_ON(ret);
2015
2016                 btrfs_end_transaction(trans, dev_root);
2017         }
2018
2019         /* step two, relocate all the chunks */
2020         path = btrfs_alloc_path();
2021         BUG_ON(!path);
2022
2023         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2024         key.offset = (u64)-1;
2025         key.type = BTRFS_CHUNK_ITEM_KEY;
2026
2027         while (1) {
2028                 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2029                 if (ret < 0)
2030                         goto error;
2031
2032                 /*
2033                  * this shouldn't happen, it means the last relocate
2034                  * failed
2035                  */
2036                 if (ret == 0)
2037                         break;
2038
2039                 ret = btrfs_previous_item(chunk_root, path, 0,
2040                                           BTRFS_CHUNK_ITEM_KEY);
2041                 if (ret)
2042                         break;
2043
2044                 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2045                                       path->slots[0]);
2046                 if (found_key.objectid != key.objectid)
2047                         break;
2048
2049                 /* chunk zero is special */
2050                 if (found_key.offset == 0)
2051                         break;
2052
2053                 btrfs_release_path(path);
2054                 ret = btrfs_relocate_chunk(chunk_root,
2055                                            chunk_root->root_key.objectid,
2056                                            found_key.objectid,
2057                                            found_key.offset);
2058                 BUG_ON(ret && ret != -ENOSPC);
2059                 key.offset = found_key.offset - 1;
2060         }
2061         ret = 0;
2062 error:
2063         btrfs_free_path(path);
2064         mutex_unlock(&dev_root->fs_info->volume_mutex);
2065         return ret;
2066 }
2067
2068 /*
2069  * shrinking a device means finding all of the device extents past
2070  * the new size, and then following the back refs to the chunks.
2071  * The chunk relocation code actually frees the device extent.
2072  */
2073 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
2074 {
2075         struct btrfs_trans_handle *trans;
2076         struct btrfs_root *root = device->dev_root;
2077         struct btrfs_dev_extent *dev_extent = NULL;
2078         struct btrfs_path *path;
2079         u64 length;
2080         u64 chunk_tree;
2081         u64 chunk_objectid;
2082         u64 chunk_offset;
2083         int ret;
2084         int slot;
2085         int failed = 0;
2086         bool retried = false;
2087         struct extent_buffer *l;
2088         struct btrfs_key key;
2089         struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
2090         u64 old_total = btrfs_super_total_bytes(super_copy);
2091         u64 old_size = device->total_bytes;
2092         u64 diff = device->total_bytes - new_size;
2093
2094         if (new_size >= device->total_bytes)
2095                 return -EINVAL;
2096
2097         path = btrfs_alloc_path();
2098         if (!path)
2099                 return -ENOMEM;
2100
2101         path->reada = 2;
2102
2103         lock_chunks(root);
2104
2105         device->total_bytes = new_size;
2106         if (device->writeable)
2107                 device->fs_devices->total_rw_bytes -= diff;
2108         unlock_chunks(root);
2109
2110 again:
2111         key.objectid = device->devid;
2112         key.offset = (u64)-1;
2113         key.type = BTRFS_DEV_EXTENT_KEY;
2114
2115         while (1) {
2116                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2117                 if (ret < 0)
2118                         goto done;
2119
2120                 ret = btrfs_previous_item(root, path, 0, key.type);
2121                 if (ret < 0)
2122                         goto done;
2123                 if (ret) {
2124                         ret = 0;
2125                         btrfs_release_path(path);
2126                         break;
2127                 }
2128
2129                 l = path->nodes[0];
2130                 slot = path->slots[0];
2131                 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
2132
2133                 if (key.objectid != device->devid) {
2134                         btrfs_release_path(path);
2135                         break;
2136                 }
2137
2138                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
2139                 length = btrfs_dev_extent_length(l, dev_extent);
2140
2141                 if (key.offset + length <= new_size) {
2142                         btrfs_release_path(path);
2143                         break;
2144                 }
2145
2146                 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
2147                 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
2148                 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
2149                 btrfs_release_path(path);
2150
2151                 ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
2152                                            chunk_offset);
2153                 if (ret && ret != -ENOSPC)
2154                         goto done;
2155                 if (ret == -ENOSPC)
2156                         failed++;
2157                 key.offset -= 1;
2158         }
2159
2160         if (failed && !retried) {
2161                 failed = 0;
2162                 retried = true;
2163                 goto again;
2164         } else if (failed && retried) {
2165                 ret = -ENOSPC;
2166                 lock_chunks(root);
2167
2168                 device->total_bytes = old_size;
2169                 if (device->writeable)
2170                         device->fs_devices->total_rw_bytes += diff;
2171                 unlock_chunks(root);
2172                 goto done;
2173         }
2174
2175         /* Shrinking succeeded, else we would be at "done". */
2176         trans = btrfs_start_transaction(root, 0);
2177         if (IS_ERR(trans)) {
2178                 ret = PTR_ERR(trans);
2179                 goto done;
2180         }
2181
2182         lock_chunks(root);
2183
2184         device->disk_total_bytes = new_size;
2185         /* Now btrfs_update_device() will change the on-disk size. */
2186         ret = btrfs_update_device(trans, device);
2187         if (ret) {
2188                 unlock_chunks(root);
2189                 btrfs_end_transaction(trans, root);
2190                 goto done;
2191         }
2192         WARN_ON(diff > old_total);
2193         btrfs_set_super_total_bytes(super_copy, old_total - diff);
2194         unlock_chunks(root);
2195         btrfs_end_transaction(trans, root);
2196 done:
2197         btrfs_free_path(path);
2198         return ret;
2199 }
2200
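     /*
      * Append the chunk key and chunk item for a new system chunk to the
      * sys_chunk_array in the in-memory super block, so the array stays in
      * step with the chunk tree.  Fails with -EFBIG if the array would
      * overflow BTRFS_SYSTEM_CHUNK_ARRAY_SIZE.
      */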
2201 static int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
2202                            struct btrfs_root *root,
2203                            struct btrfs_key *key,
2204                            struct btrfs_chunk *chunk, int item_size)
2205 {
2206         struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
2207         struct btrfs_disk_key disk_key;
2208         u32 array_size;
2209         u8 *ptr;
2210
2211         array_size = btrfs_super_sys_array_size(super_copy);
2212         if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
2213                 return -EFBIG;
2214
2215         ptr = super_copy->sys_chunk_array + array_size;
2216         btrfs_cpu_key_to_disk(&disk_key, key);
2217         memcpy(ptr, &disk_key, sizeof(disk_key));
2218         ptr += sizeof(disk_key);
2219         memcpy(ptr, chunk, item_size);
2220         item_size += sizeof(disk_key);
2221         btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
2222         return 0;
2223 }
2224
2225 /*
2226  * sort the devices in descending order by max_avail, total_avail
2227  */
2228 static int btrfs_cmp_device_info(const void *a, const void *b)
2229 {
2230         const struct btrfs_device_info *di_a = a;
2231         const struct btrfs_device_info *di_b = b;
2232
2233         if (di_a->max_avail > di_b->max_avail)
2234                 return -1;
2235         if (di_a->max_avail < di_b->max_avail)
2236                 return 1;
2237         if (di_a->total_avail > di_b->total_avail)
2238                 return -1;
2239         if (di_a->total_avail < di_b->total_avail)
2240                 return 1;
2241         return 0;
2242 }
2243
2244 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
2245                                struct btrfs_root *extent_root,
2246                                struct map_lookup **map_ret,
2247                                u64 *num_bytes_out, u64 *stripe_size_out,
2248                                u64 start, u64 type)
2249 {
2250         struct btrfs_fs_info *info = extent_root->fs_info;
2251         struct btrfs_fs_devices *fs_devices = info->fs_devices;
2252         struct list_head *cur;
2253         struct map_lookup *map = NULL;
2254         struct extent_map_tree *em_tree;
2255         struct extent_map *em;
2256         struct btrfs_device_info *devices_info = NULL;
2257         u64 total_avail;
2258         int num_stripes;        /* total number of stripes to allocate */
2259         int sub_stripes;        /* sub_stripes info for map */
2260         int dev_stripes;        /* stripes per dev */
2261         int devs_max;           /* max devs to use */
2262         int devs_min;           /* min devs needed */
2263         int devs_increment;     /* ndevs has to be a multiple of this */
2264         int ncopies;            /* how many copies of the data there are */
2265         int ret;
2266         u64 max_stripe_size;
2267         u64 max_chunk_size;
2268         u64 stripe_size;
2269         u64 num_bytes;
2270         int ndevs;
2271         int i;
2272         int j;
2273
2274         if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
2275             (type & BTRFS_BLOCK_GROUP_DUP)) {
2276                 WARN_ON(1);
2277                 type &= ~BTRFS_BLOCK_GROUP_DUP;
2278         }
2279
2280         if (list_empty(&fs_devices->alloc_list))
2281                 return -ENOSPC;
2282
2283         sub_stripes = 1;
2284         dev_stripes = 1;
2285         devs_increment = 1;
2286         ncopies = 1;
2287         devs_max = 0;   /* 0 == as many as possible */
2288         devs_min = 1;
2289
2290         /*
2291          * define the properties of each RAID type.
2292          * FIXME: move this to a global table and use it in all RAID
2293          * calculation code
2294          */
2295         if (type & (BTRFS_BLOCK_GROUP_DUP)) {
2296                 dev_stripes = 2;
2297                 ncopies = 2;
2298                 devs_max = 1;
2299         } else if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
2300                 devs_min = 2;
2301         } else if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
2302                 devs_increment = 2;
2303                 ncopies = 2;
2304                 devs_max = 2;
2305                 devs_min = 2;
2306         } else if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
2307                 sub_stripes = 2;
2308                 devs_increment = 2;
2309                 ncopies = 2;
2310                 devs_min = 4;
2311         } else {
2312                 devs_max = 1;
2313         }
2314
2315         if (type & BTRFS_BLOCK_GROUP_DATA) {
2316                 max_stripe_size = 1024 * 1024 * 1024;
2317                 max_chunk_size = 10 * max_stripe_size;
2318         } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
2319                 max_stripe_size = 256 * 1024 * 1024;
2320                 max_chunk_size = max_stripe_size;
2321         } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
2322                 max_stripe_size = 8 * 1024 * 1024;
2323                 max_chunk_size = 2 * max_stripe_size;
2324         } else {
2325                 printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n",
2326                        type);
2327                 BUG_ON(1);
2328         }
2329
2330         /* we don't want a chunk larger than 10% of writeable space */
2331         max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
2332                              max_chunk_size);
2333
2334         devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
2335                                GFP_NOFS);
2336         if (!devices_info)
2337                 return -ENOMEM;
2338
2339         cur = fs_devices->alloc_list.next;
2340
2341         /*
2342          * in the first pass through the devices list, we gather information
2343          * about the available holes on each device.
2344          */
2345         ndevs = 0;
2346         while (cur != &fs_devices->alloc_list) {
2347                 struct btrfs_device *device;
2348                 u64 max_avail;
2349                 u64 dev_offset;
2350
2351                 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
2352
2353                 cur = cur->next;
2354
2355                 if (!device->writeable) {
2356                         printk(KERN_ERR
2357                                "btrfs: read-only device in alloc_list\n");
2358                         WARN_ON(1);
2359                         continue;
2360                 }
2361
2362                 if (!device->in_fs_metadata)
2363                         continue;
2364
2365                 if (device->total_bytes > device->bytes_used)
2366                         total_avail = device->total_bytes - device->bytes_used;
2367                 else
2368                         total_avail = 0;
2369                 /* avail is off by max(alloc_start, 1MB), but that is the same
2370                  * for all devices, so it doesn't hurt the sorting later on
2371                  */
2372
2373                 ret = find_free_dev_extent(trans, device,
2374                                            max_stripe_size * dev_stripes,
2375                                            &dev_offset, &max_avail);
2376                 if (ret && ret != -ENOSPC)
2377                         goto error;
2378
2379                 if (ret == 0)
2380                         max_avail = max_stripe_size * dev_stripes;
2381
2382                 if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
2383                         continue;
2384
2385                 devices_info[ndevs].dev_offset = dev_offset;
2386                 devices_info[ndevs].max_avail = max_avail;
2387                 devices_info[ndevs].total_avail = total_avail;
2388                 devices_info[ndevs].dev = device;
2389                 ++ndevs;
2390         }
2391
2392         /*
2393          * now sort the devices by hole size / available space
2394          */
2395         sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
2396              btrfs_cmp_device_info, NULL);
2397
2398         /* round down to number of usable stripes */
2399         ndevs -= ndevs % devs_increment;
2400
2401         if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
2402                 ret = -ENOSPC;
2403                 goto error;
2404         }
2405
2406         if (devs_max && ndevs > devs_max)
2407                 ndevs = devs_max;
2408         /*
2409          * the primary goal is to maximize the number of stripes, so use as many
2410          * devices as possible, even if the stripes are not maximum sized.
2411          */
2412         stripe_size = devices_info[ndevs-1].max_avail;
2413         num_stripes = ndevs * dev_stripes;
2414
2415         if (stripe_size * num_stripes > max_chunk_size * ncopies) {
2416                 stripe_size = max_chunk_size * ncopies;
2417                 do_div(stripe_size, num_stripes);
2418         }
2419
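             /*
              * stripe_size so far is the space consumed on each device; turn
              * it into the size of a single stripe and round it down to a
              * multiple of BTRFS_STRIPE_LEN.
              */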
2420         do_div(stripe_size, dev_stripes);
2421         do_div(stripe_size, BTRFS_STRIPE_LEN);
2422         stripe_size *= BTRFS_STRIPE_LEN;
2423
2424         map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
2425         if (!map) {
2426                 ret = -ENOMEM;
2427                 goto error;
2428         }
2429         map->num_stripes = num_stripes;
2430
2431         for (i = 0; i < ndevs; ++i) {
2432                 for (j = 0; j < dev_stripes; ++j) {
2433                         int s = i * dev_stripes + j;
2434                         map->stripes[s].dev = devices_info[i].dev;
2435                         map->stripes[s].physical = devices_info[i].dev_offset +
2436                                                    j * stripe_size;
2437                 }
2438         }
2439         map->sector_size = extent_root->sectorsize;
2440         map->stripe_len = BTRFS_STRIPE_LEN;
2441         map->io_align = BTRFS_STRIPE_LEN;
2442         map->io_width = BTRFS_STRIPE_LEN;
2443         map->type = type;
2444         map->sub_stripes = sub_stripes;
2445
2446         *map_ret = map;
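             /* logical chunk size: the raw stripe space divided by the copy count */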
2447         num_bytes = stripe_size * (num_stripes / ncopies);
2448
2449         *stripe_size_out = stripe_size;
2450         *num_bytes_out = num_bytes;
2451
2452         trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
2453
2454         em = alloc_extent_map();
2455         if (!em) {
2456                 ret = -ENOMEM;
2457                 goto error;
2458         }
2459         em->bdev = (struct block_device *)map;
2460         em->start = start;
2461         em->len = num_bytes;
2462         em->block_start = 0;
2463         em->block_len = em->len;
2464
2465         em_tree = &extent_root->fs_info->mapping_tree.map_tree;
2466         write_lock(&em_tree->lock);
2467         ret = add_extent_mapping(em_tree, em);
2468         write_unlock(&em_tree->lock);
2469         BUG_ON(ret);
2470         free_extent_map(em);
2471
2472         ret = btrfs_make_block_group(trans, extent_root, 0, type,
2473                                      BTRFS_FIRST_CHUNK_TREE_OBJECTID,
2474                                      start, num_bytes);
2475         BUG_ON(ret);
2476
2477         for (i = 0; i < map->num_stripes; ++i) {
2478                 struct btrfs_device *device;
2479                 u64 dev_offset;
2480
2481                 device = map->stripes[i].dev;
2482                 dev_offset = map->stripes[i].physical;
2483
2484                 ret = btrfs_alloc_dev_extent(trans, device,
2485                                 info->chunk_root->root_key.objectid,
2486                                 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
2487                                 start, dev_offset, stripe_size);
2488                 BUG_ON(ret);
2489         }
2490
2491         kfree(devices_info);
2492         return 0;
2493
2494 error:
2495         kfree(map);
2496         kfree(devices_info);
2497         return ret;
2498 }
2499
2500 static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
2501                                 struct btrfs_root *extent_root,
2502                                 struct map_lookup *map, u64 chunk_offset,
2503                                 u64 chunk_size, u64 stripe_size)
2504 {
2505         u64 dev_offset;
2506         struct btrfs_key key;
2507         struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
2508         struct btrfs_device *device;
2509         struct btrfs_chunk *chunk;
2510         struct btrfs_stripe *stripe;
2511         size_t item_size = btrfs_chunk_item_size(map->num_stripes);
2512         int index = 0;
2513         int ret;
2514
2515         chunk = kzalloc(item_size, GFP_NOFS);
2516         if (!chunk)
2517                 return -ENOMEM;
2518
2519         index = 0;
2520         while (index < map->num_stripes) {
2521                 device = map->stripes[index].dev;
2522                 device->bytes_used += stripe_size;
2523                 ret = btrfs_update_device(trans, device);
2524                 BUG_ON(ret);
2525                 index++;
2526         }
2527
2528         index = 0;
2529         stripe = &chunk->stripe;
2530         while (index < map->num_stripes) {
2531                 device = map->stripes[index].dev;
2532                 dev_offset = map->stripes[index].physical;
2533
2534                 btrfs_set_stack_stripe_devid(stripe, device->devid);
2535                 btrfs_set_stack_stripe_offset(stripe, dev_offset);
2536                 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
2537                 stripe++;
2538                 index++;
2539         }
2540
2541         btrfs_set_stack_chunk_length(chunk, chunk_size);
2542         btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
2543         btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
2544         btrfs_set_stack_chunk_type(chunk, map->type);
2545         btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
2546         btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
2547         btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
2548         btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
2549         btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
2550
2551         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2552         key.type = BTRFS_CHUNK_ITEM_KEY;
2553         key.offset = chunk_offset;
2554
2555         ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
2556         BUG_ON(ret);
2557
2558         if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2559                 ret = btrfs_add_system_chunk(trans, chunk_root, &key, chunk,
2560                                              item_size);
2561                 BUG_ON(ret);
2562         }
2563
2564         kfree(chunk);
2565         return 0;
2566 }
2567
2568 /*
2569  * Chunk allocation falls into two parts. The first part does the work
2570  * that makes the newly allocated chunk usable, but does not do any
2571  * operation that modifies the chunk tree. The second part does the work
2572  * that requires modifying the chunk tree. This division is important for
2573  * the bootstrap process of adding storage to a seed btrfs.
2574  */
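     /*
      * Here __btrfs_alloc_chunk() is the first part and
      * __finish_chunk_alloc() the second: btrfs_alloc_chunk() runs them back
      * to back, while init_first_rw_device() defers the second part until
      * both the metadata and the system block groups exist.
      */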
2575 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
2576                       struct btrfs_root *extent_root, u64 type)
2577 {
2578         u64 chunk_offset;
2579         u64 chunk_size;
2580         u64 stripe_size;
2581         struct map_lookup *map;
2582         struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
2583         int ret;
2584
2585         ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
2586                               &chunk_offset);
2587         if (ret)
2588                 return ret;
2589
2590         ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
2591                                   &stripe_size, chunk_offset, type);
2592         if (ret)
2593                 return ret;
2594
2595         ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
2596                                    chunk_size, stripe_size);
2597         BUG_ON(ret);
2598         return 0;
2599 }
2600
2601 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
2602                                          struct btrfs_root *root,
2603                                          struct btrfs_device *device)
2604 {
2605         u64 chunk_offset;
2606         u64 sys_chunk_offset;
2607         u64 chunk_size;
2608         u64 sys_chunk_size;
2609         u64 stripe_size;
2610         u64 sys_stripe_size;
2611         u64 alloc_profile;
2612         struct map_lookup *map;
2613         struct map_lookup *sys_map;
2614         struct btrfs_fs_info *fs_info = root->fs_info;
2615         struct btrfs_root *extent_root = fs_info->extent_root;
2616         int ret;
2617
2618         ret = find_next_chunk(fs_info->chunk_root,
2619                               BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
2620         BUG_ON(ret);
2621
2622         alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
2623                         (fs_info->metadata_alloc_profile &
2624                          fs_info->avail_metadata_alloc_bits);
2625         alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
2626
2627         ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
2628                                   &stripe_size, chunk_offset, alloc_profile);
2629         BUG_ON(ret);
2630
2631         sys_chunk_offset = chunk_offset + chunk_size;
2632
2633         alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
2634                         (fs_info->system_alloc_profile &
2635                          fs_info->avail_system_alloc_bits);
2636         alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
2637
2638         ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
2639                                   &sys_chunk_size, &sys_stripe_size,
2640                                   sys_chunk_offset, alloc_profile);
2641         BUG_ON(ret);
2642
2643         ret = btrfs_add_device(trans, fs_info->chunk_root, device);
2644         BUG_ON(ret);
2645
2646         /*
2647          * Modifying the chunk tree requires allocating new blocks from both
2648          * the system block group and the metadata block group. So we can
2649          * only do operations that modify the chunk tree after both block
2650          * groups have been created.
2651          */
2652         ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
2653                                    chunk_size, stripe_size);
2654         BUG_ON(ret);
2655
2656         ret = __finish_chunk_alloc(trans, extent_root, sys_map,
2657                                    sys_chunk_offset, sys_chunk_size,
2658                                    sys_stripe_size);
2659         BUG_ON(ret);
2660         return 0;
2661 }
2662
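     /*
      * A chunk is considered read only if any of its stripes lives on a
      * device that is not writeable, unless the filesystem is mounted with
      * -o degraded.
      */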
2663 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
2664 {
2665         struct extent_map *em;
2666         struct map_lookup *map;
2667         struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
2668         int readonly = 0;
2669         int i;
2670
2671         read_lock(&map_tree->map_tree.lock);
2672         em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
2673         read_unlock(&map_tree->map_tree.lock);
2674         if (!em)
2675                 return 1;
2676
2677         if (btrfs_test_opt(root, DEGRADED)) {
2678                 free_extent_map(em);
2679                 return 0;
2680         }
2681
2682         map = (struct map_lookup *)em->bdev;
2683         for (i = 0; i < map->num_stripes; i++) {
2684                 if (!map->stripes[i].dev->writeable) {
2685                         readonly = 1;
2686                         break;
2687                 }
2688         }
2689         free_extent_map(em);
2690         return readonly;
2691 }
2692
2693 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
2694 {
2695         extent_map_tree_init(&tree->map_tree);
2696 }
2697
2698 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
2699 {
2700         struct extent_map *em;
2701
2702         while (1) {
2703                 write_lock(&tree->map_tree.lock);
2704                 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
2705                 if (em)
2706                         remove_extent_mapping(&tree->map_tree, em);
2707                 write_unlock(&tree->map_tree.lock);
2708                 if (!em)
2709                         break;
2710                 kfree(em->bdev);
2711                 /* once for us */
2712                 free_extent_map(em);
2713                 /* once for the tree */
2714                 free_extent_map(em);
2715         }
2716 }
2717
2718 int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
2719 {
2720         struct extent_map *em;
2721         struct map_lookup *map;
2722         struct extent_map_tree *em_tree = &map_tree->map_tree;
2723         int ret;
2724
2725         read_lock(&em_tree->lock);
2726         em = lookup_extent_mapping(em_tree, logical, len);
2727         read_unlock(&em_tree->lock);
2728         BUG_ON(!em);
2729
2730         BUG_ON(em->start > logical || em->start + em->len < logical);
2731         map = (struct map_lookup *)em->bdev;
2732         if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
2733                 ret = map->num_stripes;
2734         else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
2735                 ret = map->sub_stripes;
2736         else
2737                 ret = 1;
2738         free_extent_map(em);
2739         return ret;
2740 }
2741
2742 static int find_live_mirror(struct map_lookup *map, int first, int num,
2743                             int optimal)
2744 {
2745         int i;
2746         if (map->stripes[optimal].dev->bdev)
2747                 return optimal;
2748         for (i = first; i < first + num; i++) {
2749                 if (map->stripes[i].dev->bdev)
2750                         return i;
2751         }
2752         /* we couldn't find one that doesn't fail.  Just return something
2753          * and the io error handling code will clean up eventually
2754          */
2755         return optimal;
2756 }
2757
2758 static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
2759                              u64 logical, u64 *length,
2760                              struct btrfs_multi_bio **multi_ret,
2761                              int mirror_num)
2762 {
2763         struct extent_map *em;
2764         struct map_lookup *map;
2765         struct extent_map_tree *em_tree = &map_tree->map_tree;
2766         u64 offset;
2767         u64 stripe_offset;
2768         u64 stripe_end_offset;
2769         u64 stripe_nr;
2770         u64 stripe_nr_orig;
2771         u64 stripe_nr_end;
2772         int stripes_allocated = 8;
2773         int stripes_required = 1;
2774         int stripe_index;
2775         int i;
2776         int num_stripes;
2777         int max_errors = 0;
2778         struct btrfs_multi_bio *multi = NULL;
2779
2780         if (multi_ret && !(rw & (REQ_WRITE | REQ_DISCARD)))
2781                 stripes_allocated = 1;
2782 again:
2783         if (multi_ret) {
2784                 multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
2785                                 GFP_NOFS);
2786                 if (!multi)
2787                         return -ENOMEM;
2788
2789                 atomic_set(&multi->error, 0);
2790         }
2791
2792         read_lock(&em_tree->lock);
2793         em = lookup_extent_mapping(em_tree, logical, *length);
2794         read_unlock(&em_tree->lock);
2795
2796         if (!em) {
2797                 printk(KERN_CRIT "unable to find logical %llu len %llu\n",
2798                        (unsigned long long)logical,
2799                        (unsigned long long)*length);
2800                 BUG();
2801         }
2802
2803         BUG_ON(em->start > logical || em->start + em->len < logical);
2804         map = (struct map_lookup *)em->bdev;
2805         offset = logical - em->start;
2806
2807         if (mirror_num > map->num_stripes)
2808                 mirror_num = 0;
2809
2810         /* if our multi bio struct is too small, back off and try again */
2811         if (rw & REQ_WRITE) {
2812                 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
2813                                  BTRFS_BLOCK_GROUP_DUP)) {
2814                         stripes_required = map->num_stripes;
2815                         max_errors = 1;
2816                 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
2817                         stripes_required = map->sub_stripes;
2818                         max_errors = 1;
2819                 }
2820         }
2821         if (rw & REQ_DISCARD) {
2822                 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
2823                                  BTRFS_BLOCK_GROUP_RAID1 |
2824                                  BTRFS_BLOCK_GROUP_DUP |
2825                                  BTRFS_BLOCK_GROUP_RAID10)) {
2826                         stripes_required = map->num_stripes;
2827                 }
2828         }
2829         if (multi_ret && (rw & (REQ_WRITE | REQ_DISCARD)) &&
2830             stripes_allocated < stripes_required) {
2831                 stripes_allocated = map->num_stripes;
2832                 free_extent_map(em);
2833                 kfree(multi);
2834                 goto again;
2835         }
2836         stripe_nr = offset;
2837         /*
2838          * stripe_nr counts the total number of stripes we have to stride
2839          * to get to this block
2840          */
2841         do_div(stripe_nr, map->stripe_len);
2842
2843         stripe_offset = stripe_nr * map->stripe_len;
2844         BUG_ON(offset < stripe_offset);
2845
2846         /* stripe_offset is the offset of this block in its stripe */
2847         stripe_offset = offset - stripe_offset;
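             /*
              * For example, with a 64KiB stripe_len and offset == 200KiB this
              * gives stripe_nr == 3 and
              * stripe_offset == 200KiB - 3 * 64KiB == 8KiB.
              */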
2848
2849         if (rw & REQ_DISCARD)
2850                 *length = min_t(u64, em->len - offset, *length);
2851         else if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
2852                               BTRFS_BLOCK_GROUP_RAID1 |
2853                               BTRFS_BLOCK_GROUP_RAID10 |
2854                               BTRFS_BLOCK_GROUP_DUP)) {
2855                 /* we limit the length of each bio to what fits in a stripe */
2856                 *length = min_t(u64, em->len - offset,
2857                                 map->stripe_len - stripe_offset);
2858         } else {
2859                 *length = em->len - offset;
2860         }
2861
2862         if (!multi_ret)
2863                 goto out;
2864
2865         num_stripes = 1;
2866         stripe_index = 0;
2867         stripe_nr_orig = stripe_nr;
2868         stripe_nr_end = (offset + *length + map->stripe_len - 1) &
2869                         (~(map->stripe_len - 1));
2870         do_div(stripe_nr_end, map->stripe_len);
2871         stripe_end_offset = stripe_nr_end * map->stripe_len -
2872                             (offset + *length);
2873         if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
2874                 if (rw & REQ_DISCARD)
2875                         num_stripes = min_t(u64, map->num_stripes,
2876                                             stripe_nr_end - stripe_nr_orig);
2877                 stripe_index = do_div(stripe_nr, map->num_stripes);
2878         } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
2879                 if (rw & (REQ_WRITE | REQ_DISCARD))
2880                         num_stripes = map->num_stripes;
2881                 else if (mirror_num)
2882                         stripe_index = mirror_num - 1;
2883                 else {
2884                         stripe_index = find_live_mirror(map, 0,
2885                                             map->num_stripes,
2886                                             current->pid % map->num_stripes);
2887                 }
2888
2889         } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
2890                 if (rw & (REQ_WRITE | REQ_DISCARD))
2891                         num_stripes = map->num_stripes;
2892                 else if (mirror_num)
2893                         stripe_index = mirror_num - 1;
2894
2895         } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
2896                 int factor = map->num_stripes / map->sub_stripes;
2897
2898                 stripe_index = do_div(stripe_nr, factor);
2899                 stripe_index *= map->sub_stripes;
2900
2901                 if (rw & REQ_WRITE)
2902                         num_stripes = map->sub_stripes;
2903                 else if (rw & REQ_DISCARD)
2904                         num_stripes = min_t(u64, map->sub_stripes *
2905                                             (stripe_nr_end - stripe_nr_orig),
2906                                             map->num_stripes);
2907                 else if (mirror_num)
2908                         stripe_index += mirror_num - 1;
2909                 else {
2910                         stripe_index = find_live_mirror(map, stripe_index,
2911                                               map->sub_stripes, stripe_index +
2912                                               current->pid % map->sub_stripes);
2913                 }
2914         } else {
2915                 /*
2916                  * after this do_div call, stripe_nr is the number of stripes
2917                  * on this device we have to walk to find the data, and
2918                  * stripe_index is the index of our device in the stripe array
2919                  */
2920                 stripe_index = do_div(stripe_nr, map->num_stripes);
2921         }
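        /*
         * Illustrative do_div() example (values assumed): with
         * map->num_stripes = 4 and stripe_nr = 10, do_div(stripe_nr, 4)
         * returns the remainder 2 as stripe_index and leaves the quotient
         * 2 in stripe_nr, i.e. the data sits on the third device in the
         * stripe array, two full stripes into that device.
         */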
2922         BUG_ON(stripe_index >= map->num_stripes);
2923
2924         if (rw & REQ_DISCARD) {
2925                 for (i = 0; i < num_stripes; i++) {
2926                         multi->stripes[i].physical =
2927                                 map->stripes[stripe_index].physical +
2928                                 stripe_offset + stripe_nr * map->stripe_len;
2929                         multi->stripes[i].dev = map->stripes[stripe_index].dev;
2930
2931                         if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
2932                                 u64 stripes;
2933                                 u32 last_stripe = 0;
2934                                 int j;
2935
2936                                 div_u64_rem(stripe_nr_end - 1,
2937                                             map->num_stripes,
2938                                             &last_stripe);
2939
2940                                 for (j = 0; j < map->num_stripes; j++) {
2941                                         u32 test;
2942
2943                                         div_u64_rem(stripe_nr_end - 1 - j,
2944                                                     map->num_stripes, &test);
2945                                         if (test == stripe_index)
2946                                                 break;
2947                                 }
2948                                 stripes = stripe_nr_end - 1 - j;
2949                                 do_div(stripes, map->num_stripes);
2950                                 multi->stripes[i].length = map->stripe_len *
2951                                         (stripes - stripe_nr + 1);
2952
2953                                 if (i == 0) {
2954                                         multi->stripes[i].length -=
2955                                                 stripe_offset;
2956                                         stripe_offset = 0;
2957                                 }
2958                                 if (stripe_index == last_stripe)
2959                                         multi->stripes[i].length -=
2960                                                 stripe_end_offset;
2961                         } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
2962                                 u64 stripes;
2963                                 int j;
2964                                 int factor = map->num_stripes /
2965                                              map->sub_stripes;
2966                                 u32 last_stripe = 0;
2967
2968                                 div_u64_rem(stripe_nr_end - 1,
2969                                             factor, &last_stripe);
2970                                 last_stripe *= map->sub_stripes;
2971
2972                                 for (j = 0; j < factor; j++) {
2973                                         u32 test;
2974
2975                                         div_u64_rem(stripe_nr_end - 1 - j,
2976                                                     factor, &test);
2977
2978                                         if (test ==
2979                                             stripe_index / map->sub_stripes)
2980                                                 break;
2981                                 }
2982                                 stripes = stripe_nr_end - 1 - j;
2983                                 do_div(stripes, factor);
2984                                 multi->stripes[i].length = map->stripe_len *
2985                                         (stripes - stripe_nr + 1);
2986
2987                                 if (i < map->sub_stripes) {
2988                                         multi->stripes[i].length -=
2989                                                 stripe_offset;
2990                                         if (i == map->sub_stripes - 1)
2991                                                 stripe_offset = 0;
2992                                 }
2993                                 if (stripe_index >= last_stripe &&
2994                                     stripe_index <= (last_stripe +
2995                                                      map->sub_stripes - 1)) {
2996                                         multi->stripes[i].length -=
2997                                                 stripe_end_offset;
2998                                 }
2999                         } else
3000                                 multi->stripes[i].length = *length;
3001
3002                         stripe_index++;
3003                         if (stripe_index == map->num_stripes) {
3004                                 /* This could only happen for RAID0/10 */
3005                                 stripe_index = 0;
3006                                 stripe_nr++;
3007                         }
3008                 }
3009         } else {
3010                 for (i = 0; i < num_stripes; i++) {
3011                         multi->stripes[i].physical =
3012                                 map->stripes[stripe_index].physical +
3013                                 stripe_offset +
3014                                 stripe_nr * map->stripe_len;
3015                         multi->stripes[i].dev =
3016                                 map->stripes[stripe_index].dev;
3017                         stripe_index++;
3018                 }
3019         }
3020         if (multi_ret) {
3021                 *multi_ret = multi;
3022                 multi->num_stripes = num_stripes;
3023                 multi->max_errors = max_errors;
3024         }
3025 out:
3026         free_extent_map(em);
3027         return 0;
3028 }
3029
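/*
 * Minimal usage sketch for btrfs_map_block() (illustrative only, error
 * handling elided; btrfs_map_bio() below does the real thing):
 *
 *      struct btrfs_multi_bio *multi = NULL;
 *      u64 len = length;
 *
 *      btrfs_map_block(&fs_info->mapping_tree, READ, logical, &len,
 *                      &multi, 0);
 *      use multi->stripes[0].dev and multi->stripes[0].physical,
 *      then kfree(multi);
 */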
3030 int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
3031                       u64 logical, u64 *length,
3032                       struct btrfs_multi_bio **multi_ret, int mirror_num)
3033 {
3034         return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
3035                                  mirror_num);
3036 }
3037
3038 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
3039                      u64 chunk_start, u64 physical, u64 devid,
3040                      u64 **logical, int *naddrs, int *stripe_len)
3041 {
3042         struct extent_map_tree *em_tree = &map_tree->map_tree;
3043         struct extent_map *em;
3044         struct map_lookup *map;
3045         u64 *buf;
3046         u64 bytenr;
3047         u64 length;
3048         u64 stripe_nr;
3049         int i, j, nr = 0;
3050
3051         read_lock(&em_tree->lock);
3052         em = lookup_extent_mapping(em_tree, chunk_start, 1);
3053         read_unlock(&em_tree->lock);
3054
3055         BUG_ON(!em || em->start != chunk_start);
3056         map = (struct map_lookup *)em->bdev;
3057
3058         length = em->len;
3059         if (map->type & BTRFS_BLOCK_GROUP_RAID10)
3060                 do_div(length, map->num_stripes / map->sub_stripes);
3061         else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
3062                 do_div(length, map->num_stripes);
3063
3064         buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
3065         BUG_ON(!buf);
3066
3067         for (i = 0; i < map->num_stripes; i++) {
3068                 if (devid && map->stripes[i].dev->devid != devid)
3069                         continue;
3070                 if (map->stripes[i].physical > physical ||
3071                     map->stripes[i].physical + length <= physical)
3072                         continue;
3073
3074                 stripe_nr = physical - map->stripes[i].physical;
3075                 do_div(stripe_nr, map->stripe_len);
3076
3077                 if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3078                         stripe_nr = stripe_nr * map->num_stripes + i;
3079                         do_div(stripe_nr, map->sub_stripes);
3080                 } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3081                         stripe_nr = stripe_nr * map->num_stripes + i;
3082                 }
3083                 bytenr = chunk_start + stripe_nr * map->stripe_len;
3084                 WARN_ON(nr >= map->num_stripes);
3085                 for (j = 0; j < nr; j++) {
3086                         if (buf[j] == bytenr)
3087                                 break;
3088                 }
3089                 if (j == nr) {
3090                         WARN_ON(nr >= map->num_stripes);
3091                         buf[nr++] = bytenr;
3092                 }
3093         }
3094
3095         *logical = buf;
3096         *naddrs = nr;
3097         *stripe_len = map->stripe_len;
3098
3099         free_extent_map(em);
3100         return 0;
3101 }
3102
3103 static void end_bio_multi_stripe(struct bio *bio, int err)
3104 {
3105         struct btrfs_multi_bio *multi = bio->bi_private;
3106         int is_orig_bio = 0;
3107
3108         if (err)
3109                 atomic_inc(&multi->error);
3110
3111         if (bio == multi->orig_bio)
3112                 is_orig_bio = 1;
3113
3114         if (atomic_dec_and_test(&multi->stripes_pending)) {
3115                 if (!is_orig_bio) {
3116                         bio_put(bio);
3117                         bio = multi->orig_bio;
3118                 }
3119                 bio->bi_private = multi->private;
3120                 bio->bi_end_io = multi->end_io;
3121                 /* only send an error to the higher layers if it is
3122                  * beyond the tolerance of the multi-bio
3123                  */
3124                 if (atomic_read(&multi->error) > multi->max_errors) {
3125                         err = -EIO;
3126                 } else if (err) {
3127                         /*
3128                          * this bio is actually up to date; we didn't
3129                          * go over the max number of errors
3130                          */
3131                         set_bit(BIO_UPTODATE, &bio->bi_flags);
3132                         err = 0;
3133                 }
3134                 kfree(multi);
3135
3136                 bio_endio(bio, err);
3137         } else if (!is_orig_bio) {
3138                 bio_put(bio);
3139         }
3140 }
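/*
 * Tolerance sketch (assumed numbers): a RAID1 write fans out to one bio
 * per mirror (see btrfs_map_bio() below) and max_errors is typically 1,
 * so a single failed mirror still completes the original bio with
 * BIO_UPTODATE set and err == 0; only when the error count exceeds
 * max_errors does -EIO reach the higher layers.
 */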
3141
3142 struct async_sched {
3143         struct bio *bio;
3144         int rw;
3145         struct btrfs_fs_info *info;
3146         struct btrfs_work work;
3147 };
3148
3149 /*
3150  * see run_scheduled_bios for a description of why bios are collected for
3151  * async submit.
3152  *
3153  * This will add one bio to the pending list for a device and make sure
3154  * the work struct is scheduled.
3155  */
3156 static noinline int schedule_bio(struct btrfs_root *root,
3157                                  struct btrfs_device *device,
3158                                  int rw, struct bio *bio)
3159 {
3160         int should_queue = 1;
3161         struct btrfs_pending_bios *pending_bios;
3162
3163         /* don't bother with additional async steps for reads, right now */
3164         if (!(rw & REQ_WRITE)) {
3165                 bio_get(bio);
3166                 submit_bio(rw, bio);
3167                 bio_put(bio);
3168                 return 0;
3169         }
3170
3171         /*
3172          * nr_async_bios allows us to reliably return congestion to the
3173          * higher layers.  Otherwise, the async bio makes it appear we have
3174          * made progress against dirty pages when we've really just put it
3175          * on a queue for later
3176          */
3177         atomic_inc(&root->fs_info->nr_async_bios);
3178         WARN_ON(bio->bi_next);
3179         bio->bi_next = NULL;
3180         bio->bi_rw |= rw;
3181
3182         spin_lock(&device->io_lock);
3183         if (bio->bi_rw & REQ_SYNC)
3184                 pending_bios = &device->pending_sync_bios;
3185         else
3186                 pending_bios = &device->pending_bios;
3187
3188         if (pending_bios->tail)
3189                 pending_bios->tail->bi_next = bio;
3190
3191         pending_bios->tail = bio;
3192         if (!pending_bios->head)
3193                 pending_bios->head = bio;
3194         if (device->running_pending)
3195                 should_queue = 0;
3196
3197         spin_unlock(&device->io_lock);
3198
3199         if (should_queue)
3200                 btrfs_queue_worker(&root->fs_info->submit_workers,
3201                                    &device->work);
3202         return 0;
3203 }
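/*
 * Usage note: btrfs_map_bio() below hands async writes to schedule_bio(),
 * e.g. schedule_bio(root, dev, rw, bio); the queued work is later run by
 * the submit_workers pool through pending_bios_fn(), which walks the two
 * per-device pending lists (the sync list is given preference).
 */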
3204
3205 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
3206                   int mirror_num, int async_submit)
3207 {
3208         struct btrfs_mapping_tree *map_tree;
3209         struct btrfs_device *dev;
3210         struct bio *first_bio = bio;
3211         u64 logical = (u64)bio->bi_sector << 9;
3212         u64 length = 0;
3213         u64 map_length;
3214         struct btrfs_multi_bio *multi = NULL;
3215         int ret;
3216         int dev_nr = 0;
3217         int total_devs = 1;
3218
3219         length = bio->bi_size;
3220         map_tree = &root->fs_info->mapping_tree;
3221         map_length = length;
3222
3223         ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi,
3224                               mirror_num);
3225         BUG_ON(ret);
3226
3227         total_devs = multi->num_stripes;
3228         if (map_length < length) {
3229                 printk(KERN_CRIT "mapping failed logical %llu bio len %llu "
3230                        "len %llu\n", (unsigned long long)logical,
3231                        (unsigned long long)length,
3232                        (unsigned long long)map_length);
3233                 BUG();
3234         }
3235         multi->end_io = first_bio->bi_end_io;
3236         multi->private = first_bio->bi_private;
3237         multi->orig_bio = first_bio;
3238         atomic_set(&multi->stripes_pending, multi->num_stripes);
3239
3240         while (dev_nr < total_devs) {
3241                 if (total_devs > 1) {
3242                         if (dev_nr < total_devs - 1) {
3243                                 bio = bio_clone(first_bio, GFP_NOFS);
3244                                 BUG_ON(!bio);
3245                         } else {
3246                                 bio = first_bio;
3247                         }
3248                         bio->bi_private = multi;
3249                         bio->bi_end_io = end_bio_multi_stripe;
3250                 }
3251                 bio->bi_sector = multi->stripes[dev_nr].physical >> 9;
3252                 dev = multi->stripes[dev_nr].dev;
3253                 if (dev && dev->bdev && (rw != WRITE || dev->writeable)) {
3254                         bio->bi_bdev = dev->bdev;
3255                         if (async_submit)
3256                                 schedule_bio(root, dev, rw, bio);
3257                         else
3258                                 submit_bio(rw, bio);
3259                 } else {
3260                         bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
3261                         bio->bi_sector = logical >> 9;
3262                         bio_endio(bio, -EIO);
3263                 }
3264                 dev_nr++;
3265         }
3266         if (total_devs == 1)
3267                 kfree(multi);
3268         return 0;
3269 }
3270
3271 struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
3272                                        u8 *uuid, u8 *fsid)
3273 {
3274         struct btrfs_device *device;
3275         struct btrfs_fs_devices *cur_devices;
3276
3277         cur_devices = root->fs_info->fs_devices;
3278         while (cur_devices) {
3279                 if (!fsid ||
3280                     !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
3281                         device = __find_device(&cur_devices->devices,
3282                                                devid, uuid);
3283                         if (device)
3284                                 return device;
3285                 }
3286                 cur_devices = cur_devices->seed;
3287         }
3288         return NULL;
3289 }
3290
3291 static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
3292                                             u64 devid, u8 *dev_uuid)
3293 {
3294         struct btrfs_device *device;
3295         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
3296
3297         device = kzalloc(sizeof(*device), GFP_NOFS);
3298         if (!device)
3299                 return NULL;
3300         list_add(&device->dev_list,
3301                  &fs_devices->devices);
3302         device->dev_root = root->fs_info->dev_root;
3303         device->devid = devid;
3304         device->work.func = pending_bios_fn;
3305         device->fs_devices = fs_devices;
3306         device->missing = 1;
3307         fs_devices->num_devices++;
3308         fs_devices->missing_devices++;
3309         spin_lock_init(&device->io_lock);
3310         INIT_LIST_HEAD(&device->dev_alloc_list);
3311         memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
3312         return device;
3313 }
3314
3315 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
3316                           struct extent_buffer *leaf,
3317                           struct btrfs_chunk *chunk)
3318 {
3319         struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
3320         struct map_lookup *map;
3321         struct extent_map *em;
3322         u64 logical;
3323         u64 length;
3324         u64 devid;
3325         u8 uuid[BTRFS_UUID_SIZE];
3326         int num_stripes;
3327         int ret;
3328         int i;
3329
3330         logical = key->offset;
3331         length = btrfs_chunk_length(leaf, chunk);
3332
3333         read_lock(&map_tree->map_tree.lock);
3334         em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
3335         read_unlock(&map_tree->map_tree.lock);
3336
3337         /* already mapped? */
3338         if (em && em->start <= logical && em->start + em->len > logical) {
3339                 free_extent_map(em);
3340                 return 0;
3341         } else if (em) {
3342                 free_extent_map(em);
3343         }
3344
3345         em = alloc_extent_map();
3346         if (!em)
3347                 return -ENOMEM;
3348         num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3349         map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
3350         if (!map) {
3351                 free_extent_map(em);
3352                 return -ENOMEM;
3353         }
3354
3355         em->bdev = (struct block_device *)map;
3356         em->start = logical;
3357         em->len = length;
3358         em->block_start = 0;
3359         em->block_len = em->len;
3360
3361         map->num_stripes = num_stripes;
3362         map->io_width = btrfs_chunk_io_width(leaf, chunk);
3363         map->io_align = btrfs_chunk_io_align(leaf, chunk);
3364         map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
3365         map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
3366         map->type = btrfs_chunk_type(leaf, chunk);
3367         map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
3368         for (i = 0; i < num_stripes; i++) {
3369                 map->stripes[i].physical =
3370                         btrfs_stripe_offset_nr(leaf, chunk, i);
3371                 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
3372                 read_extent_buffer(leaf, uuid, (unsigned long)
3373                                    btrfs_stripe_dev_uuid_nr(chunk, i),
3374                                    BTRFS_UUID_SIZE);
3375                 map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
3376                                                         NULL);
3377                 if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
3378                         kfree(map);
3379                         free_extent_map(em);
3380                         return -EIO;
3381                 }
3382                 if (!map->stripes[i].dev) {
3383                         map->stripes[i].dev =
3384                                 add_missing_dev(root, devid, uuid);
3385                         if (!map->stripes[i].dev) {
3386                                 kfree(map);
3387                                 free_extent_map(em);
3388                                 return -EIO;
3389                         }
3390                 }
3391                 map->stripes[i].dev->in_fs_metadata = 1;
3392         }
3393
3394         write_lock(&map_tree->map_tree.lock);
3395         ret = add_extent_mapping(&map_tree->map_tree, em);
3396         write_unlock(&map_tree->map_tree.lock);
3397         BUG_ON(ret);
3398         free_extent_map(em);
3399
3400         return 0;
3401 }
3402
3403 static int fill_device_from_item(struct extent_buffer *leaf,
3404                                  struct btrfs_dev_item *dev_item,
3405                                  struct btrfs_device *device)
3406 {
3407         unsigned long ptr;
3408
3409         device->devid = btrfs_device_id(leaf, dev_item);
3410         device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
3411         device->total_bytes = device->disk_total_bytes;
3412         device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
3413         device->type = btrfs_device_type(leaf, dev_item);
3414         device->io_align = btrfs_device_io_align(leaf, dev_item);
3415         device->io_width = btrfs_device_io_width(leaf, dev_item);
3416         device->sector_size = btrfs_device_sector_size(leaf, dev_item);
3417
3418         ptr = (unsigned long)btrfs_device_uuid(dev_item);
3419         read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
3420
3421         return 0;
3422 }
3423
3424 static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
3425 {
3426         struct btrfs_fs_devices *fs_devices;
3427         int ret;
3428
3429         mutex_lock(&uuid_mutex);
3430
3431         fs_devices = root->fs_info->fs_devices->seed;
3432         while (fs_devices) {
3433                 if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
3434                         ret = 0;
3435                         goto out;
3436                 }
3437                 fs_devices = fs_devices->seed;
3438         }
3439
3440         fs_devices = find_fsid(fsid);
3441         if (!fs_devices) {
3442                 ret = -ENOENT;
3443                 goto out;
3444         }
3445
3446         fs_devices = clone_fs_devices(fs_devices);
3447         if (IS_ERR(fs_devices)) {
3448                 ret = PTR_ERR(fs_devices);
3449                 goto out;
3450         }
3451
3452         ret = __btrfs_open_devices(fs_devices, FMODE_READ,
3453                                    root->fs_info->bdev_holder);
3454         if (ret)
3455                 goto out;
3456
3457         if (!fs_devices->seeding) {
3458                 __btrfs_close_devices(fs_devices);
3459                 free_fs_devices(fs_devices);
3460                 ret = -EINVAL;
3461                 goto out;
3462         }
3463
3464         fs_devices->seed = root->fs_info->fs_devices->seed;
3465         root->fs_info->fs_devices->seed = fs_devices;
3466 out:
3467         mutex_unlock(&uuid_mutex);
3468         return ret;
3469 }
3470
3471 static int read_one_dev(struct btrfs_root *root,
3472                         struct extent_buffer *leaf,
3473                         struct btrfs_dev_item *dev_item)
3474 {
3475         struct btrfs_device *device;
3476         u64 devid;
3477         int ret;
3478         u8 fs_uuid[BTRFS_UUID_SIZE];
3479         u8 dev_uuid[BTRFS_UUID_SIZE];
3480
3481         devid = btrfs_device_id(leaf, dev_item);
3482         read_extent_buffer(leaf, dev_uuid,
3483                            (unsigned long)btrfs_device_uuid(dev_item),
3484                            BTRFS_UUID_SIZE);
3485         read_extent_buffer(leaf, fs_uuid,
3486                            (unsigned long)btrfs_device_fsid(dev_item),
3487                            BTRFS_UUID_SIZE);
3488
3489         if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
3490                 ret = open_seed_devices(root, fs_uuid);
3491                 if (ret && !btrfs_test_opt(root, DEGRADED))
3492                         return ret;
3493         }
3494
3495         device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
3496         if (!device || !device->bdev) {
3497                 if (!btrfs_test_opt(root, DEGRADED))
3498                         return -EIO;
3499
3500                 if (!device) {
3501                         printk(KERN_WARNING "warning devid %llu missing\n",
3502                                (unsigned long long)devid);
3503                         device = add_missing_dev(root, devid, dev_uuid);
3504                         if (!device)
3505                                 return -ENOMEM;
3506                 } else if (!device->missing) {
3507                         /*
3508                          * this happens when a device that was properly set up
3509                          * in the device info lists suddenly goes bad.
3510                          * device->bdev is NULL, and so we have to set
3511                          * device->missing to one here
3512                          */
3513                         root->fs_info->fs_devices->missing_devices++;
3514                         device->missing = 1;
3515                 }
3516         }
3517
3518         if (device->fs_devices != root->fs_info->fs_devices) {
3519                 BUG_ON(device->writeable);
3520                 if (device->generation !=
3521                     btrfs_device_generation(leaf, dev_item))
3522                         return -EINVAL;
3523         }
3524
3525         fill_device_from_item(leaf, dev_item, device);
3526         device->dev_root = root->fs_info->dev_root;
3527         device->in_fs_metadata = 1;
3528         if (device->writeable)
3529                 device->fs_devices->total_rw_bytes += device->total_bytes;
3530         ret = 0;
3531         return ret;
3532 }
3533
3534 int btrfs_read_sys_array(struct btrfs_root *root)
3535 {
3536         struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
3537         struct extent_buffer *sb;
3538         struct btrfs_disk_key *disk_key;
3539         struct btrfs_chunk *chunk;
3540         u8 *ptr;
3541         unsigned long sb_ptr;
3542         int ret = 0;
3543         u32 num_stripes;
3544         u32 array_size;
3545         u32 len = 0;
3546         u32 cur;
3547         struct btrfs_key key;
3548
3549         sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
3550                                           BTRFS_SUPER_INFO_SIZE);
3551         if (!sb)
3552                 return -ENOMEM;
3553         btrfs_set_buffer_uptodate(sb);
3554         btrfs_set_buffer_lockdep_class(sb, 0);
3555
3556         write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
3557         array_size = btrfs_super_sys_array_size(super_copy);
3558
3559         ptr = super_copy->sys_chunk_array;
3560         sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
3561         cur = 0;
3562
3563         while (cur < array_size) {
3564                 disk_key = (struct btrfs_disk_key *)ptr;
3565                 btrfs_disk_key_to_cpu(&key, disk_key);
3566
3567                 len = sizeof(*disk_key); ptr += len;
3568                 sb_ptr += len;
3569                 cur += len;
3570
3571                 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
3572                         chunk = (struct btrfs_chunk *)sb_ptr;
3573                         ret = read_one_chunk(root, &key, sb, chunk);
3574                         if (ret)
3575                                 break;
3576                         num_stripes = btrfs_chunk_num_stripes(sb, chunk);
3577                         len = btrfs_chunk_item_size(num_stripes);
3578                 } else {
3579                         ret = -EIO;
3580                         break;
3581                 }
3582                 ptr += len;
3583                 sb_ptr += len;
3584                 cur += len;
3585         }
3586         free_extent_buffer(sb);
3587         return ret;
3588 }
3589
3590 int btrfs_read_chunk_tree(struct btrfs_root *root)
3591 {
3592         struct btrfs_path *path;
3593         struct extent_buffer *leaf;
3594         struct btrfs_key key;
3595         struct btrfs_key found_key;
3596         int ret;
3597         int slot;
3598
3599         root = root->fs_info->chunk_root;
3600
3601         path = btrfs_alloc_path();
3602         if (!path)
3603                 return -ENOMEM;
3604
3605         /* first we search for all of the device items, and then we
3606          * read in all of the chunk items.  This way we can create chunk
3607          * mappings that reference all of the devices that are found
3608          */
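        /*
         * Concretely: the first pass walks keys with objectid
         * BTRFS_DEV_ITEMS_OBJECTID and hands each BTRFS_DEV_ITEM_KEY to
         * read_one_dev(); the "again" label then restarts the search at
         * objectid 0 so every BTRFS_CHUNK_ITEM_KEY can be passed to
         * read_one_chunk() with its devices already in place.
         */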
3609         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
3610         key.offset = 0;
3611         key.type = 0;
3612 again:
3613         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3614         if (ret < 0)
3615                 goto error;
3616         while (1) {
3617                 leaf = path->nodes[0];
3618                 slot = path->slots[0];
3619                 if (slot >= btrfs_header_nritems(leaf)) {
3620                         ret = btrfs_next_leaf(root, path);
3621                         if (ret == 0)
3622                                 continue;
3623                         if (ret < 0)
3624                                 goto error;
3625                         break;
3626                 }
3627                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3628                 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
3629                         if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
3630                                 break;
3631                         if (found_key.type == BTRFS_DEV_ITEM_KEY) {
3632                                 struct btrfs_dev_item *dev_item;
3633                                 dev_item = btrfs_item_ptr(leaf, slot,
3634                                                   struct btrfs_dev_item);
3635                                 ret = read_one_dev(root, leaf, dev_item);
3636                                 if (ret)
3637                                         goto error;
3638                         }
3639                 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
3640                         struct btrfs_chunk *chunk;
3641                         chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3642                         ret = read_one_chunk(root, &found_key, leaf, chunk);
3643                         if (ret)
3644                                 goto error;
3645                 }
3646                 path->slots[0]++;
3647         }
3648         if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
3649                 key.objectid = 0;
3650                 btrfs_release_path(path);
3651                 goto again;
3652         }
3653         ret = 0;
3654 error:
3655         btrfs_free_path(path);
3656         return ret;
3657 }